def ws_restart_mod(self):
    """
    Places all module statuses into queue when websocket handler is
    started/restarted
    """

    # This method is only called when websocket is operating

    # Cycle through all lanes, skipping any lane that is not operational
    for lane_rec in self.stat_ln:
        if lane_rec['status']:
            continue

        # Get stat_lane view from CouchDB modconfig database
        view_out, stat_cdb, _http = dbase.cdb_request(
            cdb_cmd='get_view',
            cdb_name='modconfig',
            cdb_doc='stat_lane{0}'.format(lane_rec['addr_ln']),
            logfile=logfile)
        if stat_cdb:
            continue

        # Place each module status record into MPQ_WS
        for mod_view in view_out:
            try:
                MPQ_WS.put([
                    'module',
                    lane_rec['addr_ln'],
                    mod_view['key'],
                    mod_view['value']['status']
                ])
            except queue.Full:
                log = 'Can not place item in module status queue, queue is full.'
                logger.exception(log)
def poll_stop(post_data: dict):
    """
    POST handler

    :param post_data: dict
    :return data_cdb_out: post_data if not STAT_LVL['op']
    :return data_cdb_out: CouchDB data if STAT_LVL['op']
    """
    # Lane address arrives as a string from the web form
    lane_addr = int(post_data['button_lane'])

    # Issue request to stop polling on this lane
    MPQ_CMD0.put_nowait({'command': 1, 'args': [lane_addr], 'data': []})

    # Fetch the lane_status document from the CouchDB lanes database
    doc_out, stat_cdb, _http = dbase.cdb_request(
        cdb_cmd='get_doc',
        cdb_name='lanes',
        cdb_doc='lane{0}_status'.format(lane_addr),
        logfile=logfile,
    )

    # On CouchDB failure, echo the posted data back to the caller
    if stat_cdb:
        logger.warning('Could not update web GUI from CouchDB '
                       'due to CouchDB error.')
        return post_data

    return doc_out
def on_close(
        self,
        reason
):
    """
    Processes to execute when websocket closes

    Terminates the websocket listener process (if still running), clears
    the stored websocket handler PID in CouchDB, and reports the
    websocket as stopped on the status queue.
    """
    # Ensure that multiprocess listener process is terminated
    listener = self.mp_ws_listener
    if listener is not None and listener.is_alive():
        # Ask the listener to stop, give it a moment, then reap it
        self.mp_ws_listener_stop.put_nowait(None)
        time.sleep(0.1)
        if listener.is_alive():
            listener.join()

    # Update base_pid document in CouchDB config database
    _doc, stat_cdb, _http = dbase.cdb_request(
        cdb_cmd='upd_doc',
        cdb_name='config',
        cdb_doc='base_pid',
        data_cdb_in={'websocket_handler': False},
        logfile=logfile
    )
    if stat_cdb:
        logger.warning('Could not update websocket PID values in process '
                       'status document due to CouchDB error.')

    MPQ_STAT.put_nowait([
        'websocket',
        False
    ])
def location(post_data: dict):
    """
    POST handler

    Updates a module's location field in the CouchDB modconfig database
    and returns the refreshed module document.

    :param post_data: dict
    :return data_cdb_out: post_data if not STAT_LVL['op']
    :return data_cdb_out: CouchDB data if STAT_LVL['op']
    """

    # Convert posted data to usable form
    mod_uid = post_data['mod_uid']

    # This variable gets overwritten on success
    data_cdb_out = post_data

    # BUG FIX: original used "post_data['loc'] is ''", an identity
    # comparison with a string literal, which is unreliable and raises
    # SyntaxWarning on CPython >= 3.8; use an equality check instead.
    if post_data['loc'] == '':
        data_cdb_in = {'loc': 'NO LOCATION SET'}
    else:
        data_cdb_in = {'loc': post_data['loc']}

    # Update module document in CouchDB modconfig database
    data0_cdb_out, stat0_cdb, http0_cdb = dbase.cdb_request(
        cdb_cmd='upd_doc',
        cdb_name='modconfig',
        cdb_doc=mod_uid,
        data_cdb_in=data_cdb_in,
        logfile=logfile)

    # Get module document from CouchDB modconfig database
    if not stat0_cdb:
        data1_cdb_out, stat1_cdb, http1_cdb = dbase.cdb_request(
            cdb_cmd='get_doc',
            cdb_name='modconfig',
            cdb_doc=mod_uid,
            logfile=logfile,
        )
        if not stat1_cdb:
            data_cdb_out = data1_cdb_out
    else:
        log = 'Could not update web GUI from CouchDB due to CouchDB error.'
        logger.warning(log)

    return data_cdb_out
def webserver(
        self
):
    """
    Call main and websocket applications via multiprocessing

    Launches both listeners as separate processes and records their PIDs
    in the CouchDB base_pid document.
    """
    pid_map = {}

    # Start and run web server main listener
    try:
        self.mp_app = multiprocessing.Process(
            target=self.application,
            args=()
        )
        self.mp_app.start()
        logger.info('Webserver application started.')
        pid_map['webserver_main'] = self.mp_app.pid
    except multiprocessing.ProcessError:
        logger.exception('Can not start main webserver due to multiprocessing error.')

    # Start and run web server websocket listener
    try:
        self.mp_ws = multiprocessing.Process(
            target=self.websocket,
            args=()
        )
        self.mp_ws.start()
        logger.info('Websocket application started.')
        pid_map['webserver_websocket'] = self.mp_ws.pid
    except multiprocessing.ProcessError:
        logger.exception('Can not start websocket due to multiprocessing error.')

    # Update base_pid document in CouchDB config database
    _doc, stat_cdb, _http = dbase.cdb_request(
        cdb_cmd='upd_doc',
        cdb_name='config',
        cdb_doc='base_pid',
        data_cdb_in=pid_map,
        logfile=logfile
    )
    if stat_cdb:
        logger.warning('Could not set PID values in process status document due to CouchDB error.')
def store():
    """
    Stores core time in the CouchDB config database.

    Returns nothing: failures are reported via the logger and the
    MPQ_ACT / MPQ_STAT queues.

    NOTE(review): the original docstring claimed stat_time / stat_cdb
    return values, but no return statement exists — confirm callers do
    not expect a return value.
    """
    # stat_time is never modified below, so the final "if not stat_time"
    # check always fires when STAT_LVL['op'] is falsy — presumably 0;
    # TODO confirm against the STAT_LVL table.
    stat_time = STAT_LVL['op']

    # Update time document in CouchDB config database
    data_cdb_out, stat_cdb, code_cdb = dbase.cdb_request(
        cdb_cmd='upd_doc',
        cdb_name='config',
        cdb_doc='time',
        data_cdb_in={'time': str(time.time())},
        logfile=logfile
    )
    if stat_cdb:
        log = 'Could not save time due to CouchDB document update error.'
        logger.warning(log)
        MPQ_ACT.put_nowait([
            datetime.now().isoformat(' '),
            'WARNING',
            log
        ])
        # Downgrade CouchDB status to operational-error
        MPQ_STAT.put_nowait([
            'base',
            [
                'couchdb',
                STAT_LVL['op_err']
            ]
        ])

    # NOTE: completion is logged even when the CouchDB update failed
    log = 'Time storage complete.'
    logger.info(log)
    MPQ_ACT.put_nowait([
        datetime.now().isoformat(' '),
        'INFO',
        log
    ])

    if not stat_time:
        MPQ_STAT.put_nowait([
            'base',
            [
                'tasks',
                stat_time
            ]
        ])
def base(self, mpq_record: list):
    """
    Places base process statuses into websocket and SNMP queues if
    status has changed.

    mpq_record[0] = process
    mpq_record[1] = process status

    :param mpq_record: list
    """
    process, process_stat = mpq_record[0], mpq_record[1]

    # Nothing to do unless the process status actually changed
    if process_stat == self.stat_base[process]:
        return

    self.stat_base[process] = process_stat

    # Persist the new status to the base_status document in the CouchDB
    # config database
    _doc, stat_cdb, _http = dbase.cdb_request(
        cdb_cmd='upd_doc',
        cdb_name='config',
        cdb_doc='base_status',
        data_cdb_in={process: self.stat_base[process]},
        logfile=logfile)
    if stat_cdb == STAT_LVL['crit']:
        logger.critical('Failed to update base_status document in CouchDB config database.')

    # Only place into MPQs if processes/threads are functioning.
    try:
        if self.pid_websocket:
            MPQ_WS.put_nowait(
                ['base', process, self.stat_base[process]])

        # Filter out SNMP-related messages, SNMP can not report on itself.
        if process not in ('snmp_agent', 'snmp_notify'):
            if self.tid_snmp_agent:
                self.MPQ_SNMPA2.put_nowait(
                    [process, self.stat_base[process]])
            if self.tid_snmp_notify:
                self.MPQ_SNMPN2.put_nowait(
                    [process, self.stat_base[process]])
    except queue.Full:
        logger.exception('Can not place item in base status queue, queue is full.')
def setup(self): """ Setups USB connection to interface board """ # Establish a new persistent USB handle with the interface device. # # Returns False if no error, negative integer for error type. err_iface = self.obj_janus.initialize() if not err_iface: # Get lane call produces error if four-lane interface is not connected. # This is the only way to programmatically differentiate between single- and # four-lane interface boards. # # First return value is 0 through 3 if lane is set, negative integer otherwise. # Second return value is False if no error, negative integer for error type. addr_ln, err_iface = self.obj_janus.i2c_get_lane() # If no error returned, set to four-lane interface if not err_iface: self.addr_ln = addr_ln log = 'Four-lane Janus Interface detected.' logger.info(log) MPQ_ACT.put_nowait( [datetime.now().isoformat(' '), 'INFO', log]) print(log) else: self.err_iface = 4 log = 'Could not detect Four-lane Janus Interface.' logger.warning(log) MPQ_ACT.put_nowait( [datetime.now().isoformat(' '), 'INFO', log]) print(log) # Update core document in CouchDB config database data_cdb_out, stat_cdb, http_cdb = dbase.cdb_request( cdb_cmd='upd_doc', cdb_name='config', cdb_doc='core', data_cdb_in={'interface': self.err_iface}, logfile=logfile) if stat_cdb: log = 'Could not update Janus Interface type in CouchDB.' logger.warning(log) MPQ_ACT.put_nowait([datetime.now().isoformat(' '), 'WARNING', log])
def reset(post_data: dict): """ POST handler :param post_data: dict :return None """ # Convert posted data to usable form addr_ln = int(post_data['lane_address']) data_cdb_out = {} # Issue request to reset lane and execute lane setup MPQ_CMD3.put([addr_ln]) while True: if not MPQ_LN_SETUP.empty(): iface_ln, stat_ch = MPQ_LN_SETUP.get() if (iface_ln == addr_ln) and (stat_ch == 0): # Get lane_status document from CouchDB lanes database data_cdb_out, stat_cdb, http_cdb = dbase.cdb_request( cdb_cmd='get_doc', cdb_name='lanes', cdb_doc='lane{0}_status'.format(addr_ln), logfile=logfile, ) # If CouchDB get has not failed, continue with module upload if not stat_cdb: data_cdb_out['fail'] = False else: data_cdb_out['fail'] = True else: data_cdb_out['fail'] = True break print('Pre manual lane reset: {0}'.format(AUTO_LNRST[addr_ln])) AUTO_LNRST[addr_ln] = 0 print('Post manual lane reset: {0}'.format(AUTO_LNRST[addr_ln])) return data_cdb_out
def down():
    """
    Determines JanusESS down time

    Compares the stored shutdown timestamp from the CouchDB config
    database against the current system clock.

    :return time_down: int
    """
    # Get time document from CouchDB config database
    doc_out, stat_cdb, _code = dbase.cdb_request(
        cdb_cmd='get_doc',
        cdb_name='config',
        cdb_doc='time',
        logfile=logfile
    )

    time_down = 0
    if stat_cdb:
        log = 'Could not determine JanusESS down time due to CouchDB document retrieval error.'
        logger.warning(log)
        MPQ_ACT.put_nowait([
            datetime.now().isoformat(' '),
            'WARNING',
            log
        ])
    elif float(time.time()) > float(doc_out['time']):
        # System time is valid: compute JanusESS down time in minutes
        time_down = round(
            (int(float(time.time())) - int(float(doc_out['time']))) / 60,
            0
        )
        log = 'JanusESS downtime was {0} minutes'.format(time_down)
        logger.info(log)
        MPQ_ACT.put_nowait([
            datetime.now().isoformat(' '),
            'INFO',
            log
        ])

    return time_down
def listener(
        self
):
    """
    Listener to open when a page websocket connection is established

    Spawns the websocket handler process and records its PID (or False
    on failure) in the CouchDB base_pid document.
    """
    try:
        self.mp_ws_listener = multiprocessing.Process(
            target=self.websocket,
            args=()
        )
        self.mp_ws_listener.start()
        pid_handler = self.mp_ws_listener.pid
        logger.info('Flask websocket handler opened, pid: {0}.'.format(pid_handler))
        MPQ_STAT.put_nowait([
            'websocket',
            self.mp_ws_listener.pid
        ])
    except multiprocessing.ProcessError:
        pid_handler = False
        logger.exception('Can not dispatch heartbeat channel encoded messages '
                         'due to multiprocessing error.')

    # Record the handler PID (or False) in the base_pid document
    _doc, stat_cdb, _http = dbase.cdb_request(
        cdb_cmd='upd_doc',
        cdb_name='config',
        cdb_doc='base_pid',
        data_cdb_in={'websocket_handler': pid_handler},
        logfile=logfile
    )
    if stat_cdb:
        logger.warning('Could not update websocket PID values in process status document due to CouchDB error.')
def effect(
        self,
        led_effect: str,
        mod_uid: str,
        addr_ln: int,
        addr_mod: int,
):
    """
    Queues an LED effect command for a module (the requested
    led_effect setting is applied to the module's LED register).

    :param led_effect: string
    :param mod_uid: string
    :param addr_ln: int
    :param addr_mod: int
    """
    # Look up the module document to resolve its memory-map version
    doc_out, stat_cdb, _http = dbase.cdb_request(
        cdb_cmd='get_doc',
        cdb_name='modconfig',
        cdb_doc=mod_uid,
        logfile=logfile,
        attempts=1)

    if stat_cdb:
        log = 'Could not complete module LED {0} process due to CouchDB error.'.format(
            led_effect)
        logger.warning(log)
        MPQ_ACT.put_nowait(
            [datetime.now().isoformat(' '), 'CRITICAL', log])
    elif doc_out['status'] < STAT_LVL['crit']:
        # Resolve the LED register address for this module's memory map
        # and queue the LED command
        addr_mem = MMAP[doc_out['mem_map_ver']]['LED_ALL'][0]
        MPQ_CMD4.put([
            mod_uid,
            addr_ln,
            addr_mod,
            addr_mem,
            self.dict_led_settings[led_effect]
        ])
def snmp_agent_restart_mod(self):
    """
    Places all module statuses into snmp agent queue when snmp agent is
    started/restarted
    """

    # This method is only called when SNMP Agent is operating

    # Cycle through all lanes, skipping lanes that are not operational
    for lane_rec in self.stat_ln:
        if lane_rec['status']:
            continue

        # Get stat_lane view from CouchDB modconfig database
        view_out, stat_cdb, _http = dbase.cdb_request(
            cdb_cmd='get_view',
            cdb_name='modconfig',
            cdb_doc='stat_lane{0}'.format(lane_rec['addr_ln']),
            logfile=logfile)
        if stat_cdb:
            continue

        # Place each known module's status record into the SNMP agent queue
        for mod_view in view_out:
            if mod_view['key'] is None:
                continue
            try:
                self.MPQ_SNMPA4.put([
                    lane_rec['addr_ln'],
                    mod_view['key'],
                    mod_view['value']['mod_type'],
                    mod_view['value']['mod_ver'],
                    mod_view['value']['loc'],
                    mod_view['value']['status'],
                    mod_view['value']['num_sensors']
                ])
            except queue.Full:
                log = 'Can not place item in module status queue, queue is full.'
                logger.exception(log)
MPQ_STAT.put_nowait(['base', ['logging', STAT_LVL['op']]]) # Check to determine if CouchDB is operating dbase.cdb_check() dbase.mdb_check() # A simple check for corrupted primary CouchDB databases # This check is not a guarantee that databases are corruption-free # If any cannot be restored, then prevent JanusESS start db_list = ['config', 'lanes', 'modules', 'modconfig'] stat_cdb_dbases = dbase.recover(db_list=db_list) appdbase.compact() # Get log document from CouchDB config database data_cdb_out, stat_cdb, http_cdb = dbase.cdb_request(cdb_cmd='get_doc', cdb_name='config', cdb_doc='log', logfile=logfile) # Load custom log level from CouchDB into logger dictionary. # This must happen prior to any multiprocessing calls. # After multiprocessing calls must pass log level through queue to process. if not stat_cdb: for log_file in logs: if data_cdb_out[log_file] == 'DEBUG': logging.getLogger(log_file).setLevel(logging.DEBUG) elif data_cdb_out[log_file] == 'INFO': logging.getLogger(log_file).setLevel(logging.INFO) elif data_cdb_out[log_file] == 'ERROR': logging.getLogger(log_file).setLevel(logging.ERROR) elif data_cdb_out[log_file] == 'WARNING': logging.getLogger(log_file).setLevel(logging.WARNING)
def module(self, mpq_record: list):
    """
    Places module statues into websocket and SNMP queues if status has
    changed.

    mpq_record[0] = module id
    mpq_record[1] = lane address
    mpq_record[2] = module address
    mpq_record[3] = module status

    :param mpq_record: list
    """
    uid_mod = mpq_record[0]
    addr_ln = mpq_record[1]
    addr_mod = mpq_record[2]
    stat_mod = mpq_record[3]

    # Only process for valid module ids, a module with '0000' failed I2C write
    # during setup
    #
    # NOTE: Do not change '0000' here without also changing application.setup.module.setup()
    if (uid_mod != '0000000000000000000000000000000000000000000000000000000000000000') and \
            (uid_mod is not None):
        print('HEARTBEAT MODULE: {0}'.format(mpq_record))

        # Get stat_lane view from CouchDB modconfig database
        # This determines if lane status should be downgraded if all modules error.
        data0_cdb_out, stat0_cdb, http0_cdb = dbase.cdb_request(
            cdb_cmd='get_view',
            cdb_name='modconfig',
            cdb_doc='stat_lane{0}'.format(addr_ln),
            logfile=logfile)

        ln_reset_flag = False
        if not stat0_cdb and not isinstance(data0_cdb_out, int):

            # Cycle through modules until module is located
            for dict_mod in data0_cdb_out:
                if (uid_mod == dict_mod['id']) and (addr_mod == dict_mod['key']):
                    type_mod = dict_mod['value']['mod_type']
                    ver_mod = dict_mod['value']['mod_ver']
                    loc_mod = dict_mod['value']['loc']
                    num_sensors = dict_mod['value']['num_sensors']

                    # Initialize module database entry
                    mod_errno = dict_mod['value']['errno']
                    data_cdb_in = {'status': stat_mod}

                    # If module is operational, ensure module config errno field is reset
                    if stat_mod == STAT_LVL['op']:
                        mod_errno = 0
                        data_cdb_in['errno'] = mod_errno

                    # Modules do not report statuses above STAT_LVL['op_err'], this allows
                    # for heartbeat to initiate module recover processes
                    #
                    # If module reports an operational error,
                    # increment module config errno field
                    if stat_mod == STAT_LVL['op_err']:
                        mod_errno += 1
                        data_cdb_in['errno'] = mod_errno
                        log = 'Lane {0} module {1} id {2} error incremented to {3}'.\
                            format(addr_ln, addr_mod, uid_mod, mod_errno)
                        print(log)
                        logger.warning(log)

                        # If three attempts are made to communicate with module, set status to
                        # STAT_LVL['crit']
                        if mod_errno == 3:
                            print('Pre Auto lane reset var: {0}'.format(
                                AUTO_LNRST))
                            if AUTO_LNRST[addr_ln] <= 3:
                                AUTO_LNRST[addr_ln] += 1
                                ln_reset_flag = True
                            else:
                                ln_reset_flag = False
                            print('Post Auto lane reset var: {0}'.format(
                                AUTO_LNRST))
                            data_cdb_in['status'] = STAT_LVL['crit']
                            stat_mod = STAT_LVL['crit']
                            # TIDY: the first format call previously received a
                            # surplus uid_mod argument; str.format ignores extra
                            # positional args, so the message is unchanged.
                            log = 'Lane {0} module {1} '.format(addr_ln, addr_mod) +\
                                'id {0} error incremented to {1}, '.format(uid_mod, mod_errno) +\
                                'now considered failed.'
                            logger.critical(log)

                    # Update module document in CouchDB modconfig database.
                    # This document must be updated prior to any module
                    # recovery actions are executed.
                    data1_cdb_out, stat1_cdb, http1_cdb = dbase.cdb_request(
                        cdb_cmd='upd_doc',
                        cdb_name='modconfig',
                        cdb_doc=uid_mod,
                        data_cdb_in=data_cdb_in,
                        logfile=logfile)

                    # Execute lane/module recovery routine if module failed
                    #
                    # TODO: When designed, add module re-setup function here
                    if (not stat1_cdb) and (mod_errno == 3) and \
                            (stat_mod == STAT_LVL['crit']):
                        print('Reset lane called.')
                        if ln_reset_flag and not FLAG_LNRST[addr_ln]:
                            MPQ_CMD3.put_nowait([addr_ln])

                    # Only place into MPQs if processes/threads are functioning.
                    try:
                        if self.pid_websocket:
                            MPQ_WS.put(
                                ['module', addr_ln, addr_mod, stat_mod])
                        if addr_mod is not None:
                            if self.tid_snmp_agent:
                                self.MPQ_SNMPA4.put([
                                    addr_ln, addr_mod, type_mod, ver_mod,
                                    loc_mod, stat_mod, num_sensors])
                            if self.tid_snmp_notify:
                                self.MPQ_SNMPN4.put([
                                    addr_ln, addr_mod, type_mod, ver_mod,
                                    loc_mod, stat_mod, num_sensors])
                    except queue.Full:
                        log = 'Can not place item in module status queue, queue is full.'
                        logger.exception(log)
                    break

            # Cycle through modules to determine if lane is inop
            stat_ln = STAT_LVL['crit']
            for dict_mod in data0_cdb_out:
                if dict_mod['value']['status'] <= STAT_LVL['op_err']:
                    stat_ln = STAT_LVL['op']
                    break

            # A non-zero (non-operational) lane status is recorded and reported
            if stat_ln:
                self.stat_ln[addr_ln]['status'] = stat_ln
                try:
                    if self.pid_websocket:
                        MPQ_WS.put([
                            'lane',
                            addr_ln,
                            self.stat_ln[addr_ln]['status'],
                            self.stat_ln[addr_ln]['last_module'],
                            self.stat_ln[addr_ln]['poll'],
                            self.stat_ln[addr_ln]['last_dtg'],
                            self.stat_ln[addr_ln]['setup_id']
                        ])
                    if self.tid_snmp_agent:
                        self.MPQ_SNMPA3.put([
                            addr_ln,
                            self.stat_ln[addr_ln]['status'],
                            self.stat_ln[addr_ln]['last_module'],
                            self.stat_ln[addr_ln]['poll'],
                            self.stat_ln[addr_ln]['last_dtg']
                        ])
                    if self.tid_snmp_notify:
                        self.MPQ_SNMPN3.put([
                            addr_ln,
                            self.stat_ln[addr_ln]['status'],
                            self.stat_ln[addr_ln]['last_module'],
                            self.stat_ln[addr_ln]['poll'],
                            self.stat_ln[addr_ln]['last_dtg']
                        ])
                except queue.Full:
                    log = 'Can not place item in lane status queue, queue is full.'
                    logger.exception(log)

        else:
            # BUG FIX: was MPQ_CMD3.put_no_wait(...), which is not a queue
            # method and raised AttributeError; the correct call is put_nowait.
            MPQ_CMD3.put_nowait([addr_ln])
            print('Reset lane called.')

    # A heartbeat record without a module id is reported to SNMP with
    # placeholder values
    if uid_mod is None:
        if self.tid_snmp_agent:
            self.MPQ_SNMPA4.put(
                [addr_ln, addr_mod, 'Null', 'Null', 'Null', stat_mod, 0])
def lane(self, mpq_record: list):
    """
    Places lane statues into websocket and SNMP queues if status has
    changed.

    mpq_record[0] = lane address
    mpq_record[1] = lane dictionary

    :param mpq_record: list
    """
    stat_change = False
    setup_change = False
    addr_ln = mpq_record[0]
    dict_ln = mpq_record[1]

    # Only process record if key values have changed
    for key in dict_ln.keys():
        if dict_ln[key] != self.stat_ln[addr_ln][key]:
            self.stat_ln[addr_ln][key] = dict_ln[key]
            stat_change = True
            if key == 'setup_id':
                setup_change = True

    # If any key value has changed, process record
    if stat_change:

        # Update lane document in CouchDB lanes database
        data_cdb_out, stat_cdb, http_cdb = dbase.cdb_request(
            cdb_cmd='upd_doc',
            cdb_name='lanes',
            cdb_doc='lane{0}_status'.format(addr_ln),
            data_cdb_in=self.stat_ln[addr_ln],
            logfile=logfile)
        if stat_cdb == STAT_LVL['crit']:
            log = 'Failed to update lane{0}_status document in CouchDB lanes database.'.\
                format(addr_ln)
            logger.critical(log)

        if setup_change:
            # Report the new lane status to the lane-setup result queue
            MPQ_LN_SETUP.put_nowait(
                [addr_ln, self.stat_ln[addr_ln]['status']])
            if self.stat_ln[addr_ln]['poll']:
                # Poll status is non-zero: flag poll data as not
                # configured — TODO confirm intended semantics of a
                # truthy 'poll' value here.
                MPQ_STAT.put_nowait(
                    ['base', ['poll_data', STAT_LVL['not_cfg']]])

        # Only place into MPQs if processes/threads are functioning.
        try:
            if self.pid_websocket:
                MPQ_WS.put([
                    'lane',
                    addr_ln,
                    self.stat_ln[addr_ln]['status'],
                    self.stat_ln[addr_ln]['last_module'],
                    self.stat_ln[addr_ln]['poll'],
                    self.stat_ln[addr_ln]['last_dtg'],
                    self.stat_ln[addr_ln]['setup_id']
                ])
            if self.tid_snmp_agent:
                self.MPQ_SNMPA3.put([
                    addr_ln,
                    self.stat_ln[addr_ln]['status'],
                    self.stat_ln[addr_ln]['last_module'],
                    self.stat_ln[addr_ln]['poll'],
                    self.stat_ln[addr_ln]['last_dtg']
                ])
            if self.tid_snmp_notify:
                self.MPQ_SNMPN3.put([
                    addr_ln,
                    self.stat_ln[addr_ln]['status'],
                    self.stat_ln[addr_ln]['last_module'],
                    self.stat_ln[addr_ln]['poll'],
                    self.stat_ln[addr_ln]['last_dtg']
                ])
        except queue.Full:
            log = 'Can not place item in lane status queue, queue is full.'
            logger.exception(log)
def dispatcher():
    """
    Automatically dispatches polling commands into MPQ_CMD5 priority queue

    Runs until a CouchDB error occurs; for each operational lane whose
    polling flag is set, queues one poll job per non-failed module and
    waits for each job to complete before dispatching the next.
    """
    stat_cdb = STAT_LVL['op']
    stat_cdb_prev = STAT_LVL['not_cfg']
    # Per-lane polling status flags, lanes 0-3
    cfg_poll = [
        STAT_LVL['not_cfg'],
        STAT_LVL['not_cfg'],
        STAT_LVL['not_cfg'],
        STAT_LVL['not_cfg']
    ]

    # Loop until CouchDB reports an error (STAT_LVL['op'] is falsy)
    while not stat_cdb:

        # Cycle through all lanes
        for addr_ln in range(0, 4):

            # Change logging level since this operates in multiprocess
            # Cycle to last entry for most current log setting
            while not MPQ_POLL_LOG_DISP.empty():
                mpq_record = MPQ_POLL_LOG_DISP.get()
                if mpq_record[0] == 'DEBUG':
                    logger.setLevel(logging.DEBUG)
                elif mpq_record[0] == 'INFO':
                    logger.setLevel(logging.INFO)
                elif mpq_record[0] == 'ERROR':
                    logger.setLevel(logging.ERROR)
                elif mpq_record[0] == 'WARNING':
                    logger.setLevel(logging.WARNING)
                elif mpq_record[0] == 'CRITICAL':
                    logger.setLevel(logging.CRITICAL)

            # Set polling status flag for this lane if start command is issued
            # on MPQ_POLL_START
            if not MPQ_POLL_START.empty():
                mpq_record = MPQ_POLL_START.get()
                cfg_poll[mpq_record[0]] = STAT_LVL['op']
                MPQ_STAT.put_nowait([
                    'lane',
                    [
                        mpq_record[0],
                        {'poll': cfg_poll[mpq_record[0]]}
                    ]
                ])

                # Send polling start messaging with timeout in seconds
                send_mail(
                    msg_type='poll_start',
                    args=[mpq_record[0]],
                )

            # If polling status flag is set, execute polling for lane
            # (STAT_LVL['op'] is falsy — presumably 0)
            if not cfg_poll[addr_ln]:

                # Get lane_status document from CouchDB lanes database
                data0_cdb_out, stat0_cdb, http0_cdb = dbase.cdb_request(
                    cdb_cmd='get_doc',
                    cdb_name='lanes',
                    cdb_doc='lane{0}_status'.format(addr_ln),
                    logfile=logfile
                )

                # Check that both lane status and polling have not failed
                if (not stat0_cdb) and (data0_cdb_out['status'] < STAT_LVL['crit']) and \
                        (data0_cdb_out['poll'] < STAT_LVL['crit']):

                    # Get stat_lane view from CouchDB modconfig database
                    data1_cdb_out, stat1_cdb, http1_cdb = dbase.cdb_request(
                        cdb_cmd='get_view',
                        cdb_name='modconfig',
                        cdb_doc='stat_lane{0}'.format(addr_ln),
                        logfile=logfile
                    )
                    if not stat1_cdb:

                        # Cycle through all modules connected to lane
                        # If a module has not failed, initiate polling actions
                        for dict_mod in data1_cdb_out:
                            if dict_mod['value']['status'] < STAT_LVL['crit']:

                                # Developer code to check for speed
                                print('priority 5 interface added')
                                time_m = time.time()

                                # Immediate stop polling for this lane
                                # if stop command is issued on MPQ_POLL_STOP
                                if not MPQ_POLL_STOP.empty():
                                    mpq_record = MPQ_POLL_STOP.get()
                                    cfg_poll[mpq_record] = STAT_LVL['not_cfg']
                                    log = 'Lane {0} polling exit command executed.'. \
                                        format(mpq_record)
                                    logger.info(log)
                                    MPQ_ACT.put_nowait([
                                        datetime.now().isoformat(' '),
                                        'INFO',
                                        log
                                    ])
                                    MPQ_STAT.put_nowait([
                                        'lane',
                                        [
                                            mpq_record,
                                            {'poll': cfg_poll[mpq_record]}
                                        ]
                                    ])

                                    # Send polling stop messaging with timeout in seconds
                                    send_mail(
                                        msg_type='poll_stop',
                                        args=[mpq_record],
                                    )
                                    if mpq_record == addr_ln:
                                        break

                                # Issue request to poll this module
                                MPQ_CMD5.put([
                                    dict_mod['id'],
                                    addr_ln,
                                    dict_mod['key']
                                ])

                                # Immediate stop polling for this lane
                                # if stop command is issued on MPQ_POLL_STOP.
                                # Second check ensures quicker response time.
                                if not MPQ_POLL_STOP.empty():
                                    mpq_record = MPQ_POLL_STOP.get()
                                    cfg_poll[mpq_record] = STAT_LVL['not_cfg']
                                    log = 'Lane {0} polling exit command executed.'.format(mpq_record)
                                    logger.info(log)
                                    MPQ_ACT.put_nowait([
                                        datetime.now().isoformat(' '),
                                        'INFO',
                                        log
                                    ])
                                    MPQ_STAT.put_nowait([
                                        'lane',
                                        [
                                            mpq_record,
                                            {'poll': cfg_poll[mpq_record]}
                                        ]
                                    ])

                                    # Send polling stop messaging with timeout in seconds
                                    send_mail(
                                        msg_type='poll_stop',
                                        args=[mpq_record],
                                    )
                                    if mpq_record == addr_ln:
                                        break

                                log = 'Lane {0} module {1} poll added to job queue.'. \
                                    format(addr_ln, dict_mod['key'])
                                logger.info(log)
                                MPQ_ACT.put_nowait([
                                    datetime.now().isoformat(' '),
                                    'DEBUG',
                                    log
                                ])

                                # Determine if all module sensor data processing is
                                # complete prior to proceeding. This pause is here
                                # to prevent the poll dispatcher from getting too far
                                # ahead of module sensor data processing.
                                while True:
                                    if not MPQ_POLL_COMPLETE.empty():
                                        mpq_record = MPQ_POLL_COMPLETE.get()
                                        if (mpq_record[0] == addr_ln) and \
                                                (mpq_record[1] == dict_mod['key']):
                                            log = 'Lane {0} module {1} automated poll completed.'.\
                                                format(addr_ln, dict_mod['key'])
                                            logger.info(log)
                                            MPQ_ACT.put_nowait([
                                                datetime.now().isoformat(' '),
                                                'INFO',
                                                log
                                            ])
                                            break
                                    time.sleep(0.02)

                                # Developer code to check for speed
                                print('Module {0} cycle complete: {1}'.
                                      format(dict_mod['key'],
                                             round((time.time() - time_m), 3)))

                                log = 'Lane {0} module {1} poll dispatch cycle complete.'. \
                                    format(addr_ln, dict_mod['key'])
                                logger.info(log)
                                MPQ_ACT.put_nowait([
                                    datetime.now().isoformat(' '),
                                    'DEBUG',
                                    log
                                ])

                    else:
                        stat_cdb = stat1_cdb

                    # Record the time of this lane's last dispatch cycle
                    last_dtg = time.strftime(
                        '%Y-%m-%d %H:%M:%S',
                        time.localtime(time.time())
                    )
                    MPQ_STAT.put_nowait([
                        'lane',
                        [
                            addr_ln,
                            {'last_dtg': last_dtg}
                        ]
                    ])
                    time.sleep(15)
                else:
                    stat_cdb = stat0_cdb

            time.sleep(1)

        # Determine any changes in CouchDB status and report for both
        # CouchDB and poll dispatcher. CouchDB is only cause for change in
        # poll_dispatcher status
        if stat_cdb != stat_cdb_prev:
            stat_cdb_prev = stat_cdb
            MPQ_STAT.put_nowait([
                'base',
                [
                    'couchdb',
                    stat_cdb
                ]
            ])
            MPQ_STAT.put_nowait([
                'base',
                [
                    'poll_dispatch',
                    stat_cdb
                ]
            ])
            log = 'Polling dispatcher failed due to CouchDB error.'
            logger.critical(log)
            MPQ_ACT.put_nowait([
                datetime.now().isoformat(' '),
                'CRITICAL',
                log
            ])
def smtp_janusess_start(
        dict_core: dict,
        time_down: int
):
    """
    Prepares formatted SMTP message for JanusESS system start

    :param dict_core: dict
    :param time_down: int

    :return dict_smtp: dict
    :return stat_smtp_temp: STAT_LVL['op'] or STAT_LVL['crit']
    """
    stat_smtp_temp = STAT_LVL['op']
    dict_smtp = None

    # Get all documents from CouchDB lanes database
    data0_cdb_out, stat0_cdb, http0_cdb = dbase.cdb_request(
        cdb_cmd='get_all',
        cdb_name='lanes',
        logfile=logfile
    )
    if not stat0_cdb:
        body_smtp = """
JanusESS {0} (v{1}) start sequence complete at {2}.
System was down for {3} minute(s).
""".\
            format(
                dict_core['name'],
                dict_core['version'],
                datetime.now().strftime("%Y-%m-%d %H:%M:%S"),
                time_down
            )

        # Cycle through lanes to build lane status message
        for addr_ln in range(0, 4):

            # If lane status is not configured, then add the following statement
            if data0_cdb_out[addr_ln]['status'] >= STAT_LVL['not_cfg']:
                body_smtp += """
========================
Lane {0} is not setup.
""".format(addr_ln)

            # If lane status is operational, then add the following statement
            elif data0_cdb_out[addr_ln]['status'] == STAT_LVL['op']:
                body_smtp += """
========================
Lane {0} is operational.
Modules connected: {1}
------------------------
""".\
                    format(
                        addr_ln,
                        data0_cdb_out[addr_ln]['last_module']
                    )

                # Get lane status view from CouchDB modconfig database
                data1_cdb_out, stat1_cdb, http1_cdb = dbase.cdb_request(
                    cdb_cmd='get_view',
                    cdb_name='modconfig',
                    cdb_doc='stat_lane{0}'.format(addr_ln),
                    logfile=logfile,
                )
                if not stat1_cdb:

                    # Cycle through module statuses and add status messages to body
                    for addr_mod in data1_cdb_out:
                        # Map the numeric module status to a human-readable
                        # label for the email body
                        stat_mod = ''
                        if addr_mod['value']['status'] == 0:
                            stat_mod = "Operational"
                        elif addr_mod['value']['status'] == 1:
                            stat_mod = "Operational Event"
                        elif addr_mod['value']['status'] == 2:
                            stat_mod = "Operational Error"
                        elif addr_mod['value']['status'] == 3:
                            stat_mod = "Critical Failure"
                        elif addr_mod['value']['status'] == 4:
                            stat_mod = "Not Setup"
                        elif addr_mod['value']['status'] == 5:
                            stat_mod = "Configuration Error"
                        elif addr_mod['value']['status'] == 6:
                            stat_mod = "Undetermined"
                        elif addr_mod['value']['status'] == 7:
                            stat_mod = "Not Tracked"
                        body_smtp += """
Module {0}: {1}""".\
                            format(
                                addr_mod['key'],
                                stat_mod
                            )

                # If polling is operational, add the following statements
                if data0_cdb_out[addr_ln]['poll'] < STAT_LVL['crit']:
                    # NOTE(review): this branch reports polling as
                    # "unexpectedly stopped" on the below-critical poll
                    # status — confirm the condition/message pairing.
                    body_smtp += """
Lane {0} polling unexpectedly stopped on {1}.
Polling will automatically restart.
""".\
                        format(
                            addr_ln,
                            data0_cdb_out[addr_ln]['last_dtg']
                        )

                # If polling is not operational, add the following statement
                else:
                    body_smtp += """
Lane {0} is not polling.
""".\
                        format(addr_ln)

        dict_smtp = {
            'smtp_subject': 'DO NOT REPLY JanusESS {0} (v{1}): NOTICE! JanusESS started'.
            format(
                dict_core['name'],
                dict_core['version']
            ),
            'smtp_body': body_smtp,
            'smtp_distribution': 's'
        }
        log = 'JanusESS start messages built.'
        logger.debug(log)

    else:
        log = 'Failed to build JanusESS start SMTP message due to CouchDB error.'
        logger.debug(log)
        stat_smtp_temp = STAT_LVL['crit']

    return dict_smtp, stat_smtp_temp
def sms_status_dispatch(
        dict_core: dict
):
    """
    Prepares formatted SMS message for statuses

    :param dict_core: dict

    :return dict_sms: dict
    :return stat_sms_temp: STAT_LVL['op'] or STAT_LVL['crit']
    """
    stat_sms_temp = STAT_LVL['op']
    dict_sms = None

    # Get update document from CouchDB config database
    data0_cdb_out, stat0_cdb, http0_cdb = dbase.cdb_request(
        cdb_cmd='get_doc',
        cdb_name='config',
        cdb_doc='update',
        logfile=logfile,
    )

    # Get network document from CouchDB config database
    data1_cdb_out, stat1_cdb, http1_cdb = dbase.cdb_request(
        cdb_cmd='get_doc',
        cdb_name='config',
        cdb_doc='network',
        logfile=logfile,
    )

    # Get all documents from CouchDB lanes database
    data2_cdb_out, stat2_cdb, http2_cdb = dbase.cdb_request(
        cdb_cmd='get_all',
        cdb_name='lanes',
        logfile=logfile
    )

    if not stat0_cdb and not stat1_cdb and not stat2_cdb:
        body_sms = "{0}-hour update: ".format(data0_cdb_out['updateemail_interval'])

        # Cycle through lanes to build lane status message
        for addr_ln in range(0, 4):
            if data2_cdb_out[addr_ln]['status'] > STAT_LVL['op']:
                body_sms += "Ln {0} is inop. ".format(addr_ln)
            elif data2_cdb_out[addr_ln]['status'] == STAT_LVL['op']:
                body_sms += "Ln {0} is op, {1} mod ". \
                    format(
                        addr_ln,
                        data2_cdb_out[addr_ln]['last_module']
                    )

                # If polling is not setup, add the following statement
                # (no-op .format(addr_ln) calls on these placeholder-less
                # strings were removed; runtime text is unchanged)
                if data2_cdb_out[addr_ln]['poll'] > STAT_LVL['crit']:
                    body_sms += "(not poll). "

                # If polling is operational, add the following statement
                elif data2_cdb_out[addr_ln]['poll'] == STAT_LVL['op']:
                    body_sms += "(poll). "

                # If polling failed, add the following statement
                # BUG FIX: this branch previously tested the lane 'status'
                # field instead of the 'poll' field, so a failed poll was
                # never reported here.
                elif data2_cdb_out[addr_ln]['poll'] == STAT_LVL['crit']:
                    body_sms += "(poll failed). "

        dict_sms = {
            'sms_subject': '{0} Status'.format(dict_core['name']),
            'sms_body': body_sms,
            'sms_distribution': 's'
        }
        log = '{0}-hour status SMS messaging built.'.format(data0_cdb_out['updateemail_interval'])
        logger.debug(log)

    else:
        log = 'Failed to build JanusESS start SMS message due to CouchDB error.'
        logger.debug(log)
        stat_sms_temp = STAT_LVL['crit']

    return dict_sms, stat_sms_temp
def send_mail(
        msg_type: str,
        args: list,
):
    """
    Sends messaging message.

    Fetches the base status, email, and sms configuration documents from
    CouchDB, then — when email is enabled and the last network check was
    good — builds the message via templates.message_templates() and hands
    it off to a child process for delivery.  Every outcome is reported to
    the activity queue (MPQ_ACT) and the heartbeat queue (MPQ_STAT).

    :param msg_type: str - template key understood by message_templates()
    :param args: list - template-specific arguments
    """
    # Get base_status document from CouchDB config database
    data0_cdb_out, stat0_cdb, http0_cdb = dbase.cdb_request(
        cdb_cmd='get_doc',
        cdb_name='config',
        cdb_doc='base_status',
        logfile=logfile)

    # Get email document from CouchDB config database
    data1_cdb_out, stat1_cdb, http1_cdb = dbase.cdb_request(
        cdb_cmd='get_doc',
        cdb_name='config',
        cdb_doc='email',
        logfile=logfile,
    )

    # Get sms document from CouchDB config database
    data2_cdb_out, stat2_cdb, http2_cdb = dbase.cdb_request(
        cdb_cmd='get_doc',
        cdb_name='config',
        cdb_doc='sms',
        logfile=logfile,
    )

    # If messaging is enabled by user and network is operational,
    # then build template and send messaging
    if not stat0_cdb and not stat1_cdb and not stat2_cdb:
        # 'network' holds a status level; falsy means last check succeeded
        if data1_cdb_out['smtp_enable'] and not data0_cdb_out['network']:
            dict_msg, stat_msg_temp = templates.message_templates(
                sms_enable=data2_cdb_out['sms_enable'],
                msg_type=msg_type,
                args=args)

            # Uncomment to test messaging text formatting
            # print("\n\n\n")
            # print(dict_msg['smtp_subject'])
            # print(dict_msg['smtp_body'])
            # print("\n\n\n")
            # print(dict_msg['sms_subject'])
            # print(dict_msg['sms_body'])
            # print("\n\n\n")

            # Comment this block to test message text formatting
            if not stat_msg_temp:
                try:
                    # Delivery happens in a separate process so SMTP latency
                    # does not block the caller
                    email_mp = multiprocessing.Process(
                        target=send,
                        args=(data1_cdb_out,
                              data2_cdb_out,
                              dict_msg))
                    email_mp.start()

                except multiprocessing.ProcessError:
                    log = 'Can not send email due to multiprocessing error.'
                    logger.exception(log)
                    MPQ_ACT.put_nowait(
                        [datetime.now().isoformat(' '),
                         'ERROR',
                         log])
                    MPQ_STAT.put_nowait(['base', ['email', STAT_LVL['crit']]])

            else:
                # Template build failed: propagate its status to heartbeat
                log = 'Email not attempted, email template build failed.'
                logger.warning(log)
                MPQ_ACT.put_nowait(
                    [datetime.now().isoformat(' '),
                     'WARNING',
                     log])
                MPQ_STAT.put_nowait(['base', ['email', stat_msg_temp]])

        elif not data1_cdb_out['smtp_enable']:
            log = 'Email disabled by user.'
            logger.debug(log)
            MPQ_ACT.put_nowait([datetime.now().isoformat(' '), 'DEBUG', log])
            MPQ_STAT.put_nowait(['base', ['email', STAT_LVL['not_cfg']]])

        else:
            # Email enabled but last network check reported a disconnect
            log = 'Email not attempted, latest network check shows network ' + \
                  'disconnected.'
            logger.info(log)
            MPQ_ACT.put_nowait([datetime.now().isoformat(' '), 'INFO', log])
            MPQ_STAT.put_nowait(['base', ['email', STAT_LVL['not_cfg']]])

    else:
        # One of the three configuration fetches failed
        log = 'Email not attempted due to CouchDB error.'
        logger.warning(log)
        MPQ_ACT.put_nowait([datetime.now().isoformat(' '), 'WARNING', log])
        MPQ_STAT.put_nowait(['base', ['email', STAT_LVL['not_cfg']]])
def listener():
    """
    Listener for commands to execute.

    Infinite dispatch loop draining six multiprocess command queues in
    strict priority order (MPQ_CMD0 highest .. MPQ_CMD5 lowest), one
    request per pass.  Reports its own health to MPQ_STAT whenever the
    status changes.
    """
    from application.polling import flag, poll

    # Initialize operations and Janus interface libraries
    # Enable interrupts immediately
    obj_cmd = Command()
    obj_iface = Interface()
    obj_iface.interrupts_enable()

    # Send message to JanusESS main to proceed with JanusESS startup procedures
    MPQ_IFACE_SETUP.put_nowait(obj_iface.error_iface())

    stat_cmd_prev = STAT_LVL['not_cfg']

    # This while loop has no exit, JanusESS will not function without this
    # ongoing loop to check the following command queues in priority order:
    #
    #   MPQ_CMD0: User-initiated command requests
    #   MPQ_CMD1: Checks for neighbor-bus triggers
    #   MPQ_CMD2: User-initiated module sensor polling
    #   MPQ_CMD3: Lane/module initialization and setup routine
    #   MPQ_CMD4: Upload module configuration to module
    #   MPQ_CMD5: Recurring module sensor polling
    while True:
        stat_cmd = STAT_LVL['op']

        # User-initiated command requests
        if not MPQ_CMD0.empty():
            data_cmd0_in = MPQ_CMD0.get()
            log = 'Priority 0 command, command #{0} request received.'.\
                format(data_cmd0_in['command'])
            logger.debug(log)
            log = 'Command {0} called.'.format(data_cmd0_in['command'])
            logger.info(log)

            # 'log_level' is handled inline; everything else is dispatched
            # to Command.exec_cmd on a worker thread
            if data_cmd0_in['command'] == 'log_level':
                if data_cmd0_in['args'][0] == 'DEBUG':
                    logger.setLevel(logging.DEBUG)
                elif data_cmd0_in['args'][0] == 'INFO':
                    logger.setLevel(logging.INFO)
                elif data_cmd0_in['args'][0] == 'ERROR':
                    logger.setLevel(logging.ERROR)
                elif data_cmd0_in['args'][0] == 'WARNING':
                    logger.setLevel(logging.WARNING)
                elif data_cmd0_in['args'][0] == 'CRITICAL':
                    logger.setLevel(logging.CRITICAL)

            else:
                try:
                    th_cmd0 = threading.Thread(
                        target=obj_cmd.exec_cmd,
                        args=(
                            data_cmd0_in['command'],
                            data_cmd0_in['args'],
                            data_cmd0_in['data'],
                        )
                    )
                    th_cmd0.start()

                except threading.ThreadError:
                    stat_cmd = STAT_LVL['op_err']
                    log = 'Could not start user-initiated command due to threading error.'
                    logger.exception(log)
                    MPQ_ACT.put_nowait([
                        datetime.now().isoformat(' '),
                        'CRITICAL',
                        log
                    ])

            log = 'Priority 0 interface request concluded.'
            logger.info(log)

        # Checks for neighbor-bus triggers
        #
        # This command is only executed if a trigger flag is discovered during
        # recurring module sensor polling
        elif not MPQ_CMD1.empty():
            MPQ_CMD1.get()
            log = 'Priority 1 interface request received.'
            logger.debug(log)

            # Get all documents from CouchDB lanes database
            data_cdb_out, stat_cdb, http_cdb = dbase.cdb_request(
                cdb_cmd='get_all',
                cdb_name='lanes',
                logfile=logfile
            )
            if not stat_cdb:

                # Cycle through lanes, if lane and lane
                # polling are operational, then continue procedure
                # to check module triggers
                for addr_ln in range(0, 4):
                    if (data_cdb_out[addr_ln]['status'] < STAT_LVL['crit']) \
                            and (data_cdb_out[addr_ln]['poll'] < STAT_LVL['crit']):

                        # Set four-port interface lane. This function
                        # ignores single-port interface devices.
                        stat_iface = obj_iface.i2c_lane_set(addr_ln=addr_ln)
                        if not stat_iface:

                            # Get stat_chan view from CouchDB
                            # modconfig database
                            # NOTE(review): this rebinds data_cdb_out, clobbering
                            # the lanes list the enclosing for-loop indexes on
                            # later iterations — verify intent.
                            data_cdb_out, stat_cdb, http_cdb = dbase.cdb_request(
                                cdb_cmd='get_view',
                                cdb_name='modconfig',
                                cdb_doc='stat_chan{0}'.format(addr_ln),
                                logfile=logfile,
                            )

                            # Cycle through each non-failed module connected
                            # to the lane
                            if not stat_cdb:
                                for dict_mod in data_cdb_out:
                                    if dict_mod['value']['status'] < STAT_LVL['crit']:

                                        # Call function to check an
                                        # individual module's trigger status
                                        evt_byte = MMAP[dict_mod['value']['mem_map_ver']]['M_EVT']
                                        stat_mod = flag.interrupt(
                                            obj_iface=obj_iface,
                                            addr_ln=addr_ln,
                                            addr_mod=dict_mod['key'],
                                            evt_byte=evt_byte
                                        )
                                        MPQ_STAT.put_nowait([
                                            'module',
                                            [
                                                dict_mod['id'],
                                                addr_ln,
                                                dict_mod['key'],
                                                stat_mod
                                            ]
                                        ])

                            else:
                                log = 'Could not check module interrupt flag due to CouchDB error.'
                                logger.critical(log)
                                MPQ_ACT.put_nowait([
                                    datetime.now().isoformat(' '),
                                    'CRITICAL',
                                    log
                                ])
                                stat_cmd = STAT_LVL['op_err']

                        else:
                            stat_cmd = STAT_LVL['op_err']
                            log = 'Could not complete priority 1 interface request ' + \
                                  'on lane {0} due to i2c lane '.format(addr_ln) +\
                                  'set error.'
                            logger.critical(log)
                            MPQ_ACT.put_nowait([
                                datetime.now().isoformat(' '),
                                'CRITICAL',
                                log
                            ])

                obj_iface.interrupt_clear_flag()
                log = 'Priority 1 interface request concluded.'
                logger.info(log)

            else:
                log = 'Could not complete priority 1 interface request due to CouchDB error.'
                logger.critical(log)
                MPQ_ACT.put_nowait([
                    datetime.now().isoformat(' '),
                    'CRITICAL',
                    log
                ])
                stat_cmd = STAT_LVL['op_err']

        # User-initiated module sensor polling
        #
        # This command only polls one module per request
        elif not MPQ_CMD2.empty():
            data_cmd2_in = MPQ_CMD2.get()
            uid_mod = data_cmd2_in[0]
            addr_ln = data_cmd2_in[1]
            addr_mod = data_cmd2_in[2]
            log = 'Lane {0} module {1} priority 2 interface request received.'.format(addr_ln, addr_mod)
            logger.info(log)

            # Set four-port interface lane. This function ignores
            # single-port interface devices.
            stat_iface = obj_iface.i2c_lane_set(addr_ln=addr_ln)
            if not stat_iface:
                stat_poll_data, uid_mod_i2c = poll.get_data(
                    obj_iface=obj_iface,
                    uid_mod=uid_mod,
                    addr_ln=addr_ln,
                    addr_mod=addr_mod
                )
                if not stat_poll_data:
                    MPQ_STAT.put_nowait([
                        'base',
                        [
                            'poll_data',
                            STAT_LVL['op']
                        ]
                    ])

                    # NOTE(review): this assignment shadows the imported
                    # 'flag' module (application.polling.flag) for the rest
                    # of the loop's lifetime; a later MPQ_CMD1 pass calling
                    # flag.interrupt(...) would then fail — confirm and rename.
                    stat_iface, flag = obj_iface.interrupt_check_flag()
                    if flag:
                        MPQ_CMD1.put(True)

                    if not stat_iface:
                        log = 'Lane {0} module {1} on-demand poll completed.'.format(addr_ln, addr_mod)
                        logger.info(log)
                        MPQ_ACT.put_nowait([
                            datetime.now().isoformat(' '),
                            'INFO',
                            log
                        ])

                # USB reset interface board if bad data returned
                #
                # TODO: Need higher level tracking of this error
                # TODO: Do not wish to reset device more than once
                # TODO: If reset after first time fails, all
                # TODO: related commands will be bypassed
                elif (stat_poll_data == STAT_LVL['op_err']) and \
                        (uid_mod_i2c != uid_mod):
                    obj_iface.setup()
                    obj_iface.interrupts_enable()
                    stat_cmd = STAT_LVL['op_err']
                    log = 'Resetting interface due to mismatch in module id: ' + \
                          'requested={0} vs polled={1}.'.format(uid_mod, uid_mod_i2c)
                    logger.warning(log)

            else:
                log = 'Could not complete priority 2 interface request on ' + \
                      'lane {0} module {1} '.format(addr_ln, addr_mod) +\
                      'due to i2c lane set error.'
                logger.critical(log)
                stat_cmd = STAT_LVL['op_err']

            log = 'Lane {0} module '.format(addr_ln) +\
                  '{0} priority 2 interface request concluded.'.format(addr_mod)
            logger.info(log)

        # Lane/module initialization and setup routine
        elif not MPQ_CMD3.empty():
            data_cmd3_in = MPQ_CMD3.get()
            addr_ln = data_cmd3_in[0]
            # FLAG_LNRST[addr_ln] = True
            log = 'Lane {0} priority 3 interface request received.'.\
                format(addr_ln)
            logger.debug(log)
            log = 'Begin lane {0} network reset and initialization.'.\
                format(addr_ln)
            logger.info(log)
            MPQ_ACT.put_nowait([
                datetime.now().isoformat(' '),
                'INFO',
                log
            ])

            # Call lane reset command to toggle GPIO pins
            stat_iface = lane.reset(
                obj_iface=obj_iface,
                addr_ln=addr_ln
            )
            if not stat_iface:

                # Call lane init command to setup any modules
                # connected to the lane
                stat_ch, stat_cdb = lane.init(
                    obj_iface=obj_iface,
                    addr_ln=addr_ln
                )
                if not stat_ch:

                    # Ensure that all interrupt flags are cleared prior
                    # to any other lane activity. GPIO interrupts may
                    # get triggered during lane setup routines.
                    stat_iface = obj_iface.interrupt_clear_flag()
                    if not stat_iface:
                        log = 'Interrupt flags successfully cleared.'
                        logger.debug(log)
                        MPQ_ACT.put_nowait([
                            datetime.now().isoformat(' '),
                            'DEBUG',
                            log
                        ])

                        if not stat_cdb:
                            log = 'Lane {0} network reset and initialization complete.'.format(addr_ln)
                            logger.info(log)
                            MPQ_ACT.put_nowait([
                                datetime.now().isoformat(' '),
                                'INFO',
                                log
                            ])

                        else:
                            stat_cmd = STAT_LVL['op_err']
                            log = 'Lane {0} network reset and '.format(addr_ln) + \
                                  'initialization complete with CouchDB errors.'
                            logger.info(log)
                            MPQ_ACT.put_nowait([
                                datetime.now().isoformat(' '),
                                'WARNING',
                                log
                            ])

                    else:
                        stat_cmd = STAT_LVL['op_err']
                        log = 'Could not clear interrupt flags from interface due to interface error.'
                        logger.critical(log)
                        MPQ_ACT.put_nowait([
                            datetime.now().isoformat(' '),
                            'WARNING',
                            log
                        ])

                else:
                    stat_cmd = STAT_LVL['op_err']
                    log = 'Lane {0} network reset and initialization failed to complete.'.format(addr_ln)
                    logger.warning(log)
                    MPQ_ACT.put_nowait([
                        datetime.now().isoformat(' '),
                        'CRITICAL',
                        log
                    ])

            else:
                stat_cmd = STAT_LVL['op_err']
                log = 'Could not initialize lane {0} network due to Neighbor Bus reset error.'.format(addr_ln)
                logger.critical(log)
                MPQ_ACT.put_nowait([
                    datetime.now().isoformat(' '),
                    'CRITICAL',
                    log
                ])

            # FLAG_LNRST[addr_ln] = False
            log = 'Lane {0} priority 3 interface request concluded.'.format(addr_ln)
            logger.info(log)

        # Upload module configuration to module
        elif not MPQ_CMD4.empty():
            # Drain the whole queue in one pass (unlike the other priorities)
            while not MPQ_CMD4.empty():
                data_cmd4_in = MPQ_CMD4.get()
                uid_mod = data_cmd4_in[0]
                addr_ln = data_cmd4_in[1]
                addr_mod = data_cmd4_in[2]
                addr_mem = data_cmd4_in[3]
                data_iface_in = data_cmd4_in[4]
                log = 'Lane {0} module '.format(addr_ln) +\
                      '{0} priority 4 interface request received.'.format(addr_mod)
                logger.debug(log)

                stat_mod = STAT_LVL['op']

                # Set four-port interface lane. This function ignores
                # single-port interface devices.
                stat_iface = obj_iface.i2c_lane_set(addr_ln=addr_ln)
                if not stat_iface:
                    stat_iface = obj_iface.i2c_write(
                        addr_ln=addr_ln,
                        addr_mod=addr_mod,
                        addr_mem=addr_mem,
                        data_iface_in=data_iface_in
                    )
                    if stat_iface:
                        stat_mod = STAT_LVL['crit']
                        stat_cmd = STAT_LVL['op_err']
                        log = 'Upload of module settings to lane {0} '.format(addr_ln) +\
                              'module {0} unsuccessful.'.format(addr_mod)
                        MPQ_ACT.put_nowait([
                            datetime.now().isoformat(' '),
                            'CRITICAL',
                            log
                        ])

                    else:
                        log = 'Upload of module settings to lane {0} '.format(addr_ln) +\
                              'module {0} successful.'.format(addr_mod)
                        MPQ_ACT.put_nowait([
                            datetime.now().isoformat(' '),
                            'INFO',
                            log
                        ])

                    # Level mirrors the write outcome
                    logger.log(
                        logging.INFO if not stat_iface else logging.CRITICAL,
                        log
                    )
                    print(log)
                    MPQ_STAT.put_nowait([
                        'module',
                        [
                            uid_mod,
                            addr_ln,
                            addr_mod,
                            stat_mod
                        ]
                    ])

                else:
                    stat_cmd = STAT_LVL['op_err']
                    log = 'Could not complete priority 4 interface request on ' + \
                          'lane {0}, '.format(addr_ln) +\
                          'module {0} due to i2c lane set error.'.format(addr_mod)
                    logger.critical(log)

                log = 'Lane {0} module '.format(addr_ln) +\
                      '{0} priority 4 interface request concluded.'.format(addr_mod)
                logger.info(log)

        # Recurring module sensor polling
        #
        # While this command algorithm is essentially identical
        # to MPQ_CMD2 algorithm, it remains separate so that any
        # user-initiated polling request upon an individual
        # module will receive a much higher priority so that
        # execution takes place more quickly.
        elif not MPQ_CMD5.empty():
            time_a = time.time()
            data_cmd5_in = MPQ_CMD5.get()
            uid_mod = data_cmd5_in[0]
            addr_ln = data_cmd5_in[1]
            addr_mod = data_cmd5_in[2]
            log = 'Lane {0} module {1} '.format(addr_ln, addr_mod) +\
                  'priority 5 interface request received.'
            logger.info(log)

            # Set four-port interface lane. This function ignores
            # single-port interface devices.
            stat_iface = obj_iface.i2c_lane_set(addr_ln=addr_ln)
            if not stat_iface:
                time_b = time.time()
                stat_poll_data, uid_mod_i2c = poll.get_data(
                    obj_iface=obj_iface,
                    uid_mod=uid_mod,
                    addr_ln=addr_ln,
                    addr_mod=addr_mod
                )
                print('Lane {0} module {1} priority 5 get_data time: {2}'.
                      format(addr_ln, addr_mod, round((time.time() - time_b), 3)))

                # Check for any interrupts on all lanes before polling again
                if not stat_poll_data:
                    MPQ_STAT.put_nowait([
                        'base',
                        [
                            'poll_data',
                            STAT_LVL['op']
                        ]
                    ])

                    # NOTE(review): same 'flag' module shadowing as the
                    # priority 2 branch above — confirm and rename.
                    stat_iface, flag = obj_iface.interrupt_check_flag()
                    if flag:
                        MPQ_CMD1.put(True)

                # USB reset interface board if bad data returned
                #
                # TODO: Need higher level tracking of this error
                # TODO: Do not wish to reset device more than once
                # TODO: If reset after first time fails, all
                # TODO: related commands will be bypassed
                elif (stat_poll_data == STAT_LVL['op_err']) and \
                        (uid_mod_i2c != uid_mod):
                    obj_iface.setup()
                    obj_iface.interrupts_enable()
                    stat_cmd = STAT_LVL['op_err']
                    log = 'Resetting interface due to mismatch in module id: ' + \
                          'requested={0} vs polled={1}.'.format(uid_mod, uid_mod_i2c)
                    logger.warning(log)

                MPQ_POLL_COMPLETE.put_nowait([
                    addr_ln,
                    addr_mod
                ])

            else:
                stat_cmd = STAT_LVL['op_err']
                log = 'Could not complete priority 5 interface request on lane ' + \
                      '{0} module {1} '.format(addr_ln, addr_mod) +\
                      'due to i2c lane set error.'
                logger.critical(log)

            log = 'Lane {0} module {1} '.format(addr_ln, addr_mod) +\
                  'priority 5 interface request concluded.'
            logger.info(log)
            print('Lane {0} module {1} priority 5 time: {2}'.
                  format(addr_ln, addr_mod, round((time.time() - time_a), 3)))

        time.sleep(0.05)

        # If command listener status has changed, send heartbeat update
        if stat_cmd != stat_cmd_prev:
            stat_cmd_prev = stat_cmd
            MPQ_STAT.put_nowait([
                'base',
                [
                    'command_listener',
                    stat_cmd
                ]
            ])
def poll(self, mpq_record: list):
    """
    Places polling values into websocket and SNMP queues as they
    are received, and maintains the per-module/per-sensor alert
    state machine (off -> low/high -> off), dispatching email/SMS on
    alert transitions and on elapsed trigger intervals.

    :param mpq_record: list
        mpq_record[0][0] = module id
        mpq_record[0][1] = customer id
        mpq_record[0][2] = base unit id
        mpq_record[0][3] = module location
        mpq_record[0][4] = lane address
        mpq_record[0][5] = module address
        mpq_record[1][0] = sensor address
        mpq_record[1][1] = alert type
        mpq_record[1][2] = alert enabled
        mpq_record[1][3] = sensor type
        mpq_record[1][4] = sensor value
        mpq_record[1][5] = sensor value dtg
        mpq_record[1][6] = sensor value unit
        mpq_record[1][7] = trigger
        mpq_record[1][8] = trigger interval
        mpq_record[1][9] = trigger step
    """
    # Local import avoids a circular module dependency at load time
    from shared.messaging.smtp import send_mail

    uid_mod = mpq_record[0][0]
    mod_loc = mpq_record[0][3]
    addr_ln = mpq_record[0][4]
    addr_mod = mpq_record[0][5]
    addr_s = mpq_record[1][0]
    alert_type = mpq_record[1][1]
    s_type = mpq_record[1][3]
    converted_val = mpq_record[1][4]
    data_dtg = mpq_record[1][5]
    unit = mpq_record[1][6]
    trigger = mpq_record[1][7]
    trig_int = mpq_record[1][8]
    trig_step = mpq_record[1][9]

    # Cycle through poll_value list to search for matching module and sensor
    # If found process alert value
    mod_found = False
    message = False
    msg_type = None
    msg_args = None

    # Get dataunits document from CouchDB config database
    # (user's preferred display unit per sensor type)
    data_cdb_out, stat_cdb, http_cdb = dbase.cdb_request(
        cdb_cmd='get_doc',
        cdb_name='config',
        cdb_doc='dataunits',
        logfile=logfile)

    # Convert sensor value to user-selected unit
    s_value, s_unit = self.unit_convert.convert(converted_val, unit, data_cdb_out[s_type])

    # Convert low-threshold value to user-selected unit
    t_value, t_unit = self.unit_convert.convert(trigger, unit, data_cdb_out[s_type])

    for dict_alert in self.poll_val:
        if dict_alert['mod_uid'] == uid_mod:

            # A new alert period is triggered
            if (dict_alert['sensors'][addr_s]['type'] == 'off') and (alert_type != 'off'):
                dict_alert_hist = {
                    'trigger': t_value,
                    'value': s_value,
                    'unit': s_unit,
                    'data_dtg': data_dtg
                }
                dict_alert['sensors'][addr_s]['prev_value'] = s_value
                dict_alert['sensors'][addr_s]['value'] = s_value
                dict_alert['sensors'][addr_s]['type'] = alert_type
                dict_alert['sensors'][addr_s]['data_dtg'] = data_dtg
                dict_alert['sensors'][addr_s]['history'].append(
                    dict_alert_hist)

                # Email settings for reporting new alert period
                msg_type = 'alert_new'
                msg_args = [[], 'polling']
                message = True

            # Service existing low alert
            elif (dict_alert['sensors'][addr_s]['type'] == 'low') and (alert_type == 'low'):

                # Existing low alert is stable
                # (last recorded value within +/- trig_step of prev_value)
                if (dict_alert['sensors'][addr_s]['value'] >
                    (dict_alert['sensors'][addr_s]['prev_value'] - trig_step)) and \
                        (dict_alert['sensors'][addr_s]['value'] <
                         (dict_alert['sensors'][addr_s]['prev_value'] + trig_step)):

                    # Email setting for reporting stable low alert
                    msg_type = 'alert_stable'

                # Existing low alert with increased value
                elif dict_alert['sensors'][addr_s]['value'] >= \
                        (dict_alert['sensors'][addr_s]['prev_value'] + trig_step):
                    dict_alert['sensors'][addr_s]['prev_value'] = s_value
                    msg_type = 'alert_increased'

                # Existing low alert with decreased value
                elif dict_alert['sensors'][addr_s]['value'] <= \
                        (dict_alert['sensors'][addr_s]['prev_value'] - trig_step):
                    dict_alert['sensors'][addr_s]['prev_value'] = s_value
                    msg_type = 'alert_decreased'

                # If trigger interval has elapsed, conduct reporting
                # (message is only sent on interval boundaries)
                if data_dtg >= (dict_alert['sensors'][addr_s]['data_dtg'] + trig_int):
                    dict_alert_hist = {
                        'trigger': t_value,
                        'value': s_value,
                        'unit': s_unit,
                        'data_dtg': data_dtg
                    }
                    dict_alert['sensors'][addr_s]['value'] = s_value
                    dict_alert['sensors'][addr_s]['type'] = alert_type
                    dict_alert['sensors'][addr_s]['data_dtg'] = data_dtg
                    dict_alert['sensors'][addr_s]['history'].append(
                        dict_alert_hist)
                    msg_args = [[], dict_alert['sensors'][addr_s]['history'], 'polling']
                    message = True

            # Service existing high alert
            elif (dict_alert['sensors'][addr_s]['type'] == 'high') and (alert_type == 'high'):

                # Existing high alert is stable
                if (dict_alert['sensors'][addr_s]['value'] >
                    (dict_alert['sensors'][addr_s]['prev_value'] - trig_step)) and \
                        (dict_alert['sensors'][addr_s]['value'] <
                         (dict_alert['sensors'][addr_s]['prev_value'] + trig_step)):

                    # Email setting for reporting stable high alert
                    msg_type = 'alert_stable'

                # Existing high alert with increased value
                elif dict_alert['sensors'][addr_s]['value'] >= \
                        (dict_alert['sensors'][addr_s]['prev_value'] + trig_step):
                    dict_alert['sensors'][addr_s]['prev_value'] = s_value
                    msg_type = 'alert_increased'

                # Existing high alert with decreased value
                elif dict_alert['sensors'][addr_s]['value'] <= \
                        (dict_alert['sensors'][addr_s]['prev_value'] - trig_step):
                    dict_alert['sensors'][addr_s]['prev_value'] = s_value
                    msg_type = 'alert_decreased'

                # If trigger interval has elapsed, conduct reporting
                if data_dtg >= (dict_alert['sensors'][addr_s]['data_dtg'] + trig_int):
                    dict_alert_hist = {
                        'trigger': t_value,
                        'value': s_value,
                        'unit': s_unit,
                        'data_dtg': data_dtg
                    }
                    dict_alert['sensors'][addr_s]['value'] = s_value
                    dict_alert['sensors'][addr_s]['type'] = alert_type
                    dict_alert['sensors'][addr_s]['data_dtg'] = data_dtg
                    dict_alert['sensors'][addr_s]['history'].append(
                        dict_alert_hist)
                    msg_args = [[], dict_alert['sensors'][addr_s]['history'], 'polling']
                    message = True

            # Existing alert period is ended
            elif (dict_alert['sensors'][addr_s]['type'] != 'off') and (alert_type == 'off'):
                dict_alert['sensors'][addr_s]['prev_value'] = None
                dict_alert['sensors'][addr_s]['value'] = None
                dict_alert['sensors'][addr_s]['type'] = alert_type
                dict_alert['sensors'][addr_s]['data_dtg'] = None
                dict_alert['sensors'][addr_s]['history'] = []

                # Report end of alert period
                msg_type = 'alert_cancel'
                msg_args = [[], 'polling']
                message = True

            mod_found = True
            break

    # A new alert period is issued for a new module
    if not mod_found:
        dict_mod = {'mod_uid': uid_mod, 'sensors': []}

        # Cycle through all possible sensors to build alert structure
        # Some sensors are either not installed or may never issue alert data
        for sensor_id in range(0, 17):
            dict_alert = {
                'data_dtg': None,
                'type': 'off',
                'prev_value': None,
                'value': None,
                'unit': None,
                'history': []
            }
            dict_mod['sensors'].append(dict_alert)

        # Update data for specific sensor number
        if alert_type != 'off':
            dict_mod['sensors'][addr_s]['data_dtg'] = data_dtg
            dict_mod['sensors'][addr_s]['type'] = alert_type
            dict_mod['sensors'][addr_s]['prev_value'] = s_value
            dict_mod['sensors'][addr_s]['value'] = s_value
            dict_history = {
                'trigger': t_value,
                'value': s_value,
                'unit': s_unit,
                'data_dtg': data_dtg
            }
            dict_mod['sensors'][addr_s]['history'].append(dict_history)

            # Email settings for reporting new alert period
            msg_type = 'alert_new'
            msg_args = [mpq_record, 'polling']
            message = True

        # NOTE(review): module is registered regardless of alert state;
        # original flattened source is ambiguous on this indentation — confirm.
        self.poll_val.append(dict_mod)

    # If changes take place in alert status, send messaging message
    if message:
        # First slot of msg_args is always replaced with the full poll record
        msg_args[0] = [
            uid_mod,
            mod_loc,
            addr_ln,
            addr_mod,
            addr_s,
            alert_type,
            s_type,
            s_value,
            data_dtg,
            s_unit,
            t_value,
        ]
        send_mail(
            msg_type=msg_type,
            args=msg_args,
        )

    # Only place into MPQs if processes/threads are functioning.
    try:
        if self.pid_websocket:
            MPQ_WS.put([
                'poll',
                addr_ln,
                addr_mod,
                addr_s,
                mod_loc,
                s_type,
                s_value,
                s_unit,
                strftime("%H:%M:%S", time.localtime(float(data_dtg))),
                alert_type,
                t_value,
            ])
        if self.tid_snmp_agent:
            self.MPQ_SNMPA5.put([
                uid_mod,
                addr_ln,
                addr_mod,
                addr_s,
                s_type,
                s_value,
                s_unit,
                str(ctime(float(data_dtg))),
                alert_type,
                t_value,
            ])
        # Notifications only fire when an alert transition occurred
        if self.tid_snmp_notify and message:
            self.MPQ_SNMPN5.put([
                uid_mod,
                addr_ln,
                addr_mod,
                addr_s,
                s_type,
                s_value,
                s_unit,
                str(ctime(float(data_dtg))),
                alert_type,
                t_value,
            ])

    except queue.Full:
        log = 'Can not place item in poll value queue, queue is full.'
        logger.exception(log)
def sms_janusess_start(
        dict_core: dict,
        time_down: int
):
    """
    Prepares formatted SMS message for JanusESS system start

    :param dict_core: dict
    :param time_down: int
    :return dict_sms: dict
    :return stat_sms_temp: STAT_LVL['op'] or STAT_LVL['crit']
    """
    # Fetch every lane document from the CouchDB lanes database
    lane_docs, stat_lanes, _http_lanes = dbase.cdb_request(
        cdb_cmd='get_all',
        cdb_name='lanes',
        logfile=logfile
    )

    # On CouchDB failure no message can be assembled
    if stat_lanes:
        logger.debug('Failed to build JanusESS start SMS message due to CouchDB error.')
        return None, STAT_LVL['crit']

    # Header: start time plus total downtime in minutes
    fragments = ["Started {0} hr--down {1} min. ".format(
        datetime.now().strftime("%H:%M"), int(time_down))]

    # Append a short status fragment per lane
    for lane_idx in range(0, 4):
        lane_doc = lane_docs[lane_idx]

        # Not-configured or worse reads as inoperative
        if lane_doc['status'] >= STAT_LVL['not_cfg']:
            fragments.append("Ln {0} inop. ".format(lane_idx))

        # Operational lane: report module count and polling state
        elif lane_doc['status'] == STAT_LVL['op']:
            fragments.append("Ln {0} op, {1} mod ".format(
                lane_idx, lane_doc['last_module']))
            fragments.append(
                "(poll). " if lane_doc['poll'] < STAT_LVL['crit']
                else "(not poll). ")

    dict_sms = {
        'sms_subject': '{0} Status'.format(dict_core['name']),
        'sms_body': ''.join(fragments),
        'sms_distribution': 's'
    }
    logger.debug('JanusESS start SMS message built.')
    return dict_sms, STAT_LVL['op']
def __init__(self):
    """
    Setup heartbeat properties.

    Initializes unit conversion, queue references, process/thread
    tracking flags, and the base/network/lane status structures, and
    pushes each initial status document to CouchDB.  Any critical
    CouchDB failure marks the listener status (self.stat_list) critical.
    """
    # Unit conversions
    self.unit_convert = conversion.Conversion()

    # Local references to the module-level SNMP agent/notify queues
    self.MPQ_SNMPA2 = MPQ_SNMPA2
    self.MPQ_SNMPA3 = MPQ_SNMPA3
    self.MPQ_SNMPA4 = MPQ_SNMPA4
    self.MPQ_SNMPA5 = MPQ_SNMPA5
    self.MPQ_SNMPN2 = MPQ_SNMPN2
    self.MPQ_SNMPN3 = MPQ_SNMPN3
    self.MPQ_SNMPN4 = MPQ_SNMPN4
    self.MPQ_SNMPN5 = MPQ_SNMPN5

    # Heartbeat listener status
    # This status is not tracked outside this library
    self.stat_list = STAT_LVL['op']

    # Set process/thread id's to False
    self.pid_websocket = False
    self.tid_snmp_agent = False
    self.tid_snmp_notify = False

    # Set base unit heartbeat status database entry to initial values
    self.stat_base = {
        'command_listener': STAT_LVL['undeter'],
        'couchdb': STAT_LVL['undeter'],
        'mariadb': STAT_LVL['undeter'],
        'email': STAT_LVL['undeter'],
        'file': STAT_LVL['undeter'],
        'influxdb': STAT_LVL['undeter'],
        'interface': STAT_LVL['undeter'],
        'logging': STAT_LVL['undeter'],
        'network': STAT_LVL['undeter'],
        'poll_data': STAT_LVL['undeter'],
        'poll_dispatch': STAT_LVL['undeter'],
        'snmp_agent': STAT_LVL['undeter'],
        'snmp_notify': STAT_LVL['undeter'],
        'tasks': STAT_LVL['undeter']
    }

    # Update base_status document in CouchDB config database
    data_cdb_out, stat_cdb, http_cdb = dbase.cdb_request(
        cdb_cmd='upd_doc',
        cdb_name='config',
        cdb_doc='base_status',
        data_cdb_in=self.stat_base,
        logfile=logfile)
    if stat_cdb == STAT_LVL['crit']:
        self.stat_list = STAT_LVL['crit']

    # Get network document from CouchDB config database
    data_cdb_out, stat_cdb, http_cdb = dbase.cdb_request(cdb_cmd='get_doc',
                                                         cdb_name='config',
                                                         cdb_doc='network',
                                                         logfile=logfile)
    if stat_cdb == STAT_LVL['crit']:
        self.stat_list = STAT_LVL['crit']

    # Set network status in tasks database entry to initial values
    self.stat_net = {
        'network_interval': data_cdb_out['interval_bad'],
        'network_check_dtg': None
    }
    self.url_net = data_cdb_out['url_server']

    # Update network document in CouchDB config database
    data_cdb_out, stat_cdb, http_cdb = dbase.cdb_request(
        cdb_cmd='upd_doc',
        cdb_name='config',
        cdb_doc='network',
        data_cdb_in=self.stat_net,
        logfile=logfile)
    if stat_cdb == STAT_LVL['crit']:
        self.stat_list = STAT_LVL['crit']

    # Set lane heartbeat statuses to initial values
    self.stat_ln = [None, None, None, None]
    self.rst_ln = [0, 0, 0, 0]

    # Get all documents from CouchDB lanes database
    data_cdb_out, stat_cdb, http_cdb = dbase.cdb_request(cdb_cmd='get_all',
                                                         cdb_name='lanes',
                                                         logfile=logfile)
    if stat_cdb == STAT_LVL['crit']:
        self.stat_list = STAT_LVL['crit']

    # Cycle through lanes and set initial values for lane statuses
    for addr_ln in range(0, 4):
        self.stat_ln[addr_ln] = {
            'addr_ln': addr_ln,
            'status': STAT_LVL['undeter'],
            'last_module': 0,
            'setup_id': 0.0
        }

        # Update lane document in CouchDB lanes database
        data1_cdb_out, stat1_cdb, http1_cdb = dbase.cdb_request(
            cdb_cmd='upd_doc',
            cdb_name='lanes',
            cdb_doc='lane{0}_status'.format(addr_ln),
            data_cdb_in=self.stat_ln[addr_ln],
            logfile=logfile)
        # First critical lane-document failure aborts the remaining lanes
        if stat1_cdb == STAT_LVL['crit']:
            self.stat_list = STAT_LVL['crit']
            break

        # Carry forward last-known poll state/time from the fetched lane docs;
        # note setup_id is reset to int 0 here (the dict above used 0.0)
        self.stat_ln[addr_ln]['poll'] = data_cdb_out[addr_ln]['poll']
        self.stat_ln[addr_ln]['last_dtg'] = data_cdb_out[addr_ln][
            'last_dtg']
        self.stat_ln[addr_ln]['setup_id'] = 0

    # Initialize empty poll value list
    self.poll_val = []
def message_templates(
        sms_enable: bool,
        msg_type: str,
        args: list
):
    """
    Retrieves message template and returns message dictionary

    :param sms_enable: bool
    :param msg_type: str
    :param args: list
    :return dict_smtp: dict
    :return stat_smtp_temp: STAT_LVL['op'] or STAT_LVL['crit']
    """
    stat_smtp_temp = STAT_LVL['op']
    dict_smtp = None

    # Get core document from CouchDB config database
    data_cdb_out, stat_cdb, http_cdb = dbase.cdb_request(
        cdb_cmd='get_doc',
        cdb_name='config',
        cdb_doc='core',
        logfile=logfile,
    )

    if not stat_cdb:
        # Dispatch table: msg_type -> (smtp builder, sms builder).
        # Entries are zero-argument callables so the SMS builder only
        # executes when SMS is enabled and the SMTP build succeeded.
        # An unknown msg_type leaves dict_smtp as None and the status
        # operational, matching the original if/elif fallthrough.
        dispatch = {
            'janusess_start': (
                lambda: smtp_janusess_start(dict_core=data_cdb_out, time_down=args[0]),
                lambda: sms_janusess_start(dict_core=data_cdb_out, time_down=args[0]),
            ),
            'poll_start': (
                lambda: smtp_poll_start(dict_core=data_cdb_out, addr_ln=args[0]),
                lambda: sms_poll_start(dict_core=data_cdb_out, addr_ln=args[0]),
            ),
            'poll_stop': (
                lambda: smtp_poll_stop(dict_core=data_cdb_out, addr_ln=args[0]),
                lambda: sms_poll_stop(dict_core=data_cdb_out, addr_ln=args[0]),
            ),
            'alert_new': (
                lambda: smtp_alert_new(dict_core=data_cdb_out, data_poll=args[0]),
                lambda: sms_alert(dict_core=data_cdb_out, data_poll=args[0]),
            ),
            'alert_decreased': (
                lambda: smtp_alert_decreased(dict_core=data_cdb_out, data_poll=args[0], dict_alert_hist=args[1]),
                lambda: sms_alert(dict_core=data_cdb_out, data_poll=args[0]),
            ),
            'alert_increased': (
                lambda: smtp_alert_increased(dict_core=data_cdb_out, data_poll=args[0], dict_alert_hist=args[1]),
                lambda: sms_alert(dict_core=data_cdb_out, data_poll=args[0]),
            ),
            'alert_stable': (
                lambda: smtp_alert_stable(dict_core=data_cdb_out, data_poll=args[0], dict_alert_hist=args[1]),
                lambda: sms_alert(dict_core=data_cdb_out, data_poll=args[0]),
            ),
            'alert_cancel': (
                lambda: smtp_alert_cancel(dict_core=data_cdb_out, data_poll=args[0]),
                lambda: sms_alert_cancel(dict_core=data_cdb_out, data_poll=args[0]),
            ),
            'status_dispatch': (
                lambda: smtp_status_dispatch(dict_core=data_cdb_out),
                lambda: sms_status_dispatch(dict_core=data_cdb_out),
            ),
            'error_dispatch': (
                lambda: smtp_error_dispatch(dict_core=data_cdb_out, log_entry=args[0]),
                lambda: sms_error_dispatch(dict_core=data_cdb_out, log_entry=args[0]),
            ),
        }

        if msg_type in dispatch:
            build_smtp, build_sms = dispatch[msg_type]

            # Build the SMTP message first; its status gates the SMS build
            dict_smtp, stat_smtp_temp = build_smtp()
            if sms_enable and not stat_smtp_temp:
                dict_sms, stat_sms_temp = build_sms()
                dict_smtp.update(dict_sms)
                stat_smtp_temp = stat_sms_temp

    else:
        log = 'Failed to build JanusESS {0} messages due to CouchDB error.'.\
            format(msg_type)
        logger.debug(log)
        stat_smtp_temp = STAT_LVL['crit']

    return dict_smtp, stat_smtp_temp
def init(obj_iface: TYPE_INTERFACE, addr_ln: int):
    """
    Initializes a lane: scans the bus for connected modules, sets each one
    up, reports lane status, and marks stale module documents unconfigured.

    :param obj_iface: Interface Object used for GPIO/I2C requests
    :param addr_ln: int, lane address

    :return stat_ln: STAT_LVL['op'] or STAT_LVL['not_cfg']
    :return stat_cdb: STAT_LVL['op'] or STAT_LVL['crit']
    """
    # Change logging level since this operates in multiprocess.
    # Cycle to last entry for most current log setting.
    while not MPQ_SETUP_LOG_INIT.empty():
        mpq_record = MPQ_SETUP_LOG_INIT.get()
        if mpq_record[0] == 'DEBUG':
            logger.setLevel(logging.DEBUG)
        elif mpq_record[0] == 'INFO':
            logger.setLevel(logging.INFO)
        elif mpq_record[0] == 'ERROR':
            logger.setLevel(logging.ERROR)
        elif mpq_record[0] == 'WARNING':
            logger.setLevel(logging.WARNING)
        elif mpq_record[0] == 'CRITICAL':
            logger.setLevel(logging.CRITICAL)

    stat_ln = STAT_LVL['op']
    count_mod = 0

    # Each module with an entry in CouchDB modconfig database
    # has a setup_id field that uniquely identifies the latest
    # time that it was placed on a lane and setup.
    setup_id = time.time()

    # Set lane GPIO pin to READ mode so that interrupts can
    # be captured.  This also places pin into HIGH state.
    data_iface_out, stat_iface = obj_iface.gpio_read(addr_ln=addr_ln,
                                                     mode=True,
                                                     stat_en=True)
    if not stat_iface:
        mod_last_found = False

        # Sets the lane on a four-lane interface, default
        # return of operational if interface is single lane.
        stat_iface = obj_iface.i2c_lane_set(addr_ln=addr_ln)
        if not stat_iface:

            # This loop cycles through each connected module with address
            # of 0x7F (the unconfigured default) until mod_last_found is set.
            while not mod_last_found and (count_mod <= 126):

                # Retrieve memory to end of mod_uid from module with I2C
                # address of 0x7F.  If module responds, proceed to module
                # setup actions, otherwise mod_last_found flag is set.
                time_a = time.time()
                high_mmap = len(MMAP) - 1
                addr_mem = MMAP[high_mmap]['M_CFG_ALL'][0]
                data_len = MMAP[high_mmap]['M_CFG_ALL'][1]
                data_iface_out, stat_iface = obj_iface.i2c_read(
                    addr_ln=addr_ln,
                    addr_mod=0x7F,
                    addr_mem=addr_mem,
                    data_len=data_len,
                    stat_en=False)
                print('lane {0} module {1} i2c_read mod_config: {2}'.format(
                    addr_ln,
                    (count_mod + 1),
                    round((time.time() - time_a), 3)))
                print(data_iface_out)

                # Check for proper memory map version (byte 2 of the
                # config block holds the module's memory map index).
                if not stat_iface and (data_iface_out[2] <= high_mmap):

                    # Call module setup routine
                    module.setup(obj_iface=obj_iface,
                                 setup_id=setup_id,
                                 cfg_bytes=data_iface_out,
                                 addr_ln=addr_ln,
                                 addr_mod=(count_mod + 1))

                    # Increment counter before moving to another module
                    count_mod += 1

                    # Skip assigning I2C address #70 to module, on four-port
                    # interface this address is used to set the lane.
                    if count_mod == 70:
                        count_mod += 1

                    print('Full lane {0} module {1} setup: {2}'.format(
                        addr_ln,
                        (count_mod - 1),
                        round((time.time() - time_a), 3)))

                # If module has improper memory map version, or if module
                # throws error on I2C read, halt lane setup routine.
                #
                # Impossible to differentiate an error thrown by a connected
                # module from one thrown by reading a non-existent module;
                # treat both the same and end lane setup on this module.
                else:
                    mod_last_found = True

        else:
            log = 'Could not setup lane due to interface and/or CouchDB error.'
            logger.warning(log)

        if count_mod >= 1:
            log = 'Lane {0} initialized.'.format(addr_ln)
            logger.info(log)
            print(log)
            MPQ_ACT.put_nowait([
                datetime.now().isoformat(' '),
                'INFO',
                log
            ])
            log = 'Lane {0} last module is {1}.'.format(addr_ln, count_mod)
            logger.info(log)
            print(log)
            MPQ_ACT.put_nowait([
                datetime.now().isoformat(' '),
                'INFO',
                log
            ])
        else:
            log = 'No modules found for lane {0}.'.format(addr_ln)
            logger.warning(log)
            print(log)
            MPQ_ACT.put_nowait([
                datetime.now().isoformat(' '),
                'WARNING',
                log
            ])
            stat_ln = STAT_LVL['not_cfg']

    # NOTE(review): this branch handles the GPIO-read failure, yet the
    # message mentions 'i2c lane set error' — wording looks stale; confirm.
    else:
        log = 'Could not complete priority 3 interface request on ' + \
              'lane {0} due to i2c lane set error.'.format(addr_ln)
        logger.critical(log)
        stat_ln = STAT_LVL['not_cfg']

    # Publish final lane status for the status aggregator
    MPQ_STAT.put_nowait([
        'lane',
        [
            addr_ln,
            {
                'status': stat_ln,
                'last_module': count_mod,
                'setup_id': setup_id
            }
        ]
    ])

    # Get stat_lane view from CouchDB modconfig database
    data_cdb_out, stat_cdb, http_cdb = dbase.cdb_request(
        cdb_cmd='get_view',
        cdb_name='modconfig',
        cdb_doc='stat_lane{0}'.format(addr_ln),
        logfile=logfile)

    # Debugging query against the MariaDB mirror of lane data; the result
    # is only printed, never used below.
    mdb_sql = """
        SELECT *
        FROM aurora.lanes
        WHERE lane={0}
    """.format(addr_ln)
    data_mdb_out, stat_mdb, mdb_err = dbase.mdb_request(mdb_sql=mdb_sql,
                                                        logfile=logfile)
    print(stat_mdb)
    print(mdb_err)
    print(data_mdb_out)

    if not stat_cdb:

        # Iterate through modules in view, determine which modules were
        # previously connected to this lane but are no longer connected.
        # Set their lane and module addresses to NULL and their status
        # to unconfigured.
        #
        # NOTE(review): data_cdb_out is rebound inside this loop; Python's
        # iterator keeps the original view list alive, but the name reuse
        # is confusing and the returned stat_cdb reflects the last update.
        for dict_mod in data_cdb_out:
            if dict_mod['value']['setup_id'] != setup_id:
                data_cdb_in = {
                    'lane_addr': None,
                    'mod_addr': None,
                    'status': STAT_LVL['not_cfg'],
                    'errno': 0
                }

                # Update module document in CouchDB modconfig database
                data_cdb_out, stat_cdb, http_cdb = dbase.cdb_request(
                    cdb_cmd='upd_doc',
                    cdb_name='modconfig',
                    cdb_doc=dict_mod['id'],
                    data_cdb_in=data_cdb_in,
                    logfile=logfile)
                if stat_cdb:
                    log = 'Could not update module configurations due to CouchDB error.'
                    logger.warning(log)
                print('UPDATED STATUS ON NON-EXISTENT MODULE: {0}'.format(
                    dict_mod['id']))

    else:
        log = 'Could not update module configurations due to CouchDB error.'
        logger.warning(log)

    return stat_ln, stat_cdb
def smtp_status_dispatch(
        dict_core: dict
):
    """
    Prepares formatted SMTP message for statuses

    Builds the periodic self-test email body from the CouchDB config,
    lanes, and modconfig databases.

    :param dict_core: dict, core config document (name/version)

    :return dict_smtp: dict (None on CouchDB failure)
    :return stat_smtp_temp: STAT_LVL['op'] or STAT_LVL['crit']
    """
    # Human-readable names for module status codes; replaces the original
    # eight-branch if/elif chain.  Unknown codes map to '' as before.
    stat_names = {
        0: "Operational",
        1: "Operational Event",
        2: "Operational Error",
        3: "Critical Failure",
        4: "Not Setup",
        5: "Configuration Error",
        6: "Undetermined",
        7: "Not Tracked",
    }

    stat_smtp_temp = STAT_LVL['op']
    dict_smtp = None

    # Get update document from CouchDB config database
    data0_cdb_out, stat0_cdb, http0_cdb = dbase.cdb_request(
        cdb_cmd='get_doc',
        cdb_name='config',
        cdb_doc='update',
        logfile=logfile,
    )

    # Get network document from CouchDB config database
    data1_cdb_out, stat1_cdb, http1_cdb = dbase.cdb_request(
        cdb_cmd='get_doc',
        cdb_name='config',
        cdb_doc='network',
        logfile=logfile,
    )

    # Get all documents from CouchDB lanes database
    data2_cdb_out, stat2_cdb, http2_cdb = dbase.cdb_request(
        cdb_cmd='get_all',
        cdb_name='lanes',
        logfile=logfile
    )

    if not stat0_cdb and not stat1_cdb and not stat2_cdb:
        body_smtp = """
JanusESS {0} (v{1}) system {2}-hour self test results:
""".\
            format(
                dict_core['name'],
                dict_core['version'],
                data0_cdb_out['updateemail_interval'],
            )

        body_smtp += """
========================
NETWORK CONNECTIVITY CHECK: every {0} min
LAST NETWORK CONNECTIVITY CHECK: {1}
""".\
            format(
                data1_cdb_out['network_interval'],
                data1_cdb_out['network_check_dtg'],
            )

        # Cycle through lanes to build lane status message
        for addr_ln in range(0, 4):

            if data2_cdb_out[addr_ln]['status'] > STAT_LVL['op']:
                body_smtp += """
========================
Lane {0} is not setup.
""".format(addr_ln)

            elif data2_cdb_out[addr_ln]['status'] == STAT_LVL['op']:
                body_smtp += """
========================
Lane {0} is operational.
Modules connected: {1}
------------------------
""".\
                    format(
                        addr_ln,
                        data2_cdb_out[addr_ln]['last_module']
                    )

                # Get lane status view from CouchDB modconfig database
                data3_cdb_out, stat3_cdb, http3_cdb = dbase.cdb_request(
                    cdb_cmd='get_view',
                    cdb_name='modconfig',
                    cdb_doc='stat_lane{0}'.format(addr_ln),
                    logfile=logfile,
                )

                if not stat3_cdb:
                    # Cycle through module statuses and add status
                    # messages to body
                    for addr_mod in data3_cdb_out:
                        stat_mod = stat_names.get(
                            addr_mod['value']['status'], '')
                        body_smtp += """
Module {0}: {1}""". \
                            format(
                                addr_mod['key'],
                                stat_mod
                            )

                # If polling is not setup, add the following statement
                if data2_cdb_out[addr_ln]['poll'] > STAT_LVL['crit']:
                    body_smtp += """
Lane {0} polling is not operating.
""".format(addr_ln)

                # If polling is operational, add the following statement
                elif data2_cdb_out[addr_ln]['poll'] == STAT_LVL['op']:
                    body_smtp += """
In progress polling discovered for lane {0}...
DTG of LAST POLL: {1}
""".\
                        format(
                            addr_ln,
                            data2_cdb_out[addr_ln]['last_dtg']
                        )

                # If polling failed, add the following statement.
                # BUG FIX: the original tested the lane's 'status' field
                # here although this branch reports a polling failure;
                # test 'poll' like the sibling branches above.
                elif data2_cdb_out[addr_ln]['poll'] == STAT_LVL['crit']:
                    body_smtp += """
Lane {0} polling experienced catastrophic error.
""".format(addr_ln)

        dict_smtp = {
            'smtp_subject':
                'DO NOT REPLY JanusESS {0} (v{1}): NOTICE! {2}-hour system status check'.
                format(
                    dict_core['name'],
                    dict_core['version'],
                    data0_cdb_out['updateemail_interval']
                ),
            'smtp_body': body_smtp,
            'smtp_distribution': 's'
        }

        log = '{0}-hour status messages built.'.format(
            data0_cdb_out['updateemail_interval'])
        logger.debug(log)

    else:
        log = 'Failed to build JanusESS status SMTP message due to CouchDB error.'
        logger.debug(log)
        stat_smtp_temp = STAT_LVL['crit']

    return dict_smtp, stat_smtp_temp
def _aligned_start(first_time: str, interval: int, second: str) -> str:
    """
    Builds today's start-date string for an interval job, aligned to the
    earliest occurrence of a configured first-run time.

    The hour portion of *first_time* ('H:MM' or 'HH:MM') is reduced modulo
    *interval* so the first run lands on the earliest hour of today's
    cycle.  The original code did this with a subtraction loop that hung
    forever when the interval was zero or negative; that case now leaves
    the hour unchanged.

    :param first_time: str, configured first-run time 'HH:MM'
    :param interval: int, job interval in hours
    :param second: str, seconds field to append (e.g. '24')

    :return: str, 'YYYY-MM-DD HH:MM:SS' start date
    """
    hour_txt, minute_txt = first_time.split(':')[0], first_time.split(':')[1]
    hour = int(hour_txt)
    if interval > 0:
        # Equivalent to the original repeated subtraction until negative,
        # then adding the interval back once.
        hour = hour % interval
    return '{0} {1:02d}:{2}:{3}'.format(
        datetime.today().strftime('%Y-%m-%d'),
        hour,
        minute_txt,
        second
    )


def task_scheduler(db_list: list):
    """
    JanusESS tasks scheduler

    Runs one-off maintenance immediately, reads scheduling configuration
    from CouchDB (with hard-coded fallbacks), builds the APScheduler job
    set, then loops forever re-reading configuration every 30 seconds and
    rescheduling jobs whose start time or interval changed.  Never returns.

    :param db_list: list, databases passed to the archive job
    """
    # One minute tasks, executed once immediately at startup
    systime.store()
    appdbase.archive(db_list)

    # Get compact document from CouchDB config database
    data0_cdb_out, stat0_cdb, http0_cdb = dbase.cdb_request(cdb_cmd='get_doc',
                                                            cdb_name='config',
                                                            cdb_doc='compact',
                                                            logfile=logfile)
    if stat0_cdb:
        # Fall back to built-in defaults when CouchDB is unavailable
        data0_cdb_out = {
            'dbcompact_firsttime': '0:13',
            'dbcompact_interval': 1
        }

    # Get network document from CouchDB config database
    data1_cdb_out, stat1_cdb, http1_cdb = dbase.cdb_request(cdb_cmd='get_doc',
                                                            cdb_name='config',
                                                            cdb_doc='network',
                                                            logfile=logfile)
    if stat1_cdb:
        data1_cdb_out = {
            'url_server': 'www.google.com',
            'interval_good': 30,
            'interval_bad': 5,
            'url_timeout': 10
        }

    # Get update document from CouchDB config database
    data2_cdb_out, stat2_cdb, http2_cdb = dbase.cdb_request(cdb_cmd='get_doc',
                                                            cdb_name='config',
                                                            cdb_doc='update',
                                                            logfile=logfile)
    if stat2_cdb:
        data2_cdb_out = {
            'updateemail_firsttime': '0:33',
            'updateemail_interval': 1
        }

    net_int = data1_cdb_out['interval_good']
    prev_net_int = net_int

    # Earliest aligned start times; the shared helper replaces four
    # duplicated copies of the same subtraction-loop logic.
    dbcompact_int = int(data0_cdb_out['dbcompact_interval'])
    dbcompact_start = _aligned_start(data0_cdb_out['dbcompact_firsttime'],
                                     dbcompact_int, '24')
    prev_dbcompact_start = dbcompact_start
    prev_dbcompact_int = dbcompact_int

    update_int = int(data2_cdb_out['updateemail_interval'])
    update_start = _aligned_start(data2_cdb_out['updateemail_firsttime'],
                                  update_int, '36')
    prev_update_start = update_start
    prev_update_int = update_int

    timezone = get_localzone()
    scheduler = BackgroundScheduler(timezone=timezone)
    scheduler.remove_all_jobs()

    scheduler.add_job(systime.store, 'cron', second='0')
    scheduler.add_job(appdbase.archive, 'cron', second='12', args=[db_list])
    # NOTE(review): 'queue' here must be a project module — the stdlib
    # queue module has no clear() — confirm which name is imported.
    scheduler.add_job(queue.clear, 'interval', minutes=60)
    job_network = scheduler.add_job(network.check, 'interval',
                                    minutes=net_int)
    job_compact = scheduler.add_job(appdbase.compact,
                                    'interval',
                                    start_date=dbcompact_start,
                                    hours=dbcompact_int)
    job_update = scheduler.add_job(status.update,
                                   'interval',
                                   start_date=update_start,
                                   hours=update_int,
                                   args=[update_int])

    try:
        scheduler.start()
        scheduler.print_jobs()
    except (KeyboardInterrupt, SystemExit):
        scheduler.remove_all_jobs()
        scheduler.shutdown()

    # Watch loop: pick up interval changes pushed through MPQ_NETINT and
    # configuration edits in CouchDB, rescheduling only on actual change.
    while True:
        if not MPQ_NETINT.empty():
            net_int = MPQ_NETINT.get_nowait()
            if prev_net_int != net_int:
                prev_net_int = net_int
                job_network.remove()
                job_network = scheduler.add_job(network.check,
                                                'interval',
                                                minutes=net_int)

        data3_cdb_out, stat3_cdb, http3_cdb = dbase.cdb_request(
            cdb_cmd='get_doc',
            cdb_name='config',
            cdb_doc='compact',
            logfile=logfile)
        if not stat3_cdb:
            dbcompact_int = int(data3_cdb_out['dbcompact_interval'])
            dbcompact_start = _aligned_start(
                data3_cdb_out['dbcompact_firsttime'], dbcompact_int, '24')
            if (dbcompact_start != prev_dbcompact_start) or (
                    dbcompact_int != prev_dbcompact_int):
                job_compact.remove()
                job_compact = scheduler.add_job(appdbase.compact,
                                                'interval',
                                                start_date=dbcompact_start,
                                                hours=dbcompact_int)
                prev_dbcompact_int = dbcompact_int
                prev_dbcompact_start = dbcompact_start

        data4_cdb_out, stat4_cdb, http4_cdb = dbase.cdb_request(
            cdb_cmd='get_doc',
            cdb_name='config',
            cdb_doc='update',
            logfile=logfile)
        if not stat4_cdb:
            update_int = int(data4_cdb_out['updateemail_interval'])
            update_start = _aligned_start(
                data4_cdb_out['updateemail_firsttime'], update_int, '36')
            if (update_start != prev_update_start) or (
                    update_int != prev_update_int):
                job_update.remove()
                job_update = scheduler.add_job(status.update,
                                               'interval',
                                               start_date=update_start,
                                               hours=update_int,
                                               args=[update_int])
                prev_update_int = update_int
                prev_update_start = update_start

        time.sleep(30)
def get_data(
        obj_iface: TYPE_INTERFACE,
        uid_mod: str,
        addr_ln: int,
        addr_mod: int
):
    """
    Retrieves and publishes module sensor data

    Reads the module's config block over I2C, verifies its unique id
    against the expected one, reads all sensor values, evaluates trigger
    thresholds, and publishes results to the status/storage queues.

    :param obj_iface: Interface Object
    :param uid_mod: str, expected module unique id
    :param addr_ln: int, lane address
    :param addr_mod: int, module address

    :return stat_poll_data: STAT_LVL['op'] or STAT_LVL['op_err']
    :return uid_mod_i2c_print: str, printable id actually read over I2C
    """
    # Change logging level since this operates in multiprocess.
    # Cycle to last entry for most current log setting.
    while not MPQ_POLL_LOG_DATA.empty():
        mpq_record = MPQ_POLL_LOG_DATA.get()
        if mpq_record[0] == 'DEBUG':
            logger.setLevel(logging.DEBUG)
        elif mpq_record[0] == 'INFO':
            logger.setLevel(logging.INFO)
        elif mpq_record[0] == 'ERROR':
            logger.setLevel(logging.ERROR)
        elif mpq_record[0] == 'WARNING':
            logger.setLevel(logging.WARNING)
        elif mpq_record[0] == 'CRITICAL':
            logger.setLevel(logging.CRITICAL)

    time_a = time.time()
    log = 'Retrieving lane {0} module {1} id {2} data.'.format(addr_ln,
                                                               addr_mod,
                                                               uid_mod)
    logger.info(log)

    stat_poll_data = STAT_LVL['op']
    uid_mod_i2c = ''
    uid_mod_i2c_print = ''

    # Retrieve memory map version of module with I2C address of 0x7F.
    # If module responds, proceed to module setup actions, otherwise
    # mod_last_found flag is set.
    high_mmap = len(MMAP) - 1
    addr_mem = MMAP[high_mmap]['M_CFG_ALL'][0]
    data_len = MMAP[high_mmap]['M_CFG_ALL'][1]
    data0_iface_out, stat0_iface = obj_iface.i2c_read(
        addr_ln=addr_ln,
        addr_mod=addr_mod,
        addr_mem=addr_mem,
        data_len=data_len,
        stat_en=False
    )
    print('RAW POLL DATA: {0}'.format(data0_iface_out))
    print('Lane {0} module {1} get_data i2c config time: {2}'.
          format(addr_ln, addr_mod, round((time.time() - time_a), 3)))

    if stat0_iface:
        # Config read failed: report interface error and bail out
        log = 'Lane {0} module {1} poll can '.format(addr_ln, addr_mod) + \
              'not be completed due to I2C interface error.'
        logger.critical(log)
        MPQ_ACT.put_nowait([
            datetime.now().isoformat(' '),
            'CRITICAL',
            log
        ])
        print(log)
        MPQ_STAT.put_nowait([
            'base',
            [
                'poll_data',
                STAT_LVL['op_err']
            ]
        ])
        MPQ_STAT.put_nowait([
            'module',
            [
                uid_mod,
                addr_ln,
                addr_mod,
                STAT_LVL['op_err']
            ]
        ])
        stat_poll_data = STAT_LVL['op_err']

    else:
        # Build module id string from I2C data: the uid bytes are stored
        # in reverse order, so walk the range backwards, hex-encoding and
        # zero-padding each byte.
        mod_uid_end = MMAP[data0_iface_out[2]]['M_UID'][0] + \
            MMAP[data0_iface_out[2]]['M_UID'][1] - 1
        mod_uid_begin = MMAP[data0_iface_out[2]]['M_UID'][0] - 1
        for addr_mem in range(mod_uid_end, mod_uid_begin, -1):
            uidmod_i2c = str(hex(data0_iface_out[addr_mem]))[2:]
            if len(uidmod_i2c) == 1:
                uidmod_i2c = '0' + uidmod_i2c
            uid_mod_i2c += uidmod_i2c

        # Check that module ids match, then proceed with data collection
        # and reporting (strip non-printable characters first)
        uid_mod_print = ''.join(char for char in uid_mod.strip()
                                if isprint(char))
        uid_mod_i2c_print = ''.join(char for char in uid_mod_i2c.strip()
                                    if isprint(char))
        if uid_mod_i2c_print == uid_mod_print:
            time_b = time.time()

            # Get module document from CouchDB modconfig database
            data0_cdb_out, stat0_cdb, http0_cdb = dbase.cdb_request(
                cdb_cmd='get_doc',
                cdb_name='modconfig',
                cdb_doc=uid_mod_print,
                logfile=logfile
            )

            # Get cloud document from CouchDB config database
            data1_cdb_out, stat1_cdb, http1_cdb = dbase.cdb_request(
                cdb_cmd='get_doc',
                cdb_name='config',
                cdb_doc='cloud',
                logfile=logfile
            )

            # Get core document from CouchDB config database
            data2_cdb_out, stat2_cdb, http2_cdb = dbase.cdb_request(
                cdb_cmd='get_doc',
                cdb_name='config',
                cdb_doc='core',
                logfile=logfile
            )
            print(
                'Lane {0} module {1} get_data database time: {2}'.
                format(addr_ln, addr_mod, round((time.time() - time_b), 3)))

            if not stat0_cdb and not stat1_cdb and not stat2_cdb:
                module_stat = STAT_LVL['op']
                poll_data_mod = []
                # Common header for every sensor record from this module
                poll_head_mod = [
                    uid_mod_i2c_print,
                    data2_cdb_out['customer'],
                    data2_cdb_out['name'],
                    data0_cdb_out['loc'],
                    addr_ln,
                    addr_mod,
                ]

                # Retrieves sensor polling value from module
                time_c = time.time()
                data_iface_out, stat_iface = obj_iface.i2c_read(
                    addr_ln=addr_ln,
                    addr_mod=addr_mod,
                    addr_mem=MMAP[data0_iface_out[2]]['S_ALL_VAL'][0],
                    data_len=MMAP[data0_iface_out[2]]['S_ALL_VAL'][1]
                )
                print('Lane {0} module {1} get_data i2c sensor time: {2}'.
                      format(addr_ln, addr_mod,
                             round((time.time() - time_c), 3)))
                print(data_iface_out)

                if not stat_iface:

                    # Cycle through all sensors installed on the module
                    led_ctl = Control()
                    for addr_s in range(0, int(data0_cdb_out['num_sensors'])):
                        sensor = 'S{0}'.format(addr_s)
                        log = 'Retrieving lane {0} module {1} sensor {2} data.'.\
                            format(addr_ln, addr_mod, addr_s)
                        logger.debug(log)

                        # Initialize polling data packet
                        data_dtg = time.time()
                        poll_data_s = [addr_s]

                        # Convert raw values to floating point number, and
                        # add to packet: each sensor occupies 4 bytes,
                        # stored little-endian, unpacked as big-endian
                        # float after reordering.
                        val_raw = struct.pack(
                            'BBBB',
                            int(data_iface_out[3 + (addr_s * 4)]),
                            int(data_iface_out[2 + (addr_s * 4)]),
                            int(data_iface_out[1 + (addr_s * 4)]),
                            int(data_iface_out[0 + (addr_s * 4)])
                        )
                        val_convert = round(
                            struct.unpack('>f', val_raw)[0],
                            data0_cdb_out[sensor]['precision']
                        )

                        # NOTE(review): 'or' makes this range check always
                        # true (any value is >= min or <= max); an 'and'
                        # was probably intended — confirm before changing.
                        if (val_convert >= data0_cdb_out[sensor]['min']) or \
                                (val_convert <= data0_cdb_out[sensor]['max']):
                            trig_low = round(
                                float(data0_cdb_out[sensor]['trig_low']),
                                data0_cdb_out[sensor]['precision']
                            )
                            trig_high = round(
                                float(data0_cdb_out[sensor]['trig_high']),
                                data0_cdb_out[sensor]['precision']
                            )

                            # Determine triggers
                            if val_convert < trig_low:
                                poll_data_s.append('low')
                                poll_data_s.append(True)
                                trigger = trig_low
                                module_stat = STAT_LVL['s_evt']
                                led_ctl.effect(
                                    'sensor_low',
                                    uid_mod_print,
                                    addr_ln,
                                    addr_mod
                                )
                            elif val_convert > trig_high:
                                poll_data_s.append('high')
                                poll_data_s.append(True)
                                trigger = trig_high
                                module_stat = STAT_LVL['s_evt']
                                led_ctl.effect(
                                    'sensor_high',
                                    uid_mod_print,
                                    addr_ln,
                                    addr_mod
                                )
                            else:
                                poll_data_s.append('off')
                                poll_data_s.append(False)
                                trigger = 0.0

                            poll_data_s.append(data0_cdb_out[sensor]['type'])
                            poll_data_s.append(val_convert)
                            poll_data_s.append(data_dtg)
                            poll_data_s.append(data0_cdb_out[sensor]['unit'])
                            poll_data_s.append(trigger)
                            poll_data_s.append(data0_cdb_out[sensor]['trig_int'])
                            poll_data_s.append(data0_cdb_out[sensor]['trig_step'])
                            poll_data_mod.append(poll_data_s)

                            MPQ_STAT.put_nowait([
                                'poll',
                                [
                                    poll_head_mod,
                                    poll_data_s
                                ]
                            ])

                    time_e = time.time()
                    store_data(data1_cdb_out, poll_head_mod, poll_data_mod)
                    print('Lane {0} module {1} get_data store data time: {2}'.
                          format(addr_ln, addr_mod,
                                 round((time.time() - time_e), 3)))

                else:
                    log = 'Lane {0} module {1} '.format(addr_ln, addr_mod) + \
                          'data not added to storage queue due to I2C errors.'
                    logger.critical(log)
                    MPQ_ACT.put_nowait([
                        datetime.now().isoformat(' '),
                        'CRITICAL',
                        log
                    ])
                    stat_poll_data = STAT_LVL['op_err']
                    module_stat = STAT_LVL['op_err']

            else:
                log = 'Lane {0} module {1} '.format(addr_ln, addr_mod) + \
                      'data not added to storage queue due to CouchDB errors.'
                logger.critical(log)
                MPQ_ACT.put_nowait([
                    datetime.now().isoformat(' '),
                    'CRITICAL',
                    log
                ])
                stat_poll_data = STAT_LVL['op_err']
                module_stat = STAT_LVL['op_err']

            log = 'Completed lane {0} module {1} poll.'.format(addr_ln,
                                                               addr_mod)
            logger.info(log)
            MPQ_ACT.put_nowait([
                datetime.now().isoformat(' '),
                'DEBUG',
                log
            ])
            MPQ_STAT.put_nowait([
                'base',
                [
                    'poll_data',
                    stat_poll_data
                ]
            ])
            MPQ_STAT.put_nowait([
                'module',
                [
                    uid_mod_print,
                    addr_ln,
                    addr_mod,
                    module_stat
                ]
            ])

        else:
            # Module id mismatch: a different (or re-addressed) module is
            # answering at this address.
            stat_poll_data = STAT_LVL['op_err']
            log = 'Lane {0} module {1} poll can '.format(addr_ln, addr_mod) + \
                  'not be completed due to mismatch in module id: ' + \
                  'requested={0} vs polled={1}.'.format(uid_mod_print,
                                                        uid_mod_i2c_print)
            logger.critical(log)
            print(log)
            MPQ_ACT.put_nowait([
                datetime.now().isoformat(' '),
                'CRITICAL',
                log
            ])
            MPQ_STAT.put_nowait([
                'base',
                [
                    'poll_data',
                    stat_poll_data
                ]
            ])
            MPQ_STAT.put_nowait([
                'module',
                [
                    uid_mod_print,
                    addr_ln,
                    addr_mod,
                    STAT_LVL['op_err']
                ]
            ])

    return stat_poll_data, uid_mod_i2c_print
def network(self, mpq_record: list):
    """
    Publishes a network-status change to CouchDB and the websocket queue.

    Compares the incoming record against cached state and does nothing
    when every field matches.

    mpq_record[0] = network url checked
    mpq_record[1] = check interval
    mpq_record[2] = dtg

    :param mpq_record: list
    """
    url, interval, check_dtg = mpq_record[0], mpq_record[1], mpq_record[2]

    # Track whether any cached value differs from the incoming record
    changed = False
    if self.url_net != url:
        self.url_net = url
        changed = True
    if self.stat_net['network_interval'] != interval:
        self.stat_net['network_interval'] = interval
        changed = True
    if self.stat_net['network_check_dtg'] != check_dtg:
        self.stat_net['network_check_dtg'] = check_dtg
        changed = True

    # Nothing changed: skip the database write and queue notification
    if not changed:
        return

    # Persist the new network check state to the CouchDB config database
    data_cdb_in = {
        'network_interval': self.stat_net['network_interval'],
        'network_check_dtg': self.stat_net['network_check_dtg']
    }
    data_cdb_out, stat_cdb, http_cdb = dbase.cdb_request(
        cdb_cmd='upd_doc',
        cdb_name='config',
        cdb_doc='network',
        data_cdb_in=data_cdb_in,
        logfile=logfile)
    if stat_cdb == STAT_LVL['crit']:
        logger.critical(
            'Failed to update tasks document in CouchDB config database.')

    # Only place into MPQ_WS if websocket handler process is functioning
    try:
        if self.pid_websocket:
            MPQ_WS.put_nowait([
                'network',
                self.url_net,
                self.stat_net['network_interval'],
                self.stat_net['network_check_dtg']
            ])
    except queue.Full:
        logger.exception(
            'Can not place item in network queue, queue is full.')