    def __init__(self):
        debug.debugPrint("Main", "Screensize: ", hw.screenwidth, hw.screenheight)
        logsupport.Logs.Log("Screensize: " + str(hw.screenwidth) + " x " + str(hw.screenheight))
        logsupport.Logs.Log(
            "Scaling ratio: " + "{0:.2f} W ".format(hw.dispratioW) + "{0:.2f} H".format(hw.dispratioH))
        self.dim = 'Bright'  # either Bright or Dim (or '' for don't change when used as a parameter)
        self.state = 'Home'  # one of Home, NonHome, Maint, Cover, Alert
        self.Deferrals = []
        self.WatchVarVals = {}  # most recent reported watched variable values
        self.AS = None  # Active Screen
        self.Chain = 0  # which screen chain is active  0: Main chain  1: Secondary Chain
        self.HBScreens = historybuffer.HistoryBuffer(20, 'Screens')
        self.HBEvents = historybuffer.HistoryBuffer(80, 'Events')
        self.ActivityTimer = timers.ResettableTimer(name='ActivityTimer', start=True)
        self.activityseq = 0
        self.ScreenStack = []
from enum import Enum
import queue
import time

import psutil

import config
import historybuffer
import logsupport

# noinspection PyArgumentList
CEvent = Enum(
    'ConsoleEvent',
    'FailSafePing ACTIVITYTIMER HubNodeChange ISYAlert ISYVar GeneralRepaint RunProc SchedEvent MouseDown MouseUp MouseMotion')

ConsoleOpsQueue = queue.Queue()  # master sequencer

latencynotification = 1000  # notify if a loop latency is greater than this
LateTolerance = 1.0  # for my systems

HBControl = historybuffer.HistoryBuffer(80, 'Control')


def PostEvent(e):
    if e is None:
        logsupport.Logs.Log('Pushing None event to queue', severity=logsupport.ConsoleError, tb=True, hb=True)
        return
    cpu = psutil.Process(config.sysStore.Console_pid).cpu_times()
    e.addtoevent(QTime=time.time(), usercpu=cpu.user, syscpu=cpu.system)
    ConsoleOpsQueue.put(e)
    HBControl.Entry('Post {} queuesize: {}'.format(e, ConsoleOpsQueue.qsize()))
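# Illustrative usage (a sketch, not executed): callers elsewhere in this repo hand work to the
# master sequencer by posting a ConsoleEvent tagged with a CEvent member. ConsoleEvent itself is
# defined outside this excerpt and is assumed to take the event type plus arbitrary keyword
# attributes, which addtoevent() later extends with queueing time and CPU figures.
#
#   PostEvent(ConsoleEvent(CEvent.RunProc, name='ExampleProc', proc=some_callable))
#
# The main loop is then expected to drain ConsoleOpsQueue and dispatch on the CEvent member.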
import historybuffer
from configobjects import Section
from controlevents import CEvent, PostEvent, ConsoleEvent
from logsupport import ConsoleWarning, ConsoleDetail, ConsoleError
from screens import alertscreen
from stores import valuestore

alertprocs = {}  # set by modules from alerts directory
monitoredvars = []
AlertItems = None

Tests = ('EQ', 'NE')
AlertType = ('NodeChange', 'VarChange', 'StateVarChange', 'IntVarChange', 'LocalVarChange', 'Periodic', 'TOD',
             'External', 'Init')

AlertsHB = historybuffer.HistoryBuffer(100, 'Alerts')


class Alert(object):
    def __init__(self, nm, atype, trigger, action, actionname, param):
        self.name = nm
        self._state = 'Idle'
        self.type = atype
        self.trigger = trigger
        self.actiontarget = action
        self.actionname = actionname
        self.param = param
        self.timer = None  # holds timer when delayed

    @property
    def state(self):
    def __init__(self, name, isyaddr, user, pwd):
        if isyaddr == '' or user == '':
            logsupport.Logs.Log("ISY id info missing: addr: {} user: {}".format(isyaddr, user),
                                severity=ConsoleError)
            raise ValueError
        if isyaddr.startswith('http'):
            self.ISYprefix = isyaddr + '/rest/'
        else:
            self.ISYprefix = 'http://' + isyaddr + '/rest/'
        self.ISYrequestsession = requests.session()
        self.ISYrequestsession.auth = (user, pwd)
        self.name = name
        self.addr = isyaddr
        self.user = user
        self.password = pwd
        self._NodeRoot = Folder(self, 0, '', u'0', 0, u'0')  # *root*
        self._ProgRoot = None
        self.NodesByAddr = {}
        self._FoldersByAddr = {'0': self._NodeRoot}
        self._ScenesByAddr = {}
        self._NodesByName = {}
        self._NodesByFullName = {}
        self._ScenesByName = {}
        self._FoldersByName = {}
        self._ProgramFoldersByAddr = {}
        self._ProgramsByAddr = {}
        self._ProgramsByName = {}
        self._ProgramFoldersByName = {}
        self._HubOnline = False
        self.Vars = None
        self.ErrNodes = {}
        self.Busy = 0
        self.V3Nodes = []  # temporary way to track and suppress errors from nodes we don't currently handle (todo V3)

        """ Build the Folder/Node/Scene tree """
        logsupport.Logs.Log("{}: Create Structure for ISY hub at {} for user {}".format(name, isyaddr, user))
        trycount = 20
        while True:
            # noinspection PyBroadException
            try:
                historybuffer.HBNet.Entry('ISY nodes get')
                r = self.ISYrequestsession.get(self.ISYprefix + 'nodes', verify=False, timeout=5)
                historybuffer.HBNet.Entry('ISY nodes get done')
                logsupport.Logs.Log('{}: Successful node read: {}'.format(name, r.status_code))
                break
            except:
                # after total power outage ISY is slower to come back than RPi so
                # we wait testing periodically. Eventually we try rebooting just in case our own network
                # is what is hosed
                trycount -= 1
                if trycount > 0:
                    logsupport.Logs.Log('{}: Hub not responding (nodes) at: {}'.format(self.name, self.ISYprefix))
                    time.sleep(15)
                else:
                    logsupport.Logs.Log('No ISY response restart (nodes)')
                    exitutils.errorexit(exitutils.ERRORPIREBOOT)
                    logsupport.Logs.Log('Reached unreachable code! ISY1')

        if r.status_code != 200:
            logsupport.Logs.Log('Hub (' + self.name + ') text response:', severity=ConsoleError)
            logsupport.Logs.Log('-----', severity=ConsoleError)
            logsupport.Logs.Log(r.text, severity=ConsoleError)
            logsupport.Logs.Log('-----', severity=ConsoleError)
            logsupport.Logs.Log('Cannot access ISY - check username/password')
            logsupport.Logs.Log('Status code: ' + str(r.status_code))
            time.sleep(10)
            exitutils.errorexit(exitutils.ERRORDIE)
            logsupport.Logs.Log('Reached unreachable code! ISY2')

        configdict = xmltodict.parse(r.text)['nodes']
        if debug.dbgStore.GetVal('ISYLoad'):
            with open('/home/pi/Console/xml.dmp', 'r') as f:
                x1 = f.readline().rstrip('\n')
                x2 = f.readline().rstrip('\n')
                x3 = f.readline().rstrip('\n')
                x4 = f.readline().rstrip('\n')
            configdict = xmltodict.parse(x1)['nodes']
        else:
            x2 = ''
            x3 = ''
            x4 = ''
        if debug.dbgStore.GetVal('ISYDump'):
            debug.ISYDump("xml.dmp", r.text, pretty=False, new=True)
            debug.ISYDump("struct.dmp", configdict, new=True)
            debug.ISYDump("isystream.dmp", "", pretty=False, new=True)

        for folder in configdict['folder']:
            addr = folder['address']
            parentaddr = str(0)
            ptyp = 3
            if 'parent' in folder:
                ptyp = int(folder['parent']['@type'])
                parentaddr = folder['parent']['#text']
            self._FoldersByAddr[addr] = Folder(self, folder['@flag'], folder['name'], str(addr), ptyp, parentaddr)
        self._LinkChildrenParents(self._FoldersByAddr, self._FoldersByName, self._FoldersByAddr, self.NodesByAddr)

        fixlist = []
        for node in configdict['node']:
            parentaddr = str(0)
            ptyp = 3
            flg = 'unknown'
            nm = 'unknown'
            addr = 'unknown'
            enabld = 'unknown'
            prop = 'unknown'
            pnd = 'unknown'
            if 'parent' in node:
                ptyp = int(node['parent']['@type'])
                parentaddr = node['parent']['#text']
            # noinspection PyBroadException
            try:
                flg = node['@flag']
                nm = node['name']
                addr = node['address']
                enabld = node['enabled']
                pnd = node['pnode']
                prop = node['property']
                n = Node(self, flg, nm, addr, ptyp, parentaddr, enabld, prop)
                fixlist.append((n, pnd))
                self.NodesByAddr[n.address] = n
            except Exception as E:
                if prop == 'unknown':  # probably a v3 polyglot node or zwave
                    logsupport.Logs.Log("Probable v5 node seen: {} Address: {} Parent: {} ".format(nm, addr, pnd),
                                        severity=ConsoleDetail)
                    logsupport.Logs.Log("ISY item: {}".format(repr(node)), severity=ConsoleDetail)
                    self.V3Nodes.append(addr)
                else:
                    logsupport.Logs.Log("Problem with processing node: ", nm, ' Address: ', str(addr),
                                        ' Pnode: ', str(pnd), ' ', str(flg), '/', str(enabld), '/', repr(prop),
                                        severity=ConsoleWarning)
                    logsupport.Logs.Log("Exc: {} ISY item: {}".format(repr(E), repr(node)), severity=ConsoleWarning)
                # for now at least try to avoid nodes without properties which apparently Zwave devices may have
        self._LinkChildrenParents(self.NodesByAddr, self._NodesByName, self._FoldersByAddr, self.NodesByAddr)

        for fixitem in fixlist:
            # noinspection PyBroadException
            try:
                fixitem[0].pnode = self.NodesByAddr[fixitem[1]]
            except:
                logsupport.Logs.Log("Problem with processing node: ", fixitem[1], severity=ConsoleWarning)

        for scene in configdict['group']:
            memberlist = []
            if scene['members'] is not None:
                m1 = scene['members']['link']
                naddr = ''
                # noinspection PyBroadException
                try:
                    if isinstance(m1, list):
                        for m in m1:
                            naddr = m['#text']
                            memberlist.append((int(m['@type']), self.NodesByAddr[naddr]))
                    else:
                        naddr = m1['#text']
                        memberlist.append((int(m1['@type']), self.NodesByAddr[naddr]))
                except:
                    logsupport.Logs.Log("Error adding member to scene: ", str(scene['name']),
                                        ' Node address: ', naddr, severity=ConsoleWarning)
                    debug.debugPrint('ISYDump', 'Scene: ', m1)
                if 'parent' in scene:
                    ptyp = int(scene['parent']['@type'])
                    p = scene['parent']['#text']
                else:
                    ptyp = 0
                    p = '0'
                self._ScenesByAddr[scene['address']] = Scene(self, scene['@flag'], scene['name'],
                                                             str(scene['address']), ptyp, p, memberlist)
            else:
                if scene['name'] not in ('~Auto DR', 'Auto DR'):
                    logsupport.Logs.Log('Scene with no members ', scene['name'], severity=ConsoleWarning)
        self._LinkChildrenParents(self._ScenesByAddr, self._ScenesByName, self._FoldersByAddr, self.NodesByAddr)
        self._SetFullNames(self._NodeRoot, "")
        if debug.dbgStore.GetVal('ISYdbg'):
            self.PrintTree(self._NodeRoot, " ", 'Nodes')

        """ Build the Program tree """
        trycount = 20
        while True:
            # noinspection PyBroadException
            try:
                historybuffer.HBNet.Entry('ISY programs get')
                r = self.ISYrequestsession.get(self.ISYprefix + 'programs?subfolders=true', verify=False, timeout=5)
                historybuffer.HBNet.Entry('ISY programs get done')
                if r.status_code != 200:
                    logsupport.Logs.Log('Hub (' + self.name + ') bad program read' + r.text,
                                        severity=ConsoleWarning)
                    raise requests.exceptions.ConnectionError  # fake a connection error if we didn't get a good read
                logsupport.Logs.Log('{}: Successful programs read: {}'.format(self.name, r.status_code))
                break
            # except requests.exceptions.ConnectTimeout:
            except:
                # after total power outage ISY is slower to come back than RPi so we wait testing periodically.
                # Eventually we try rebooting just in case our own network is what is hosed
                trycount -= 1
                if trycount > 0:
                    logsupport.Logs.Log('{}: Hub not responding (programs) at: {}'.format(self.name, self.ISYprefix))
                    time.sleep(15)
                else:
                    logsupport.Logs.Log('No ISY response restart (programs)')
                    exitutils.errorexit(exitutils.ERRORPIREBOOT)

        configdict = xmltodict.parse(r.text)['programs']['program']
        if debug.dbgStore.GetVal('ISYLoad'):
            configdict = xmltodict.parse(x2)['programs']['program']
        if debug.dbgStore.GetVal('ISYDump'):
            debug.ISYDump("xml.dmp", r.text, pretty=False)
            debug.ISYDump("struct.dmp", configdict)

        for item in configdict:
            if item['@id'] == '0001':  # Program Root
                self._ProgRoot = ProgramFolder(self, item['name'], '0001', '0001')
                self._ProgramFoldersByAddr['0001'] = self._ProgRoot
            else:
                if item['@folder'] == 'true':
                    self._ProgramFoldersByAddr[item['@id']] = ProgramFolder(self, item['name'], item['@id'],
                                                                            item['@parentId'])
                else:
                    self._ProgramsByAddr[item['@id']] = Program(self, item['name'], item['@id'], item['@parentId'])
        self._LinkChildrenParents(self._ProgramFoldersByAddr, self._ProgramFoldersByName,
                                  self._ProgramFoldersByAddr, self._ProgramsByAddr)
        self._LinkChildrenParents(self._ProgramsByAddr, self._ProgramsByName,
                                  self._ProgramFoldersByAddr, self._ProgramsByAddr)

        """ Get the variables """
        while True:
            # noinspection PyBroadException
            try:
                historybuffer.HBNet.Entry('ISY vars get')
                r1 = self.ISYrequestsession.get(self.ISYprefix + 'vars/definitions/2', verify=False, timeout=5)
                r2 = self.ISYrequestsession.get(self.ISYprefix + 'vars/definitions/1', verify=False, timeout=5)
                historybuffer.HBNet.Entry('ISY vars get done')
                # for some reason var reads seem to typically take longer to complete, so timeout is set at 5 sec
                if r1.status_code != 200 or r2.status_code != 200:
                    logsupport.Logs.Log("Bad ISY var read" + r1.text + r2.text, severity=ConsoleWarning)
                    raise requests.exceptions.ConnectionError  # fake connection error on bad read
                logsupport.Logs.Log('{}: Successful variable read: {}/{}'.format(self.name, r1.status_code,
                                                                                 r2.status_code))
                break
            except:
                # after total power outage ISY is slower to come back than RPi so we wait testing periodically
                # Eventually we try rebooting just in case our own network is what is hosed
                trycount -= 1
                if trycount > 0:
                    logsupport.Logs.Log('{}: Hub not responding (variables) at: {}'.format(self.name, self.ISYprefix))
                    time.sleep(15)
                else:
                    logsupport.Logs.Log('No ISY response restart (vars)')
                    exitutils.errorexit(exitutils.ERRORPIREBOOT)
                    logsupport.Logs.Log('Reached unreachable code! ISY4')

        self.Vars = valuestore.NewValueStore(isyvarssupport.ISYVars(self))
        # noinspection PyBroadException
        try:
            configdictS = xmltodict.parse(r1.text)['CList']['e']
            if debug.dbgStore.GetVal('ISYLoad'):
                configdictS = xmltodict.parse(x3)['CList']['e']
            if debug.dbgStore.GetVal('ISYDump'):
                debug.ISYDump("xml.dmp", r1.text, pretty=False)
                debug.ISYDump("struct.dmp", configdictS)
            for v in configdictS:
                self.Vars.SetVal(('State', v['@name']), None)
                self.Vars.SetAttr(('State', v['@name']), (2, int(v['@id'])))
                self.Vars.AddAlert(('State', v['@name']), self._ISYVarChanged)
        except:
            logsupport.Logs.Log('No state variables defined')

        # noinspection PyBroadException
        try:
            configdictI = xmltodict.parse(r2.text)['CList']['e']
            if debug.dbgStore.GetVal('ISYLoad'):
                configdictI = xmltodict.parse(x4)['CList']['e']
            if debug.dbgStore.GetVal('ISYDump'):
                debug.ISYDump("xml.dmp", r2.text, pretty=False)
                debug.ISYDump("struct.dmp", configdictI)
            for v in configdictI:
                self.Vars.SetVal(('Int', v['@name']), None)
                self.Vars.SetAttr(('Int', v['@name']), (1, int(v['@id'])))
                self.Vars.AddAlert(('Int', v['@name']), self._ISYVarChanged)
        except:
            logsupport.Logs.Log('No integer variables defined')

        ''' Add command variables if needed '''
        cmdvar = valuestore.InternalizeVarName(self.name + ':Int:Command.' + hw.hostname.replace('-', '.'))
        self.alertspeclist = {}
        for k in valuestore.ValueStores[self.name].items():
            if k == tuple(cmdvar[1:]):
                self.alertspeclist['RemoteCommands-' + self.name] = {'Type': 'VarChange',
                                                                     'Var': valuestore.ExternalizeVarName(cmdvar),
                                                                     'Test': 'NE',
                                                                     'Value': '0',
                                                                     'Invoke': 'NetCmd.Command'}
                break
        self.Vars.LockStore()

        utilities.register_example("ISY", self)
        if debug.dbgStore.GetVal('ISYdbg'):
            self.PrintTree(self._ProgRoot, " ", 'Programs')

        self.HBWS = historybuffer.HistoryBuffer(150, self.name + '-WS')
        self.HBDirect = historybuffer.HistoryBuffer(40, self.name + '-Direct')
        self.isyEM = isyeventmonitor.ISYEventMonitor(self)
        threadmanager.SetUpHelperThread(self.name, self.isyEM.QHandler, prerestart=self.isyEM.PreRestartQHThread,
                                        poststart=self.isyEM.PostStartQHThread,
                                        postrestart=self.isyEM.PostStartQHThread)
        logsupport.Logs.Log("{}: Finished creating structure for hub".format(name))
    def __init__(self):
        self.BaseTime = 0
        self.List = []
        self.finder = {}
        self.TASKREADY = pygame.event.Event(pygame.USEREVENT, {})
        self.HB = historybuffer.HistoryBuffer(100, 'EventList')
import maintscreen
import utilities
from logsupport import ConsoleWarning, ConsoleError
import alerttasks
from stores.weathprov.providerutils import SetUpTermShortener, WeathProvs
import screen
import historybuffer

''' Constants '''
configfilebase = "/home/pi/Console/"  # actual config file can be overridden from arg1
configfilelist = {}  # list of configfiles and their timestamps

logsupport.SpawnAsyncLogger()
HBMain = historybuffer.HistoryBuffer(40, 'Main')
historybuffer.HBNet = historybuffer.HistoryBuffer(80, 'Net')
atexit.register(exitutils.exitlogging)

hubs.hubs.hubtypes['ISY'] = isy.ISY
hubs.hubs.hubtypes['HASS'] = hasshub.HA


# noinspection PyUnusedLocal
def handler(signum, frame):
    HBMain.Entry('Signal: {}'.format(signum))
    if signum in (signal.SIGTERM, signal.SIGINT, signal.SIGUSR1):
        config.Running = False
        if signum == signal.SIGUSR1:
            logsupport.DevPrint('Watchdog termination')
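# Sketch of how this handler is presumably hooked up (the registration calls are not part of this
# excerpt, and the exact signal set is an assumption based on the checks above):
#
#   signal.signal(signal.SIGTERM, handler)
#   signal.signal(signal.SIGINT, handler)
#   signal.signal(signal.SIGUSR1, handler)
#
# Clearing config.Running lets the main loop wind down cleanly instead of dying mid-update.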
    def __init__(self, name, configsect):
        super(MQTTBroker, self).__init__(name, itemtyp=MQitem)
        self.MQTTnum = 0
        self.fetcher = None
        self.HB = historybuffer.HistoryBuffer(40, name)

        # noinspection PyUnusedLocal
        def on_connect(client, userdata, flags, rc):
            logm = "Connected" if self.loopexited else "Reconnected"
            logsupport.Logs.Log("{}: {} stream {} with result code {}".format(self.name, logm, self.MQTTnum, rc))
            for i, _ in userdata.topicindex.items():
                client.subscribe(i)
            if logsupport.primaryBroker == self:
                client.subscribe([('consoles/all/errors', 1), ('consoles/all/cmd', 1),
                                  ('consoles/' + hw.hostname + '/cmd', 1),
                                  ('consoles/' + hw.hostname + '/set', 1), ('consoles/all/set', 1)])
            self.loopexited = False
            # for i, v in userdata.vars.items():
            #     client.subscribe(v.Topic)

        # noinspection PyUnusedLocal
        def on_disconnect(client, userdata, rc):
            logsupport.Logs.Log("{}: Disconnected stream {} with result code {}".format(self.name, self.MQTTnum, rc))

        # noinspection PyUnusedLocal
        def DoRestart():
            if self.fetcher is not None and self.fetcher.is_alive():
                logsupport.Logs.Log('Delaying restart until fetch completes')
                dly = timers.OnceTimer(10, start=True, name='RestartDelay', proc=DoDelayedRestart)
                ReportStatus('wait restart', hold=1)
                return
            ReportStatus('rmt restart', hold=1)
            exitutils.Exit_Screen_Message('Remote restart requested', 'Remote Restart')
            config.terminationreason = 'mqtt restart'
            exitutils.Exit(exitutils.REMOTERESTART)

        def DoDelayedRestart(evnt):
            PostEvent(ConsoleEvent(CEvent.RunProc, name='DelayedRestart', proc=DoRestart))

        def GetStable():
            self.fetcher = threading.Thread(name='FetchStableRemote', target=maintscreen.fetch_stable, daemon=True)
            self.fetcher.start()

        def GetBeta():
            self.fetcher = threading.Thread(name='FetchBetaRemote', target=maintscreen.fetch_beta, daemon=True)
            self.fetcher.start()

        def UseStable():
            subprocess.Popen('sudo rm /home/pi/usebeta', shell=True)

        def UseBeta():
            subprocess.Popen('sudo touch /home/pi/usebeta', shell=True)

        def DumpHB():
            entrytime = time.strftime('%m-%d-%y %H:%M:%S')
            historybuffer.DumpAll('Command Dump', entrytime)

        def EchoStat():
            ReportStatus('running stat')

        def LogItem(sev):
            logsupport.Logs.Log('Remotely forced test message ({})'.format(sev), severity=sev, tb=False, hb=False)

        # noinspection PyUnusedLocal
        def on_message(client, userdata, msg):
            # command to force get: mosquitto_pub -t consoles/all/cmd -m getstable; mosquitto_pub -t consoles/all/cmd -m restart
            loopstart = time.time()
            var = []
            for t, item in userdata.topicindex.items():
                if t == msg.topic:
                    var.extend(item)
            if msg.topic in ('consoles/all/cmd', 'consoles/' + hw.hostname + '/cmd'):
                cmd = msg.payload.decode('ascii')
                logsupport.Logs.Log('{}: Remote command received on {}: {}'.format(self.name, msg.topic, cmd))
                cmdcalls = {'restart': DoRestart,
                            'getstable': GetStable,
                            'getbeta': GetBeta,
                            'usestable': UseStable,
                            'usebeta': UseBeta,
                            'hbdump': DumpHB,
                            'status': EchoStat,
                            'issueerror': functools.partial(LogItem, ConsoleError),
                            'issuewarning': functools.partial(LogItem, ConsoleWarning),
                            'issueinfo': functools.partial(LogItem, ConsoleInfo)}
                if cmd.lower() in cmdcalls:
                    try:
                        PostEvent(ConsoleEvent(CEvent.RunProc, name=cmd, proc=cmdcalls[cmd.lower()]))
                    except Exception as E:
                        logsupport.Logs.Log('Exc: {}'.format(repr(E)))
                else:
                    logsupport.Logs.Log('{}: Unknown remote command request: {}'.format(self.name, cmd),
                                        severity=ConsoleWarning)
                return
            elif msg.topic == 'consoles/all/errors':
                d = json.loads(msg.payload.decode('ascii'))
                if d['node'] != hw.hostname:
                    logsupport.Logs.LogRemote(d['node'], d['entry'], severity=d['sev'],
                                              etime=d['etime'] if 'etime' in d else 0)
                return
            elif msg.topic in ('consoles/all/set', 'consoles/' + hw.hostname + '/set'):
                d = json.loads(msg.payload.decode('ascii'))
                try:
                    logsupport.Logs.Log('{}: set {} = {}'.format(self.name, d['name'], d['value']))
                    valuestore.SetVal(d['name'], d['value'])
                except Exception as E:
                    logsupport.Logs.Log('Bad set via MQTT: {} Exc: {}'.format(repr(d), E), severity=ConsoleWarning)
                return
            # noinspection PySimplifyBooleanCheck
            if var == []:
                logsupport.Logs.Log('Unknown topic ', msg.topic, ' from broker ', self.name, severity=ConsoleWarning)
            else:
                for v in var:
                    v.SetTime = time.time()
                    if not v.jsonflds:
                        v.Value = v.Type(msg.payload)
                        # debug.debugPrint('StoreTrack', "Store(mqtt): ", self.name, ':', v, ' Value: ', v.Value)
                    else:
                        payload = '*bad json*' + msg.payload.decode('ascii')  # for exception log below
                        try:
                            # work around bug in tasmota returning bad json
                            payload = json.loads(msg.payload.decode('ascii').replace('nan', 'null'))
                            for i in v.jsonflds:
                                payload = payload[i]
                            if payload is not None:
                                v.Value = v.Type(payload)
                            else:
                                v.Value = None
                            # debug.debugPrint('StoreTrack', "Store(mqtt): ", self.name, ':', v, ' Value: ', v.Value)
                        except Exception as e:
                            logsupport.Logs.Log('Error handling json MQTT item: ', v.name, str(v.jsonflds),
                                                msg.payload.decode('ascii'), str(e), repr(payload),
                                                severity=ConsoleWarning)
            loopend = time.time()
            self.HB.Entry('Processing time: {} Done: {}'.format(loopend - loopstart, repr(msg)))
            time.sleep(.1)  # force thread to give up processor to allow response to time events
            # self.HB.Entry('Gave up control for: {}'.format(time.time() - loopend))

        # noinspection PyUnusedLocal
        def on_log(client, userdata, level, buf):
            logsupport.Logs.Log("MQTT Log: ", str(level), " buf: ", str(buf), severity=ConsoleWarning)

        def _parsesection(nm, sect, prefix=''):
            tp = sect.get('TopicType', 'string')
            if tp == 'group':
                thistopic = sect.get('Topic', nm[-1])
                rtn = {}
                for itemnm, val in sect.items():
                    rtn[itemnm] = _parsesection(nm + [itemnm], val, prefix=prefix + '/' + thistopic)
                return rtn
            else:
                if tp == 'float':
                    tpcvrt = float
                elif tp == 'int':
                    tpcvrt = int
                else:
                    tpcvrt = str
                thistopic = sect.get('Topic', nm[-1])
                jsonflds = sect.get('json', '')
                if jsonflds:
                    jsonflds = jsonflds.split(':')
                tpc = (prefix + '/' + thistopic).lstrip('/')
                rtn = MQitem(nm, tpc, tpcvrt, int(sect.get('Expires', 99999999999999999)), jsonflds, self)
                if tpc in self.topicindex:
                    self.topicindex[tpc].append(rtn)
                else:
                    self.topicindex[tpc] = [rtn]
                return rtn

        self.address = configsect.get('address', None)
        self.password = configsect.get('password', None)
        self.reportstatus = configsect.get('ReportStatus', False)
        self.vars = {}
        self.ids = {}
        self.topicindex = {}  # dict from full topics to MQitems
        self.loopexited = True
        self.MQTTnum = 0

        for itemname, value in configsect.items():
            if isinstance(value, Section):
                self.vars[itemname] = _parsesection([itemname], value)

        self.MQTTclient = mqtt.Client(userdata=self)
        self.MQTTclient.on_connect = on_connect
        self.MQTTclient.on_message = on_message
        self.MQTTclient.on_disconnect = on_disconnect

        if self.reportstatus or logsupport.primaryBroker is None:
            topic = 'consoles/' + hw.hostname + '/status'
            self.MQTTclient.will_set(topic, json.dumps({'status': 'dead'}), retain=True)
            logsupport.primaryBroker = self
        self.MQTTrunning = False

        # register the console
        self.Publish(node='all/nodes', topic=hw.hostname,
                     payload=json.dumps({'registered': time.time(),
                                         'versionname': config.sysStore.versionname,
                                         'versionsha': config.sysStore.versionsha,
                                         'versiondnld': config.sysStore.versiondnld,
                                         'versioncommit': config.sysStore.versioncommit,
                                         'boottime': hw.bootime,
                                         'osversion': hw.osversion,
                                         'hw': hw.hwinfo}),
                     retain=True, qos=1)
        threadmanager.SetUpHelperThread(self.name, self.MQTTLoop)
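    # Illustrative config fragment (hypothetical names, configobj-style nesting assumed) showing what
    # _parsesection consumes: each subsection of the broker section becomes an MQitem. Keys read by the
    # code are TopicType ('string', 'int', 'float', or 'group'), Topic, json (colon-separated path into a
    # JSON payload), and Expires; 'group' sections prefix their children's topics.
    #
    #   [MyBroker]
    #   address = 192.168.1.10
    #   [[OutsideTemp]]
    #   Topic = tele/weather/SENSOR
    #   TopicType = float
    #   json = AM2301:Temperature
    #   Expires = 600
    #
    # With the above, the item tracks topic 'tele/weather/SENSOR' and on_message pulls
    # payload['AM2301']['Temperature'] out of the JSON message and stores it as a float.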
import time
from threading import Thread, Event
import threading
import config
import historybuffer
import logsupport
import os
import signal
from controlevents import CEvent, PostEvent, ConsoleEvent

TimerList = {}
TimerHB = historybuffer.HistoryBuffer(100, 'Timers')
LongOpInProgress = False
LongOpStart = {'maintenance': 0}
timersshut = False


def StartLongOp(nm):
    global LongOpInProgress, LongOpStart
    if nm not in LongOpStart:
        LongOpStart[nm] = 0
    if LongOpStart[nm] != 0:
        logsupport.Logs.Log('Long op start within existing long op for {} {}'.format(nm, LongOpStart),
                            severity=logsupport.ConsoleWarning)
    LongOpStart[nm] = time.time()
    LongOpInProgress = any(LongOpStart.values())
    TimerHB.Entry('Start long op: {} {} {}'.format(nm, LongOpInProgress, LongOpStart))
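# Illustrative usage (a sketch): a slow operation flags itself so the latency/watchdog checks
# tolerate the pause; a matching completion routine (not shown in this excerpt) is assumed to
# zero the entry again so LongOpInProgress clears.
#
#   StartLongOp('maintenance')
#   ...do the slow work, then call the corresponding end-of-op routine defined elsewhere...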