def WaitForLock(LockFileName):
    iCount = 0
    while True == os.path.isfile(LockFileName):
        iCount += 1
        Sleep.SleepMs(2)
        if iCount > 100:  # if lock lasts too long, just return something TODO - maybe log something?
            return False

    try:
        lockFile = os.open(LockFileName, os.O_CREAT | os.O_EXCL | os.O_RDWR)
        os.write(lockFile, "lock".encode('utf-8'))
        os.close(lockFile)
        return True

    except Exception as ex:
        # give it one more try
        # iCount=0
        while True == os.path.isfile(LockFileName):
            iCount += 1
            Sleep.SleepMs(5)
            if iCount > 100:  # if lock lasts too long, just return something TODO - maybe log something?
                return False

        try:
            lockFile = os.open(LockFileName, os.O_CREAT | os.O_EXCL | os.O_RDWR)
            os.write(lockFile, "lock".encode('utf-8'))
            os.close(lockFile)
            return True

        except Exception as ex:
            #print("Bottom of routine: " + str(ex))
            pass

    return False
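# Hedged usage sketch (not part of the original module): WaitForLock() only creates the
# lock file; the caller is responsible for deleting it once the critical section is done.
# ReleaseLock() below is a hypothetical helper, assuming the lock is released simply by
# removing the file.
def ReleaseLock(LockFileName):
    try:
        os.remove(LockFileName)
    except OSError:
        pass  # file already gone; nothing to release

# Illustrative caller (assumed pattern, not taken from the source):
#   if WaitForLock("minion.lock"):
#       try:
#           ...  # critical section
#       finally:
#           ReleaseLock("minion.lock")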
def StartupWorkerProc(fnKillSignalled, userData):
    downstreamServer = userData[0]
    upstreamServer = userData[1]
    Sleep.SleepMs(500)
    downstreamServer.Start()
    upstreamServer.DropPackets(True)
    upstreamServer.Start()
    Watchdog.ConnectionUpdateTimer()
    Watchdog.WatchdogTimer()
    conf = Configuration.get()

    if None != conf.GetAutorunFilename():
        GuiMgr.OnStopLiveData()
        #GuiMgr.OnStopPlayback()
        #GuiMgr.OnStopRecording(True)  # drop all recorded packets
        GuiMgr.OnSetPlaybackSpeed(Configuration.get().GetPlaybackSpeed())
        ss = Configuration.get().GetAutorunLocations()
        #GuiMgr.OnEnablePlayback()
        GuiMgr.ReadFromFile(Configuration.get().GetAutorunFilename())
        GuiMgr.OnStopPlayback()
        Sleep.SleepMs(100)  # let gui worker threads catch up, so gui updates properly
        GuiMgr.OnStartPlayback()
        GuiMgr.OnSetRepeatMode(Configuration.get().GetAutoRunMode(), ss[0], ss[1])
    else:
        upstreamServer.DropPackets(False)

    if None != conf.GetAutorunTime() and conf.GetAutorunTime() > 0:
        # specified a --time, so let's hang out for that long
        endTime = Time.GetCurrMS() + conf.GetAutorunTime() * 60 * 1000
        Log.getLogger().info("Waiting for " + str(conf.GetAutorunTime()) + " minutes before auto shutdown")

        if conf.GetRecordFilename():
            GuiMgr.OnStartRecording()

        while not fnKillSignalled() and endTime > Time.GetCurrMS():
            Sleep.SleepMs(250)

        Log.getLogger().info("Shutting down after time period")

        if conf.GetRecordFilename():  # was a recording session, so quit after that time
            GuiMgr.OnStopRecording()
            GuiMgr.WriteToFile(conf.GetRecordFilename())
            Log.getLogger().info("Saving Recorded data to file: " + conf.GetRecordFilename())

        GuiMgr.Quit()
def Begin(self, runOnce=False):
    # start udp server
    # start collectors
    if False and False == Namespace._UseSingleCollectorThreadPerNamespace and not Namespace._UseMultiThreadPerNamespace:  # deprecated
        for collector in self._Collectors:
            if not collector.IsInGroup() and not collector.IsOnDemand():
                collector.BeginCollecting(runOnce)
                Sleep.SleepMs(5)  # so not at all same time

    # this has now really been deprecated and should not be used anymore
    elif True == Namespace._UseSingleCollectorThreadPerNamespace or runOnce:
        # one thread to do all collecting
        ThreadManager.GetThreadManager().CreateThread(self._ID, self.__AlternateCollectionMethod, runOnce)
        ThreadManager.GetThreadManager().StartThread(self._ID)

    else:
        # many threads, with multiple collectors per thread
        ThreadManager.GetThreadManager().CreateThread(self._ID, self.__AlternateCollectionMethodMultiThread)
        ThreadManager.GetThreadManager().StartThread(self._ID)

    if True == runOnce:
        return len(self._Collectors)

    self._Server = ServerUDP.ServerUDP(self.__ListenIP, self.__ListenPort, self)
    self._Server.Start()

    threadName = "ConnUpdateThread:" + str(self) + ":" + str(self.__ListenPort)
    ThreadManager.GetThreadManager().CreateThread(threadName, self.__sendConnectionInfoProc)
    ThreadManager.GetThreadManager().StartThread(threadName)

    return len(self._Collectors)
def WorkerProc(self, fnKillSignalled, userData):
    lastUpdate = 0
    interval = Configuration.get().GetConnectionUpdateInterval()

    buffer = "<?xml version=\"1.0\" encoding=\"utf-8\"?>"
    buffer = buffer + "<Oscar Type=\"ConnectionInformation\">"
    buffer = buffer + "<Version>1.0</Version>"
    buffer = buffer + "<OscarVersion>" + VersionMgr.ReadVer() + "</OscarVersion>"
    buffer = buffer + "<ID>" + Configuration.get().GetID() + "</ID>"
    buffer = buffer + "<Port>" + str(Configuration.get().GetDownstreamConnection().getPort()) + "</Port>"
    buffer = buffer + "</Oscar>"

    #<?xml version="1.0" encoding="utf-8"?>
    #<Oscar Type="ConnectionInformation">
    #    <Version>1.0</Version>
    #    <ID>Foo</ID>
    #    <Port>Port</Port>
    #</Oscar>

    while not fnKillSignalled():  # run until signalled to end - call passed function to check for the signal
        if lastUpdate < Time.GetCurrMS() - interval:
            TargetManager.GetTargetManager().BroadcastDownstream(buffer, True, None)  # send Connection Data to all downstream things (Oscars & Marvins)
            lastUpdate = Time.GetCurrMS()
            Configuration.get().RescanTargets()
        else:
            Sleep.Sleep(0.25)

        TargetManager.GetTargetManager().CheckForRemovalOfDynamicMarvins()
def __AlternateCollectionMethodMultiThread(self, fnKillSignalled, startIndex):
    processedWithoutRestdummymakelooklikeother = 0
    ThreadCount = -1
    ProcessThreadCount = self.__CreateInitialCollectorThreadGroupings()
    AddActiveProcessingThreads(ProcessThreadCount)
    firstGroupID = None
    firstGroupCollectors = []
    collectorCount = len(self._Collectors)

    if collectorCount < 1:
        Log.getLogger().error("No Collectors to process")
        return

    for processThreadID, collectorList in self.__ProcessThreadGroupings.items():
        if None == firstGroupID:
            firstGroupID = processThreadID
            firstGroupCollectors = collectorList
        else:
            ID = str(self) + processThreadID
            ThreadManager.GetThreadManager().CreateThread(ID, self.__SlicedThreadProc, processThreadID)  # create a worker thread and pass it a list of collectors to update
            ThreadManager.GetThreadManager().StartThread(ID)
            ThreadCount += 1

    while not fnKillSignalled():
        # now go process the 1st group in this thread
        processed = self.__CollectSingleRange(fnKillSignalled, firstGroupID)
        if processed == 0:
            Sleep.SleepMs(Namespace.SleepIntervalIfNoDataCollected)

        if collectorCount != len(self._Collectors):
            # dynamic collectors must have added some
            pass
def OnStart(self):
    print("Using Console UI")
    signal.signal(signal.SIGINT, self.signal_handler)  # make my own Ctrl+C handler now

    while not self._End:
        Sleep.SleepMs(250)
def Stop(self):
    if False == self.IsRunning():
        Log.getLogger().error("Tried to stop Thread [" + self.__ID + "] that is not running.")
        return

    self.SignalStop()
    while False == self.IsStopped():
        Sleep.Sleep(.01)
def ShuntWorkerProc(self, fnKillSignalled, userData):
    from Helpers import Configuration
    sleepTime = Configuration.get().GetShuntWorkerInterval()
    try:
        while not fnKillSignalled():  # run until signalled to end - call passed function to check for the signal
            self.__ShuntLock.acquire()
            DupMap = None
            try:
                DupMap = self.__ShuntedDataByFile
                self.__ShuntedDataByFile = {}
            except Exception as Ex:
                Log.getLogger().info("Unknown error in Shunt Worker Proc: " + str(Ex))
            finally:
                self.__ShuntLock.release()

            if None != DupMap:
                for file in DupMap.keys():
                    # create temp file, copy target file to it, then do all updates and copy it back to original
                    tfh, tfh_path = mkstemp()
                    close(tfh)
                    if os.path.exists(file):
                        copy(file, tfh_path)  # copy contents to temp file

                    mapEntry = DupMap[file]
                    for shuntEntry in mapEntry:
                        namespace = shuntEntry[0]
                        ID = shuntEntry[1]
                        History = shuntEntry[2]
                        Value = shuntEntry[3]

                        if True == History:
                            self.ShuntHistory(namespace, ID, Value, tfh_path)
                        else:
                            self.ShuntDynamicCollectorStyle(namespace, ID, Value, tfh_path)

                    # all done processing this file, so copy temp file to real file
                    # remove original file
                    try:
                        remove(file)
                    except Exception as Ex:
                        pass
                    # move new file into place
                    move(tfh_path, file)

            Sleep.SleepMs(sleepTime)

    except Exception as Ex:
        Log.getLogger().info("Unknown error in Shunt Worker Proc: " + str(Ex))
def __collectionProc(self, fnKillSignalled, userData):
    if True == self._RunOnce:
        self.alternateCollectionProc()
        return

    sleepTime = Collector.__SleepInterval
    while not fnKillSignalled():
        self.alternateCollectionProc()
        Sleep.SleepMs(sleepTime)  # small sleep
def HandleIncomingWatchdogPacket(self, node, rawData, fromAddr):
    #<?xml version="1.0" encoding="utf-8"?>
    #<Marvin Type="WatchdogTimer">
    #    <Version>1.0</Version>
    #    <MarvinVersion>17.12.22</MarvinVersion>
    #    <UniqueID>3236</UniqueID>
    #    <Port>5000</Port>
    #</Marvin>
    Statistics.GetStatistics().OnPacketReceivedFromDownstream(rawData)

    try:
        _ = node.getElementsByTagName('Version')[0].firstChild.nodeValue
        IP = fromAddr[0].lower()
        Port = node.getElementsByTagName('Port')[0].firstChild.nodeValue
        UniqueID = node.getElementsByTagName('UniqueID')[0].firstChild.nodeValue
    except Exception as _:
        Statistics.GetStatistics().OnMalformedPacketReceived("Received invalid Marvin WatchdogTimer Packet : " + rawData)
        return

    try:
        marvinVersion = node.getElementsByTagName('MarvinVersion')[0].firstChild.nodeValue
    except Exception:
        marvinVersion = 'Unknown'

    Key = IP + ":" + Port
    objTarget = TargetManager.GetTargetManager().GetDownstreamTarget(Key)

    if None == objTarget:
        objTarget = TargetManager.GetTargetManager().GetDownstreamTargetEx(IP, Port)  # if using DNS, do lookup based on real IP, not DNS name

    if None == objTarget:
        Sleep.SleepMs(50)  # give it another shot, other thread may be doing a DNS resolution
        objTarget = TargetManager.GetTargetManager().GetDownstreamTargetEx(IP, Port)  # if using DNS, do lookup based on real IP, not DNS name

    if None == objTarget:
        Log.getLogger().warning("Received Marvin Watchdog for unknown downstream Target: " + IP + ":" + Port + " Version: " + marvinVersion)
        return

    if objTarget.getType() != ConnectionType.Marvin and objTarget.getType() != ConnectionType.DynamicMarvin:
        # would not know what this is until you hear back (could be another Oscar)
        objTarget.Type = ConnectionType.Marvin
        Log.getLogger().info("Connection established with Marvin Target: " + IP + ":" + Port + " Version: " + marvinVersion)

    try:
        _ = node.getElementsByTagName('RefreshRequested')[0].firstChild.nodeValue
        objTarget.ReArmRefreshRequest(UniqueID)  # asked to refresh!
    except Exception as _:
        pass

    objTarget.StrokeWatchdogTimer()
def __SimpleWorker(self):
    while not ThreadManager.GetThreadManager().AllStopSignalled():
        dataBlock = self.GetItemFromSynchQueue()  # get data to process
        if None != dataBlock:
            rawData, FromAddr = dataBlock
            self.__HandleLiveData(rawData, FromAddr)  # go process the data
        else:  # no data to process, maybe reduce worker count
            if self._GetWorkerThreadCount() > 2:
                self._DecrementWorkerThreadCount()
                #Log.getLogger().debug("Reducing worker threads")
                return
            else:
                Sleep.SleepMs(10)
def __AlternateCollectionMethod(self, fnKillSignalled, runOnce):
    while not fnKillSignalled():
        count = 0  # number of collectors that actually produced data this pass
        for collector in self._Collectors:
            if fnKillSignalled():  # get out of possible long loop if we are to exit
                return
            if not collector.IsInGroup() and not collector.IsOnDemand():
                SizeOfSentData = collector.alternateCollectionProc()
                if SizeOfSentData > 0:
                    count += 1

        if count == 0:  # no data processed, sleep a bit
            Sleep.SleepMs(100)

        if runOnce:
            return
def __SimpleWorker(self):
    while not ThreadManager.GetThreadManager().AllStopSignalled():
        packet = self.GetDownstreamPacket()  # get data to process
        if None != packet:
            sendBuffer, ignoreTimeout, domNode, isGroup = packet
            self._BroadcastDownstream(sendBuffer, ignoreTimeout, domNode, isGroup)
        else:  # no data to process, maybe reduce worker count
            if self.GetWorkerThreadCount() > 2:
                self.DecrementWorkerThreadCount()
                #Log.getLogger().debug("Reducing worker threads [" + str(self.GetWorkerThreadCount()) + ']')
                return
            else:
                Sleep.SleepMs(10)
def StopAllThreads(self):
    for key in self.__threadList:
        objThread = self.__threadList[key]
        if True == objThread.IsRunning():
            objThread.SignalStop()

    allDone = False
    while False == allDone:
        allDone = True
        for key in self.__threadList:
            objThread = self.__threadList[key]
            if True == objThread.IsRunning():
                allDone = False
            else:
                objThread.ReportStopped()
        Sleep.Sleep(.01)  # sleep for 10ms
def __sendConnectionInfoProc(self, fnKillSignalled, userData):
    buffer = "<?xml version=\"1.0\" encoding=\"utf-8\"?>"
    buffer = buffer + "<Minion Type=\"ConnectionInformation\">"
    buffer = buffer + "<Version>1.0</Version>"
    buffer = buffer + "<MinionVersion>" + VersionMgr.ReadVer() + "</MinionVersion>"
    buffer += "<Namespace>" + str(self) + "</Namespace>"
    buffer += "<Port>" + str(self._Server.getPort()) + "</Port>"
    buffer = buffer + "</Minion>"

    lastUpdate = 0
    while not fnKillSignalled():
        if lastUpdate + Namespace.ConnectionInfoUpdateInterval < Time.GetCurrMS():
            if self.SendPacket(buffer):
                Log.getLogger().debug("Sent announcement to Oscar")
            lastUpdate = Time.GetCurrMS()

        # Don't want to sleep for Namespace.ConnectionInfoUpdateInterval in case we are
        # signalled to stop in the meantime
        Sleep.SleepMs(Namespace.ConnectionUpdateThreadSleepinterval)
def WorkerProc(self, fnKillSignalled, userData):
    #sequentialSent = 0 - experiment to reduce CPU utilization, but I think it was a bad idea now
    while not fnKillSignalled():  # run until signalled to end - call passed function to check for the signal
        if self.alternateWorker():
            pass
            #sequentialSent += 1
        else:
            Sleep.SleepMs(100)  # no data to send, so rest for a while
            #sequentialSent = 0

        #if sequentialSent > 10:
        #    Sleep.SleepMs(10)  # sent 10 packets without a rest, take a snooze
        #    sequentialSent = 0

    # all done, so close things down
    if None != self.m_socket:
        self.m_socket.close()
def WatchdogProc(self, fnKillSignalled, userData):
    lastUpdate = 0
    interval = Configuration.get().GetTimeoutPeriod() * 0.25  # send a watchdog at 4x rate of timeout

    buffer = "<?xml version=\"1.0\" encoding=\"utf-8\"?>"
    buffer = buffer + "<Oscar Type=\"WatchdogTimer\">"
    buffer = buffer + "<Version>1.0</Version>"
    buffer = buffer + "<Port>" + str(Configuration.get().GetUpstreamConnection().getPort()) + "</Port>"
    buffer = buffer + "</Oscar>"

    while not fnKillSignalled():  # run until signalled to end - call passed function to check for the signal
        if lastUpdate < Time.GetCurrMS() - interval:
            TargetManager.GetTargetManager().BroadcastUpstreamToType(buffer, ConnectionType.UpstreamOscar)  # send heartbeat to all upstream Oscars
            lastUpdate = Time.GetCurrMS()

        Sleep.Sleep(0.25)  # snooze for 250 ms
def __CollectSingleRange(self, fnKillSignalled, processThreadID):
    from Helpers import Configuration
    count = 0
    currTotal = 0
    maxTx = Configuration.GetMaxTransmitBufferBeforeRest()
    startTime = Time.GetCurrMS()
    collectorList = self.__GetCollectorListForThreadGroup(processThreadID)

    for collector in collectorList:
        if fnKillSignalled():  # get out of possible long loop if we are to exit
            return
        if not collector.IsInGroup() and not collector.IsOnDemand():
            SizeOfSentData = collector.alternateCollectionProc()
            if SizeOfSentData > 0:
                self.IncrementSentBytes(SizeOfSentData)
                count += 1
                currTotal += SizeOfSentData
                if currTotal > maxTx:  # don't want to overload Oscar
                    Sleep.SleepMs(50)
                    currTotal = 0

    #timeTaken = Time.GetCurrMS() - startTime
    #print(processThreadID + ": " + str(timeTaken))
    #if Namespace._LogTimePerProcessLoop and timeTaken > 0:
    #    Log.getLogger().debug("Process Thread: " + collectorList[0].GetProcessThreadID() + " took " + str(timeTaken) + "ms to process one loop")
    #if timeTaken > Namespace._LoopTimePeriodWarningThreshold and not Namespace._LogTimePerProcessLoop:
    #    Log.getLogger().warning("Process Thread: " + collectorList[0].GetProcessThreadID() + " took " + str(timeTaken) + "ms to process one loop - you may want to investigate.")
    #if "Default" != processThreadID:
    #    print(processThreadID + " Collected " + str(count) + "/" + str(len(collectorList)))

    return count
def main():
    parser = argparse.ArgumentParser(description='Minion Data Collector.')
    parser.add_argument("-i", "--input", dest='argFilename', help='specifies input file', type=extant_file, metavar="FILE")
    parser.add_argument("-v", "--verbose", help="prints information, values 0-3", type=int)
    parser.add_argument("-r", "--runonce", help="calls all collectors once and exits", action="store_true")
    parser.add_argument("-a", "--aliasfile", help="specify an external file that has alias definitions", type=str)

    try:
        args = parser.parse_args()
        if None == args.verbose:
            _VerboseLevel = 0
        else:
            _VerboseLevel = args.verbose
        _RunOnce = args.runonce
    except:
        return

    ShowVersion()

    if not VersionCheck.CheckVersion():
        Log.getLogger().error("Invalid version of Python")
        return

    if 3 <= _VerboseLevel:
        Log.setLevel(logging.DEBUG)
    elif 2 == _VerboseLevel:
        Log.setLevel(logging.WARNING)
    elif 1 == _VerboseLevel:
        Log.setLevel(logging.INFO)
    else:
        Log.setLevel(logging.ERROR)

    curr_dir_path = os.path.dirname(os.path.realpath(__file__))
    Alias.AliasMgr.AddAlias("WORKING_DIR", curr_dir_path)
    Alias.AliasMgr.AddEnvironmentVariables()

    if None != args.aliasfile:
        if not Alias.AliasMgr.LoadExternalAliasFile(args.aliasfile):
            return

    signal.signal(signal.SIGINT, signal.SIG_IGN)  # turn off Ctrl+C signal handler (will get inherited by sub processes)

    if not os.path.exists(_ConfigFilename):
        Log.getLogger().error("Config file [" + _ConfigFilename + "] not found!")
        return

    config = Configuration.Configuration(_ConfigFilename, True)
    if None == config or not config.IsValid():
        pass
    else:
        print("Starting Collectors...")
        totalCollectors = 0
        for namespace in config.GetNamespaces():
            totalCollectors += namespace.Begin(_RunOnce)

        signal.signal(signal.SIGINT, signal_handler)  # make my own Ctrl+C handler now
        print(str(totalCollectors) + " Collectors started.")

        if False == _RunOnce:
            print("Press CTRL+C to Exit")
        else:
            print("Running Once")

        if False == _RunOnce:
            while _ThreadActive:
                if 0 == _VerboseLevel:
                    for c in spinning_cursor():
                        countStr = '[' + str(config.GetCollectorCount()) + '] '
                        sys.stdout.write(countStr)
                        sys.stdout.write(c)
                        Sleep.SleepMs(100)
                        sys.stdout.flush()
                        sys.stdout.write('\b')
                        for c in countStr:
                            sys.stdout.write('\b')
                else:
                    Sleep.SleepMs(100)

    print("Shutting down...")
    try:
        ThreadManager.GetThreadManager().StopAllThreads()
    except:
        pass
def __workerProc(self, fnKillSignalled, userData):
    from Helpers import GuiMgr
    self.CurrentIndex = self.startIndex
    self.StartTime = None
    xmlList = []

    while not fnKillSignalled():  # run until signalled to end - call passed function to check for the signal
        if self.Paused or self.Stopped:
            Sleep.SleepMs(100)
            continue

        if None == self.StartTime:
            self.StartTime = int(self.PlaybackData[self.CurrentIndex].ArrivalTime) - 10  # can't remember why I subtract 10ms...

        objData = self.PlaybackData[self.CurrentIndex]
        sleepVal = (int(objData.ArrivalTime) - self.StartTime) / self.PlaybackSpeed
        Sleep.SleepMs(sleepVal)

        try:  # when looping, this data will be here after the 1st loop
            xmlData = self.PlaybackData[self.CurrentIndex].xmlData
            node = self.PlaybackData[self.CurrentIndex].firstNode
        except:
            xmlData = objData.ToXML()  # be more efficient if I create a list of already created xmldata
            self.PlaybackData[self.CurrentIndex].xmlData = xmlData  # LOVE how you can just add stuff to an object in Python!

            if Configuration.get().GetShunting():
                try:
                    dom = xml.dom.minidom.parseString(xmlData)
                    node = dom._get_firstChild()
                except Exception as ex:
                    Log.getLogger().error("Error: Something bad in trying to re-encode saved data")
            else:
                node = None

            self.PlaybackData[self.CurrentIndex].firstNode = node

        TargetManager.GetTargetManager().BroadcastDownstream(xmlData, False, node)
        GuiMgr.OnDataPacketSentDownstream(objData, "Playback")

        try:
            self.StartTime = int(self.PlaybackData[self.CurrentIndex].ArrivalTime)
        except Exception:
            self.StartTime = None  # just in case we get an out of bounds error

        self.CurrentIndex += 1

        if None == self.endIndex:
            self.endIndex = len(self.PlaybackData) - 1

        if self.CurrentIndex >= self.endIndex:
            preProcessingDone = True
            if self.LoopMode == RepeatMode.NONE:
                GuiMgr.OnStopPlayback()
                if Configuration.get().GetExitAfterAutoPlay():
                    Log.getLogger().info("Playback finished, exiting application per arguments")
                    GuiMgr.Quit()

            elif self.LoopMode == RepeatMode.REPEAT:
                self.CurrentIndex = 0
                self.LoopCount += 1

            elif self.LoopMode == RepeatMode.LOOP:
                self.CurrentIndex = self.startIndex
                self.LoopCount += 1

            self.StartTime = None
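# Timing sketch for the playback pacing above (illustrative numbers, not from the source):
# sleepVal = (ArrivalTime - StartTime) / PlaybackSpeed, so a packet recorded 500 ms after
# the reference StartTime is delayed 500 ms at PlaybackSpeed == 1.0, but only 250 ms at
# PlaybackSpeed == 2.0, which is how the worker speeds up or slows down replay.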
def __GovernerRest(self):
    Sleep.SleepMs(self.__GovernerBackoffPeriod)
def __WorkerProc(self, fnKillSignalled, userData):
    while not fnKillSignalled():
        Sleep.SleepMs(250)
def __SlicedThreadProc(self, fnKillSignalled, processThreadID):
    while not fnKillSignalled():
        count = self.__CollectSingleRange(fnKillSignalled, processThreadID)
        if 0 == count:
            Sleep.SleepMs(Namespace.SleepIntervalIfNoDataCollected)