def fntRunEverything(mygl, itlsInstructions, nWaitMsec, nWaitHowMany):
    '''Start an async job for each case.
    Limit the number of concurrent jobs to the size of the ltJobs vector.
    When a job completes, ship its output upline and remove it
    from the active lists.
    Two separate threads:
    - Wait for an empty slot; get an instruction, start an async job.
    - Wait for an active job to complete and remove it from lists.
    '''
    # Fill the list of jobs with empties.
    for i in range(mygl.nParallel + 1):
        mygl.ltJobs.append(None)
    mygl.lockJobList = threading.Lock()
    mygl.lockPrint = threading.Lock()

    # Create new threads
    NTRC.ntracef(5, "RUN", "proc make thread instances")
    thrStart = CStartAllCases(mygl, mygl.nCoreTimer, mygl.nStuckLimit
                            , itlsInstructions, )
    thrEnd = CEndAllCases(mygl, mygl.nCoreTimer, )
    mygl.llsFullOutput = [["", ""]]
    thrStart.start()
    thrEnd.start()

    # Wait until all jobs have started and finished.
    thrStart.join()             # Runs out of instructions.
    thrEnd.join()               # Runs out of finished jobs.
    return tWaitStats(ncases=mygl.nCasesDone
                    , slot=mygl.nWaitedForSlot
                    , done=mygl.nWaitedForDone)
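The tWaitStats and tJob records and the shared "gl"/"mygl" object are defined elsewhere in the project and are not shown in this excerpt. A minimal sketch of the shapes this code appears to assume; field names are taken from the calls above, and the CG class and its default values are guesses:

import collections

# Assumed result records; the real definitions live elsewhere in the project.
tWaitStats = collections.namedtuple("tWaitStats", "ncases slot done")
tJob = collections.namedtuple("tJob", "procid")

class CG(object):
    '''Bag of shared state passed around as "gl" / "mygl" (assumed shape).'''
    def __init__(self):
        self.nParallel = 4              # max concurrent jobs
        self.nCases = 20                # total cases to run
        self.nWaitMsec = 100            # polling interval
        self.nWaitHowMany = 1000        # max polls before giving up
        self.nCoreTimer = 100           # assumed: same role as nWaitMsec
        self.nStuckLimit = 1000         # assumed: same role as nWaitHowMany
        self.ltJobs = []                # slot table, padded with None entries
        self.dId2Proc = {}              # job id -> multiprocessing.Process
        self.dId2Queue = {}             # job id -> multiprocessing.Queue
        self.bThatsAllFolks = False     # set when instructions run out
        self.nCasesStarted = self.nCasesDone = self.nCasesTotal = 0
        self.nWaitedForSlot = self.nWaitedForDone = 0
        self.bDebugPrint = False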
    def __init__(self, mygl, mynWaitMsec):
        threading.Thread.__init__(self)
        self.gl = mygl
        self.nWaitMsec = mynWaitMsec
        self.llsFullOutput = list()
        NTRC.ntracef(2, "END", "exit init gl|%s| wait|%s|"
            % (self.gl, self.nWaitMsec))
def fnRunEverything(gl, llsInstructions, nWaitMsec, nWaitHowMany
                , myqFullOutput, myqWaited):
    '''Start an async job for each case.
    Limit the number of concurrent jobs to the size of the ltJobs vector.
    When a job completes, ship its output upline and remove it
    from the active lists.
    Two separate threads:
    - Wait for an empty slot; get an instruction, start an async job.
    - Wait for an active job to complete and remove it from lists.
    '''
    # Fill the list of jobs with empties.
    for i in range(gl.nParallel + 1):
        gl.ltJobs.append(None)
    gl.lockJobList = threading.Lock()

    # Create new threads
    NTRC.ntracef(5, "RUN", "proc make thread instances")
    thrStart = CStartAllCases(gl, gl.nWaitMsec, gl.nWaitHowMany
                            , llsInstructions, )
    thrEnd = CEndAllCases(gl, myqFullOutput, myqWaited
                            , gl.nWaitMsec, )
    gl.llsFullOutput = [["", ""]]
    thrStart.start()
    thrEnd.start()

    # Wait until all jobs have started and finished.
    thrStart.join()             # Runs out of instructions.
    thrEnd.join()               # Runs out of finished jobs.
# Closure inside the tracing decorator; level, facil, and func come from
#  the enclosing scope.
def wrap1(*args, **kwargs):
    if len(args) > 0 and (repr(args[0]).find(" object ") >= 0
            or (repr(args[0]).find(" instance ") >= 0)):
        _id = getattr(args[0], "ID", "")
        NTRC.ntracef(level, facil, "entr %s <cls=%s id=|%s|> |%s| kw=%s"
            % (func.__name__, args[0].__class__.__name__, _id,
            args[1:], kwargs))
def mainNewBroker(gl):
    NTRC.ntrace(3, "proc params ncases|%s| nparallel|%s| "
        "nwaitmsec|%s| nwaitmany|%s|"
        % (gl.nCases, gl.nParallel, gl.nWaitMsec, gl.nWaitHowMany))

    # Main loop
    # Create list of instructions.  Each instruction is a list of
    #  command strings.
    lLinesTemp = [sLine.lstrip()
                for sLine in sTempListOfCommands.split('\n')
                ]
    llsInstructionsTemp = [lLinesTemp] * gl.nCases

    # Subprocess to start all case jobs.
    qOut = multiprocessing.Queue()
    qWaited = multiprocessing.Queue()
    jRunJobs = multiprocessing.Process(target=fnRunEverything, name="RunJobs"
                    , args=(gl, llsInstructionsTemp
                    , gl.nWaitMsec, gl.nWaitHowMany
                    , qOut, qWaited))
    jRunJobs.start()            # Start subproc that does the work.
    # Drain the queues before joining: a child that has put a large
    #  object on a queue cannot exit until the parent reads it.
    llOut = qOut.get()          # Get massive output list.
    tTmp = qWaited.get()
    jRunJobs.join()             # Wait for the subproc to finish.
    qOut.close()
    (gl.nWaitedForSlot, gl.nWaitedForDone) = (tTmp.slot, tTmp.done)
    qWaited.close()
    return llOut
    def run(self):
        while (fnbWaitForOpening(self.gl, self.nWaitMsec, self.nWaitHowMany)
                ):
            NTRC.ntracef(3, "STRT", "proc doallcases slot avail for case|%s|"
                % (self.nProcess))

            # L O C K
            with self.gl.lockJobList:
                # Find an empty slot in the jobs list.
                lEmptySlots = [idx for (idx, x) in enumerate(self.gl.ltJobs)
                                if not x]
                assert len(lEmptySlots) > 0, ("Supposed to be an empty slot "
                    "for new case, but I can't find one.")
                idxEmpty = lEmptySlots[0]

                # Instruction list for this job.
                # If the list is empty, then we are done here.
                # itlsInstructions is an iterator that produces a list of
                #  instruction strings for each next().
                # BEWARE: instruction list might be a generator,
                #  cannot test length.
                # StopIteration for generator or iterator; IndexError for list.
                try:
                    tOneInstr = next(self.itlsInstructions)
                    NTRC.ntracef(3, "STRT", "proc instr|%s|"
                        % (repr(tOneInstr)))
                    lLines, sLogFilename = tOneInstr.cmdlist, tOneInstr.logname
                    dInstr, sRunId = tOneInstr.casedict, tOneInstr.runid
                except (StopIteration, IndexError):
                    self.gl.bThatsAllFolks = True
                    self.gl.nCasesTotal = self.gl.nCasesStarted
                    NTRC.ntracef(1, "STRT", "proc startall "
                        "exhausted instructions nprocess|%s|"
                        % (self.nProcess))
                    break

                # Create resources for the job.
                qOut = multiprocessing.Queue()
                nJob = next(self.nCounter)
                with self.gl.lockPrint:
                    NTRC.ntracef(0, "STRT", "proc case|%s| start |%s|"
                        % (nJob, sRunId))
                proc = multiprocessing.Process(target=fntDoOneCase
                                            , args=(tOneInstr, qOut)
                                            )
                tThisJob = tJob(procid=nJob, )

                # Save job in empty slot of list, and save dict
                #  entries to get proc and queue.
                self.gl.dId2Proc[nJob] = proc
                self.gl.dId2Queue[nJob] = qOut
                # Save job info in jobs list.
                self.gl.ltJobs[idxEmpty] = tThisJob
                NTRC.ntracef(3, "STRT", "proc startall go slot|%s| njob|%s|"
                    % (idxEmpty, nJob))
                proc.start()
                self.nProcess += 1
                self.gl.nCasesStarted += 1
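This version of run() unpacks each instruction as a record with cmdlist, logname, casedict, and runid fields. The real record type and the code that builds the instruction iterator are not in this excerpt; a hypothetical sketch, with fniMakeInstructions and its log/run-id naming invented purely for illustration:

import collections

# Assumed instruction record; field names come from the unpacking in run(),
#  everything else here is illustrative.
tInstruction = collections.namedtuple("tInstruction",
                    "cmdlist logname casedict runid")

def fniMakeInstructions(llsCommands):
    '''Hypothetical generator that feeds CStartAllCases one case at a time.'''
    for nCase, lLines in enumerate(llsCommands, start=1):
        yield tInstruction(cmdlist=lLines
                        , logname="case%03d.log" % nCase
                        , casedict={"ncase": nCase}
                        , runid="run%03d" % nCase)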
    def __init__(self, gl, myqForOutput, myqWaited
                , mynWaitMsec
                ):
        threading.Thread.__init__(self)
        self.gl = gl
        self.nWaitMsec = mynWaitMsec
        self.qForOutput = myqForOutput
        self.qWaited = myqWaited
        self.llsFullOutput = list()
        NTRC.ntracef(5, "END", "exit init gl|%s| qout|%s| wait|%s|"
            % (self.gl, self.qForOutput, self.nWaitMsec))
    def __init__(self, mygl
                , mynWaitMsec, mynWaitHowMany
                , myitlsInstructions
                ):
        threading.Thread.__init__(self)
        self.gl = mygl
        self.nWaitMsec = mynWaitMsec
        self.nWaitHowMany = mynWaitHowMany
        self.nCounter = itertools.count(1)
        self.nProcess = 0
        self.itlsInstructions = myitlsInstructions
        NTRC.ntracef(2, "STRT", "exit init gl|%s| instrs|%s|"
            % (self.gl, self.itlsInstructions))
def main(gl):
    """ Temp hack to make instructions for debugging.
    """
    NTRC.ntrace(0, "Starting...")
    tStart = datetime.datetime.now()
    llFullOutput = mainNewBroker(gl)
    if gl.bDebugPrint:
        # Print all the crap that comes back.
        print("---------begin cases----------")
        for lCase in llFullOutput:
            sCaseOut = ""
            NTRC.ntrace(3, "proc fromq lCase|%s|" % (lCase))
            sCaseOut = '\n'.join(lCase)
            print(sCaseOut)
            print("--------------")
        print("---------end cases----------")
    NTRC.ntrace(0, "Finished nWaitedForSlot|%s| nWaitedForDone|%s|"
        % (gl.nWaitedForSlot, gl.nWaitedForDone))
    tEnd = datetime.datetime.now()
    tDif = tEnd - tStart
    tDifMuSec = float((tDif.seconds * 1E6) + tDif.microseconds)
    NTRC.ntrace(0, "Time total|%.3f|sec cases|%s| parallel|%s| "
        "per case|%.0f|msec"
        % (tDifMuSec/1E6, gl.nCases, gl.nParallel,
            tDifMuSec/gl.nCases/1E3))
def fntDoOneCase(mylInstruction, qToUse, mysLogfileName):
    """Input: list of instructions generated by the broker for this case;
    multiprocessing queue through which to report results.

    Remove blanks, comments, etc., from the instructions.  Each line
    that is not blank or comment is a command to be executed.  Blanks
    and comments are written directly into the output.

    Output: list of commands and their output, sent to the supplied queue.
    The text will also be written to a log file for the case.

    This function will be a multiprocessing external process.
    """
    sWhoami = multiprocessing.current_process().name
    nProc = fnsGetProcessNumber(sWhoami)
    lResults = []               # list of strings

    # Process all lines of instructions and collect results.
    for nLine, sLine in enumerate(mylInstruction):
        if fnbDoNotIgnoreLine(sLine):
            # Genuine line; execute and collect answer line(s).
            tAnswer = fntDoOneLine(sLine, nProc, nLine)
            (nRtn, nErr, lResult) = (tAnswer.callstatus
                                    , tAnswer.cmdstatus
                                    , tAnswer.ltext)
            lResults.extend(lResult)
            NTRC.ntracef(4, "DO1", "proc DoOneCase case|%s| line|%s| "
                "lResult|%s|"
                % (nProc, nLine, lResult))
        else:
            # Comment or blank line; just append to results.
            lResults.extend([("-" * len(fnsGetTimestamp()))
                            , (fnsGetTimestamp() + " " + sLine)])
            NTRC.ntracef(4, "DO1", "proc DoOneCase case|%s| line|%s| "
                "comment|%s|"
                % (nProc, nLine, sLine))
    fnWriteLogFile(nProc, (lResults), mysLogfileName)
    lPrefix = [("BEGIN results from " + sWhoami)]
    lSuffix = [("ENDOF results from " + sWhoami)]
    lResultsToSee = ['\n'] + lPrefix + lResults + lSuffix + ['\n']
    tAnswers = tLinesOut(procname=sWhoami, listoflists=lResultsToSee)
    qToUse.put(tAnswers)
    qToUse.close()
    return (tAnswers)
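The helpers fnsGetProcessNumber, fnbDoNotIgnoreLine, fnWriteLogFile, and the tLinesOut record are assumed by this function but not shown in the excerpt. Plausible stand-ins, sufficient to run fntDoOneCase in isolation; the regex on the Process name and the "#"-comment convention are assumptions, not the project's actual rules:

import collections
import re

# Assumed stand-ins for helpers not shown in this excerpt.
tLinesOut = collections.namedtuple("tLinesOut", "procname listoflists")

def fnsGetProcessNumber(mysProcName):
    '''Pull the trailing digits from a Process name such as "Process-7".'''
    oMatch = re.search(r'(\d+)$', mysProcName)
    return int(oMatch.group(1)) if oMatch else 0

def fnbDoNotIgnoreLine(mysLine):
    '''True if the line is a real command rather than a blank or comment.'''
    sStripped = mysLine.strip()
    return bool(sStripped) and not sStripped.startswith("#")

def fnWriteLogFile(mynProc, mylLines, mysLogfileName):
    '''Write the collected result lines to the per-case log file.'''
    with open(mysLogfileName, "w") as fhLog:
        fhLog.write("\n".join(mylLines) + "\n")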
def fnbWaitForOpening(mynWaitTimeMsec, mynWaitMax):
    nWait = mynWaitMax
    while nWait:
        nAlive = fnnHowManyAlive(gl)
        if nAlive < gl.nParallel:
            break
        else:
            nWait -= 1
            gl.nWaitedForSlot += 1
            if gl.bDebugPrint:
                print(".", end='')      # DEBUG
            time.sleep(mynWaitTimeMsec / 1000.0)
            NTRC.ntracef(5, "WAIT", "proc waitforopening timesleft|%s| "
                "nwaited|%s|"
                % (nWait, gl.nWaitedForSlot))
    if nWait <= 0:
        raise ValueError("Waited too long for empty job slot.")
    else:
        return (nWait > 0)
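fnnHowManyAlive is not shown in this excerpt. Given that the broker stores each job's Process in gl.dId2Proc, it is presumably something close to the following (an assumption, not the project's code):

def fnnHowManyAlive(mygl):
    '''Count jobs whose worker Process is still running (assumed helper).'''
    return sum(1 for proc in mygl.dId2Proc.values() if proc.is_alive())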
def mainNewBroker(gl):
    NTRC.ntrace(3, "proc params ncases|%s| nparallel|%s| "
        "nwaitmsec|%s| nwaitmany|%s|"
        % (gl.nCases, gl.nParallel, gl.nWaitMsec, gl.nWaitHowMany))

    # Main loop
    # Create list of instructions.  Each instruction is a list of
    #  command strings.
    lLinesTemp = [sLine.lstrip()
                for sLine in sTempListOfCommands.split('\n')
                ]
    # And make a list of instructions for each case.
    llsInstructionsTemp = [lLinesTemp] * gl.nCases

    tTmp = fntRunEverything(gl, iter(llsInstructionsTemp)
                        , gl.nWaitMsec, gl.nWaitHowMany)
    (gl.nWaitedForSlot, gl.nWaitedForDone) = (tTmp.slot, tTmp.done)
    return []                   # used to be llOut in early proto
def fntDoOneLine(mysLine, mynProc, mynLine):
    """Execute one command.

    Input: single line of command.
    Output: tuple of (Popen PIPE return code, command return code,
    list of output lines as strings).

    Input lines and the first line of output blocks have timestamps;
    other lines in output blocks are indented with spaces.
    """
    sTimeBegin = fnsGetTimestamp()
    proc = (subprocess.Popen(mysLine
            , stdout=subprocess.PIPE
            , close_fds=True            # The default anyway, I think.
            , stderr=subprocess.DEVNULL
            , universal_newlines=True
            , shell=True)
            )
    (sProcOut, sProcErr) = proc.communicate()
    proc.stdout.close()
    if not sProcErr:
        sProcErr = ""
    sTimeEnd = fnsGetTimestamp()

    # Format lines for output by timestamping or indenting each line.
    sOut = ("-" * len(sTimeBegin) + "\n"
        + sTimeBegin + " " + "$ " + mysLine + "\n")
    lTmpOut1 = sProcOut.rstrip().split("\n")
    lTmpOut2 = [fnsStampLine(sTimeEnd, sLine, (i == 0))
                for i, sLine in enumerate(lTmpOut1)]
    sOut += "\n".join(lTmpOut2)
    sOut += sProcErr.rstrip()

    # Collect and return everything to caller.
    nCmdStat = "n/a - RBL"
    nReturnCode = proc.returncode
    lOut = sOut.split("\n")
    NTRC.ntracef(4, "DO1L", "proc DoOneLine case|%s| line|%s| "
        "sline|%s| lResult|%s|"
        % (mynProc, mynLine, mysLine, lOut))
    return (tLineOut(callstatus=nReturnCode, cmdstatus=nCmdStat
            , linenr=mynLine, casenr=mynProc, ltext=lOut))
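fnsGetTimestamp, fnsStampLine, and the tLineOut record are also assumed by this function. A sketch consistent with the timestamp format visible in the trace output elsewhere in this document (20210107_222604) and with the stamp-or-indent behavior described in the docstring; the exact field order of tLineOut is a guess:

import collections
import datetime

# Assumed record and timestamp helpers; the real versions are not shown here.
tLineOut = collections.namedtuple("tLineOut",
                "callstatus cmdstatus linenr casenr ltext")

def fnsGetTimestamp():
    '''Timestamp in the yyyymmdd_hhmmss form seen in the trace output.'''
    return datetime.datetime.now().strftime("%Y%m%d_%H%M%S")

def fnsStampLine(mysStamp, mysLine, mybFirst):
    '''Timestamp the first line of an output block; indent the others.'''
    if mybFirst:
        return mysStamp + " " + mysLine
    return " " * (len(mysStamp) + 1) + mysLine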
def greet2(*names):
    NTRC.ntrace(3, f"proc namelist={names!r}")
    print('Hello', end='')
    for name in names:
        print(', ' + name, end='')
    print(':')
    def run(self):
        NTRC.ntracef(5, "END", "proc run ltJobs|%s|" % (self.gl.ltJobs))
        nCasesDone = 0
        self.gl.nWaitedForDone = 0
        while True:
            # L O C K
            with self.gl.lockJobList:
                NTRC.ntracef(3, "END", "proc ltJobs|%s|" % (self.gl.ltJobs))
                ltActiveJobs = [(idx, tJob)
                                for idx, tJob in enumerate(self.gl.ltJobs)
                                if tJob]
                NTRC.ntracef(3, "END", "proc ltActiveJobs|%s|"
                    % (ltActiveJobs))
                for idxtJob in ltActiveJobs:
                    idx, tJob = idxtJob
                    nJob = tJob.procid
                    proc = self.gl.dId2Proc[nJob]
                    if not proc.is_alive():
                        NTRC.ntracef(3, "END", "proc endall found done "
                            "ltJobs[%s]=procid|%s|=|%s| alive?|%s|"
                            % (idx, nJob, proc, proc.is_alive()))
                        # Job listed as still baking but reports that it is
                        #  done.  Wait until it is fully baked.
                        proc.join()
                        with self.gl.lockPrint:
                            NTRC.ntracef(0, "END", "proc case|%s| end "
                                % (nJob))

                        # Get its output for the full debug list.
                        queue = self.gl.dId2Queue[nJob]
                        lQOutput = []
                        while not queue.empty():
                            lLinesOut = queue.get().listoflists
                            lQOutput.append(lLinesOut)
                        queue.close()
                        if self.gl.bDebugPrint:
                            NTRC.ntracef(5, "END", "proc lQOutput from q|%s|"
                                % (lQOutput))
                            self.llsFullOutput.extend(lQOutput)
                            NTRC.ntracef(5, "END", "proc lOutput from q|%s|"
                                % (self.llsFullOutput))

                        # Remove job from active list and Id-dicts.
                        # If the queue objects are still in the dId2Queue
                        #  dict, the pipe remains open, oops.
                        self.gl.ltJobs[idx] = None
                        self.gl.dId2Proc.pop(nJob)
                        self.gl.dId2Queue.pop(nJob)
                        nCasesDone += 1
                        self.gl.nCasesDone += 1
                        NTRC.ntracef(3, "STRT",
                            "proc job completed ndone|%s|"
                            % (self.gl.nCasesDone))

                NTRC.ntracef(3, "END", "proc end for-activejobs1"
                    " thatsall?|%s| ndone|%s| nstarted|%s|"
                    % (self.gl.bThatsAllFolks
                    , self.gl.nCasesDone, self.gl.nCasesStarted))
                if (self.gl.bThatsAllFolks
                        and self.gl.nCasesDone == self.gl.nCasesTotal):
                    break
                else:
                    self.gl.nWaitedForDone += 1
                    NTRC.ntracef(3, "END", "proc end for-activejobs2 wait, "
                        "ndone|%s| nwaits|%s|"
                        % (nCasesDone, self.gl.nWaitedForDone))
                    time.sleep(self.nWaitMsec / 1000.0)
                    continue
            # E N D   L O C K

        # llsFullOutput is a list of list of strings, where
        #  the inner list is lines output from commands for
        #  one job, more or less, with prefix and suffix
        #  and comments, too.
        # Paste the whole thing together into a yuge list of lines.
        if self.gl.bDebugPrint:
            sFullOutput = ""
            for lJobOut in self.llsFullOutput:
                sJobOut = "\n".join(lJobOut)
                sFullOutput += sJobOut
            NTRC.ntracef(5, "END", "proc sFullOutput|%s|" % (sFullOutput))
testDecoFancy1()
testDecoFancy2()


# E N T R Y   P O I N T
if 1:
    print("============= Begin =============")
    setNewDefaults(NTRC, mylevel=0, mytarget=0, myfile="", myfacility="",
                    mytime="YES", myhtml="", myproduction=0)
    NTRC.ntrace(0, "BEGIN")

    setNewDefaults(NTRC, mylevel=6, mytarget=0, myfile="",
                    myfacility="all-aaa", mytime="", myhtml="",
                    myproduction=0)
    testAllLevels()
    testVariousLevels()
    testAllFacils()
def testFacils():
    print("========== testFacils ============")
    NTRC.ntrace(0, "test level 0")
    NTRC.ntrace(1, "test level 1")
    NTRC.ntracef(1, "AAA", "facil AAA at test level 1")
    NTRC.ntracef(1, "BBB", "facil BBB at test level 1")
def testAllLevels():
    print("\n========== testAllLevels ============\n")
    NTRC.ntrace(0, "test level 0")
    NTRC.ntrace(1, "test level 1")
    NTRC.ntrace(2, "test level 2")
    NTRC.ntrace(3, "test level 3")
    NTRC.ntrace(4, "test level 4")
    NTRC.ntrace(5, "test level 5")
def testOneLevel():
    print("============ testOneLevel =============")
    NTRC.ntracef(0, "A", "facil A at test level 0")
    NTRC.ntracef(0, "B", "facil B at test level 0")
    NTRC.ntracef(0, "C", "facil C at test level 0")
    def run(self):
        while (fnbWaitForOpening(self.nWaitMsec, self.nWaitHowMany)
                ):
            NTRC.ntracef(3, "STRT", "proc doallcases slot avail for case|%s|"
                % (self.nProcess))

            # How many active jobs?  If maxed out, wait for an empty slot
            #  and try again.
            # L O C K
            with self.gl.lockJobList:
                nActive = len([tJob for tJob in self.gl.ltJobs if tJob])
            # E N D   L O C K
            if nActive >= self.gl.nParallel:
                NTRC.ntracef(3, "STRT",
                    "proc startall slots full nActive|%s|"
                    % (nActive))
                time.sleep(self.nWaitMsec / 1000.0)
                continue

            # L O C K
            with gl.lockJobList:
                # Find an empty slot in the jobs list.
                lEmptySlots = [idx for (idx, x) in enumerate(gl.ltJobs)
                                if not x]
                idxEmpty = lEmptySlots[0]

                # Instruction list for this job.
                # If the list is empty, then we are done here.
                # BEWARE: instruction list might be a generator,
                #  cannot test length.  ONLY IRL
                # StopIteration for generator; IndexError for list.
                # BZZZT: this doesn't work; cannot pop() a generator.
                #  Only a for loop can do this for both types, rats.
                #  Another stinking level of indentation.
                try:
                    lLines = self.llInstructions.pop(0)
                except (StopIteration, IndexError):
                    self.gl.bThatsAllFolks = True
                    self.gl.nCasesTotal = self.gl.nCasesStarted
                    NTRC.ntracef(1, "STRT", "proc startall "
                        "exhausted instructions nprocess|%s|"
                        % (self.nProcess))
                    break

                # Create resources for the job.
                qOut = multiprocessing.Queue()
                nJob = next(self.nCounter)
                sLogFile = "foo" + str(nJob) + ".log"   # TEMP
                proc = multiprocessing.Process(target=fntDoOneCase
                                            , args=(lLines, qOut, sLogFile, )
                                            )
                tThisJob = tJob(procid=nJob, )

                # Save job in empty slot of list, and save dict
                #  entries to get proc and queue.
                self.gl.dId2Proc[nJob] = proc
                self.gl.dId2Queue[nJob] = qOut
                # Save job info in jobs list.
                self.gl.ltJobs[idxEmpty] = tThisJob
                NTRC.ntracef(3, "STRT", "proc startall go slot|%s| njob|%s|"
                    % (idxEmpty, nJob))
                proc.start()
                self.nProcess += 1
                self.gl.nCasesStarted += 1
# example1.py
# Program to display a student's marks from a record.
from NewTrace import NTRC
import random

marks = {'James': 90, 'Jules': 55, 'Arthur': 77}
student_list = list(marks.keys())
# Include a ringer, a name that is not in the dictionary, to test for failure.
student_list.append('Soyuj')
# Pick a student at random from the list.
student_name = student_list[random.randrange(len(student_list))]
print("Looking for grades of student " + student_name)

# Look through the list one by one, the hard way.
# (The easy way would be dict.get() or try-except.)
for student in marks.keys():
    NTRC.ntrace(3, f'found {student}, looking for {student_name}')
    if student == student_name:
        print(f"Marks for student {student_name} = {marks[student]}")
        break
else:
    print(f'No entry found for the name: {student_name}.')
_whatsit = "singleton instance of class NewTrace"

# NEW VERSION NTRC INSTANCE
NTRC = CSingletonNewTrace()


############ SPECIFIC INSERTED TRACE CALLS #############

# Simple.  Just like sprinkling print() but more informative.
NTRC.ntracef(3, "READ", "proc fdGetParams1 file not found |%s|"
    % (sFile))
==> 20210107_222604 3 READ proc fdGetParams1 file not found |../hl/servers.csv|

or

NTRC.ntracef(3, "FMT", "proc FormatQuery item key|%s| val|%s| result|%s|"
    % (sAttrib, sValue, result))
==> 20210108_130214 3 FMT proc FormatQuery item key|nDocSize| val|50| result|50|

# Yes, "string % (values)" rather than f-strings or .format() because
#  1. Python 2 when starting out;
#  2. I think it's actually more readable;
#  3. Dinosaur.
    Sort a dictionary with keys of the form <letter><number>.
    Return a tuple of item tuples from the dict in numeric key order.
    (Readable code rather than an unmaintainable one-liner.)
    '''
    lTmp1 = ((fnIntPlease(x[0][1:]), x) for x in dIn.items())
    lTmp2 = sorted(lTmp1, key=lambda y: y[0])
    lTmp3 = (z[1] for z in lTmp2)
    return tuple(lTmp3)

--------------------------------------------------------------------------------

############ SPECIFIC INSERTED TRACE CALLS #############

# Simple.  Just like sprinkling print() but more informative.
NTRC.ntracef(3, "READ", "proc fdGetParams1 file not found |%s|"
    % (mysFile))
==> 20210107_222604 3 READ proc fdGetParams1 file not found |../hl/servers.csv|

or

NTRC.tracef(3, "FMT", "proc FormatQuery item key|%s| val|%s| result|%s|"
    % (sAttrib, sValue, result))
==> 20210108_130214 3 FMT proc FormatQuery item key|nDocSize| val|50| result|50|

--------------------------------------------------------------------------------

# Less simple: contains a lot of information useful for debugging.
NTRC.tracef(3, "SERV", "proc mAddDocument serv|%s| id|%s| "