def testJobReadWriteAndDelete(self):
    jobDir = os.path.join(os.getcwd(), "testJobDir")
    os.mkdir(jobDir) #If the directory already exists then the test will fail
    command = "by your command"
    memory = 2**32 #4 GiB. Note "^" is XOR in Python, so "2^32" would be 34.
    cpu = 1
    tryCount = 100

    for i in xrange(10):
        startTime = time.time()
        for _ in xrange(100):
            #Check a freshly constructed job has the expected fields
            j = Job(command, memory, cpu, tryCount, jobDir)
            self.assertEquals(j.remainingRetryCount, tryCount)
            self.assertEquals(j.jobDir, jobDir)
            self.assertEquals(j.children, [])
            self.assertEquals(j.followOnCommands, [ (command, memory, cpu, 0) ])
            self.assertEquals(j.messages, [])
            #Check the job survives a write/read round trip
            j.write()
            j = Job.read(j.getJobFileName())
            self.assertEquals(j.remainingRetryCount, tryCount)
            self.assertEquals(j.jobDir, jobDir)
            self.assertEquals(j.children, [])
            self.assertEquals(j.followOnCommands, [ (command, memory, cpu, 0) ])
            self.assertEquals(j.messages, [])
            #Check deletion removes the job file
            self.assertTrue(os.path.exists(j.getJobFileName()))
            j.delete()
            self.assertTrue(not os.path.exists(j.getJobFileName()))
        print "It took %f seconds to load/unload jobs" % (time.time() - startTime) #We've just used it for benchmarking, so far
    #Would be good to extend this trivial test
    system("rm -rf %s" % jobDir)
def testJobUpdate(self):
    jobDir = os.path.join(os.getcwd(), "testJobDir")
    os.mkdir(jobDir) #If the directory already exists then the test will fail
    command = "by your command"
    memory = 2**32 #4 GiB. Note "^" is XOR in Python, so "2^32" would be 34.
    cpu = 1
    tryCount = 100

    for i in xrange(40):
        startTime = time.time()
        j = Job(command, memory, cpu, tryCount, jobDir)
        #Add a random number of children, then update the job on disk
        childNumber = random.choice(range(20))
        for k in xrange(childNumber):
            j.children.append((command, memory, cpu))
        self.assertEquals(len(j.children), childNumber)
        j.update(tryCount=tryCount, depth=0)
        #Reload the job and check the children were written out
        j = Job.read(j.getJobFileName())
        self.assertEquals(len(j.children) + len(j.followOnCommands), childNumber + 1)
        for childJobFile, memory, cpu in j.children:
            cJ = Job.read(childJobFile)
            self.assertEquals(cJ.remainingRetryCount, tryCount)
            #self.assertEquals(cJ.jobDir, os.path.split(cJ)[0])
            self.assertEquals(cJ.children, [])
            self.assertEquals(cJ.followOnCommands, [ (command, memory, cpu, 0) ])
            self.assertEquals(cJ.messages, [])
            self.assertTrue(os.path.exists(cJ.getJobFileName()))
            cJ.delete()
            self.assertTrue(not os.path.exists(cJ.getJobFileName()))
        self.assertEquals(os.listdir(jobDir), [ "job" ])
        j.delete()
        print "It took %f seconds to update jobs" % (time.time() - startTime) #We've just used it for benchmarking, so far
    system("rm -rf %s" % jobDir)
def createFirstJob(command, config, memory=None, cpu=None, time=sys.maxint):
    """Adds the first job to the jobtree.
    """
    logger.info("Adding the first job")
    if memory is None or memory == sys.maxint:
        memory = float(config.attrib["default_memory"])
    if cpu is None or cpu == sys.maxint:
        cpu = float(config.attrib["default_cpu"])
    job = Job(command=command, memory=memory, cpu=cpu,
              tryCount=int(config.attrib["try_count"]),
              jobDir=getJobFileDirName(config.attrib["job_tree"]))
    job.write()
    logger.info("Added the first job")
def parseJobFile(absFileName):
    try:
        job = Job.read(absFileName)
        return job
    except:
        logger.info("Encountered error while parsing job file %s, so we will ignore it" % absFileName)
        return None
def main():
    sys.path.append(sys.argv[1])
    sys.argv.remove(sys.argv[1])

    #Now we can import all the stuff..
    from sonLib.bioio import getBasicOptionParser
    from sonLib.bioio import parseBasicOptions
    from sonLib.bioio import logger
    from sonLib.bioio import addLoggingFileHandler, redirectLoggerStreamHandlers
    from sonLib.bioio import setLogLevel
    from sonLib.bioio import getTotalCpuTime, getTotalCpuTimeAndMemoryUsage
    from sonLib.bioio import getTempDirectory
    from sonLib.bioio import makeSubDir
    from jobTree.src.job import Job
    from jobTree.src.master import getEnvironmentFileName, getConfigFileName, listChildDirs, getTempStatsFile, setupJobAfterFailure
    from sonLib.bioio import system

    ##########################################
    #Input args
    ##########################################

    jobTreePath = sys.argv[1]
    jobFile = sys.argv[2]

    ##########################################
    #Load the environment for the job
    ##########################################

    #First load the environment for the job.
    fileHandle = open(getEnvironmentFileName(jobTreePath), 'r')
    environment = cPickle.load(fileHandle)
    fileHandle.close()
    for i in environment:
        if i not in ("TMPDIR", "TMP", "HOSTNAME", "HOSTTYPE"):
            os.environ[i] = environment[i]
    # sys.path is used by __import__ to find modules
    if "PYTHONPATH" in environment:
        for e in environment["PYTHONPATH"].split(':'):
            if e != '':
                sys.path.append(e)
    #os.environ = environment
    #os.putenv(key, value)

    ##########################################
    #Setup the temporary directories.
    ##########################################

    #Dir to put all the temp files in.
    localSlaveTempDir = getTempDirectory()
    localTempDir = makeSubDir(os.path.join(localSlaveTempDir, "localTempDir"))

    ##########################################
    #Setup the logging
    ##########################################

    #Setup the logging
    tempSlaveLogFile = os.path.join(localSlaveTempDir, "slave_log.txt")
    slaveHandle = open(tempSlaveLogFile, 'w')
    for handler in list(logger.handlers): #Remove old handlers
        logger.removeHandler(handler)
    logger.addHandler(logging.StreamHandler(slaveHandle))
    origStdErr = sys.stderr
    origStdOut = sys.stdout
    sys.stderr = slaveHandle
    sys.stdout = slaveHandle

    ##########################################
    #Parse input files
    ##########################################

    config = ET.parse(getConfigFileName(jobTreePath)).getroot()
    setLogLevel(config.attrib["log_level"])
    job = Job.read(jobFile)
    job.messages = [] #This is the only way to stop messages logging twice, as they are read only in the master
    job.children = []
    if os.path.exists(job.getLogFileName()): #This cleans the old log file
        os.remove(job.getLogFileName())
    logger.info("Parsed arguments and set up logging")

    #Try loop for slave logging
    ##########################################
    #Setup the stats, if requested
    ##########################################

    if config.attrib.has_key("stats"):
        startTime = time.time()
        startClock = getTotalCpuTime()
        stats = ET.Element("slave")
    else:
        stats = None

    ##########################################
    #The max time
    ##########################################

    maxTime = float(config.attrib["job_time"])
    assert maxTime > 0.0
    assert maxTime < sys.maxint

    ##########################################
    #Slave log file trapped from here on in
    ##########################################

    slaveFailed = False
    try:

        ##########################################
        #The next job
        ##########################################

        def globalTempDirName(job, depth):
            return job.getGlobalTempDirName() + str(depth)

        command, memoryAvailable, cpuAvailable, depth = job.followOnCommands[-1]
        defaultMemory = int(config.attrib["default_memory"])
        defaultCpu = int(config.attrib["default_cpu"])
        assert len(job.children) == 0

        startTime = time.time()
        while True:
            job.followOnCommands.pop()

            ##########################################
            #Global temp dir
            ##########################################

            globalTempDir = makeSubDir(globalTempDirName(job, depth))
            i = 1
            while os.path.isdir(globalTempDirName(job, depth+i)):
                system("rm -rf %s" % globalTempDirName(job, depth+i))
                i += 1

            ##########################################
            #Old children, not yet deleted
            #
            #These may exist because of the lazy cleanup
            #we do
            ##########################################

            for childDir in listChildDirs(job.jobDir):
                logger.debug("Cleaning up old child %s" % childDir)
                system("rm -rf %s" % childDir)

            ##########################################
            #Run the job
            ##########################################

            if command != "": #Not a stub
                if command[:11] == "scriptTree ":
                    ##########################################
                    #Run the target
                    ##########################################

                    loadStack(command).execute(job=job, stats=stats,
                                    localTempDir=localTempDir, globalTempDir=globalTempDir,
                                    memoryAvailable=memoryAvailable, cpuAvailable=cpuAvailable,
                                    defaultMemory=defaultMemory, defaultCpu=defaultCpu, depth=depth)
                else: #Is another command
                    system(command)

            ##########################################
            #Cleanup/reset a successful job/checkpoint
            ##########################################

            job.remainingRetryCount = int(config.attrib["try_count"])
            system("rm -rf %s/*" % (localTempDir))
            job.update(depth=depth, tryCount=job.remainingRetryCount)

            ##########################################
            #Establish if we can run another job
            ##########################################

            if time.time() - startTime > maxTime:
                logger.info("We are breaking because the maximum time the job should run for has been exceeded")
                break

            #Deal with children
            if len(job.children) >= 1: #We are going to have to return to the parent
                logger.info("No more jobs can run in series by this slave, it's got %i children" % len(job.children))
                break

            if len(job.followOnCommands) == 0:
                logger.info("No more jobs can run by this slave as we have exhausted the follow ons")
                break

            #Get the next job and see if we have enough cpu and memory to run it..
            command, memory, cpu, depth = job.followOnCommands[-1]

            if memory > memoryAvailable:
                logger.info("We need more memory for the next job, so finishing")
                break
            if cpu > cpuAvailable:
                logger.info("We need more cpus for the next job, so finishing")
                break

            logger.info("Starting the next job")

        ##########################################
        #Finish up the stats
        ##########################################

        if stats != None:
            totalCpuTime, totalMemoryUsage = getTotalCpuTimeAndMemoryUsage()
            stats.attrib["time"] = str(time.time() - startTime)
            stats.attrib["clock"] = str(totalCpuTime - startClock)
            stats.attrib["memory"] = str(totalMemoryUsage)
            tempStatsFile = getTempStatsFile(jobTreePath)
            fileHandle = open(tempStatsFile + ".new", "w")
            ET.ElementTree(stats).write(fileHandle)
            fileHandle.close()
            os.rename(tempStatsFile + ".new", tempStatsFile) #This operation is atomic

        logger.info("Finished running the chain of jobs on this node, we ran for a total of %f seconds" % (time.time() - startTime))

    ##########################################
    #Where slave goes wrong
    ##########################################
    except: #Case that something goes wrong in slave
        traceback.print_exc(file=slaveHandle)
        logger.critical("Exiting the slave because of a failed job on host %s", socket.gethostname())
        job = Job.read(jobFile)
        setupJobAfterFailure(job, config)
        job.write()
        slaveFailed = True

    ##########################################
    #Cleanup
    ##########################################

    #Close the slave logging
    slaveHandle.flush()
    sys.stderr = origStdErr
    sys.stdout = origStdOut
    redirectLoggerStreamHandlers(slaveHandle, sys.stderr)
    slaveHandle.close()

    #Copy back the log file to the global dir, if needed
    if slaveFailed:
        truncateFile(tempSlaveLogFile)
        system("mv %s %s" % (tempSlaveLogFile, job.getLogFileName()))

    #Remove the temp dir
    system("rm -rf %s" % localSlaveTempDir) #This must happen after the log file is done with, else there is no place to put the log

    if (not slaveFailed) and len(job.followOnCommands) == 0 and len(job.children) == 0 and len(job.messages) == 0:
        ##########################################
        #Cleanup global files at the end of the chain
        ##########################################
        job.delete()
def main():
    sys.path.append(sys.argv[1])
    sys.argv.remove(sys.argv[1])

    #Now we can import all the stuff..
    from sonLib.bioio import getBasicOptionParser
    from sonLib.bioio import parseBasicOptions
    from sonLib.bioio import logger
    from sonLib.bioio import addLoggingFileHandler, redirectLoggerStreamHandlers
    from sonLib.bioio import setLogLevel
    from sonLib.bioio import getTotalCpuTime, getTotalCpuTimeAndMemoryUsage
    from sonLib.bioio import getTempDirectory
    from sonLib.bioio import makeSubDir
    from jobTree.src.job import Job
    from jobTree.src.master import getEnvironmentFileName, getConfigFileName, listChildDirs, getTempStatsFile, setupJobAfterFailure
    from sonLib.bioio import system

    ##########################################
    #Input args
    ##########################################

    jobTreePath = sys.argv[1]
    jobFile = sys.argv[2]

    ##########################################
    #Load the environment for the job
    ##########################################

    #First load the environment for the job.
    fileHandle = open(getEnvironmentFileName(jobTreePath), 'r')
    environment = cPickle.load(fileHandle)
    fileHandle.close()
    for i in environment:
        if i not in ("TMPDIR", "TMP", "HOSTNAME", "HOSTTYPE"):
            os.environ[i] = environment[i]
    # sys.path is used by __import__ to find modules
    if "PYTHONPATH" in environment:
        for e in environment["PYTHONPATH"].split(':'):
            if e != '':
                sys.path.append(e)
    #os.environ = environment
    #os.putenv(key, value)

    ##########################################
    #Setup the temporary directories.
    ##########################################

    #Dir to put all the temp files in.
    localSlaveTempDir = getTempDirectory()
    localTempDir = makeSubDir(os.path.join(localSlaveTempDir, "localTempDir"))

    ##########################################
    #Setup the logging
    ##########################################

    #Setup the logging. This is mildly tricky because we don't just want to
    #redirect stdout and stderr for this Python process; we want to redirect it
    #for this process and all children. Consequently, we can't just replace
    #sys.stdout and sys.stderr; we need to mess with the underlying OS-level
    #file descriptors. See <http://stackoverflow.com/a/11632982/402891>

    #When we start, standard input is file descriptor 0, standard output is
    #file descriptor 1, and standard error is file descriptor 2.

    #What file do we want to point FDs 1 and 2 to?
    tempSlaveLogFile = os.path.join(localSlaveTempDir, "slave_log.txt")

    #Save the original stdout and stderr (by opening new file descriptors to
    #the same files)
    origStdOut = os.dup(1)
    origStdErr = os.dup(2)

    #Open the file to send stdout/stderr to.
    logDescriptor = os.open(tempSlaveLogFile, os.O_WRONLY | os.O_CREAT | os.O_APPEND)

    #Replace standard output with a descriptor for the log file
    os.dup2(logDescriptor, 1)

    #Replace standard error with a descriptor for the log file
    os.dup2(logDescriptor, 2)

    #Since we only opened the file once, all the descriptors duped from the
    #original will share offset information, and won't clobber each others'
    #writes. See <http://stackoverflow.com/a/5284108/402891>. This shouldn't
    #matter, since O_APPEND seeks to the end of the file before every write,
    #but maybe there's something odd going on...

    #Close the descriptor we used to open the file
    os.close(logDescriptor)

    for handler in list(logger.handlers): #Remove old handlers
        logger.removeHandler(handler)

    #Add the new handler. The sys.stderr stream has been redirected by swapping
    #the file descriptor out from under it.
    logger.addHandler(logging.StreamHandler(sys.stderr))

    #Put a message at the top of the log, just to make sure it's working.
    print "---JOBTREE SLAVE OUTPUT LOG---"
    sys.stdout.flush()

    #Log the number of open file descriptors so we can tell if we're leaking
    #them.
    logger.debug("Next available file descriptor: {}".format(
        nextOpenDescriptor()))

    ##########################################
    #Parse input files
    ##########################################

    config = ET.parse(getConfigFileName(jobTreePath)).getroot()
    setLogLevel(config.attrib["log_level"])
    job = Job.read(jobFile)
    job.messages = [] #This is the only way to stop messages logging twice, as they are read only in the master
    job.children = [] #Similarly, this is where old children are flushed out.
    job.write() #Update status, to avoid reissuing children after running a follow on below.
    if os.path.exists(job.getLogFileName()): #This cleans the old log file
        os.remove(job.getLogFileName())
    logger.info("Parsed arguments and set up logging")

    #Try loop for slave logging
    ##########################################
    #Setup the stats, if requested
    ##########################################

    if config.attrib.has_key("stats"):
        startTime = time.time()
        startClock = getTotalCpuTime()
        stats = ET.Element("slave")
    else:
        stats = None

    ##########################################
    #The max time
    ##########################################

    maxTime = float(config.attrib["job_time"])
    assert maxTime > 0.0
    assert maxTime < sys.maxint

    ##########################################
    #Slave log file trapped from here on in
    ##########################################

    slaveFailed = False
    try:

        ##########################################
        #The next job
        ##########################################

        def globalTempDirName(job, depth):
            return job.getGlobalTempDirName() + str(depth)

        command, memoryAvailable, cpuAvailable, depth = job.followOnCommands[-1]
        defaultMemory = int(config.attrib["default_memory"])
        defaultCpu = int(config.attrib["default_cpu"])
        assert len(job.children) == 0

        startTime = time.time()
        while True:
            job.followOnCommands.pop()

            ##########################################
            #Global temp dir
            ##########################################

            globalTempDir = makeSubDir(globalTempDirName(job, depth))
            i = 1
            while os.path.isdir(globalTempDirName(job, depth+i)):
                system("rm -rf %s" % globalTempDirName(job, depth+i))
                i += 1

            ##########################################
            #Old children, not yet deleted
            #
            #These may exist because of the lazy cleanup
            #we do
            ##########################################

            for childDir in listChildDirs(job.jobDir):
                logger.debug("Cleaning up old child %s" % childDir)
                system("rm -rf %s" % childDir)

            ##########################################
            #Run the job
            ##########################################

            if command != "": #Not a stub
                if command[:11] == "scriptTree ":
                    ##########################################
                    #Run the target
                    ##########################################

                    loadStack(command).execute(job=job, stats=stats,
                                    localTempDir=localTempDir, globalTempDir=globalTempDir,
                                    memoryAvailable=memoryAvailable, cpuAvailable=cpuAvailable,
                                    defaultMemory=defaultMemory, defaultCpu=defaultCpu, depth=depth)
                else: #Is another command
                    system(command)

            ##########################################
            #Cleanup/reset a successful job/checkpoint
            ##########################################

            job.remainingRetryCount = int(config.attrib["try_count"])
            system("rm -rf %s/*" % (localTempDir))
            job.update(depth=depth, tryCount=job.remainingRetryCount)

            ##########################################
            #Establish if we can run another job
            ##########################################

            if time.time() - startTime > maxTime:
                logger.info("We are breaking because the maximum time the job should run for has been exceeded")
                break

            #Deal with children
            if len(job.children) >= 1: #We are going to have to return to the parent
                logger.info("No more jobs can run in series by this slave, it's got %i children" % len(job.children))
                break

            if len(job.followOnCommands) == 0:
                logger.info("No more jobs can run by this slave as we have exhausted the follow ons")
                break

            #Get the next job and see if we have enough cpu and memory to run it..
            command, memory, cpu, depth = job.followOnCommands[-1]

            if memory > memoryAvailable:
                logger.info("We need more memory for the next job, so finishing")
                break
            if cpu > cpuAvailable:
                logger.info("We need more cpus for the next job, so finishing")
                break

            logger.info("Starting the next job")

        ##########################################
        #Finish up the stats
        ##########################################

        if stats != None:
            totalCpuTime, totalMemoryUsage = getTotalCpuTimeAndMemoryUsage()
            stats.attrib["time"] = str(time.time() - startTime)
            stats.attrib["clock"] = str(totalCpuTime - startClock)
            stats.attrib["memory"] = str(totalMemoryUsage)
            tempStatsFile = getTempStatsFile(jobTreePath)
            fileHandle = open(tempStatsFile + ".new", "w")
            ET.ElementTree(stats).write(fileHandle)
            fileHandle.close()
            os.rename(tempStatsFile + ".new", tempStatsFile) #This operation is atomic

        logger.info("Finished running the chain of jobs on this node, we ran for a total of %f seconds" % (time.time() - startTime))

    ##########################################
    #Where slave goes wrong
    ##########################################
    except: #Case that something goes wrong in slave
        traceback.print_exc()
        logger.critical("Exiting the slave because of a failed job on host %s", socket.gethostname())
        job = Job.read(jobFile)
        setupJobAfterFailure(job, config)
        job.write()
        slaveFailed = True

    ##########################################
    #Cleanup
    ##########################################

    #Close the slave logging

    #Flush at the Python level
    sys.stdout.flush()
    sys.stderr.flush()
    #Flush at the OS level
    os.fsync(1)
    os.fsync(2)

    #Close redirected stdout and replace with the original standard output.
    os.dup2(origStdOut, 1)

    #Close redirected stderr and replace with the original standard error.
    os.dup2(origStdErr, 2)

    #sys.stdout and sys.stderr don't need to be modified at all. We don't need
    #to call redirectLoggerStreamHandlers since they still log to sys.stderr

    #Close our extra handles to the original standard output and standard
    #error streams, so we don't leak file handles.
    os.close(origStdOut)
    os.close(origStdErr)

    #Now our file handles are in exactly the state they were in before.

    #Copy back the log file to the global dir, if needed
    if slaveFailed:
        truncateFile(tempSlaveLogFile)
        system("mv %s %s" % (tempSlaveLogFile, job.getLogFileName()))

    #Remove the temp dir
    system("rm -rf %s" % localSlaveTempDir) #This must happen after the log file is done with, else there is no place to put the log

    if (not slaveFailed) and len(job.followOnCommands) == 0 and len(job.children) == 0 and len(job.messages) == 0:
        ##########################################
        #Cleanup global files at the end of the chain
        ##########################################
        job.delete()
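The logging setup above calls nextOpenDescriptor(), which is not shown in this excerpt. Below is a minimal sketch of what such a helper could look like, assuming it simply probes for the lowest free descriptor by briefly opening /dev/null (POSIX hands out the lowest available number); the actual jobTree helper may be implemented differently.

def nextOpenDescriptor():
    """Sketch: return the number of the next available file descriptor.

    Assumes opening /dev/null is an acceptable way to discover the lowest
    free descriptor; used here only for leak-detection logging.
    """
    descriptor = os.open("/dev/null", os.O_RDONLY)
    os.close(descriptor)
    return descriptor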