Code Example #1
File: toilClean.py  Project: arkal/toil
def main():
    """Removes the JobStore from a toil run.
    """

    ##########################################
    #Construct the arguments.
    ##########################################

    parser = getBasicOptionParser()
    parser.add_argument("jobStore", type=str,
                      help=("Store in which to place job management files \
                      and the global accessed temporary files"
                      "(If this is a file path this needs to be globally accessible "
                      "by all machines running jobs).\n"
                      "If the store already exists and restart is false an"
                      " ExistingJobStoreException exception will be thrown."))
    parser.add_argument("--version", action='version', version=version)
    options = parseBasicOptions(parser)
    logger.info("Parsed arguments")

    ##########################################
    #Survey the status of the job and report.
    ##########################################
    logger.info("Checking if we have files for toil")
    try:
        jobStore = loadJobStore(options.jobStore)
    except JobStoreCreationException:
        logger.info("The specified JobStore does not exist, it may have already been deleted")
        sys.exit(0)

    logger.info("Deleting the JobStore")
    jobStore.deleteJobStore()
Code Example #2
File: toilKill.py  Project: benedictpaten/toil
def main():
    parser = getBasicOptionParser("usage: %prog [--jobStore] JOB_TREE_DIR [more options]", "%prog 0.1")
    
    parser.add_option("--jobStore", dest="jobStore",
                      help="Job store path. Can also be specified as the single argument to the script.")
    
    options, args = parseBasicOptions(parser)
    
    if len(sys.argv) == 1:
        parser.print_help()
        sys.exit(0)
    
    assert len(args) <= 1 #Only toil may be specified as argument
    if len(args) == 1: #Allow toil directory as arg
        options.jobStore = args[0]
        
    logger.info("Parsed arguments")
    if options.jobStore == None:
        parser.error("Specify --jobStore")

    jobStore = loadJobStore(options.jobStore)
    
    logger.info("Starting routine to kill running jobs in the toil workflow: %s" % options.jobStore)
    ####This behaviour is now broken
    batchSystem = loadBatchSystem(jobStore.config) #This should automatically kill the existing jobs.. so we're good.
    for jobID in batchSystem.getIssuedBatchJobIDs(): #Just in case we do it again.
        batchSystem.killBatchJobs(jobID)
    logger.info("All jobs SHOULD have been killed")
Code Example #3
def main():
    parser = getBasicOptionParser()

    parser.add_argument(
        "jobStore",
        type=str,
        help=("Store in which to place job management files \
              and the global accessed temporary files"
              "(If this is a file path this needs to be globally accessible "
              "by all machines running jobs).\n"
              "If the store already exists and restart is false an"
              " ExistingJobStoreException exception will be thrown."))
    parser.add_argument("--version", action='version', version=version)
    options = parseBasicOptions(parser)

    jobStore = loadJobStore(options.jobStore)

    logger.info(
        "Starting routine to kill running jobs in the toil workflow: %s" %
        options.jobStore)
    ####This behaviour is now broken
    batchSystem = loadBatchSystem(jobStore.config)  #This should automatically kill the existing jobs.. so we're good.
    for jobID in batchSystem.getIssuedBatchJobIDs():  #Just in case we do it again.
        batchSystem.killBatchJobs(jobID)
    logger.info("All jobs SHOULD have been killed")
Code Example #4
File: toilStats.py  Project: kellrott/toil
def main():
    """ Reports stats on the workflow, use with --stats option to toil.
    """
    parser = getBasicOptionParser()
    initializeOptions(parser)
    options = parseBasicOptions(parser)
    checkOptions(options, parser)
    jobStore = loadJobStore(options.jobStore)
    stats = getStats(options)
    collatedStatsTag = processData(jobStore.config, stats, options)
    reportData(collatedStatsTag, options)
Code Example #5
File: job.py  Project: adamnovak/toil
def promisedJobReturnValueUnpickleFunction(jobStoreString, jobStoreFileID):
    """
    The PromisedJobReturnValue custom unpickle function.
    """
    global promisedJobReturnValueUnpickleFunction_jobStore
    if promisedJobReturnValueUnpickleFunction_jobStore == None:
        promisedJobReturnValueUnpickleFunction_jobStore = loadJobStore(jobStoreString)
    promiseFilesToDelete.add(jobStoreFileID)
    with promisedJobReturnValueUnpickleFunction_jobStore.readFileStream(jobStoreFileID) as fileHandle:
        value = cPickle.load(fileHandle) #If this doesn't work then the file containing the promise may not exist or be corrupted.
        return value
Code Example #6
File: toilStats.py  Project: arkal/toil
def main():
    """ Reports stats on the workflow, use with --stats option to toil.
    """
    parser = getBasicOptionParser()
    initializeOptions(parser)
    options = parseBasicOptions(parser)
    checkOptions(options, parser)
    jobStore = loadJobStore(options.jobStore)
    stats = getStats(options)
    collatedStatsTag = processData(jobStore.config, stats, options)
    reportData(collatedStatsTag, options)
Code Example #7
File: job.py  Project: benedictpaten/toil
def promisedJobReturnValueUnpickleFunction(jobStoreString, jobStoreFileID):
    """The PromisedJobReturnValue custom unpickle function.
    """
    #If the attributes jobStoreFileID and jobStoreString are None then we return
    #a new empty PromisedJobReturnValue (this can happen if Promise is serialised between its creation
    #and the running of Job._setFileIDsForPromisedValues.
    if jobStoreString == None:
        assert jobStoreFileID == None
        return PromisedJobReturnValue()
    jobStore = loadJobStore(jobStoreString)
    with jobStore.readFileStream(jobStoreFileID) as fileHandle:
        value = cPickle.load(fileHandle) #If this doesn't work then the file containing the promise may not exist or be corrupted.
        return value
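The two snippets above implement the same idea: a promised value is pickled as a reference to a file in the job store, and a custom unpickle function reads the real value back when it is needed. Below is a minimal, self-contained sketch of that custom-unpickle pattern; the names (LazyValue, load_from_store, FAKE_STORE) are hypothetical, and the registration uses __reduce__ for brevity, so this only mirrors the mechanism, not toil's actual classes.
import pickle

# Hypothetical in-memory stand-in for a job store: file ID -> pickled payload.
FAKE_STORE = {}

def load_from_store(file_id):
    # Custom unpickle function: reconstitute the value from external storage.
    return pickle.loads(FAKE_STORE[file_id])

class LazyValue(object):
    def __init__(self, file_id):
        self.file_id = file_id

    def __reduce__(self):
        # Pickling records only (function, file ID); unpickling calls
        # load_from_store(file_id) and returns the real value instead.
        return (load_from_store, (self.file_id,))

FAKE_STORE["f1"] = pickle.dumps(42)
print(pickle.loads(pickle.dumps(LazyValue("f1"))))  # 42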
Code Example #8
File: utilsTest.py  Project: adderan/toil
    def testMultipleJobsPerWorkerStats(self):
        """
        Tests the case where multiple jobs are run on 1 worker to ensure that all jobs report back their data
        """
        options = Job.Runner.getDefaultOptions(self._getTestJobStorePath())
        options.clean = 'never'
        options.stats = True
        Job.Runner.startToil(RunTwoJobsPerWorker(), options)

        jobStore = loadJobStore(options.jobStore)
        stats = getStats(options)
        collatedStats =  processData(jobStore.config, stats, options)
        self.assertTrue(len(collatedStats.job_types)==2,"Some jobs are not represented in the stats")
Code Example #9
ファイル: toilStats.py プロジェクト: adamnovak/toil
def main():
    """ Reports stats on the job-tree, use with --stats option to toil.
    """

    parser = getBasicOptionParser()
    initializeOptions(parser)
    options = parseBasicOptions(parser)
    checkOptions(options, parser)
    jobStore = loadJobStore(options.jobStore)
    #collatedStatsTag = cacheAvailable(options)
    #if collatedStatsTag is None:
    stats = getStats(options)
    collatedStatsTag = processData(jobStore.config, stats, options)
    reportData(collatedStatsTag, options)
Code Example #10
def main():
    """ Reports stats on the job-tree, use with --stats option to toil.
    """

    parser = getBasicOptionParser(
        "usage: %prog [--jobStore] JOB_TREE_DIR [options]", "%prog 0.1")
    initializeOptions(parser)
    options, args = parseBasicOptions(parser)
    checkOptions(options, args, parser)
    jobStore = loadJobStore(options.jobStore)
    #collatedStatsTag = cacheAvailable(options)
    #if collatedStatsTag is None:
    stats = getStats(options)
    collatedStatsTag = processData(jobStore.config, stats, options)
    reportData(collatedStatsTag, options)
Code Example #11
    def testMultipleJobsPerWorkerStats(self):
        """
        Tests the case where multiple jobs are run on 1 worker to ensure that all jobs report back their data
        """
        options = Job.Runner.getDefaultOptions(self._getTestJobStorePath())
        options.clean = 'never'
        options.stats = True
        Job.Runner.startToil(RunTwoJobsPerWorker(), options)

        jobStore = loadJobStore(options.jobStore)
        stats = getStats(options)
        collatedStats = processData(jobStore.config, stats, options)
        self.assertTrue(
            len(collatedStats.job_types) == 2,
            "Some jobs are not represented in the stats")
Code Example #12
File: toilStats.py  Project: arkal/toil
def getStats(options):
    """ Collect and return the stats and config data.
    """
    def aggregateStats(fileHandle, aggregateObject):
        try:
            stats = json.load(fileHandle, object_hook=Expando)
            for key in stats.keys():
                if key in aggregateObject:
                    aggregateObject[key].append(stats[key])
                else:
                    aggregateObject[key] = [stats[key]]
        except ValueError:
            logger.critical("File %s contains corrupted json. Skipping file." %
                            fileHandle)
            pass  # The file is corrupted.

    jobStore = loadJobStore(options.jobStore)
    aggregateObject = Expando()
    callBack = partial(aggregateStats, aggregateObject=aggregateObject)
    jobStore.readStatsAndLogging(callBack, readAll=True)
    return aggregateObject
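The aggregateStats callback above simply groups every field of each per-job stats record into a list keyed by the field name. Here is a dependency-free sketch of that merge step, where plain dicts stand in for Expando and the field names are made up for illustration.
import json

def aggregate_stats(json_text, aggregate):
    # Parse one stats record and append each field's value to the list
    # kept under the same key in the aggregate.
    stats = json.loads(json_text)
    for key, value in stats.items():
        aggregate.setdefault(key, []).append(value)

aggregate = {}
aggregate_stats('{"time": 1.5, "memory": 100}', aggregate)
aggregate_stats('{"time": 2.0, "memory": 250}', aggregate)
print(aggregate)  # {'time': [1.5, 2.0], 'memory': [100, 250]} (key order may vary)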
Code Example #13
File: toilKill.py  Project: joshuabhk/toil
def main():
    parser = getBasicOptionParser()

    parser.add_argument("jobStore", type=str,
              help=("Store in which to place job management files \
              and the global accessed temporary files"
              "(If this is a file path this needs to be globally accessible "
              "by all machines running jobs).\n"
              "If the store already exists and restart is false an"
              " ExistingJobStoreException exception will be thrown."))
    parser.add_argument("--version", action='version', version=version)
    options = parseBasicOptions(parser)

    jobStore = loadJobStore(options.jobStore)
    
    logger.info("Starting routine to kill running jobs in the toil workflow: %s" % options.jobStore)
    ####This behaviour is now broken
    batchSystem = loadBatchSystem(jobStore.config) #This should automatically kill the existing jobs.. so we're good.
    for jobID in batchSystem.getIssuedBatchJobIDs(): #Just in case we do it again.
        batchSystem.killBatchJobs(jobID)
    logger.info("All jobs SHOULD have been killed")
Code Example #14
File: toilStats.py  Project: kellrott/toil
def getStats(options):
    """ Collect and return the stats and config data.
    """

    def aggregateStats(fileHandle, aggregateObject):
        try:
            stats = json.load(fileHandle, object_hook=Expando)
            for key in stats.keys():
                if key in aggregateObject:
                    aggregateObject[key].append(stats[key])
                else:
                    aggregateObject[key] = [stats[key]]
        except ValueError:
            logger.critical("File %s contains corrupted json. Skipping file." % fileHandle)
            pass  # The file is corrupted.

    jobStore = loadJobStore(options.jobStore)
    aggregateObject = Expando()
    callBack = partial(aggregateStats, aggregateObject=aggregateObject)
    jobStore.readStatsAndLogging(callBack, readAll=True)
    return aggregateObject
Code Example #15
def getStats(options):
    """ Collect and return the stats and config data.
    """
    
    jobStore = loadJobStore(options.jobStore)
    try:
        with jobStore.readSharedFileStream("statsAndLogging.xml") as fH:
            stats = ET.parse(fH).getroot() # Try parsing the whole file.
    except ET.ParseError: # If it doesn't work then we build the file incrementally
        sys.stderr.write("The toil stats file is incomplete or corrupt, "
                         "we'll try instead to parse what's in the file "
                         "incrementally until we reach an error.\n")
        with jobStore.readSharedFileStream("statsAndLogging.xml") as fH:
            stats = ET.Element("stats")
            try:
                for event, elem in ET.iterparse(fH):
                    if elem.tag == 'worker':
                        stats.append(elem)
            except ET.ParseError:
                # TODO: Document why parse errors are to be expected
                pass # Do nothing at this point
    return stats
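Example #15 falls back to ET.iterparse so that a truncated stats file still yields whatever complete worker elements can be read before the parser fails. Below is a small stand-alone sketch of the same filtering loop on made-up XML; the input here is well-formed, and with a corrupt file the ParseError simply ends the loop and everything already appended is kept.
import xml.etree.ElementTree as ET
from io import BytesIO

# Made-up stats document for illustration.
xml_bytes = b'<stats><worker time="1.0"/><other/><worker time="2.0"/></stats>'

stats = ET.Element("stats")
try:
    for event, elem in ET.iterparse(BytesIO(xml_bytes)):
        if elem.tag == 'worker':
            stats.append(elem)  # keep only the <worker> elements
except ET.ParseError:
    pass  # a corrupt tail just stops the loop early

print(len(stats))  # 2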
Code Example #16
File: toilClean.py  Project: kellrott/toil
def main():
    """Removes the JobStore from a toil run.
    """

    ##########################################
    # Construct the arguments.
    ##########################################

    parser = getBasicOptionParser()
    parser.add_argument(
        "jobStore",
        type=str,
        help=(
            "Store in which to place job management files \
                      and the global accessed temporary files"
            "(If this is a file path this needs to be globally accessible "
            "by all machines running jobs).\n"
            "If the store already exists and restart is false an"
            " ExistingJobStoreException exception will be thrown."
        ),
    )
    parser.add_argument("--version", action="version", version=version)
    options = parseBasicOptions(parser)
    logger.info("Parsed arguments")

    ##########################################
    # Survey the status of the job and report.
    ##########################################
    logger.info("Checking if we have files for toil")
    try:
        jobStore = loadJobStore(options.jobStore)
    except JobStoreCreationException:
        logger.info("The specified JobStore does not exist, it may have already been deleted")
        sys.exit(0)

    logger.info("Deleting the JobStore")
    jobStore.deleteJobStore()
Code Example #17
File: toilStatus.py  Project: BD2KGenomics/toil-old
def main():
    """Reports the state of the toil.
    """
    
    ##########################################
    #Construct the arguments.
    ##########################################  
    
    parser = getBasicOptionParser("usage: %prog [--toil] JOB_TREE_DIR [options]", "%prog 0.1")
    
    parser.add_option("--toil", dest="toil",
                      help="Batchjob store path. Can also be specified as the single argument to the script.\
                       default=%default", default='./toil')
    
    parser.add_option("--verbose", dest="verbose", action="store_true",
                      help="Print loads of information, particularly all the log files of \
                      jobs that failed. default=%default",
                      default=False)
    
    parser.add_option("--failIfNotComplete", dest="failIfNotComplete", action="store_true",
                      help="Return exit value of 1 if toil jobs not all completed. default=%default",
                      default=False)
    
    options, args = parseBasicOptions(parser)
    logger.info("Parsed arguments")
    
    if len(sys.argv) == 1:
        parser.print_help()
        sys.exit(0)
    
    assert len(args) <= 1 #Only toil may be specified as argument
    if len(args) == 1: #Allow toil directory as arg
        options.toil = args[0]
    
    ##########################################
    #Do some checks.
    ##########################################
    
    logger.info("Checking if we have files for toil")
    assert options.toil != None
    
    ##########################################
    #Survey the status of the batchjob and report.
    ##########################################  
    
    jobStore = loadJobStore(options.toil)
    config = jobStore.config
    toilState = jobStore.loadToilState() #This initialises the object toil.toilState used to track the active toil
    
    failedJobs = [ batchjob for batchjob in toilState.updatedJobs | \
                  set(toilState.childCounts.keys()) \
                  if batchjob.remainingRetryCount == 0 ]
    
    print "There are %i active jobs, %i parent jobs with children, \
    %i totally failed jobs and %i empty jobs (i.e. finished but not cleaned up) \
    currently in toil: %s" % \
    (len(toilState.updatedJobs), len(toilState.childCounts),
     len(failedJobs), len(toilState.shellJobs), options.toil)
    
    if options.verbose: #Verbose currently means outputting the files that have failed.
        for batchjob in failedJobs:
            if batchjob.logJobStoreFileID is not None:
                with batchjob.getLogFileHandle(jobStore) as logFileHandle:
                    logStream(logFileHandle, batchjob.jobStoreID, logger.warn)
            else:
                print "Log file for batchjob %s is not present" % batchjob.jobStoreID
        if len(failedJobs) == 0:
            print "There are no failed jobs to report"   
    
    if (len(toilState.updatedJobs) + len(toilState.childCounts)) != 0 and \
        options.failIfNotComplete:
        sys.exit(1)
Code Example #18
File: toilStatus.py  Project: arkal/toil
def main():
    """Reports the state of the toil.
    """
    
    ##########################################
    #Construct the arguments.
    ##########################################  
    
    parser = getBasicOptionParser()
    
    parser.add_argument("jobStore", type=str,
              help=("Store in which to place job management files \
              and the global accessed temporary files"
              "(If this is a file path this needs to be globally accessible "
              "by all machines running jobs).\n"
              "If the store already exists and restart is false an"
              " ExistingJobStoreException exception will be thrown."))
    
    parser.add_argument("--verbose", dest="verbose", action="store_true",
                      help="Print loads of information, particularly all the log files of \
                      jobs that failed. default=%(default)s",
                      default=False)
    
    parser.add_argument("--failIfNotComplete", dest="failIfNotComplete", action="store_true",
                      help="Return exit value of 1 if toil jobs not all completed. default=%(default)s",
                      default=False)
    parser.add_argument("--version", action='version', version=version)
    options = parseBasicOptions(parser)
    logger.info("Parsed arguments")
    
    if len(sys.argv) == 1:
        parser.print_help()
        sys.exit(0)
    
    ##########################################
    #Do some checks.
    ##########################################
    
    logger.info("Checking if we have files for toil")
    assert options.jobStore != None
    
    ##########################################
    #Survey the status of the job and report.
    ##########################################  
    
    jobStore = loadJobStore(options.jobStore)
    try:
        rootJob = Job._loadRootJob(jobStore)
    except JobException:
        print "The root job of the jobStore is not present, the toil workflow has probably completed okay"
        sys.exit(0)
    
    toilState = ToilState(jobStore, rootJob )

    # The first element of the toilState.updatedJobs tuple is the jobWrapper we want to inspect
    totalJobs = set(toilState.successorCounts.keys()) | \
                {jobTuple[0] for jobTuple in toilState.updatedJobs}

    failedJobs = [ job for job in totalJobs if job.remainingRetryCount == 0 ]
    
    print "There are %i active jobs, %i parent jobs with children, and \
    %i totally failed jobs currently in toil workflow: %s" % \
    (len(toilState.updatedJobs), len(toilState.successorCounts),
     len(failedJobs), options.jobStore)
    
    if options.verbose: #Verbose currently means outputting the files that have failed.
        for job in failedJobs:
            if job.logJobStoreFileID is not None:
                with job.getLogFileHandle(jobStore) as logFileHandle:
                    logStream(logFileHandle, job.jobStoreID, logger.warn)
            else:
                print "Log file for job %s is not present" % job.jobStoreID
        if len(failedJobs) == 0:
            print "There are no failed jobs to report"   
    
    if (len(toilState.updatedJobs) + len(toilState.successorCounts)) != 0 and \
        options.failIfNotComplete:
        sys.exit(1)
Code Example #19
def main():
    ##########################################
    #Import necessary modules
    ##########################################

    # This is assuming that worker.py is at a path ending in "/toil/worker.py".
    sourcePath = os.path.dirname(os.path.dirname(__file__))
    if sourcePath not in sys.path:
        # FIXME: prepending to sys.path should fix #103
        sys.path.append(sourcePath)

    #Now we can import all the necessary functions
    from toil.lib.bioio import setLogLevel
    from toil.lib.bioio import getTotalCpuTime
    from toil.lib.bioio import getTotalCpuTimeAndMemoryUsage
    from toil.lib.bioio import getTempDirectory
    from toil.lib.bioio import makeSubDir
    from toil.lib.bioio import system
    from toil.common import loadJobStore

    ##########################################
    #Input args
    ##########################################

    jobStoreString = sys.argv[1]
    jobStoreID = sys.argv[2]

    ##########################################
    #Load the jobStore/config file
    ##########################################

    jobStore = loadJobStore(jobStoreString)
    config = jobStore.config

    ##########################################
    #Load the environment for the batchjob
    ##########################################

    #First load the environment for the batchjob.
    with jobStore.readSharedFileStream("environment.pickle") as fileHandle:
        environment = cPickle.load(fileHandle)
    for i in environment:
        if i not in ("TMPDIR", "TMP", "HOSTNAME", "HOSTTYPE"):
            os.environ[i] = environment[i]
    # sys.path is used by __import__ to find modules
    if "PYTHONPATH" in environment:
        for e in environment["PYTHONPATH"].split(':'):
            if e != '':
                sys.path.append(e)

    setLogLevel(config.attrib["log_level"])

    ##########################################
    #Setup the temporary directories.
    ##########################################

    #Dir to put all the temp files in.
    localWorkerTempDir = getTempDirectory()

    ##########################################
    #Setup the logging
    ##########################################

    #This is mildly tricky because we don't just want to
    #redirect stdout and stderr for this Python process; we want to redirect it
    #for this process and all children. Consequently, we can't just replace
    #sys.stdout and sys.stderr; we need to mess with the underlying OS-level
    #file descriptors. See <http://stackoverflow.com/a/11632982/402891>

    #When we start, standard input is file descriptor 0, standard output is
    #file descriptor 1, and standard error is file descriptor 2.

    #What file do we want to point FDs 1 and 2 to?
    tempWorkerLogPath = os.path.join(localWorkerTempDir, "worker_log.txt")

    #Save the original stdout and stderr (by opening new file descriptors to the
    #same files)
    origStdOut = os.dup(1)
    origStdErr = os.dup(2)

    #Open the file to send stdout/stderr to.
    logFh = os.open(tempWorkerLogPath, os.O_WRONLY | os.O_CREAT | os.O_APPEND)

    #Replace standard output with a descriptor for the log file
    os.dup2(logFh, 1)

    #Replace standard error with a descriptor for the log file
    os.dup2(logFh, 2)

    #Since we only opened the file once, all the descriptors duped from the
    #original will share offset information, and won't clobber each others'
    #writes. See <http://stackoverflow.com/a/5284108/402891>. This shouldn't
    #matter, since O_APPEND seeks to the end of the file before every write, but
    #maybe there's something odd going on...

    #Close the descriptor we used to open the file
    os.close(logFh)

    for handler in list(logger.handlers):  #Remove old handlers
        logger.removeHandler(handler)

    #Add the new handler. The sys.stderr stream has been redirected by swapping
    #the file descriptor out from under it.
    logger.addHandler(logging.StreamHandler(sys.stderr))

    ##########################################
    #Worker log file trapped from here on in
    ##########################################

    workerFailed = False
    try:

        #Put a message at the top of the log, just to make sure it's working.
        print "---TOIL WORKER OUTPUT LOG---"
        sys.stdout.flush()

        #Log the number of open file descriptors so we can tell if we're leaking
        #them.
        logger.debug("Next available file descriptor: {}".format(
            nextOpenDescriptor()))

        ##########################################
        #Load the batchjob
        ##########################################

        batchjob = jobStore.load(jobStoreID)
        logger.debug("Parsed batchjob")

        ##########################################
        #Cleanup from any earlier invocation of the batchjob
        ##########################################

        if batchjob.command == None:
            while len(batchjob.stack) > 0:
                jobs = batchjob.stack[-1]
                #If the jobs still exist they have not been run, so break
                if jobStore.exists(jobs[0][0]):
                    break
                #However, if they are gone then we can remove them from the stack.
                #This is the only way to flush successors that have previously been run
                #, as jobs are, as far as possible, read only in the leader.
                batchjob.stack.pop()

        #This cleans the old log file which may
        #have been left if the batchjob is being retried after a batchjob failure.
        if batchjob.logJobStoreFileID != None:
            batchjob.clearLogFile(jobStore)

        ##########################################
        #Setup the stats, if requested
        ##########################################

        if config.attrib.has_key("stats"):
            startTime = time.time()
            startClock = getTotalCpuTime()
            stats = ET.Element("worker")
        else:
            stats = None

        startTime = time.time()
        while True:
            ##########################################
            #Run the batchjob, if there is one
            ##########################################

            if batchjob.command != None:
                if batchjob.command[:11] == "scriptTree ":
                    #Make a temporary file directory for the job
                    localTempDir = makeSubDir(
                        os.path.join(localWorkerTempDir, "localTempDir"))

                    #Is a job command
                    messages = loadJob(batchjob.command, jobStore)._execute(
                        batchjob=batchjob,
                        stats=stats,
                        localTempDir=localTempDir,
                        jobStore=jobStore)

                    #Remove the temporary file directory
                    shutil.rmtree(localTempDir)

                else:  #Is another command (running outside of jobs may be deprecated)
                    system(batchjob.command)
                    messages = []
            else:
                #The command may be none, in which case
                #the batchjob is just a shell ready to be deleted
                assert len(batchjob.stack) == 0
                messages = []
                break

            ##########################################
            #Establish if we can run another batchjob within the worker
            ##########################################

            #Exceeded the amount of time the worker is allowed to run for so quit
            if time.time() - startTime > float(config.attrib["job_time"]):
                logger.debug(
                    "We are breaking because the maximum time the batchjob should run for has been exceeded"
                )
                break

            #No more jobs to run so quit
            if len(batchjob.stack) == 0:
                break

            #Get the next set of jobs to run
            jobs = batchjob.stack[-1]
            assert len(jobs) > 0

            #If there are 2 or more jobs to run in parallel we quit
            if len(jobs) >= 2:
                logger.debug(
                    "No more jobs can run in series by this worker,"
                    " it's got %i children",
                    len(jobs) - 1)
                break

            #We check the requirements of the batchjob to see if we can run it
            #within the current worker
            successorJobStoreID, successorMemory, successorCpu, successorsDisk, successorPredecessorID = jobs[0]
            if successorMemory > batchjob.memory:
                logger.debug(
                    "We need more memory for the next batchjob, so finishing")
                break
            if successorCpu > batchjob.cpu:
                logger.debug(
                    "We need more cpus for the next batchjob, so finishing")
                break
            if successorsDisk > batchjob.disk:
                logger.debug(
                    "We need more disk for the next batchjob, so finishing")
                break
            if successorPredecessorID != None:
                logger.debug(
                    "The batchjob has multiple predecessors, we must return to the leader."
                )
                break

            ##########################################
            #We have a single successor batchjob.
            #We load the successor batchjob and transplant its command and stack
            #into the current batchjob so that it can be run
            #as if it were a command that were part of the current batchjob.
            #We can then delete the successor batchjob in the jobStore, as it is
            #wholly incorporated into the current batchjob.
            ##########################################

            #Remove the successor batchjob
            batchjob.stack.pop()

            #Load the successor batchjob
            successorJob = jobStore.load(successorJobStoreID)
            #These should all match up
            assert successorJob.memory == successorMemory
            assert successorJob.cpu == successorCpu
            assert successorJob.predecessorsFinished == set()
            assert successorJob.predecessorNumber == 1
            assert successorJob.command != None
            assert successorJobStoreID == successorJob.jobStoreID

            #Transplant the command and stack to the current batchjob
            batchjob.command = successorJob.command
            batchjob.stack += successorJob.stack
            assert batchjob.memory >= successorJob.memory
            assert batchjob.cpu >= successorJob.cpu

            #Checkpoint the batchjob and delete the successorJob
            batchjob.jobsToDelete = [successorJob.jobStoreID]
            jobStore.update(batchjob)
            jobStore.delete(successorJob.jobStoreID)

            logger.debug("Starting the next batchjob")

        ##########################################
        #Finish up the stats
        ##########################################

        if stats != None:
            totalCpuTime, totalMemoryUsage = getTotalCpuTimeAndMemoryUsage()
            stats.attrib["time"] = str(time.time() - startTime)
            stats.attrib["clock"] = str(totalCpuTime - startClock)
            stats.attrib["memory"] = str(totalMemoryUsage)
            m = ET.SubElement(stats, "messages")
            for message in messages:
                ET.SubElement(m, "message").text = message
            jobStore.writeStatsAndLogging(ET.tostring(stats))
        elif len(messages) > 0:  #No stats, but still need to report log messages
            l = ET.Element("worker")
            m = ET.SubElement(l, "messages")
            for message in messages:
                ET.SubElement(m, "message").text = message
            jobStore.writeStatsAndLogging(ET.tostring(l))

        logger.info(
            "Finished running the chain of jobs on this node, we ran for a total of %f seconds",
            time.time() - startTime)

    ##########################################
    #Trapping where worker goes wrong
    ##########################################
    except:  #Case that something goes wrong in worker
        traceback.print_exc()
        logger.error(
            "Exiting the worker because of a failed batchjob on host %s",
            socket.gethostname())
        batchjob = jobStore.load(jobStoreID)
        batchjob.setupJobAfterFailure(config)
        workerFailed = True

    ##########################################
    #Cleanup
    ##########################################

    #Close the worker logging
    #Flush at the Python level
    sys.stdout.flush()
    sys.stderr.flush()
    #Flush at the OS level
    os.fsync(1)
    os.fsync(2)

    #Close redirected stdout and replace with the original standard output.
    os.dup2(origStdOut, 1)

    #Close redirected stderr and replace with the original standard error.
    os.dup2(origStdErr, 2)

    #sys.stdout and sys.stderr don't need to be modified at all. We don't need
    #to call redirectLoggerStreamHandlers since they still log to sys.stderr

    #Close our extra handles to the original standard output and standard error
    #streams, so we don't leak file handles.
    os.close(origStdOut)
    os.close(origStdErr)

    #Now our file handles are in exactly the state they were in before.

    #Copy back the log file to the global dir, if needed
    if workerFailed:
        truncateFile(tempWorkerLogPath)
        batchjob.setLogFile(tempWorkerLogPath, jobStore)
        os.remove(tempWorkerLogPath)
        jobStore.update(batchjob)

    #Remove the temp dir
    shutil.rmtree(localWorkerTempDir)

    #This must happen after the log file is done with, else there is no place to put the log
    if (not workerFailed) and batchjob.command == None and len(batchjob.stack) == 0:
        #We can now safely get rid of the batchjob
        jobStore.delete(batchjob.jobStoreID)
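The stdout/stderr handling in the worker above operates at the file-descriptor level so that output from child processes is captured too. Here is a compact sketch of just that dup/dup2 redirect-and-restore pattern; the log path is hypothetical.
import os
import sys

log_path = "worker_log.txt"  # hypothetical log file

orig_stdout = os.dup(1)      # keep handles to the original streams
orig_stderr = os.dup(2)
log_fd = os.open(log_path, os.O_WRONLY | os.O_CREAT | os.O_APPEND)
os.dup2(log_fd, 1)           # FD 1 (stdout) now points at the log file
os.dup2(log_fd, 2)           # FD 2 (stderr) too
os.close(log_fd)             # the duplicated descriptors keep the file open

print("captured in the log file")
sys.stdout.flush()           # flush Python-level buffers before restoring

os.dup2(orig_stdout, 1)      # put the original stdout back on FD 1
os.dup2(orig_stderr, 2)      # and the original stderr back on FD 2
os.close(orig_stdout)
os.close(orig_stderr)
print("back on the terminal")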
Code Example #20
def main():
    """Reports the state of the toil.
    """

    ##########################################
    #Construct the arguments.
    ##########################################

    parser = getBasicOptionParser(
        "usage: %prog [--jobStore] JOB_TREE_DIR [options]", "%prog 0.1")

    parser.add_option(
        "--jobStore",
        dest="jobStore",
        help=
        "Job store path. Can also be specified as the single argument to the script.\
                       default=%default",
        default=os.path.abspath("./toil"))

    parser.add_option(
        "--verbose",
        dest="verbose",
        action="store_true",
        help="Print loads of information, particularly all the log files of \
                      jobs that failed. default=%default",
        default=False)

    parser.add_option(
        "--failIfNotComplete",
        dest="failIfNotComplete",
        action="store_true",
        help=
        "Return exit value of 1 if toil jobs not all completed. default=%default",
        default=False)

    options, args = parseBasicOptions(parser)
    logger.info("Parsed arguments")

    if len(sys.argv) == 1:
        parser.print_help()
        sys.exit(0)

    assert len(args) <= 1  #Only toil may be specified as argument
    if len(args) == 1:  #Allow toil directory as arg
        options.jobStore = args[0]

    ##########################################
    #Do some checks.
    ##########################################

    logger.info("Checking if we have files for toil")
    assert options.jobStore != None

    ##########################################
    #Survey the status of the job and report.
    ##########################################

    jobStore = loadJobStore(options.jobStore)
    try:
        rootJob = Job._loadRootJob(jobStore)
    except JobException:
        print "The root job of the jobStore is not present, the toil workflow has probably completed okay"
        sys.exit(0)

    toilState = ToilState(jobStore, rootJob)

    failedJobs = [ job for job in toilState.updatedJobs | \
                  set(toilState.successorCounts.keys()) \
                  if job.remainingRetryCount == 0 ]

    print "There are %i active jobs, %i parent jobs with children, and \
    %i totally failed jobs currently in toil workflow: %s" % \
    (len(toilState.updatedJobs), len(toilState.successorCounts),
     len(failedJobs), options.jobStore)

    if options.verbose:  #Verbose currently means outputting the files that have failed.
        for job in failedJobs:
            if job.logJobStoreFileID is not None:
                with job.getLogFileHandle(jobStore) as logFileHandle:
                    logStream(logFileHandle, job.jobStoreID, logger.warn)
            else:
                print "Log file for job %s is not present" % job.jobStoreID
        if len(failedJobs) == 0:
            print "There are no failed jobs to report"

    if (len(toilState.updatedJobs) + len(toilState.successorCounts)) != 0 and \
        options.failIfNotComplete:
        sys.exit(1)
Code Example #21
File: toilStatus.py  Project: BD2KGenomics/toil-old
def main():
    """Reports the state of the toil.
    """

    ##########################################
    #Construct the arguments.
    ##########################################

    parser = getBasicOptionParser(
        "usage: %prog [--toil] JOB_TREE_DIR [options]", "%prog 0.1")

    parser.add_option(
        "--toil",
        dest="toil",
        help=
        "Batchjob store path. Can also be specified as the single argument to the script.\
                       default=%default",
        default='./toil')

    parser.add_option(
        "--verbose",
        dest="verbose",
        action="store_true",
        help="Print loads of information, particularly all the log files of \
                      jobs that failed. default=%default",
        default=False)

    parser.add_option(
        "--failIfNotComplete",
        dest="failIfNotComplete",
        action="store_true",
        help=
        "Return exit value of 1 if toil jobs not all completed. default=%default",
        default=False)

    options, args = parseBasicOptions(parser)
    logger.info("Parsed arguments")

    if len(sys.argv) == 1:
        parser.print_help()
        sys.exit(0)

    assert len(args) <= 1  #Only toil may be specified as argument
    if len(args) == 1:  #Allow toil directory as arg
        options.toil = args[0]

    ##########################################
    #Do some checks.
    ##########################################

    logger.info("Checking if we have files for toil")
    assert options.toil != None

    ##########################################
    #Survey the status of the batchjob and report.
    ##########################################

    jobStore = loadJobStore(options.toil)
    config = jobStore.config
    toilState = jobStore.loadToilState()  #This initialises the object toil.toilState used to track the active toil

    failedJobs = [ batchjob for batchjob in toilState.updatedJobs | \
                  set(toilState.childCounts.keys()) \
                  if batchjob.remainingRetryCount == 0 ]

    print "There are %i active jobs, %i parent jobs with children, \
    %i totally failed jobs and %i empty jobs (i.e. finished but not cleaned up) \
    currently in toil: %s" % \
    (len(toilState.updatedJobs), len(toilState.childCounts),
     len(failedJobs), len(toilState.shellJobs), options.toil)

    if options.verbose:  #Verbose currently means outputting the files that have failed.
        for batchjob in failedJobs:
            if batchjob.logJobStoreFileID is not None:
                with batchjob.getLogFileHandle(jobStore) as logFileHandle:
                    logStream(logFileHandle, batchjob.jobStoreID, logger.warn)
            else:
                print "Log file for batchjob %s is not present" % batchjob.jobStoreID
        if len(failedJobs) == 0:
            print "There are no failed jobs to report"

    if (len(toilState.updatedJobs) + len(toilState.childCounts)) != 0 and \
        options.failIfNotComplete:
        sys.exit(1)
Code Example #22
def main():
    logging.basicConfig()

    ##########################################
    #Import necessary modules 
    ##########################################
    
    # This is assuming that worker.py is at a path ending in "/toil/worker.py".
    sourcePath = os.path.dirname(os.path.dirname(__file__))
    if sourcePath not in sys.path:
        sys.path.append(sourcePath)
    
    #Now we can import all the necessary functions
    from toil.lib.bioio import setLogLevel
    from toil.lib.bioio import getTotalCpuTime
    from toil.lib.bioio import getTotalCpuTimeAndMemoryUsage
    from toil.lib.bioio import makePublicDir
    from toil.lib.bioio import system
    from toil.common import loadJobStore
    from toil.job import Job
    
    ########################################## 
    #Input args
    ##########################################
    
    jobStoreString = sys.argv[1]
    jobStoreID = sys.argv[2]
    
    ##########################################
    #Load the jobStore/config file
    ##########################################
    
    jobStore = loadJobStore(jobStoreString)
    config = jobStore.config
    
    ##########################################
    #Create the worker killer, if requested
    ##########################################

    if config.badWorker > 0 and random.random() < config.badWorker:
        def badWorker():
            #This will randomly kill the worker process at a random time 
            time.sleep(config.badWorkerFailInterval * random.random())
            os.kill(os.getpid(), signal.SIGKILL) #signal.SIGINT)
            #TODO: FIX OCCASIONAL DEADLOCK WITH SIGINT (tested on single machine)
        t = Thread(target=badWorker)
        t.daemon = True
        t.start()

    ##########################################
    #Load the environment for the jobWrapper
    ##########################################
    
    #First load the environment for the jobWrapper.
    with jobStore.readSharedFileStream("environment.pickle") as fileHandle:
        environment = cPickle.load(fileHandle)
    for i in environment:
        if i not in ("TMPDIR", "TMP", "HOSTNAME", "HOSTTYPE"):
            os.environ[i] = environment[i]
    # sys.path is used by __import__ to find modules
    if "PYTHONPATH" in environment:
        for e in environment["PYTHONPATH"].split(':'):
            if e != '':
                sys.path.append(e)

    setLogLevel(config.logLevel)

    tempRootDir = config.workDir
    if tempRootDir is not None and not os.path.exists(tempRootDir):
        raise RuntimeError("The temporary directory specified by workDir: %s does not exist" % tempRootDir)

    ##########################################
    #Setup the temporary directories.
    ##########################################
        
    #Dir to put all the temp files in. If tempRootDir is None, tempdir looks at environment variables to determine
    # where to put the tempDir.
    localWorkerTempDir = tempfile.mkdtemp(dir=tempRootDir)
    os.chmod(localWorkerTempDir, 0755)

    ##########################################
    #Setup the logging
    ##########################################

    #This is mildly tricky because we don't just want to
    #redirect stdout and stderr for this Python process; we want to redirect it
    #for this process and all children. Consequently, we can't just replace
    #sys.stdout and sys.stderr; we need to mess with the underlying OS-level
    #file descriptors. See <http://stackoverflow.com/a/11632982/402891>
    
    #When we start, standard input is file descriptor 0, standard output is
    #file descriptor 1, and standard error is file descriptor 2.

    #What file do we want to point FDs 1 and 2 to?
    tempWorkerLogPath = os.path.join(localWorkerTempDir, "worker_log.txt")
    
    #Save the original stdout and stderr (by opening new file descriptors to the
    #same files)
    origStdOut = os.dup(1)
    origStdErr = os.dup(2)

    #Open the file to send stdout/stderr to.
    logFh = os.open(tempWorkerLogPath, os.O_WRONLY | os.O_CREAT | os.O_APPEND)

    #Replace standard output with a descriptor for the log file
    os.dup2(logFh, 1)
    
    #Replace standard error with a descriptor for the log file
    os.dup2(logFh, 2)
    
    #Since we only opened the file once, all the descriptors duped from the
    #original will share offset information, and won't clobber each others'
    #writes. See <http://stackoverflow.com/a/5284108/402891>. This shouldn't
    #matter, since O_APPEND seeks to the end of the file before every write, but
    #maybe there's something odd going on...
    
    #Close the descriptor we used to open the file
    os.close(logFh)

    for handler in list(logger.handlers): #Remove old handlers
        logger.removeHandler(handler)
    
    #Add the new handler. The sys.stderr stream has been redirected by swapping
    #the file descriptor out from under it.
    logger.addHandler(logging.StreamHandler(sys.stderr))

    debugging = logging.getLogger().isEnabledFor(logging.DEBUG)
    ##########################################
    #Worker log file trapped from here on in
    ##########################################

    workerFailed = False
    statsDict = MagicExpando()
    statsDict.jobs = []
    messages = []
    blockFn = lambda : True
    cleanCacheFn = lambda x : True
    try:

        #Put a message at the top of the log, just to make sure it's working.
        print "---TOIL WORKER OUTPUT LOG---"
        sys.stdout.flush()
        
        #Log the number of open file descriptors so we can tell if we're leaking
        #them.
        logger.debug("Next available file descriptor: {}".format(
            nextOpenDescriptor()))
    
        ##########################################
        #Load the jobWrapper
        ##########################################
        
        jobWrapper = jobStore.load(jobStoreID)
        logger.debug("Parsed jobWrapper")
        
        ##########################################
        #Cleanup from any earlier invocation of the jobWrapper
        ##########################################
        
        if jobWrapper.command == None:
            while len(jobWrapper.stack) > 0:
                jobs = jobWrapper.stack[-1]
                #If the jobs still exist they have not been run, so break
                if jobStore.exists(jobs[0][0]):
                    break
                #However, if they are gone then we can remove them from the stack.
                #This is the only way to flush successors that have previously been run
                #, as jobs are, as far as possible, read only in the leader.
                jobWrapper.stack.pop()
                
        #This cleans the old log file which may 
        #have been left if the jobWrapper is being retried after a jobWrapper failure.
        oldLogFile = jobWrapper.logJobStoreFileID
        jobWrapper.logJobStoreFileID = None
        jobStore.update(jobWrapper) #Update first, before deleting the file
        if oldLogFile != None:
            jobStore.delete(oldLogFile)
            
        #Make a temporary file directory for the jobWrapper
        localTempDir = makePublicDir(os.path.join(localWorkerTempDir, "localTempDir"))
    
        ##########################################
        #Setup the stats, if requested
        ##########################################
        
        if config.stats:
            startTime = time.time()
            startClock = getTotalCpuTime()

        startTime = time.time() 
        while True:
            ##########################################
            #Run the jobWrapper, if there is one
            ##########################################
            
            if jobWrapper.command != None:
                if jobWrapper.command.startswith( "_toil " ):
                    #Load the job
                    job = Job._loadJob(jobWrapper.command, jobStore)
                    
                    #Cleanup the cache from the previous job
                    cleanCacheFn(job.effectiveRequirements(jobStore.config).cache)
                    
                    #Create a fileStore object for the job
                    fileStore = Job.FileStore(jobStore, jobWrapper, localTempDir, 
                                              blockFn)
                    #Get the next block function and list that will contain any messages
                    blockFn = fileStore._blockFn
                    messages = fileStore.loggingMessages

                    job._execute(jobWrapper=jobWrapper,
                                           stats=statsDict if config.stats else None,
                                           localTempDir=localTempDir,
                                           jobStore=jobStore,
                                           fileStore=fileStore)

                    #Set the clean cache function
                    cleanCacheFn = fileStore._cleanLocalTempDir
                    
                else: #Is another command (running outside of jobs may be deprecated)
                    #Cleanup the cache from the previous job
                    cleanCacheFn(0)
                    
                    system(jobWrapper.command)
                    #Set a dummy clean cache fn
                    cleanCacheFn = lambda x : None
            else:
                #The command may be none, in which case
                #the jobWrapper is either a shell ready to be deleted or has 
                #been scheduled after a failure to cleanup
                break
            
            if Job.FileStore._terminateEvent.isSet():
                raise RuntimeError("The termination flag is set")

            ##########################################
            #Establish if we can run another jobWrapper within the worker
            ##########################################
            
            #No more jobs to run so quit
            if len(jobWrapper.stack) == 0:
                break
            
            #Get the next set of jobs to run
            jobs = jobWrapper.stack[-1]
            assert len(jobs) > 0
            
            #If there are 2 or more jobs to run in parallel we quit
            if len(jobs) >= 2:
                logger.debug("No more jobs can run in series by this worker,"
                            " it's got %i children", len(jobs)-1)
                break
            
            #We check the requirements of the jobWrapper to see if we can run it
            #within the current worker
            successorJobStoreID, successorMemory, successorCores, successorsDisk, successorPredecessorID = jobs[0]
            if successorMemory > jobWrapper.memory:
                logger.debug("We need more memory for the next jobWrapper, so finishing")
                break
            if successorCores > jobWrapper.cores:
                logger.debug("We need more cores for the next jobWrapper, so finishing")
                break
            if successorsDisk > jobWrapper.disk:
                logger.debug("We need more disk for the next jobWrapper, so finishing")
                break
            if successorPredecessorID != None: 
                logger.debug("The jobWrapper has multiple predecessors, we must return to the leader.")
                break
          
            ##########################################
            #We have a single successor jobWrapper.
            #We load the successor jobWrapper and transplant its command and stack
            #into the current jobWrapper so that it can be run
            #as if it were a command that were part of the current jobWrapper.
            #We can then delete the successor jobWrapper in the jobStore, as it is
            #wholly incorporated into the current jobWrapper.
            ##########################################
            
            #Clone the jobWrapper and its stack
            jobWrapper = copy.deepcopy(jobWrapper)
            
            #Remove the successor jobWrapper
            jobWrapper.stack.pop()
            
            #Load the successor jobWrapper
            successorJob = jobStore.load(successorJobStoreID)
            #These should all match up
            assert successorJob.memory == successorMemory
            assert successorJob.cores == successorCores
            assert successorJob.predecessorsFinished == set()
            assert successorJob.predecessorNumber == 1
            assert successorJob.command != None
            assert successorJobStoreID == successorJob.jobStoreID
            
            #Transplant the command and stack to the current jobWrapper
            jobWrapper.command = successorJob.command
            jobWrapper.stack += successorJob.stack
            assert jobWrapper.memory >= successorJob.memory
            assert jobWrapper.cores >= successorJob.cores
            
            #Build a fileStore to update the job
            fileStore = Job.FileStore(jobStore, jobWrapper, localTempDir, blockFn)
            
            #Update blockFn
            blockFn = fileStore._blockFn
            
            #Add successorJob to those to be deleted
            fileStore.jobsToDelete.add(successorJob.jobStoreID)
            
            #This will update the job once the previous job is done
            fileStore._updateJobWhenDone()            
            
            #Clone the jobWrapper and its stack again, so that updates to it do 
            #not interfere with this update
            jobWrapper = copy.deepcopy(jobWrapper)
            
            logger.debug("Starting the next jobWrapper")
        
        ##########################################
        #Finish up the stats
        ##########################################
        if config.stats:
            totalCPUTime, totalMemoryUsage = getTotalCpuTimeAndMemoryUsage()
            statsDict.workers.time = str(time.time() - startTime)
            statsDict.workers.clock = str(totalCPUTime - startClock)
            statsDict.workers.memory = str(totalMemoryUsage)
            statsDict.workers.log = messages
        
        logger.info("Finished running the chain of jobs on this node, we ran for a total of %f seconds", time.time() - startTime)
    
    ##########################################
    #Trapping where worker goes wrong
    ##########################################
    except: #Case that something goes wrong in worker
        traceback.print_exc()
        logger.error("Exiting the worker because of a failed jobWrapper on host %s", socket.gethostname())
        Job.FileStore._terminateEvent.set()
    
    ##########################################
    #Wait for the asynchronous chain of writes/updates to finish
    ########################################## 
       
    blockFn() 
    
    ##########################################
    #All the asynchronous worker/update threads must be finished now, 
    #so safe to test if they completed okay
    ########################################## 
    
    if Job.FileStore._terminateEvent.isSet():
        jobWrapper = jobStore.load(jobStoreID)
        jobWrapper.setupJobAfterFailure(config)
        workerFailed = True

    ##########################################
    #Cleanup
    ##########################################
    
    #Close the worker logging
    #Flush at the Python level
    sys.stdout.flush()
    sys.stderr.flush()
    #Flush at the OS level
    os.fsync(1)
    os.fsync(2)
    
    #Close redirected stdout and replace with the original standard output.
    os.dup2(origStdOut, 1)
    
    #Close redirected stderr and replace with the original standard error.
    os.dup2(origStdErr, 2)
    
    #sys.stdout and sys.stderr don't need to be modified at all. We don't need
    #to call redirectLoggerStreamHandlers since they still log to sys.stderr
    
    #Close our extra handles to the original standard output and standard error
    #streams, so we don't leak file handles.
    os.close(origStdOut)
    os.close(origStdErr)
    
    #Now our file handles are in exactly the state they were in before.
    
    #Copy back the log file to the global dir, if needed
    if workerFailed:
        truncateFile(tempWorkerLogPath)
        jobWrapper.logJobStoreFileID = jobStore.writeFile( tempWorkerLogPath, jobWrapper.jobStoreID )
        os.remove(tempWorkerLogPath)
        jobStore.update(jobWrapper)
    elif debugging: # write log messages
        truncateFile(tempWorkerLogPath)
        with open(tempWorkerLogPath, 'r') as logFile:
            logMessages = logFile.read().splitlines()
        statsDict.logs = [Expando(jobStoreID=jobStoreID,text=logMessage) for logMessage in logMessages]

    if (debugging or config.stats or messages) and not workerFailed: # We have stats/logging to report back
        jobStore.writeStatsAndLogging(json.dumps(statsDict))

    #Remove the temp dir
    shutil.rmtree(localWorkerTempDir)
    
    #This must happen after the log file is done with, else there is no place to put the log
    if (not workerFailed) and jobWrapper.command == None and len(jobWrapper.stack) == 0:
        #We can now safely get rid of the jobWrapper
        jobStore.delete(jobWrapper.jobStoreID)
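
The worker examples above capture everything written to file descriptors 1 and 2, including output from child processes, in a per-job log file, then restore the original descriptors during cleanup. The following is a minimal stand-alone sketch of that pattern rather than toil code: the logPath value and the echo child are invented for illustration, and a POSIX system is assumed.

import os
import subprocess

logPath = "worker_log.txt"  #hypothetical log file path

#Save the original stdout and stderr by duplicating their descriptors
origStdOut = os.dup(1)
origStdErr = os.dup(2)

#Point file descriptors 1 and 2 at the log file; child processes inherit them
logFh = os.open(logPath, os.O_WRONLY | os.O_CREAT | os.O_APPEND)
os.dup2(logFh, 1)
os.dup2(logFh, 2)
os.close(logFh)  #fds 1 and 2 still reference the open file

subprocess.call(["echo", "captured from a child process"])

#Flush, restore the originals, and drop the spare descriptors
os.fsync(1)
os.dup2(origStdOut, 1)
os.dup2(origStdErr, 2)
os.close(origStdOut)
os.close(origStdErr)
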
Code example #23
0
File: toilStatus.py Project: benedictpaten/toil
def main():
    """Reports the state of the toil.
    """
    
    ##########################################
    #Construct the arguments.
    ##########################################  
    
    parser = getBasicOptionParser("usage: %prog [--jobStore] JOB_TREE_DIR [options]", "%prog 0.1")
    
    parser.add_option("--jobStore", dest="jobStore",
                      help="Job store path. Can also be specified as the single argument to the script.\
                       default=%default", default=os.path.abspath("./toil"))
    
    parser.add_option("--verbose", dest="verbose", action="store_true",
                      help="Print loads of information, particularly all the log files of \
                      jobs that failed. default=%default",
                      default=False)
    
    parser.add_option("--failIfNotComplete", dest="failIfNotComplete", action="store_true",
                      help="Return exit value of 1 if toil jobs not all completed. default=%default",
                      default=False)
    
    options, args = parseBasicOptions(parser)
    logger.info("Parsed arguments")
    
    if len(sys.argv) == 1:
        parser.print_help()
        sys.exit(0)
    
    assert len(args) <= 1 #Only toil may be specified as argument
    if len(args) == 1: #Allow toil directory as arg
        options.jobStore = args[0]
    
    ##########################################
    #Do some checks.
    ##########################################
    
    logger.info("Checking if we have files for toil")
    assert options.jobStore != None
    
    ##########################################
    #Survey the status of the job and report.
    ##########################################  
    
    jobStore = loadJobStore(options.jobStore)
    try:
        rootJob = Job._loadRootJob(jobStore)
    except JobException:
        print "The root job of the jobStore is not present, the toil workflow has probably completed okay"
        sys.exit(0)
    
    toilState = ToilState(jobStore, rootJob )
    
    failedJobs = [ job for job in toilState.updatedJobs | \
                  set(toilState.successorCounts.keys()) \
                  if job.remainingRetryCount == 0 ]
    
    print "There are %i active jobs, %i parent jobs with children, and \
    %i totally failed jobs currently in toil workflow: %s" % \
    (len(toilState.updatedJobs), len(toilState.successorCounts),
     len(failedJobs), options.jobStore)
    
    if options.verbose: #Verbose currently means outputting the files that have failed.
        for job in failedJobs:
            if job.logJobStoreFileID is not None:
                with job.getLogFileHandle(jobStore) as logFileHandle:
                    logStream(logFileHandle, job.jobStoreID, logger.warn)
            else:
                print "Log file for job %s is not present" % job.jobStoreID
        if len(failedJobs) == 0:
            print "There are no failed jobs to report"   
    
    if (len(toilState.updatedJobs) + len(toilState.successorCounts)) != 0 and \
        options.failIfNotComplete:
        sys.exit(1)
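
The "totally failed" jobs counted above are simply the jobs the leader still tracks (updated jobs plus parents with outstanding children) whose retries are exhausted. Below is a rough sketch of that filter using stand-in objects; FakeJob and the sample sets are invented for illustration and are not toil classes.

class FakeJob(object):
    def __init__(self, jobStoreID, remainingRetryCount):
        self.jobStoreID = jobStoreID
        self.remainingRetryCount = remainingRetryCount

a, b, c = FakeJob("a", 2), FakeJob("b", 0), FakeJob("c", 0)
updatedJobs = set([a, b])  #jobs the leader still considers active
successorCounts = {c: 1}   #parent jobs that still have children outstanding

#A job counts as totally failed once it has used up all of its retries
failedJobs = [job for job in updatedJobs | set(successorCounts.keys())
              if job.remainingRetryCount == 0]
assert sorted(job.jobStoreID for job in failedJobs) == ["b", "c"]
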
Code example #24
0
File: toilStatus.py Project: adamnovak/toil
def main():
    """Reports the state of the toil.
    """

    ##########################################
    # Construct the arguments.
    ##########################################

    parser = getBasicOptionParser()

    parser.add_argument(
        "jobStore",
        type=str,
        help=(
            "Store in which to place job management files \
              and the global accessed temporary files"
            "(If this is a file path this needs to be globally accessible "
            "by all machines running jobs).\n"
            "If the store already exists and restart is false an"
            " ExistingJobStoreException exception will be thrown."
        ),
    )

    parser.add_argument(
        "--verbose",
        dest="verbose",
        action="store_true",
        help="Print loads of information, particularly all the log files of \
                      jobs that failed. default=%(default)s",
        default=False,
    )

    parser.add_argument(
        "--failIfNotComplete",
        dest="failIfNotComplete",
        action="store_true",
        help="Return exit value of 1 if toil jobs not all completed. default=%(default)s",
        default=False,
    )
    parser.add_argument("--version", action="version", version=version)
    options = parseBasicOptions(parser)
    logger.info("Parsed arguments")

    if len(sys.argv) == 1:
        parser.print_help()
        sys.exit(0)

    ##########################################
    # Do some checks.
    ##########################################

    logger.info("Checking if we have files for toil")
    assert options.jobStore != None

    ##########################################
    # Survey the status of the job and report.
    ##########################################

    jobStore = loadJobStore(options.jobStore)
    try:
        rootJob = Job._loadRootJob(jobStore)
    except JobException:
        print "The root job of the jobStore is not present, the toil workflow has probably completed okay"
        sys.exit(0)

    toilState = ToilState(jobStore, rootJob)

    failedJobs = [
        job for job in toilState.updatedJobs | set(toilState.successorCounts.keys()) if job.remainingRetryCount == 0
    ]

    print "There are %i active jobs, %i parent jobs with children, and \
    %i totally failed jobs currently in toil workflow: %s" % (
        len(toilState.updatedJobs),
        len(toilState.successorCounts),
        len(failedJobs),
        options.jobStore,
    )

    if options.verbose:  # Verbose currently means outputting the files that have failed.
        for job in failedJobs:
            if job.logJobStoreFileID is not None:
                with job.getLogFileHandle(jobStore) as logFileHandle:
                    logStream(logFileHandle, job.jobStoreID, logger.warn)
            else:
                print "Log file for job %s is not present" % job.jobStoreID
        if len(failedJobs) == 0:
            print "There are no failed jobs to report"

    if (len(toilState.updatedJobs) + len(toilState.successorCounts)) != 0 and options.failIfNotComplete:
        sys.exit(1)
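
The two toilStatus variants above differ mainly in their argument handling: the older one uses optparse, whose help strings interpolate %default, while the newer one uses argparse, which expects %(default)s and takes the job store as a positional argument. The sketch below shows the two placeholder styles side by side; the option set is trimmed down and is not the full toil parser.

import argparse
import optparse

#optparse style: "%default" is substituted into the help text when help is printed
optParser = optparse.OptionParser()
optParser.add_option("--jobStore", default="./toil",
                     help="Job store path. default=%default")
optParser.print_help()

#argparse style: "%(default)s" is substituted instead
argParser = argparse.ArgumentParser()
argParser.add_argument("--failIfNotComplete", action="store_true", default=False,
                       help="Return exit value of 1 if jobs incomplete. default=%(default)s")
argParser.print_help()
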
Code example #25
0
File: worker.py Project: kellrott/toil
def main():
    logging.basicConfig()

    ##########################################
    #Import necessary modules 
    ##########################################
    
    # This is assuming that worker.py is at a path ending in "/toil/worker.py".
    sourcePath = os.path.dirname(os.path.dirname(__file__))
    if sourcePath not in sys.path:
        sys.path.append(sourcePath)
    
    #Now we can import all the necessary functions
    from toil.lib.bioio import setLogLevel
    from toil.lib.bioio import getTotalCpuTime
    from toil.lib.bioio import getTotalCpuTimeAndMemoryUsage
    from toil.lib.bioio import makePublicDir
    from toil.lib.bioio import system
    from toil.common import loadJobStore
    from toil.job import Job
    
    ########################################## 
    #Input args
    ##########################################
    
    jobStoreString = sys.argv[1]
    jobStoreID = sys.argv[2]
    
    ##########################################
    #Load the jobStore/config file
    ##########################################
    
    jobStore = loadJobStore(jobStoreString)
    config = jobStore.config
    
    ##########################################
    #Create the worker killer, if requested
    ##########################################

    if config.badWorker > 0 and random.random() < config.badWorker:
        def badWorker():
            #This will randomly kill the worker process at a random time 
            time.sleep(config.badWorkerFailInterval * random.random())
            os.kill(os.getpid(), signal.SIGKILL) #signal.SIGINT)
            #TODO: FIX OCCASIONAL DEADLOCK WITH SIGINT (tested on single machine)
        t = Thread(target=badWorker)
        t.daemon = True
        t.start()

    ##########################################
    #Load the environment for the jobWrapper
    ##########################################
    
    #First load the environment for the jobWrapper.
    with jobStore.readSharedFileStream("environment.pickle") as fileHandle:
        environment = cPickle.load(fileHandle)
    for i in environment:
        if i not in ("TMPDIR", "TMP", "HOSTNAME", "HOSTTYPE"):
            os.environ[i] = environment[i]
    # sys.path is used by __import__ to find modules
    if "PYTHONPATH" in environment:
        for e in environment["PYTHONPATH"].split(':'):
            if e != '':
                sys.path.append(e)

    setLogLevel(config.logLevel)

    tempRootDir = config.workDir
    if tempRootDir is not None and not os.path.exists(tempRootDir):
        raise RuntimeError("The temporary directory specified by workDir: %s does not exist" % tempRootDir)

    ##########################################
    #Setup the temporary directories.
    ##########################################
        
    #Dir to put all the temp files in. If tempRootDir is None, tempdir looks at environment variables to determine
    # where to put the tempDir.
    localWorkerTempDir = tempfile.mkdtemp(dir=tempRootDir)
    os.chmod(localWorkerTempDir, 0755)

    ##########################################
    #Setup the logging
    ##########################################

    #This is mildly tricky because we don't just want to
    #redirect stdout and stderr for this Python process; we want to redirect it
    #for this process and all children. Consequently, we can't just replace
    #sys.stdout and sys.stderr; we need to mess with the underlying OS-level
    #file descriptors. See <http://stackoverflow.com/a/11632982/402891>
    
    #When we start, standard input is file descriptor 0, standard output is
    #file descriptor 1, and standard error is file descriptor 2.

    #What file do we want to point FDs 1 and 2 to?
    tempWorkerLogPath = os.path.join(localWorkerTempDir, "worker_log.txt")
    
    #Save the original stdout and stderr (by opening new file descriptors to the
    #same files)
    origStdOut = os.dup(1)
    origStdErr = os.dup(2)

    #Open the file to send stdout/stderr to.
    logFh = os.open(tempWorkerLogPath, os.O_WRONLY | os.O_CREAT | os.O_APPEND)

    #Replace standard output with a descriptor for the log file
    os.dup2(logFh, 1)
    
    #Replace standard error with a descriptor for the log file
    os.dup2(logFh, 2)
    
    #Since we only opened the file once, all the descriptors duped from the
    #original will share offset information, and won't clobber each others'
    #writes. See <http://stackoverflow.com/a/5284108/402891>. This shouldn't
    #matter, since O_APPEND seeks to the end of the file before every write, but
    #maybe there's something odd going on...
    
    #Close the descriptor we used to open the file
    os.close(logFh)

    for handler in list(logger.handlers): #Remove old handlers
        logger.removeHandler(handler)
    
    #Add the new handler. The sys.stderr stream has been redirected by swapping
    #the file descriptor out from under it.
    logger.addHandler(logging.StreamHandler(sys.stderr))

    debugging = logging.getLogger().isEnabledFor(logging.DEBUG)
    ##########################################
    #Worker log file trapped from here on in
    ##########################################

    workerFailed = False
    statsDict = MagicExpando()
    messages = []
    blockFn = lambda : True
    cleanCacheFn = lambda x : True
    try:

        #Put a message at the top of the log, just to make sure it's working.
        print "---TOIL WORKER OUTPUT LOG---"
        sys.stdout.flush()
        
        #Log the number of open file descriptors so we can tell if we're leaking
        #them.
        logger.debug("Next available file descriptor: {}".format(
            nextOpenDescriptor()))
    
        ##########################################
        #Load the jobWrapper
        ##########################################
        
        jobWrapper = jobStore.load(jobStoreID)
        logger.debug("Parsed jobWrapper")
        
        ##########################################
        #Cleanup from any earlier invocation of the jobWrapper
        ##########################################
        
        if jobWrapper.command == None:
            while len(jobWrapper.stack) > 0:
                jobs = jobWrapper.stack[-1]
                #If the jobs still exist they have not been run, so break
                if jobStore.exists(jobs[0][0]):
                    break
                #However, if they are gone then we can remove them from the stack.
                #This is the only way to flush successors that have previously been run
                #, as jobs are, as far as possible, read only in the leader.
                jobWrapper.stack.pop()
                
        #This cleans the old log file which may 
        #have been left if the jobWrapper is being retried after a jobWrapper failure.
        oldLogFile = jobWrapper.logJobStoreFileID
        jobWrapper.logJobStoreFileID = None
        jobStore.update(jobWrapper) #Update first, before deleting the file
        if oldLogFile != None:
            jobStore.delete(oldLogFile)
            
        #Make a temporary file directory for the jobWrapper
        localTempDir = makePublicDir(os.path.join(localWorkerTempDir, "localTempDir"))
    
        ##########################################
        #Setup the stats, if requested
        ##########################################
        
        if config.stats:
            startTime = time.time()
            startClock = getTotalCpuTime()

        startTime = time.time() 
        while True:
            ##########################################
            #Run the jobWrapper, if there is one
            ##########################################
            
            if jobWrapper.command != None:
                if jobWrapper.command.startswith( "_toil " ):
                    #Load the job
                    job = Job._loadJob(jobWrapper.command, jobStore)
                    
                    #Cleanup the cache from the previous job
                    cleanCacheFn(job.effectiveRequirements(jobStore.config).cache)
                    
                    #Create a fileStore object for the job
                    fileStore = Job.FileStore(jobStore, jobWrapper, localTempDir, 
                                              blockFn)
                    #Get the next block function and list that will contain any messages
                    blockFn = fileStore._blockFn
                    messages = fileStore.loggingMessages

                    job._execute(jobWrapper=jobWrapper,
                                           stats=statsDict if config.stats else None,
                                           localTempDir=localTempDir,
                                           jobStore=jobStore,
                                           fileStore=fileStore)

                    #Set the clean cache function
                    cleanCacheFn = fileStore._cleanLocalTempDir
                    
                else: #Is another command (running outside of jobs may be deprecated)
                    #Cleanup the cache from the previous job
                    cleanCacheFn(0)
                    
                    system(jobWrapper.command)
                    #Set a dummy clean cache fn
                    cleanCacheFn = lambda x : None
            else:
                #The command may be none, in which case
                #the jobWrapper is either a shell ready to be deleted or has 
                #been scheduled after a failure to cleanup
                break
            
            if Job.FileStore._terminateEvent.isSet():
                raise RuntimeError("The termination flag is set")

            ##########################################
            #Establish if we can run another jobWrapper within the worker
            ##########################################
            
            #No more jobs to run so quit
            if len(jobWrapper.stack) == 0:
                break
            
            #Get the next set of jobs to run
            jobs = jobWrapper.stack[-1]
            assert len(jobs) > 0
            
            #If there are 2 or more jobs to run in parallel we quit
            if len(jobs) >= 2:
                logger.debug("No more jobs can run in series by this worker,"
                            " it's got %i children", len(jobs)-1)
                break
            
            #We check the requirements of the jobWrapper to see if we can run it
            #within the current worker
            successorJobStoreID, successorMemory, successorCores, successorsDisk, successorPredecessorID = jobs[0]
            if successorMemory > jobWrapper.memory:
                logger.debug("We need more memory for the next jobWrapper, so finishing")
                break
            if successorCores > jobWrapper.cores:
                logger.debug("We need more cores for the next jobWrapper, so finishing")
                break
            if successorsDisk > jobWrapper.disk:
                logger.debug("We need more disk for the next jobWrapper, so finishing")
                break
            if successorPredecessorID != None: 
                logger.debug("The jobWrapper has multiple predecessors, we must return to the leader.")
                break
          
            ##########################################
            #We have a single successor jobWrapper.
            #We load the successor jobWrapper and transplant its command and stack
            #into the current jobWrapper so that it can be run
            #as if it were a command that were part of the current jobWrapper.
            #We can then delete the successor jobWrapper in the jobStore, as it is
            #wholly incorporated into the current jobWrapper.
            ##########################################
            
            #Clone the jobWrapper and its stack
            jobWrapper = copy.deepcopy(jobWrapper)
            
            #Remove the successor jobWrapper
            jobWrapper.stack.pop()
            
            #Load the successor jobWrapper
            successorJob = jobStore.load(successorJobStoreID)
            #These should all match up
            assert successorJob.memory == successorMemory
            assert successorJob.cores == successorCores
            assert successorJob.predecessorsFinished == set()
            assert successorJob.predecessorNumber == 1
            assert successorJob.command != None
            assert successorJobStoreID == successorJob.jobStoreID
            
            #Transplant the command and stack to the current jobWrapper
            jobWrapper.command = successorJob.command
            jobWrapper.stack += successorJob.stack
            assert jobWrapper.memory >= successorJob.memory
            assert jobWrapper.cores >= successorJob.cores
            
            #Build a fileStore to update the job
            fileStore = Job.FileStore(jobStore, jobWrapper, localTempDir, blockFn)
            
            #Update blockFn
            blockFn = fileStore._blockFn
            
            #Add successorJob to those to be deleted
            fileStore.jobsToDelete.add(successorJob.jobStoreID)
            
            #This will update the job once the previous job is done
            fileStore._updateJobWhenDone()            
            
            #Clone the jobWrapper and its stack again, so that updates to it do 
            #not interfere with this update
            jobWrapper = copy.deepcopy(jobWrapper)
            
            logger.debug("Starting the next jobWrapper")
        
        ##########################################
        #Finish up the stats
        ##########################################
        if config.stats:
            totalCPUTime, totalMemoryUsage = getTotalCpuTimeAndMemoryUsage()
            statsDict.workers.time = str(time.time() - startTime)
            statsDict.workers.clock = str(totalCPUTime - startClock)
            statsDict.workers.memory = str(totalMemoryUsage)
            statsDict.workers.log = messages
        
        logger.info("Finished running the chain of jobs on this node, we ran for a total of %f seconds", time.time() - startTime)
    
    ##########################################
    #Trapping where worker goes wrong
    ##########################################
    except: #Case that something goes wrong in worker
        traceback.print_exc()
        logger.error("Exiting the worker because of a failed jobWrapper on host %s", socket.gethostname())
        Job.FileStore._terminateEvent.set()
    
    ##########################################
    #Wait for the asynchronous chain of writes/updates to finish
    ########################################## 
       
    blockFn() 
    
    ##########################################
    #All the asynchronous worker/update threads must be finished now, 
    #so safe to test if they completed okay
    ########################################## 
    
    if Job.FileStore._terminateEvent.isSet():
        jobWrapper = jobStore.load(jobStoreID)
        jobWrapper.setupJobAfterFailure(config)
        workerFailed = True

    ##########################################
    #Cleanup
    ##########################################
    
    #Close the worker logging
    #Flush at the Python level
    sys.stdout.flush()
    sys.stderr.flush()
    #Flush at the OS level
    os.fsync(1)
    os.fsync(2)
    
    #Close redirected stdout and replace with the original standard output.
    os.dup2(origStdOut, 1)
    
    #Close redirected stderr and replace with the original standard error.
    os.dup2(origStdErr, 2)
    
    #sys.stdout and sys.stderr don't need to be modified at all. We don't need
    #to call redirectLoggerStreamHandlers since they still log to sys.stderr
    
    #Close our extra handles to the original standard output and standard error
    #streams, so we don't leak file handles.
    os.close(origStdOut)
    os.close(origStdErr)
    
    #Now our file handles are in exactly the state they were in before.
    
    #Copy back the log file to the global dir, if needed
    if workerFailed:
        truncateFile(tempWorkerLogPath)
        jobWrapper.logJobStoreFileID = jobStore.writeFile( tempWorkerLogPath, jobWrapper.jobStoreID )
        os.remove(tempWorkerLogPath)
        jobStore.update(jobWrapper)
    elif debugging: # write log messages
        truncateFile(tempWorkerLogPath)
        with open(tempWorkerLogPath, 'r') as logFile:
            logMessages = logFile.read().splitlines()
        statsDict.logs = [Expando(jobStoreID=jobStoreID,text=logMessage) for logMessage in logMessages]

    if (debugging or config.stats or messages) and not workerFailed: # We have stats/logging to report back
        jobStore.writeStatsAndLogging(json.dumps(statsDict))

    #Remove the temp dir
    shutil.rmtree(localWorkerTempDir)
    
    #This must happen after the log file is done with, else there is no place to put the log
    if (not workerFailed) and jobWrapper.command == None and len(jobWrapper.stack) == 0:
        #We can now safely get rid of the jobWrapper
        jobStore.delete(jobWrapper.jobStoreID)
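
The loop above only keeps chaining successors inside the same worker process while exactly one child remains, that child fits within the current allocation, and it has no other predecessors; anything else hands control back to the leader. A condensed restatement of those checks follows; FakeWrapper and the sample numbers are invented, but the tuple layout matches the stack entries used in the code above.

import collections

FakeWrapper = collections.namedtuple("FakeWrapper", "stack memory cores disk")

def canChainSuccessor(jobWrapper):
    #Top of the stack holds (jobStoreID, memory, cores, disk, predecessorID) tuples
    jobs = jobWrapper.stack[-1]
    if len(jobs) != 1:
        return False  #two or more children must be scheduled by the leader
    _jobStoreID, memory, cores, disk, predecessorID = jobs[0]
    return (memory <= jobWrapper.memory and
            cores <= jobWrapper.cores and
            disk <= jobWrapper.disk and
            predecessorID is None)  #multiple predecessors need the leader to coordinate

wrapper = FakeWrapper(stack=[[("child1", 1024, 1, 2048, None)]],
                      memory=2048, cores=2, disk=4096)
assert canChainSuccessor(wrapper)
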
Code example #26
0
File: worker.py Project: BD2KGenomics/toil-old
def main():
    ########################################## 
    #Import necessary modules 
    ##########################################
    
    # This is assuming that worker.py is at a path ending in "/toil/worker.py".
    sourcePath = os.path.dirname(os.path.dirname(__file__))
    if sourcePath not in sys.path:
        # FIXME: prepending to sys.path should fix #103
        sys.path.append(sourcePath)
    
    #Now we can import all the necessary functions
    from toil.lib.bioio import setLogLevel
    from toil.lib.bioio import getTotalCpuTime
    from toil.lib.bioio import getTotalCpuTimeAndMemoryUsage
    from toil.lib.bioio import getTempDirectory
    from toil.lib.bioio import makeSubDir
    from toil.lib.bioio import system
    from toil.common import loadJobStore
    
    ########################################## 
    #Input args
    ##########################################
    
    jobStoreString = sys.argv[1]
    jobStoreID = sys.argv[2]
    
    ##########################################
    #Load the jobStore/config file
    ##########################################
    
    jobStore = loadJobStore(jobStoreString)
    config = jobStore.config

    ##########################################
    #Load the environment for the batchjob
    ##########################################
    
    #First load the environment for the batchjob.
    with jobStore.readSharedFileStream("environment.pickle") as fileHandle:
        environment = cPickle.load(fileHandle)
    for i in environment:
        if i not in ("TMPDIR", "TMP", "HOSTNAME", "HOSTTYPE"):
            os.environ[i] = environment[i]
    # sys.path is used by __import__ to find modules
    if "PYTHONPATH" in environment:
        for e in environment["PYTHONPATH"].split(':'):
            if e != '':
                sys.path.append(e)

    setLogLevel(config.attrib["log_level"])

    ##########################################
    #Setup the temporary directories.
    ##########################################
        
    #Dir to put all the temp files in.
    localWorkerTempDir = getTempDirectory()
    
    ##########################################
    #Setup the logging
    ##########################################

    #This is mildly tricky because we don't just want to
    #redirect stdout and stderr for this Python process; we want to redirect it
    #for this process and all children. Consequently, we can't just replace
    #sys.stdout and sys.stderr; we need to mess with the underlying OS-level
    #file descriptors. See <http://stackoverflow.com/a/11632982/402891>
    
    #When we start, standard input is file descriptor 0, standard output is
    #file descriptor 1, and standard error is file descriptor 2.

    #What file do we want to point FDs 1 and 2 to?    
    tempWorkerLogPath = os.path.join(localWorkerTempDir, "worker_log.txt")
    
    #Save the original stdout and stderr (by opening new file descriptors to the
    #same files)
    origStdOut = os.dup(1)
    origStdErr = os.dup(2)
    
    #Open the file to send stdout/stderr to.
    logFh = os.open(tempWorkerLogPath, os.O_WRONLY | os.O_CREAT | os.O_APPEND)

    #Replace standard output with a descriptor for the log file
    os.dup2(logFh, 1)
    
    #Replace standard error with a descriptor for the log file
    os.dup2(logFh, 2)
    
    #Since we only opened the file once, all the descriptors duped from the
    #original will share offset information, and won't clobber each others'
    #writes. See <http://stackoverflow.com/a/5284108/402891>. This shouldn't
    #matter, since O_APPEND seeks to the end of the file before every write, but
    #maybe there's something odd going on...
    
    #Close the descriptor we used to open the file
    os.close(logFh)
    
    for handler in list(logger.handlers): #Remove old handlers
        logger.removeHandler(handler)
    
    #Add the new handler. The sys.stderr stream has been redirected by swapping
    #the file descriptor out from under it.
    logger.addHandler(logging.StreamHandler(sys.stderr))

    ##########################################
    #Worker log file trapped from here on in
    ##########################################

    workerFailed = False
    try:

        #Put a message at the top of the log, just to make sure it's working.
        print "---TOIL WORKER OUTPUT LOG---"
        sys.stdout.flush()
        
        #Log the number of open file descriptors so we can tell if we're leaking
        #them.
        logger.debug("Next available file descriptor: {}".format(
            nextOpenDescriptor()))
    
        ##########################################
        #Load the batchjob
        ##########################################
        
        batchjob = jobStore.load(jobStoreID)
        logger.debug("Parsed batchjob")
        
        ##########################################
        #Cleanup from any earlier invocation of the batchjob
        ##########################################
        
        if batchjob.command == None:
            while len(batchjob.stack) > 0:
                jobs = batchjob.stack[-1]
                #If the jobs still exist they have not been run, so break
                if jobStore.exists(jobs[0][0]):
                    break
                #However, if they are gone then we can remove them from the stack.
                #This is the only way to flush successors that have previously been run
                #, as jobs are, as far as possible, read only in the leader.
                batchjob.stack.pop()
                
                
        #This cleans the old log file which may 
        #have been left if the batchjob is being retried after a batchjob failure.
        if batchjob.logJobStoreFileID != None:
            batchjob.clearLogFile(jobStore)
    
        ##########################################
        #Setup the stats, if requested
        ##########################################
        
        if config.attrib.has_key("stats"):
            startTime = time.time()
            startClock = getTotalCpuTime()
            stats = ET.Element("worker")
        else:
            stats = None

        startTime = time.time() 
        while True:
            ##########################################
            #Run the batchjob, if there is one
            ##########################################
            
            if batchjob.command != None:
                if batchjob.command[:11] == "scriptTree ":
                    #Make a temporary file directory for the job
                    localTempDir = makeSubDir(os.path.join(localWorkerTempDir, "localTempDir"))
                    
                    #Is a job command
                    messages = loadJob(batchjob.command, jobStore)._execute(batchjob=batchjob,
                                    stats=stats, localTempDir=localTempDir, 
                                    jobStore=jobStore)
                    
                    #Remove the temporary file directory
                    shutil.rmtree(localTempDir)
    
                else: #Is another command (running outside of jobs may be deprecated)
                    system(batchjob.command)
                    messages = []
            else:
                #The command may be none, in which case
                #the batchjob is just a shell ready to be deleted
                assert len(batchjob.stack) == 0
                messages = []
                break
            
            ##########################################
            #Establish if we can run another batchjob within the worker
            ##########################################
            
            #Exceeded the amount of time the worker is allowed to run for so quit
            if time.time() - startTime > float(config.attrib["job_time"]):
                logger.debug("We are breaking because the maximum time the batchjob should run for has been exceeded")
                break

            #No more jobs to run so quit
            if len(batchjob.stack) == 0:
                break
            
            #Get the next set of jobs to run
            jobs = batchjob.stack[-1]
            assert len(jobs) > 0
            
            #If there are 2 or more jobs to run in parallel we quit
            if len(jobs) >= 2:
                logger.debug("No more jobs can run in series by this worker,"
                            " it's got %i children", len(jobs)-1)
                break
            
            #We check the requirements of the batchjob to see if we can run it
            #within the current worker
            successorJobStoreID, successorMemory, successorCpu, successorsDisk, successorPredecessorID = jobs[0]
            if successorMemory > batchjob.memory:
                logger.debug("We need more memory for the next batchjob, so finishing")
                break
            if successorCpu > batchjob.cpu:
                logger.debug("We need more cpus for the next batchjob, so finishing")
                break
            if successorsDisk > batchjob.disk:
                logger.debug("We need more disk for the next batchjob, so finishing")
                break
            if successorPredecessorID != None: 
                logger.debug("The batchjob has multiple predecessors, we must return to the leader.")
                break
          
            ##########################################
            #We have a single successor batchjob.
            #We load the successor batchjob and transplant its command and stack
            #into the current batchjob so that it can be run
            #as if it were a command that were part of the current batchjob.
            #We can then delete the successor batchjob in the jobStore, as it is
            #wholly incorporated into the current batchjob.
            ##########################################
            
            #Remove the successor batchjob
            batchjob.stack.pop()
            
            #Load the successor batchjob
            successorJob = jobStore.load(successorJobStoreID)
            #These should all match up
            assert successorJob.memory == successorMemory
            assert successorJob.cpu == successorCpu
            assert successorJob.predecessorsFinished == set()
            assert successorJob.predecessorNumber == 1
            assert successorJob.command != None
            assert successorJobStoreID == successorJob.jobStoreID
            
            #Transplant the command and stack to the current batchjob
            batchjob.command = successorJob.command
            batchjob.stack += successorJob.stack
            assert batchjob.memory >= successorJob.memory
            assert batchjob.cpu >= successorJob.cpu
            
            #Checkpoint the batchjob and delete the successorJob
            batchjob.jobsToDelete = [ successorJob.jobStoreID ]
            jobStore.update(batchjob)
            jobStore.delete(successorJob.jobStoreID)
            
            logger.debug("Starting the next batchjob")
        
        ##########################################
        #Finish up the stats
        ##########################################

        if stats != None:
            totalCpuTime, totalMemoryUsage = getTotalCpuTimeAndMemoryUsage()
            stats.attrib["time"] = str(time.time() - startTime)
            stats.attrib["clock"] = str(totalCpuTime - startClock)
            stats.attrib["memory"] = str(totalMemoryUsage)
            m = ET.SubElement(stats, "messages")
            for message in messages:
                ET.SubElement(m, "message").text = message
            jobStore.writeStatsAndLogging(ET.tostring(stats))
        elif len(messages) > 0: #No stats, but still need to report log messages
            l = ET.Element("worker")
            m = ET.SubElement(l, "messages")
            for message in messages:
                ET.SubElement(m, "message").text = message
            jobStore.writeStatsAndLogging(ET.tostring(l))
        
        logger.info("Finished running the chain of jobs on this node, we ran for a total of %f seconds", time.time() - startTime)
    
    ##########################################
    #Trapping where worker goes wrong
    ##########################################
    except: #Case that something goes wrong in worker
        traceback.print_exc()
        logger.error("Exiting the worker because of a failed batchjob on host %s", socket.gethostname())
        batchjob = jobStore.load(jobStoreID)
        batchjob.setupJobAfterFailure(config)
        workerFailed = True

    ##########################################
    #Cleanup
    ##########################################
    
    #Close the worker logging
    #Flush at the Python level
    sys.stdout.flush()
    sys.stderr.flush()
    #Flush at the OS level
    os.fsync(1)
    os.fsync(2)
    
    #Close redirected stdout and replace with the original standard output.
    os.dup2(origStdOut, 1)
    
    #Close redirected stderr and replace with the original standard error.
    os.dup2(origStdErr, 2)
    
    #sys.stdout and sys.stderr don't need to be modified at all. We don't need
    #to call redirectLoggerStreamHandlers since they still log to sys.stderr
    
    #Close our extra handles to the original standard output and standard error
    #streams, so we don't leak file handles.
    os.close(origStdOut)
    os.close(origStdErr)
    
    #Now our file handles are in exactly the state they were in before.
    
    #Copy back the log file to the global dir, if needed
    if workerFailed:
        truncateFile(tempWorkerLogPath)
        batchjob.setLogFile(tempWorkerLogPath, jobStore)
        os.remove(tempWorkerLogPath)
        jobStore.update(batchjob)

    #Remove the temp dir
    shutil.rmtree(localWorkerTempDir)
    
    #This must happen after the log file is done with, else there is no place to put the log
    if (not workerFailed) and batchjob.command == None and len(batchjob.stack) == 0:
        #We can now safely get rid of the batchjob
        jobStore.delete(batchjob.jobStoreID)
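
Unlike the newer worker (code example #25), which accumulates statistics in a MagicExpando and writes them out with json.dumps, this older worker reports stats and log messages as an ElementTree payload. Below is a small sketch of the layout it produces; the timing, memory, and message values are made up for illustration.

import xml.etree.ElementTree as ET

stats = ET.Element("worker")
stats.attrib["time"] = "12.3"         #wall-clock seconds (fabricated)
stats.attrib["clock"] = "11.9"        #CPU seconds (fabricated)
stats.attrib["memory"] = "104857600"  #peak memory usage (fabricated)
m = ET.SubElement(stats, "messages")
for message in ["step one done", "step two done"]:
    ET.SubElement(m, "message").text = message

print(ET.tostring(stats))
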