Example #1
 def issueJobs(self, jobCommands):
     """Issues parasol with job commands.
     """
     issuedJobs = {}
     pattern = re.compile("your job ([0-9]+).*") #Compiled once; parasol prints "your job <id>" when a job is queued
     for jobCommand, memory, cpu, logFile in jobCommands:
         assert memory is not None
         assert cpu is not None
         assert logFile is not None
         command = "parasol -verbose -ram=%i -cpu=%i -results=%s add job '%s'" % (memory, cpu, self.parasolResultsFile, jobCommand)
         while True:
             #time.sleep(0.1) #Sleep to let parasol catch up #Apparently unnecessary
             popenParasolCommand(command, self.scratchFile)
             fileHandle = open(self.scratchFile, 'r')
             line = fileHandle.readline()
             fileHandle.close()
             match = pattern.match(line)
             if match is not None: #Parasol add job can report success even when the job was not properly issued!
                 break
             else:
                 logger.info("We failed to properly add the job, we will try again after a sleep")
                 time.sleep(5)
         jobID = int(match.group(1))
         logger.debug("Got the job id: %s from line: %s" % (jobID, line))
         assert jobID not in issuedJobs
         issuedJobs[jobID] = jobCommand
         logger.debug("Issued the job command: %s with job id: %i " % (command, jobID))
     return issuedJobs
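The while loop above exists because parasol can report success without actually queuing the job, so the code insists on seeing the "your job <id>" line before moving on. A minimal, self-contained sketch of that parse (the sample line is an assumption about parasol's output, not captured output):

import re

pattern = re.compile("your job ([0-9]+).*")
line = "your job 12345 ('echo hello')" #Hypothetical parasol stdout
match = pattern.match(line)
if match is not None:
    print(int(match.group(1))) #-> 12345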
Example #2
 def getUpdatedJobs(self):
     """We use the parasol results to update the status of jobs, adding them
     to the list of updated jobs.
     
     Results have the following structure.. (thanks Mark D!)
     
     int status;    /* Job status - wait() return format. 0 is good. */
     char *host;    /* Machine job ran on. */
     char *jobId;    /* Job queuing system job ID */
     char *exe;    /* Job executable file (no path) */
     int usrTicks;    /* 'User' CPU time in ticks. */
     int sysTicks;    /* 'System' CPU time in ticks. */
     unsigned submitTime;    /* Job submission time in seconds since 1/1/1970 */
     unsigned startTime;    /* Job start time in seconds since 1/1/1970 */
     unsigned endTime;    /* Job end time in seconds since 1/1/1970 */
     char *user;    /* User who ran job */
     char *errFile;    /* Location of stderr file on host */
     
     plus you finally have the command name..
     """
     line = self.parasolResultsFileHandle.readline()
     updatedJobs = {}
     while line != '':
         if line[-1] == '\n':
             line = line[:-1]
         results = line.split()
         logger.debug("Parasol completed a job, this is what we got: %s" % line)
         result = int(results[0]) #The 'status' field
         jobID = int(results[2]) #The 'jobId' field
         updatedJobs[jobID] = result
         line = self.parasolResultsFileHandle.readline()
     return updatedJobs
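A minimal sketch of parsing one such results line; the line below is made up, but it follows the field order documented in the docstring (status first, jobId third):

line = "0 node01 42 myExe 100 20 1000 1001 1060 user /tmp/err 'doStuff'\n"
results = line.split()
status, jobID = int(results[0]), int(results[2])
print("%i %i" % (status, jobID)) #-> 0 42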
Example #3
 def addJob(self, command, sgeJobID, issuedJobs, index=None):
     jobID = self.nextJobID
     self.nextJobID += 1
     self.jobIDs[(sgeJobID, index)] = jobID
     self.sgeJobIDs[jobID] = (sgeJobID, index) 
     assert jobID not in issuedJobs
     issuedJobs[jobID] = command
     logger.debug("Issued the job command: %s with job id: %s " % (command, str(jobID)))
     self.currentjobs.add(jobID)
     self.newJobsQueue.put((sgeJobID, index))
Example #4
def prepareQsub(cpu, mem):
    qsubline = ["qsub", "-b", "y", "-terse", "-j", "y", "-cwd", "-v",
                "LD_LIBRARY_PATH=%s" % os.environ["LD_LIBRARY_PATH"]]
    reqline = []
    if cpu is not None:
        reqline.append("p="+str(cpu))
    if mem is not None:
        reqline.append("vf="+str(mem/ 1024)+"K")
    if len(reqline) > 0:
        qsubline.extend(["-hard","-l", ",".join(reqline)])
    logger.debug("**"+" ".join(qsubline))
    return qsubline
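As a usage sketch, calling this with 2 CPUs and 4194304 memory units (presumably bytes, given the division by 1024 into a kilobyte vf request) builds a command line like the following, assuming LD_LIBRARY_PATH is set in the environment:

qsubline = prepareQsub(cpu=2, mem=4194304)
print(" ".join(qsubline))
#-> qsub -b y -terse -j y -cwd -v LD_LIBRARY_PATH=... -hard -l p=2,vf=4096K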
Example #5
 def issueJobs(self, commands):
     """Runs the jobs right away.
     """
     issuedJobs = {}
     for command, memory, cpu, logFile in commands: #Add the commands to the queue
         assert memory is not None
         assert cpu is not None
         assert logFile is not None
         logger.debug("Issuing the command: %s with memory: %i, cpu: %i" % (command, memory, cpu))
         self.jobs[self.jobIndex] = command
         issuedJobs[self.jobIndex] = command
         self.inputQueue.put((command, logFile, self.jobIndex))
         self.jobIndex += 1
     return issuedJobs
Example #6
 def getUpdatedJobs(self):
     """Returns a map of the run jobs and the return value of their processes.
     """
     runJobs = {}
     try:
         while True:
             command, exitValue, jobID = self.outputQueue.get_nowait()
             runJobs[jobID] = exitValue
             self.jobs.pop(jobID)
             logger.debug("Ran the command: %s with exit value: %i" % (command, exitValue))
             self.outputQueue.task_done()
     except Empty: #Queue.Empty: the output queue has been drained
         pass
     return runJobs
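Examples 5 and 6 are the two halves of a simple local batch system: issueJobs feeds inputQueue, worker threads run the commands, and getUpdatedJobs drains outputQueue. The worker code is not shown in these examples; a self-contained sketch of the pattern (the worker details are illustrative assumptions, not the original implementation):

import subprocess
import threading
from Queue import Queue, Empty #Python 2; use 'queue' on Python 3

inputQueue = Queue()
outputQueue = Queue()

def worker():
    while True:
        command, logFile, jobID = inputQueue.get()
        with open(logFile, 'w') as fileHandle: #Capture stderr in the job's log file
            exitValue = subprocess.call(command, shell=True, stderr=fileHandle)
        outputQueue.put((command, exitValue, jobID))
        inputQueue.task_done()

thread = threading.Thread(target=worker)
thread.daemon = True #Let the process exit even if the worker is blocked
thread.start()
inputQueue.put(("echo hello", "/tmp/job0.log", 0))
inputQueue.join()
print(outputQueue.get_nowait()) #-> ('echo hello', 0, 0)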
Example #7
def issueJobs(jobs, jobIDsToJobsHash, batchSystem):
    """Issues jobs to the batch system.
    """
    jobCommands = {}
    for job in jobs:
        jobCommand = os.path.join(workflowRootPath(), "bin", "jobTreeSlave")
        followOnJob = job.find("followOns").findall("followOn")[-1]
        command = "%s -E %s %s --job %s" % (sys.executable, jobCommand, os.path.split(workflowRootPath())[0], job.attrib["file"])
        jobCommands[command] = (job.attrib["file"], int(followOnJob.attrib["memory"]), int(followOnJob.attrib["cpu"]), job.attrib["slave_log_file"])
    issuedJobs = batchSystem.issueJobs([ (command, memory, cpu, logFile) for command, (jobFile, memory, cpu, logFile) in jobCommands.items() ])
    assert len(issuedJobs) == len(jobCommands)
    for jobID, command in issuedJobs.items():
        jobFile = jobCommands[command][0]
        assert jobID not in jobIDsToJobsHash
        jobIDsToJobsHash[jobID] = jobFile
        logger.debug("Issued the job: %s with job id: %i " % (jobFile, jobID))
Example #8
def extract_fastq(target, genome, institute, tissue, bam, reference, out_dir):
    out_path = build_out_dirs(out_dir, genome, institute, tissue, bam)
    if is_paired_sequencing(bam):
        logger.debug("Extracting paired sequencing for bam {} with out_path {}".format(bam, out_path))
        name_sorted_sam_path = os.path.join(out_path, "name_sorted.sam")
        system("samtools sort -n -O sam -T {} {} > {}".format(target.getLocalTempDir(), bam, name_sorted_sam_path))
        fwd_fastq_path = os.path.join(out_path, os.path.basename(bam).split(".")[0] + ".fwd.fastq")
        rev_fastq_path = os.path.join(out_path, os.path.basename(bam).split(".")[0] + ".rev.fastq")
        target.setFollowOnTargetFn(get_paired_fastqs, args=(genome, institute, tissue, bam, reference, out_dir, 
                                                            name_sorted_sam_path, fwd_fastq_path, rev_fastq_path))
    else:
        logger.debug("Extracting UNPAIRED sequencing for bam {} with out_path {}".format(bam, out_path))
        fastq_path = os.path.join(out_path, os.path.basename(bam).split(".")[0] + ".fastq")
        system(r"""samtools view {} | grep -v ^@ | awk '{{print "@"$1"\n"$10"\n+\n"$11}}' > {}""".format(bam, 
                                                                                                        fastq_path))
        read_size = fastq_read_size(fastq_path)
        target.setFollowOnTargetFn(kallisto_single, args=(genome, institute, tissue, bam, reference, out_dir, 
                                                          fastq_path, read_size))
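fastq_read_size is not shown in these examples; a plausible minimal version (an assumption, not the original helper) measures the sequence line of the first four-line FASTQ record:

def fastq_read_size(fastq_path):
    #A FASTQ record is four lines: @name, sequence, +, qualities
    with open(fastq_path) as fileHandle:
        fileHandle.readline() #@name
        return len(fileHandle.readline().rstrip("\n")) #Length of the sequence line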
Example #9
    def run(self):
        self.options.tracks = os.path.join(self.options.dir, "tracks")
        if not os.path.exists(self.options.tracks):
            os.mkdir(self.options.tracks)
        self.addChildTarget(PrepareBAMBAMGraph(self.options))
        self.addChildTarget(PrepareCBSGraph(self.options))
        self.addChildTarget(PrepareBreakendGraph(self.options))
        self.addChildTarget(PrepareCycleGraph(self.options))
        self.addChildTarget(PrepareCactusCoverageGraphs(self.options))

        if self.options.bam is not None:
            os.chdir(self.options.tracks)
            logger.debug(os.path.basename(self.options.bam))
            if os.path.lexists(os.path.basename(self.options.bam) + ".bai"):
                os.remove(os.path.basename(self.options.bam) + ".bai")
            if os.path.lexists(os.path.basename(self.options.bam)):
                os.remove(os.path.basename(self.options.bam))
            do("ln -s %s" % self.options.bam)
            do("ln -s %s.bai" % self.options.bam)
        self.setFollowOnTarget(PrepareIndex(self.options))
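The do("ln -s ...") calls shell out to create the symlinks; the same effect without a shell (an alternative sketch, not the original do helper) looks like:

import os

bam = "/path/to/sample.bam" #Placeholder for self.options.bam
os.symlink(bam, os.path.basename(bam)) #Equivalent of do("ln -s %s" % bam)
os.symlink(bam + ".bai", os.path.basename(bam) + ".bai")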
Example #10
def mainLoop(config, batchSystem):
    """This is the main loop from which jobs are issued and processed.
    """    
    waitDuration = float(config.attrib["wait_duration"])
    assert waitDuration >= 0
    rescueJobsFrequency = float(config.attrib["rescue_jobs_frequency"])
    maxJobDuration = float(config.attrib["max_job_duration"])
    assert maxJobDuration >= 0
    logger.info("Got parameters, wait duration %s, rescue jobs frequency: %s max job duration: %s" % \
                (waitDuration, rescueJobsFrequency, maxJobDuration))
    
    #Kill any jobs on the batch system queue from the last time.
    assert len(batchSystem.getIssuedJobIDs()) == 0 #Batch system must start with no active jobs!
    logger.info("Checked batch system has no running jobs and no updated jobs")
    
    jobFiles = config.attrib["job_file_dir"].listFiles()
    logger.info("Got a list of job files")
    
    #Repair the job tree using any .old files
    fixJobsList(config, jobFiles)
    logger.info("Fixed the job files using any .old files")
    
    #Get jobs that were running, or that had failed reset to 'white' status
    restartFailedJobs(config, jobFiles)
    logger.info("Reworked failed jobs")
    
    updatedJobFiles = set() #Jobs whose status needs updating, either because they have finished, or because they need to be started.
    for jobFile in jobFiles:
        job = ET.parse(jobFile).getroot()
        if job.attrib["colour"] not in ("grey", "blue"):
            updatedJobFiles.add(jobFile)
    logger.info("Got the active (non grey/blue) job files")
    
    totalJobFiles = len(jobFiles) #Total number of job files we have.
    jobIDsToJobsHash = {} #A hash of the currently running jobs ids, made by the batch system.
    
    idealJobTime = float(config.attrib["job_time"]) 
    assert idealJobTime > 0.0
    
    maxIssuedJobs = int(config.attrib["max_jobs"]) #The maximum number of jobs to issue to the batch system
    assert maxIssuedJobs >= 1
    
    stats = "stats" in config.attrib
    if stats:
        startTime = time.time()
        startClock = getTotalCpuTime()
    
    logger.info("Starting the main loop")
    timeSinceJobsLastRescued = time.time() - rescueJobsFrequency + 100 #We hack it so that we rescue jobs after the first 100 seconds to get around an apparent parasol bug
    while True:
        if len(updatedJobFiles) > 0:
            logger.debug("Built the jobs list, currently have %i job files, %i jobs to update and %i jobs currently issued" % (totalJobFiles, len(updatedJobFiles), len(jobIDsToJobsHash)))
        
        jobsToIssue = []
        for jobFile in list(updatedJobFiles):
            job = ET.parse(jobFile).getroot()
            assert job.attrib["colour"] not in ("grey", "blue")
            
            if job.attrib["colour"] == "white": #Get ready to start the job
                if len(jobIDsToJobsHash) < maxIssuedJobs:
                    logger.debug("Job: %s is being started" % job.attrib["file"])
                    updatedJobFiles.remove(job.attrib["file"])
                    
                    #Reset the log files for the job.
                    open(job.attrib["slave_log_file"], 'w').close()
                    open(job.attrib["log_file"], 'w').close()
                    
                    job.attrib["colour"] = "grey"
                    #writeJobs([ job ]) #Check point, do this before issuing job, so state is not read until issued
                    
                    #issueJobs([ job ], jobIDsToJobsHash, batchSystem)
                    jobsToIssue.append(job)
                else:
                    logger.debug("Job: %s is not being issued yet because we have %i jobs issued" % (job.attrib["file"], len(jobIDsToJobsHash)))
            elif job.attrib["colour"] == "black": #Job has finished okay
                logger.debug("Job: %s has finished okay" % job.attrib["file"])
                #Deal with stats
                if stats:
                    system("cat %s >> %s" % (job.attrib["stats"], config.attrib["stats"]))
                    open(job.attrib["stats"], 'w').close() #Reset the stats file
                if job.find("messages") != None:
                    for message in job.find("messages").findall("message"):
                        logger.info("Received the following message from job: %s" % message.attrib["message"])
                    job.remove(job.find("messages"))
                childCount = int(job.attrib["child_count"])
                blackChildCount = int(job.attrib["black_child_count"])
                assert childCount == blackChildCount #Has no currently running child jobs
                #Launch any unborn children
                unbornChildren = job.find("children")
                unbornChild = unbornChildren.find("child")
                if unbornChild is not None: #We must give birth to the unborn children
                    logger.debug("Job: %s has children to schedule" % job.attrib["file"])
                    newChildren = []
                    while unbornChild is not None:
                        cumulativeChildTime = float(unbornChild.attrib["time"])
                        
                        newJob = createJob(unbornChild.attrib.copy(), job.attrib["file"], config)
                        
                        totalJobFiles += 1
                        updatedJobFiles.add(newJob.attrib["file"])
                        
                        newChildren.append(newJob)
                        unbornChildren.remove(unbornChild)
                        unbornChild = unbornChildren.find("child")
                        
                        #This was code to aggregate groups of children into one job, but we don't do this now
                        #while cumulativeChildTime < idealJobTime and unbornChild is not None: #Accumulate a series of children into each job as a stack of jobs (to balance the cost of parallelism against the cost of running jobs serially).
                        #    cumulativeChildTime += float(unbornChild.attrib["time"])
                        #    ET.SubElement(newJob.find("followOns"), "followOn", unbornChild.attrib.copy())
                        #    unbornChildren.remove(unbornChild)
                        #    unbornChild = unbornChildren.find("child")
                        newJob.attrib["total_time"] = str(cummulativeChildTime)
                    
                    updatedJobFiles.remove(job.attrib["file"])
                    job.attrib["child_count"] = str(childCount + len(newChildren))
                    job.attrib["colour"] = "blue" #Blue - has children running.
                    writeJobs([ job ] + newChildren ) #Check point
                    
                elif len(job.find("followOns").findall("followOn")) != 0: #Has another job
                    logger.debug("Job: %s has a new command that we can now issue" % job.attrib["file"])
                    ##Reset the job run info
                    job.attrib["remaining_retry_count"] = config.attrib["retry_count"]
                    job.attrib["colour"] = "white"
                    ##End resetting the job
                    writeJobs([ job ])
                    
                else: #Job has finished, so we can defer to any parent
                    logger.debug("Job: %s is now dead" % job.attrib["file"])
                    job.attrib["colour"] = "dead"
                    if job.attrib.has_key("parent"):
                        parent = ET.parse(job.attrib["parent"]).getroot()
                        assert parent.attrib["colour"] == "blue"
                        assert int(parent.attrib["black_child_count"]) < int(parent.attrib["child_count"])
                        parent.attrib["black_child_count"] = str(int(parent.attrib["black_child_count"]) + 1)
                        if int(parent.attrib["child_count"]) == int(parent.attrib["black_child_count"]):
                            parent.attrib["colour"] = "black"
                            assert parent.attrib["file"] not in updatedJobFiles
                            updatedJobFiles.add(parent.attrib["file"])
                        writeJobs([ job, parent ]) #Check point
                    else:
                        writeJobs([ job ])
                         
            elif job.attrib["colour"] == "red": #Job failed
                logger.critical("Job: %s failed" % job.attrib["file"])
                logger.critical("The log file of the failed job")
                logFile(job.attrib["log_file"], logger.critical)
                logger.critical("The log file of the slave for the failed job")
                logFile(job.attrib["slave_log_file"], logger.critical) #We log the job log file in the main loop
                #Checks
                assert len(job.find("children").findall("child")) == 0
                assert int(job.attrib["child_count"]) == int(job.attrib["black_child_count"])
                
                remainingRetryCount = int(job.attrib["remaining_retry_count"])
                if remainingRetryCount > 0: #Give it another try, maybe there is a bad node somewhere
                    job.attrib["remaining_retry_count"] = str(remainingRetryCount - 1)
                    job.attrib["colour"] = "white"
                    logger.critical("Job: %s will be restarted, it has %s goes left" % (job.attrib["file"], job.attrib["remaining_retry_count"]))
                    writeJobs([ job ]) #Check point
                else:
                    assert remainingRetryCount == 0
                    updatedJobFiles.remove(job.attrib["file"])
                    logger.critical("Job: %s is completely failed" % job.attrib["file"])
                    
            else:
                logger.debug("Job: %s is already dead, we'll get rid of it" % job.attrib["file"])
                assert job.attrib["colour"] == "dead"
                updatedJobFiles.remove(job.attrib["file"])
                totalJobFiles -= 1
                deleteJob(job, config) #This could be done earlier, but I like it this way.

        ###End of for loop
        writeJobs(jobsToIssue) #Check point, do this before issuing job, so state is not read until issued
        issueJobs(jobsToIssue, jobIDsToJobsHash, batchSystem)

                
        if len(jobIDsToJobsHash) == 0 and len(updatedJobFiles) == 0:
            logger.info("Only failed jobs and their dependents (%i total) are remaining, so exiting." % totalJobFiles)
            break
        
        if len(updatedJobFiles) > 0:
            updatedJobs = batchSystem.getUpdatedJobs() #Asks the batch system what jobs have been completed.
        else:
            updatedJobs = pauseForUpdatedJobs(batchSystem.getUpdatedJobs) #Asks the batch system what jobs have been completed.
        for jobID in updatedJobs.keys(): #Run through the map of updated jobs and their statuses
            result = updatedJobs[jobID]
            if jobID in jobIDsToJobsHash:
                if result == 0:
                    logger.debug("Batch system is reporting that the job %s ended sucessfully" % jobIDsToJobsHash[jobID])   
                else:
                    logger.critical("Batch system is reporting that the job %s failed with exit value %i" % (jobIDsToJobsHash[jobID], result))  
                processFinishedJob(jobID, result, updatedJobFiles, jobIDsToJobsHash)
            else:
                logger.info("A result seems to already have been processed: %i" % jobID) #T
        
        if time.time() - timeSinceJobsLastRescued >= rescueJobsFrequency: #We only rescue jobs every N seconds
            reissueOverLongJobs(updatedJobFiles, jobIDsToJobsHash, config, batchSystem)
            logger.info("Reissued any over long jobs")
            
            reissueMissingJobs(updatedJobFiles, jobIDsToJobsHash, batchSystem)
            logger.info("Rescued any (long) missing jobs")
            timeSinceJobsLastRescued = time.time()
        #Going to sleep to let the job system catch up.
        time.sleep(waitDuration)
    
    if stats:
        fileHandle = open(config.attrib["stats"], 'a')
        fileHandle.write("<total_time time='%s' clock='%s'/></stats>" % (str(time.time() - startTime), str(getTotalCpuTime() - startClock)))
        fileHandle.close()
    
    logger.info("Finished the main loop")     
    
    return totalJobFiles #The job files that remain correspond to failed jobs and their dependents
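The loop is driven by a per-job colour attribute. As a reading aid (summarised from the code above, not part of the original), the lifecycle is roughly:

COLOUR_TRANSITIONS = {
    "white": ["grey"],                  #Issued to the batch system
    "grey":  ["black", "red"],          #Set outside this loop: by the slave, or by processFinishedJob after a crash
    "black": ["blue", "white", "dead"], #Children spawned / a follow-on remains / finished
    "blue":  ["black"],                 #All children done (black_child_count == child_count)
    "red":   ["white"],                 #Retried while remaining_retry_count > 0
}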
Example #11
def processFinishedJob(jobID, resultStatus, updatedJobFiles, jobIDsToJobsHash):
    """Function reads a processed job file and updates it state.
    """
    assert jobID in jobIDsToJobsHash
    jobFile = jobIDsToJobsHash.pop(jobID)
    
    updatingFileIsPresent = os.path.isfile(jobFile + ".updating")
    newFileIsPresent = os.path.isfile(jobFile + ".new")
    
    if resultStatus == 0 and updatingFileIsPresent:
        logger.critical("Despite the batch system claiming success there is a .updating file present: %s", jobFile + ".updating")
        
    if resultStatus == 0 and newFileIsPresent:
        logger.critical("Despite the batch system claiming success there is a .new file present: %s", jobFile + ".new")

    if resultStatus != 0 or newFileIsPresent or updatingFileIsPresent: #Job not successful according to the batch system, or according to the existence of a .new or .updating file
        if updatingFileIsPresent: #The job failed while attempting to write the job file.
            logger.critical("There was an .updating file for the crashed job: %s" % jobFile)
            if os.path.isfile(jobFile + ".new"): #The job failed while writing the updated job file.
                logger.critical("There was an .new file for the crashed job: %s" % jobFile)
                os.remove(jobFile + ".new") #The existance of the .updating file means it wasn't complete
            os.remove(jobFile + ".updating") #Delete second the updating file second to preserve a correct state
            assert os.path.isfile(jobFile)
            job = ET.parse(jobFile).getroot() #The original must still be there.
            assert job.find("children").find("child") == None #The original can not reflect the end state of the job.
            assert int(job.attrib["black_child_count"]) == int(job.attrib["child_count"])
            job.attrib["colour"] = "red" #It failed, so we mark it so and continue.
            writeJobs([ job ])
            logger.critical("We've reverted to the original job file and marked it as failed: %s" % jobFile)
        else:
            if newFileIsPresent: #The job was not properly updated before crashing
                logger.critical("There is a valid .new file %s" % jobFile)
                if os.path.isfile(jobFile):
                    os.remove(jobFile)
                os.rename(jobFile + ".new", jobFile)
                job = ET.parse(jobFile).getroot()
                if job.attrib["colour"] == "grey": #The job failed while preparing to run another job on the slave
                    assert job.find("children").find("child") == None #File 
                    job.attrib["colour"] = "red"
                    writeJobs([ job ])
                assert job.attrib["colour"] in ("black", "red")
            else:
                logger.critical("There was no valid .new file %s" % jobFile)
                assert os.path.isfile(jobFile)
                job = ET.parse(jobFile).getroot() #The job may have failed before or after creating this file, we check the state.
                if job.attrib["colour"] == "black": #The job completed okay, so we'll keep it
                    logger.critical("Despite the batch system job failing, the job appears to have completed okay")
                else:
                    assert job.attrib["colour"] in ("grey", "red")
                    assert job.find("children").find("child") == None #File 
                    assert int(job.attrib["black_child_count"]) == int(job.attrib["child_count"])
                    if job.attrib["colour"] == "grey":
                        job.attrib["colour"] = "red"
                        writeJobs([ job ])
                    logger.critical("We've reverted to the original job file and marked it as failed: %s" % jobFile)
    else:
        job = ET.parse(jobFile).getroot()
        
    ##Now check the log files exist, because they must ultimately be cleaned up by their respective file trees.
    if not os.path.isfile(job.attrib["log_file"]): #We need to keep these files in existence.
        open(job.attrib["log_file"], 'w').close()
        logger.critical("The log file %s for job %s had disappeared" % (job.attrib["log_file"], jobFile))
    if not os.path.isfile(job.attrib["slave_log_file"]):
        open(job.attrib["slave_log_file"], 'w').close()
        logger.critical("The slave log file %s for job %s had disappeared" % (job.attrib["slave_log_file"], jobFile))
    
    assert jobFile not in updatedJobFiles
    updatedJobFiles.add(jobFile) #Now we know the job is done we can add it to the list of updated job files
    logger.debug("Added job: %s to active jobs" % jobFile)
Example #12
def qsub(qsubline):
    logger.debug("**"+" ".join(qsubline))
    process = subprocess.Popen(qsubline, stdout=subprocess.PIPE)
    result = int(process.stdout.readline().strip().split('.')[0]) #-terse makes qsub print just the job id; drop any array-task suffix after the '.'
    logger.debug("Got the job id: %s" % str(result))
    return result
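Combined with prepareQsub from Example #4, issuing a job could look like the sketch below (the sleep command is a placeholder). qsub's -terse flag prints only the job id, and array jobs carry a ".start-end:step" suffix, which the split('.')[0] above discards:

qsubline = prepareQsub(cpu=1, mem=2097152) + ["sleep", "60"] #Hypothetical job
sgeJobID = qsub(qsubline)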