# NOTE(review): this chunk arrived with all formatting collapsed onto one line;
# the indentation below is reconstructed. It reads like the tail of a polling
# loop (report restart status, sleep, poll again) followed by the final report
# once the multi-part job is done — confirm the enclosing loop in the full file.
if restarted:
    # The optimization result maps submission locations to a description of
    # what was resubmitted there (Java Map, hence keySet()/get()).
    print "Job distribution for restarted jobs:"
    for subLoc in multiPartJob.getOptimizationResult().keySet():
        # presumably a flag read elsewhere in the script; set on every
        # iteration — verify against the rest of the file
        resubmitted = True
        print subLoc + " : " +multiPartJob.getOptimizationResult().get(subLoc)
else:
    print "Job not restarted (yet)."
# Still waiting for the batch job to complete; back off briefly before the
# next status poll.
print "Job not finished yet. Waiting..."
time.sleep(3)
print "Multipartjob "+multiPartJob.getBatchJobname()+" finished."
# finally, everything is ready. We could do a lot more here, but you get the idea...
# Dump name, status, stdout and stderr for every child job of the batch.
for job in multiPartJob.getJobs():
    print "Job: "+job.getJobname()+", Status: "+job.getStatusString(False)
    print
    print "Stdout: "
    print job.getStdOutContent()
    print
    print "Stderr: "
    print job.getStdErrContent()
    print
print
print "Finished."
# don't forget to exit properly. this cleans up possible existing threads/executors
sys.exit()
# NOTE(review): formatting was collapsed onto one line in the source; the
# indentation below is reconstructed. In particular it is ambiguous whether
# time.sleep(5) belongs inside the restart branch — placed at top level here
# as the minimal reading; confirm against the original script.
# Report overall progress and how many child jobs have failed so far.
print batchJob.getProgress()
print str(batchJob.getNumberOfFailedJobs())
if batchJob.getNumberOfFailedJobs() > 0:
    # Resubmit every failed job using the default policy; the second argument
    # is a boolean flag to restart() — presumably "wait for completion" or
    # similar; verify against the BatchJobObject API.
    print str(batchJob.getNumberOfFailedJobs()) + ' failed jobs found. restarting...'
    failedpolicy = DefaultResubmitPolicy()
    batchJob.restart(failedpolicy, True)
    print 'Restart finished.'
time.sleep(5)
# Collect jobs whose stdout contains the word 'error' so they could be
# resubmitted selectively.
jobsToRestart = []
for job in batchJob.getJobs():
    print "Job: "+job.getJobname()+", Status: "+job.getStatusString(False)
    try:
        output = job.getStdOutContent()
        index = output.find('error')
        if index != -1:
            # it doesn't actually make any sense to restart this job, since it would
            # obviously have the same result again. This is just to demonstrate how to parse
            # for example the stdout file
            # if you don't want to parse stdout but another output file, that's possible as well, of course
            jobsToRestart.append(job)
    except JobException:
        # stdout may be unavailable (e.g. job never started) — report and move on
        print 'Could not read stdout for job ' + job.getJobname()