# NOTE(review): this fragment arrived with every newline (and therefore all
# indentation) stripped, and it starts mid-script: the `else:` below pairs
# with a conditional or loop whose header lies outside this view, so the
# original block structure cannot be reconstructed safely from here.
# What the visible statements do: report which sub-locations restarted jobs
# were redistributed to (batch_job.getOptimizationResult()), poll with
# time.sleep(3) until the multipart job finishes, then dump each job's
# name/status/stdout/stderr, and finally sys.exit() to clean up threads.
# Left byte-identical on purpose — reformatting would require guessing the
# missing enclosing structure.
print "Job distribution for restarted jobs:" for subLoc in batch_job.getOptimizationResult().keySet(): resubmitted = True print subLoc + " : " + batch_job.getOptimizationResult().get( subLoc) else: print "Job not restarted (yet)." print "Job not finished yet. Waiting..." time.sleep(3) print "Multipartjob " + batch_job.getBatchJobname() + " finished." # finally, everything is ready. We could do a lot more here, but you get the idea... for job in batch_job.getJobs(): print "Job: " + job.getJobname() + ", Status: " + job.getStatusString( False) print print "Stdout: " print job.getStdOutContent() print print "Stderr: " print job.getStdErrContent() print print print "Finished." # don't forget to exit properly. this cleans up possible existing threads/executors sys.exit()
from grisu.frontend.control.login import LoginManager from grisu.frontend.model.job import JobObject si = LoginManager.loginCommandline("BeSTGRID-DEV") print "Logged in." job = JobObject(si) job.setUniqueJobname("cat_job", si) job.setCommandline("cat text0.txt") job.addInputFileUrl("/home/markus/tmp/text0.txt") job.createJob("/nz/nesi") # job.setSubmissionLocation('[email protected]:ng2.auckland.ac.nz') job.submitJob() print "Job submitted." job.waitForJobToFinish(10) print "Job finished. Status: " + job.getStatusString(False) print "Stdout: " + job.getStdOutContent() print "Stderr: " + job.getStdErrContent() job.kill(True)
# set the commandline that needs to be executed job.setCommandline("echo \"Hello World\"") job.addInputFileUrl('/home/markus/test/singleJobFile_0.txt'); # create the job on the backend and specify the VO to use job.createJob("/ARCS/NGAdmin") print 'Submitting job...' # submit the job job.submitJob() print 'Waiting for the job to finish...' # this waits until the job is finished. Checks every 10 seconds (which would be too often for a real job) finished = job.waitForJobToFinish(10) if not finished: print "not finished yet." # kill the job on the backend anyway job.kill(True); else: print 'Job finished. Status: '+job.getStatusString(False) # download and cache the jobs' stdout and display it's content print "Stdout: " + job.getStdOutContent() # download and cache the jobs' stderr and display it's content print "Stderr: " + job.getStdErrContent() # kill and clean the job on the backend job.kill(True) # don't forget to exit properly. this cleans up possible existing threads/executors sys.exit()
job.setJobname("echo_job-1") # job name must be unique print 'Set jobname to: ' + job.getJobname() # set the name of the application as it is published in MDS. # "generic" means not to use MDS for the lookup. job.setApplication("generic") # "generic" jobs require a submission location to be specified job.setSubmissionLocation("all.q:ng2.scenzgrid.org#SGE") # set the command that needs to be executed job.setCommandline("echo \"Hello World\"") # create the job on the backend and specify the VO to use job.createJob("/ARCS/BeSTGRID") print 'Submitting job...' # submit the job job.submitJob() print 'Waiting for the job to finish...' # this waits until the job is finished. Checks every 10 seconds (which would be too often for a real job) finished = job.waitForJobToFinish(10) print 'Job finished. Status: ' + job.getStatusString(False) # download and cache the jobs' stdout and display it's content print "Stdout: " + job.getStdOutContent() # download and cache the jobs' stderr and display it's content print "Stderr: " + job.getStdErrContent() # kill and clean the job on the backend job.kill(True) # don't forget to exit properly. this cleans up possible existing threads/executors sys.exit()
print batch_job.getOptimizationResult() print "Submitting jobs..." batch_job.submit() # now we wait for all jobs to be finished, checking for updates every 10 seconds. in real life we would set a much higher check intervall since we don't want to overload # the backend and also it's not really necessary batch_job.waitForJobToFinish(10) print "BatchJob "+batch_job.getJobname()+" finished." # finally, everything is ready. We could do a lot more here, but you get the idea... for job in batch_job.getJobs(): print "Job: "+job.getJobname()+", Status: "+job.getStatusString(False) print "Submitted to: "+job.getJobProperty(Constants.SUBMISSION_SITE_KEY) print print "Stdout: " print job.getStdOutContent() print print "Stderr: " print job.getStdErrContent() print print print "Finished." # don't forget to exit properly. this cleans up possible existing threads/executors sys.exit()
# NOTE(review): this fragment arrived with every newline stripped AND begins
# mid-expression — `).get(job).getLocalizedMessage()` is the tail of a
# statement whose head is outside this view (presumably inside an exception
# handler, given error.printStackTrace()/sys.exit(1)). It also ends mid-loop
# (the final for/if has only one visible statement). The enclosing structure
# cannot be reconstructed safely from here, so the code is left byte-identical.
# What the visible statements do: report a submission failure and abort;
# otherwise time the submission, busy-wait (3 s polls) for each job in
# `jobs` to finish, mkdir an output directory named base_job_name + 'output'
# (bare `except:` aborts on any mkdir failure), then start downloading
# stdout for each successful job.
).get(job).getLocalizedMessage() print "========================" time.sleep(3) error.printStackTrace() sys.exit(1) time_elapsed = time.time() - time_start print "INFO: Job submission for " + job.getJobname() + " took " + str( time_elapsed) + " seconds" print "INFO: Wait for jobs to finish" for job in jobs: sys.stdout.write("INFO: Waiting for " + job.getJobname() + ".") while not job.isFinished(): sys.stdout.write(".") time.sleep(3) print ".Status: " + job.getStatusString(False) # Create an output directory output_dir = base_job_name + 'output' try: os.mkdir(output_dir) print "INFO: Output directory is " + output_dir except: print "HALT: Could not create output directory " + output_dir sys.exit(1) # Retrieve job output print "INFO: Downloading output to " + output_dir for job in jobs: if job.isSuccessful(True): print "INFO: Downloading stdout for " + job.getJobname()
# NOTE(review): this fragment (a longer variant of the previous one) arrived
# with every newline stripped. It starts inside an exception handler whose
# `except ... as error:` header is outside this view (`error` is unbound
# here), and it ends mid-loop — the final for/if has only one visible
# statement, so the original loop body is truncated. The enclosing structure
# cannot be reconstructed safely, so the code is left byte-identical.
# What the visible statements do: on a backend exception, print the per-job
# localized failure message, the stack trace, and sys.exit(1); otherwise
# report submission timing, poll each job in `jobs` every 3 s until
# finished, create base_job_name + 'output' (bare `except:` aborts on any
# mkdir failure), then start downloading stdout for each successful job.
# Minor nit for when this is reflowed: `print"====..."` is missing a space.
print "HALT: Exception from grisu backend!" print "Job: " + job.getJobname() + ", Error: " + error.getFailures().get(job).getLocalizedMessage() print"========================" time.sleep(3) error.printStackTrace() sys.exit(1) time_elapsed = time.time() - time_start print "INFO: Job submission for " + job.getJobname() + " took " + str(time_elapsed) + " seconds" print "INFO: Wait for jobs to finish" for job in jobs: sys.stdout.write("INFO: Waiting for " + job.getJobname() + ".") while not job.isFinished(): sys.stdout.write(".") time.sleep(3) print ".Status: " + job.getStatusString(False) # Create an output directory output_dir = base_job_name + 'output' try: os.mkdir(output_dir) print "INFO: Output directory is " + output_dir except: print "HALT: Could not create output directory " + output_dir sys.exit(1) # Retrieve job output print "INFO: Downloading output to " + output_dir for job in jobs: if job.isSuccessful(True): print "INFO: Downloading stdout for " + job.getJobname()