Example 1
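    # inside the per-job loop (not shown in this fragment; see the hedged sketch after this example):
    # set the R command line for this job, then add the job to the batch job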
    job.setCommandline('R --no-readline --no-restore --no-save -f '+path_to_inputfile)

    batch_job.addJob(job)
    
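# batch-wide settings: stage the shared input file and set the default number of CPUs and the walltime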
batch_job.addInputFile('/home/markus/Desktop/R/'+inputfilename)
batch_job.setDefaultNoCpus(1)
batch_job.setDefaultWalltimeInSeconds(walltime)


print 'preparing jobs on backend...'

batch_job.prepareAndCreateJobs(redistribute)

if redistribute:
    print 'job distribution:'
    print batch_job.getOptimizationResult()

print 'submitting jobs to grid...'
batch_job.submit(True)

print 'submission finished...'
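
For context, the indented calls at the top of this example normally sit inside a loop that builds one job per input file. A minimal hedged sketch of such a loop is shown below; the JobObject constructor, the service-interface handle 'si' and the 'input_files' list are illustrative placeholders, not part of the original snippet.

# hedged sketch of the per-job loop assumed above; 'si', 'input_files' and the
# JobObject constructor are illustrative placeholders, not from the original
for path_to_inputfile in input_files:
    # create one job and give it the R command line for this input file
    job = JobObject(si)
    job.setCommandline('R --no-readline --no-restore --no-save -f ' + path_to_inputfile)
    # add the job to the batch job so it is submitted as part of the batch
    batch_job.addJob(job)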

Example 2
try:
    print "Creating jobs on the backend and staging files..."
    # by specifying "True" we tell the backend to automatically distribute the jobs to all available submission locations.
    # this can be fine-tuned by excluding or including sites. another option would be to specify the submission location
    # for every single job and to set "False" below (this would make job submission faster, since jobs don't need to be re-distributed/moved on the backend).
    batch_job.prepareAndCreateJobs(True)
except (JobsException), error:
    for job in error.getFailures().keySet():
        print "Job: " + job.getJobname() + ", Error: " + error.getFailures(
        ).get(job).getLocalizedMessage()

    sys.exit()

print "Job distribution:"
print batch_job.getOptimizationResult()

print "Submitting jobs..."
batch_job.submit()

# now we wait for all jobs to finish, checking for updates every 10 seconds. in real life we would use a much higher
# check interval, since we don't want to overload the backend and such frequent polling isn't really necessary.
batch_job.waitForJobToFinish(10)

print "BatchJob " + batch_job.getJobname() + " finished."

# finally, everything is ready. We could do a lot more here, but you get the idea...
for job in batch_job.getJobs():
    print "Job: " + job.getJobname() + ", Status: " + job.getStatusString(
        False)
    print "Submitted to: " + job.getJobProperty(Constants.SUBMISSION_SITE_KEY)
Example 3
try:
    print "Creating jobs on the backend and staging files..."
    # by specifying "True" we tell the backend to automatically distribute the jobs to all available submission locations.
    # this can be fine-tuned by excluding or including sites. another option would be to specify the submission location
    # for every single job and to set "False" below (this would make job submission faster, since jobs don't need to be re-distributed/moved on the backend).
    batch_job.prepareAndCreateJobs(True)
except (JobsException), error:
    for job in error.getFailures().keySet():
        print "Job: " + job.getJobname() + ", Error: " + error.getFailures(
        ).get(job).getLocalizedMessage()

    sys.exit()

print "Job distribution:"
for subLoc in batch_job.getOptimizationResult().keySet():
    print subLoc + " : " + batch_job.getOptimizationResult().get(subLoc)

print "Submitting jobs..."
batch_job.submit()

restarted = False

# now we wait for all jobs to finish. Actually, we should probably also test whether each job was successful...
while not batch_job.isFinished(True):
    # printing some stats
    print batch_job.getProgress()

    # restart failed jobs every time we poll
    failedpolicy = DefaultResubmitPolicy()
    # to resubmit only failed jobs, we have to remove the waiting-jobs resubmission that is set by default
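
The snippet is cut off at this point. A hedged sketch of how the loop body might continue is given below; the removeSubmitType/restart calls are assumptions about the API (and are therefore left commented out), and it assumes 'time' is imported at the top of the script.

    # --- hedged continuation, not part of the original snippet ---
    # drop the default waiting-jobs resubmission so that only failed jobs are restarted,
    # then hand the policy back to the batch job; method/constant names are assumptions:
    # failedpolicy.removeSubmitType(DefaultResubmitPolicy.WAITING_JOBS)
    # batch_job.restart(failedpolicy, True)

    # wait a bit before polling the batch job again
    time.sleep(10)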