def createJobSpec(nodes, walltime, command, jobName, outputFile=None):
    """Build a PanDA JobSpec for an LQCD-style JSON payload.

    Args:
        nodes: number of nodes the payload requests.
        walltime: walltime the payload requests.
        command: command line the payload should run.
        jobName: name recorded on the job; also used for the log LFN.
        outputFile: optional output file name forwarded to the payload.

    Returns:
        A populated JobSpec with a single attached log FileSpec.
    """
    transformation = '#json#'
    # FIX: uuidgen output ends with a newline; strip it so the newline does
    # not become part of the dataset name.
    datasetName = 'panda.destDB.%s' % subprocess.check_output('uuidgen').strip()
    destName = 'local'
    prodSourceLabel = 'user'
    currentPriority = 1000

    job = JobSpec()
    job.jobDefinitionID = int(time.time()) % 10000
    job.jobName = jobName
    job.VO = VO  # module-level VO constant
    job.transformation = transformation
    job.destinationDBlock = datasetName
    job.destinationSE = destName
    job.currentPriority = currentPriority
    job.prodSourceLabel = prodSourceLabel
    job.computingSite = QUEUE_NAME  # module-level queue name
    # cmtConfig is repurposed here to carry a small JSON descriptor.
    job.cmtConfig = json.dumps({'name': job.jobName, 'next': None})

    # Payload description serialized into jobParameters.
    lqcd_command = {
        "nodes": nodes,
        "walltime": walltime,
        "name": job.jobName,
        "command": command
    }
    if outputFile:
        lqcd_command['outputFile'] = outputFile
    job.jobParameters = json.dumps(lqcd_command)

    # Log file spec: run-time artifacts are tarred into this archive.
    fileOL = FileSpec()
    fileOL.lfn = "%s.job.log.tgz" % job.jobName.strip()
    fileOL.destinationDBlock = job.destinationDBlock
    fileOL.destinationSE = job.destinationSE
    fileOL.dataset = job.destinationDBlock
    fileOL.type = 'log'
    job.addFile(fileOL)
    return job
def createJob(self, name, nodes, walltime, command, inputs=None, queuename=None):
    """Assemble a PanDA JobSpec from this submitter's stored defaults.

    The payload description (nodes/walltime/name/command) is serialized as
    JSON into jobParameters; ``inputs`` is stashed on cmtConfig, and
    ``queuename`` overrides the default computing site when provided.
    """
    spec = JobSpec()
    spec.jobDefinitionID = int(time.time()) % 10000
    spec.jobName = "%s" % commands.getoutput('uuidgen')
    spec.VO = self.vo
    spec.transformation = self.transformation
    spec.destinationDBlock = self.datasetName
    spec.destinationSE = self.destName
    spec.currentPriority = self.currentPriority
    spec.prodSourceLabel = self.prodSourceLabel
    if queuename is None:
        spec.computingSite = self.site
    else:
        spec.computingSite = queuename

    payload = {
        "nodes": nodes,
        "walltime": walltime,
        "name": name,
        "command": command
    }
    spec.jobParameters = json.dumps(payload)

    # Single attached file: the job log archive.
    log_spec = FileSpec()
    log_spec.lfn = "%s.job.log.tgz" % spec.jobName
    log_spec.destinationDBlock = spec.destinationDBlock
    log_spec.destinationSE = spec.destinationSE
    log_spec.dataset = spec.destinationDBlock
    log_spec.type = 'log'
    spec.addFile(log_spec)

    spec.cmtConfig = inputs
    return spec
# --- LSST submission fragment ---
# NOTE(review): 'job', 'datasetName', 'site' and 'aSrvID' are created earlier
# in the original script, outside this chunk; only the spec completion and the
# submit call are visible here.
job.transformation = 'http://pandawms.org/pandawms-jobcache/lsst-trf-phosim332.sh'
job.destinationDBlock = datasetName
#job.destinationSE = destName
job.destinationSE = 'local'
job.currentPriority = 1000
#job.prodSourceLabel = 'ptest'
#job.prodSourceLabel = 'panda'
#job.prodSourceLabel = 'ptest'
#job.prodSourceLabel = 'test'
#job.prodSourceLabel = 'ptest' ### 2014-01-27
#job.prodSourceLabel = 'user'
job.prodSourceLabel = 'panda'
job.computingSite = site
job.jobParameters = ""
job.VO = "lsst"

# Log file spec: the pilot tars all run-time artifacts into this archive.
fileOL = FileSpec()
fileOL.lfn = "%s.job.log.tgz" % job.jobName
fileOL.destinationDBlock = job.destinationDBlock
fileOL.destinationSE = job.destinationSE
fileOL.dataset = job.destinationDBlock
fileOL.type = 'log'
job.addFile(fileOL)

# Submit and print the PanDA IDs assigned to the job (Python 2 print).
s,o = Client.submitJobs([job],srvID=aSrvID)
print s
for x in o:
    print "PandaID=%s" % x[0]
def send_job(jobid, siteid):
    """Build and submit a PanDA job for a stored web-interface job record.

    Looks up the Job and site records, renders the run command from the
    distributive's command template, attaches input/output/log FileSpecs
    (with file-catalog replica bookkeeping), submits to PanDA, and stores
    the returned PanDA ID on the job.

    Args:
        jobid: primary key of the Job record to send.
        siteid: identifier of the target site record.

    Returns:
        0 (always; the job status carries success/failure information).
    """
    _logger.debug('Jobid: ' + str(jobid))

    site = sites_.get(siteid)
    job = jobs_.get(int(jobid))
    cont = job.container
    files_catalog = cont.files

    fscope = getScope(job.owner.username)
    datasetName = '{}:{}'.format(fscope, cont.guid)
    distributive = job.distr.name
    release = job.distr.release

    # Prepare runScript: substitute placeholders in the command template.
    parameters = job.distr.command
    parameters = parameters.replace("$COMMAND$", job.params)
    parameters = parameters.replace("$USERNAME$", job.owner.username)
    parameters = parameters.replace("$WORKINGGROUP$", job.owner.working_group)

    # Prepare metadata
    metadata = dict(user=job.owner.username)

    # Prepare PanDA object
    pandajob = JobSpec()
    pandajob.jobDefinitionID = int(time.time()) % 10000
    pandajob.jobName = cont.guid
    pandajob.transformation = client_config.DEFAULT_TRF
    pandajob.destinationDBlock = datasetName
    pandajob.destinationSE = site.se
    pandajob.currentPriority = 1000
    pandajob.prodSourceLabel = 'user'
    pandajob.computingSite = site.ce
    pandajob.cloud = 'RU'
    pandajob.VO = 'atlas'
    pandajob.prodDBlock = "%s:%s" % (fscope, pandajob.jobName)
    pandajob.coreCount = job.corecount
    pandajob.metadata = json.dumps(metadata)
    #pandajob.workingGroup = job.owner.working_group

    if site.encode_commands:
        # It requires script wrapper on cluster side
        pandajob.jobParameters = '%s %s %s "%s"' % (cont.guid, release, distributive, parameters)
    else:
        pandajob.jobParameters = parameters

    # Attach catalogued input and output files.
    has_input = False
    for fcc in files_catalog:
        if fcc.type == 'input':
            f = fcc.file
            guid = f.guid
            fileIT = FileSpec()
            fileIT.lfn = f.lfn
            fileIT.dataset = pandajob.prodDBlock
            fileIT.prodDBlock = pandajob.prodDBlock
            fileIT.type = 'input'
            fileIT.scope = fscope
            fileIT.status = 'ready'
            fileIT.GUID = guid
            pandajob.addFile(fileIT)
            has_input = True
        if fcc.type == 'output':
            f = fcc.file
            fileOT = FileSpec()
            fileOT.lfn = f.lfn
            fileOT.destinationDBlock = pandajob.prodDBlock
            fileOT.destinationSE = pandajob.destinationSE
            fileOT.dataset = pandajob.prodDBlock
            fileOT.type = 'output'
            fileOT.scope = fscope
            fileOT.GUID = f.guid
            pandajob.addFile(fileOT)
            # Save replica meta
            fc.new_replica(f, site)

    if not has_input:
        # PanDA expects at least one input file: add a fake one.
        fileIT = FileSpec()
        fileIT.lfn = "fake.input"
        fileIT.dataset = pandajob.prodDBlock
        fileIT.prodDBlock = pandajob.prodDBlock
        fileIT.type = 'input'
        fileIT.scope = fscope
        fileIT.status = 'ready'
        fileIT.GUID = "fake.guid"
        pandajob.addFile(fileIT)

    # Prepare log file
    fileOL = FileSpec()
    fileOL.lfn = "%s.log.tgz" % pandajob.jobName
    fileOL.destinationDBlock = pandajob.destinationDBlock
    fileOL.destinationSE = pandajob.destinationSE
    fileOL.dataset = '{}:logs'.format(fscope)
    fileOL.type = 'log'
    fileOL.scope = 'panda'
    pandajob.addFile(fileOL)

    # Save log meta
    log = File()
    log.scope = fscope
    log.lfn = fileOL.lfn
    log.guid = getGUID(log.scope, log.lfn)
    log.type = 'log'
    log.status = 'defined'
    files_.save(log)
    # Save replica meta
    fc.new_replica(log, site)
    # Register file in container
    fc.reg_file_in_cont(log, cont, 'log')

    # Submit job
    o = submitJobs([pandajob])
    x = o[0]
    try:
        # Update PandaID from the submission result.
        PandaID = int(x[0])
        job.pandaid = PandaID
        job.ce = site.ce
    except Exception:
        # FIX: was a bare 'except:', which also swallowed SystemExit and
        # KeyboardInterrupt; any failure to parse the result marks the job.
        job.status = 'submit_error'
    jobs_.save(job)
    return 0
def main():
    """Send staged/failed COMPASS jobs of active tasks to PanDA.

    For every task in status 'send' or 'running', up to ``max_send_amount``
    jobs in status 'staged' or 'failed' (with retry attempts left) are turned
    into PanDA JobSpecs and submitted; the local Job/Task records are then
    updated with the returned PanDA IDs.
    """
    logger.info('Getting tasks with status send and running')
    tasks_list = Task.objects.all().filter(
        Q(status='send') | Q(status='running'))
    #tasks_list = Task.objects.all().filter(name='dvcs2017align7_mu-')
    logger.info('Got list of %s tasks' % len(tasks_list))

    # Conditions-DB servers; one is picked at random per job below.
    cdbServerArr = ['compassvm23.cern.ch', 'compassvm24.cern.ch']
    cdbServer = cdbServerArr[0]

    for t in tasks_list:
        max_send_amount = 1000

        logger.info('Getting jobs in status staged or failed for task %s' % t)
        jobs_list_count = Job.objects.all().filter(task=t).filter(
            attempt__lt=t.max_attempts).filter(
                Q(status='staged') | Q(status='failed')).count()
        if jobs_list_count > 50:
            jobs_list = Job.objects.all().filter(task=t).filter(
                attempt__lt=t.max_attempts).filter(
                    Q(status='staged') | Q(status='failed')).order_by(
                        '-number_of_events')[:max_send_amount]
        else:
            jobs_list = Job.objects.all().filter(task=t).filter(
                attempt__lt=t.max_attempts).filter(
                    Q(status='staged') | Q(status='failed')).order_by(
                        '-number_of_events')[:jobs_list_count]
        logger.info('Got list of %s jobs' % len(jobs_list))

        i = 0
        for j in jobs_list:
            if j.attempt >= j.task.max_attempts:
                logger.info(
                    'Number of retry attempts has reached for job %s of task %s'
                    % (j.file, j.task.name))
                continue
            # FIX: was 'i > max_send_amount', an off-by-one that would let one
            # extra job through; '>=' caps the batch at exactly max_send_amount
            # (consistent with the test-sender variant of main()).
            if i >= max_send_amount:
                break
            logger.info('Job %s of %s' % (i, max_send_amount))
            logger.info('Going to send job %s of %s task' % (j.file, j.task.name))

            umark = commands.getoutput('uuidgen')
            datasetName = 'panda.destDB.%s' % umark
            # PanDA will not try to move output data; data will be placed by
            # the pilot (based on schedconfig).
            destName = 'local'

            # Derive the per-chunk file names used by the payload script.
            TMPRAWFILE = j.file[j.file.rfind('/') + 1:]
            logger.info(TMPRAWFILE)
            TMPMDSTFILE = 'mDST-%(runNumber)s-%(runChunk)s-%(prodSlt)s-%(phastVer)s.root' % {
                'input_file': j.file,
                'runNumber': j.run_number,
                'runChunk': j.chunk_number,
                'prodSlt': j.task.prodslt,
                'phastVer': j.task.phastver
            }
            logger.info(TMPMDSTFILE)
            TMPHISTFILE = '%(runNumber)s-%(runChunk)s-%(prodSlt)s.root' % {
                'runNumber': j.run_number,
                'runChunk': j.chunk_number,
                'prodSlt': j.task.prodslt
            }
            logger.info(TMPHISTFILE)
            TMPRICHFILE = 'gfile_%(runNumber)s-%(runChunk)s.gfile' % {
                'runNumber': j.run_number,
                'runChunk': j.chunk_number
            }
            logger.info(TMPRICHFILE)
            EVTDUMPFILE = 'evtdump%(prodSlt)s-%(runChunk)s-%(runNumber)s.raw' % {
                'prodSlt': j.task.prodslt,
                'runNumber': j.run_number,
                'runChunk': j.chunk_number
            }
            logger.info(EVTDUMPFILE)
            STDOUTFILE = '%(prodNameOnly)s.%(runNumber)s-%(runChunk)s-%(prodSlt)s.stdout' % {
                'prodNameOnly': j.task.production,
                'runNumber': j.run_number,
                'runChunk': j.chunk_number,
                'prodSlt': j.task.prodslt
            }
            logger.info(STDOUTFILE)
            STDERRFILE = '%(prodNameOnly)s.%(runNumber)s-%(runChunk)s-%(prodSlt)s.stderr' % {
                'prodNameOnly': j.task.production,
                'runNumber': j.run_number,
                'runChunk': j.chunk_number,
                'prodSlt': j.task.prodslt
            }
            logger.info(STDERRFILE)
            PRODSOFT = j.task.soft
            logger.info(PRODSOFT)
            ProdPathAndName = j.task.home + j.task.path + j.task.soft

            job = JobSpec()
            job.VO = 'vo.compass.cern.ch'
            job.taskID = j.task.id
            job.jobDefinitionID = 0
            job.jobName = '%(prodName)s-%(fileYear)s--%(runNumber)s-%(runChunk)s-%(prodSlt)s-%(phastVer)s' % {
                'prodName': j.task.production,
                'fileYear': j.task.year,
                'runNumber': j.run_number,
                'runChunk': j.chunk_number,
                'prodSlt': j.task.prodslt,
                'phastVer': j.task.phastver
            }
            job.transformation = j.task.type  # payload (can be URL as well)
            job.destinationDBlock = datasetName
            job.destinationSE = destName
            job.currentPriority = 2000
            if j.task.type == 'DDD filtering':
                job.currentPriority = 1000
            job.prodSourceLabel = 'prod_test'
            job.computingSite = j.task.site
            job.attemptNr = j.attempt + 1
            job.maxAttempt = j.task.max_attempts
            if j.status == 'failed':
                job.parentID = j.panda_id
            head, tail = os.path.split(j.file)
            cdbServer = cdbServerArr[random.randrange(len(cdbServerArr))]

            # Logs and all files generated during execution are placed in the
            # log archive (except the declared output files).
            if j.task.type == 'test production' or j.task.type == 'mass production' or j.task.type == 'technical production':
                if j.task.site == 'BW_COMPASS_MCORE':
                    job.jobParameters = 'ppwd=$(pwd);export COMPASS_SW_PREFIX=/scratch/sciteam/criedl/projectdata/;export COMPASS_SW_PATH=%(prodPath)s;export COMPASS_PROD_NAME=%(prodName)s;export TMPRAWFILE=%(TMPRAWFILE)s;export TMPMDSTFILE=%(TMPMDSTFILE)s;export TMPHISTFILE=%(TMPHISTFILE)s;export TMPRICHFILE=%(TMPRICHFILE)s;export prodSlt=%(prodSlt)s;export EVTDUMPFILE=%(EVTDUMPFILE)s;export PRODSOFT=%(PRODSOFT)s;cp %(input_file)s .;coralpath=%(ProdPathAndName)s/coral;cd -P $coralpath;export coralpathsetup=$coralpath"/setup.sh";source $coralpathsetup;cd $ppwd;$CORAL/../phast/coral/coral.exe %(ProdPathAndName)s/%(template)s;if [ ! -s testevtdump.raw ]; then echo "PanDA message: the file is empty">testevtdump.raw; fi;cp payload_stderr.txt payload_stderr.out;cp payload_stdout.txt payload_stdout.out;gzip payload_stderr.out;gzip payload_stdout.out;rm %(tail)s' % {
                        'TMPRAWFILE': TMPRAWFILE,
                        'TMPMDSTFILE': TMPMDSTFILE,
                        'TMPHISTFILE': TMPHISTFILE,
                        'TMPRICHFILE': TMPRICHFILE,
                        'PRODSOFT': PRODSOFT,
                        'input_file': j.file,
                        'ProdPathAndName': ProdPathAndName,
                        'prodPath': j.task.path,
                        'prodName': j.task.production,
                        'template': j.task.template,
                        'tail': tail,
                        'prodSlt': j.task.prodslt,
                        'EVTDUMPFILE': EVTDUMPFILE,
                        'STDOUTFILE': STDOUTFILE,
                        'STDERRFILE': STDERRFILE
                    }
                else:
                    job.jobParameters = 'export EOS_MGM_URL=root://eoscompass.cern.ch;ppwd=$(pwd);export COMPASS_SW_PREFIX=/eos/experiment/compass/;export COMPASS_SW_PATH=%(prodPath)s;export COMPASS_PROD_NAME=%(prodName)s;export TMPRAWFILE=%(TMPRAWFILE)s;export TMPMDSTFILE=%(TMPMDSTFILE)s;export TMPHISTFILE=%(TMPHISTFILE)s;export TMPRICHFILE=%(TMPRICHFILE)s;export prodSlt=%(prodSlt)s;export EVTDUMPFILE=%(EVTDUMPFILE)s;export PRODSOFT=%(PRODSOFT)s;xrdcp -N -f root://castorpublic.cern.ch/%(input_file)s\?svcClass=compasscdr .;coralpath=%(ProdPathAndName)s/coral;cd -P $coralpath;export coralpathsetup=$coralpath"/setup.sh";source $coralpathsetup;cd $ppwd;export CDBSERVER=%(cdbServer)s;$CORAL/../phast/coral/coral.exe %(ProdPathAndName)s/%(template)s;if [ ! -s testevtdump.raw ]; then echo "PanDA message: the file is empty">testevtdump.raw; fi;cp payload_stderr.txt payload_stderr.out;cp payload_stdout.txt payload_stdout.out;gzip payload_stderr.out;gzip payload_stdout.out;rm %(tail)s' % {
                        'TMPRAWFILE': TMPRAWFILE,
                        'TMPMDSTFILE': TMPMDSTFILE,
                        'TMPHISTFILE': TMPHISTFILE,
                        'TMPRICHFILE': TMPRICHFILE,
                        'PRODSOFT': PRODSOFT,
                        'input_file': j.file,
                        'ProdPathAndName': ProdPathAndName,
                        'prodPath': j.task.path,
                        'prodName': j.task.production,
                        'template': j.task.template,
                        'tail': tail,
                        'prodSlt': j.task.prodslt,
                        'EVTDUMPFILE': EVTDUMPFILE,
                        'STDOUTFILE': STDOUTFILE,
                        'STDERRFILE': STDERRFILE,
                        'cdbServer': cdbServer
                    }
            if j.task.type == 'DDD filtering':
                job.jobParameters = 'export EOS_MGM_URL=root://eoscompass.cern.ch;ppwd=$(pwd);export COMPASS_SW_PREFIX=/eos/experiment/compass/;export COMPASS_SW_PATH=%(prodPath)s;export COMPASS_PROD_NAME=%(prodName)s;export TMPRAWFILE=%(TMPRAWFILE)s;export TMPMDSTFILE=%(TMPMDSTFILE)s;export TMPHISTFILE=%(TMPHISTFILE)s;export TMPRICHFILE=%(TMPRICHFILE)s;export prodSlt=%(prodSlt)s;export EVTDUMPFILE=%(EVTDUMPFILE)s;export PRODSOFT=%(PRODSOFT)s;xrdcp -N -f root://castorpublic.cern.ch/%(input_file)s\?svcClass=compasscdr .;coralpath=%(ProdPathAndName)s/coral;cd -P $coralpath;export coralpathsetup=$coralpath"/setup.sh";source $coralpathsetup;cd $ppwd;$CORAL/src/DaqDataDecoding/examples/how-to/ddd --filter-CAL --out=testevtdump.raw %(TMPRAWFILE)s;if [ ! -s testevtdump.raw ]; then echo "PanDA message: the file is empty">testevtdump.raw; fi;cp payload_stderr.txt payload_stderr.out;cp payload_stdout.txt payload_stdout.out;gzip payload_stderr.out;gzip payload_stdout.out;rm %(tail)s' % {
                    'TMPRAWFILE': TMPRAWFILE,
                    'TMPMDSTFILE': TMPMDSTFILE,
                    'TMPHISTFILE': TMPHISTFILE,
                    'TMPRICHFILE': TMPRICHFILE,
                    'PRODSOFT': PRODSOFT,
                    'input_file': j.file,
                    'ProdPathAndName': ProdPathAndName,
                    'prodPath': j.task.path,
                    'prodName': j.task.production,
                    'template': j.task.template,
                    'tail': tail,
                    'prodSlt': j.task.prodslt,
                    'EVTDUMPFILE': EVTDUMPFILE,
                    'STDOUTFILE': STDOUTFILE,
                    'STDERRFILE': STDERRFILE
                }

            # Output file specs: captured stdout/stderr plus the job log.
            fileOstdout = FileSpec()
            fileOstdout.lfn = "payload_stdout.out.gz"
            fileOstdout.destinationDBlock = job.destinationDBlock
            fileOstdout.destinationSE = job.destinationSE
            fileOstdout.dataset = job.destinationDBlock
            fileOstdout.type = 'output'
            job.addFile(fileOstdout)

            fileOstderr = FileSpec()
            fileOstderr.lfn = "payload_stderr.out.gz"
            fileOstderr.destinationDBlock = job.destinationDBlock
            fileOstderr.destinationSE = job.destinationSE
            fileOstderr.dataset = job.destinationDBlock
            fileOstderr.type = 'output'
            job.addFile(fileOstderr)

            fileOLog = FileSpec()
            fileOLog.lfn = "%(prodName)s-%(runNumber)s-%(runChunk)s-%(prodSlt)s-%(phastVer)s.job.log.tgz" % {
                'prodName': j.task.production,
                'runNumber': j.run_number,
                'runChunk': j.chunk_number,
                'prodSlt': j.task.prodslt,
                'phastVer': j.task.phastver
            }
            fileOLog.destinationDBlock = job.destinationDBlock
            fileOLog.destinationSE = job.destinationSE
            fileOLog.dataset = job.destinationDBlock
            fileOLog.type = 'log'
            job.addFile(fileOLog)

            # Production types additionally produce mDST and histogram files.
            if j.task.type == 'test production' or j.task.type == 'mass production' or j.task.type == 'technical production':
                fileOmDST = FileSpec()
                fileOmDST.lfn = "%s" % (TMPMDSTFILE)
                fileOmDST.destinationDBlock = job.destinationDBlock
                fileOmDST.destinationSE = job.destinationSE
                fileOmDST.dataset = job.destinationDBlock
                fileOmDST.type = 'output'
                job.addFile(fileOmDST)

                fileOTrafdic = FileSpec()
                fileOTrafdic.lfn = "%s" % (TMPHISTFILE)
                fileOTrafdic.destinationDBlock = job.destinationDBlock
                fileOTrafdic.destinationSE = job.destinationSE
                fileOTrafdic.dataset = job.destinationDBlock
                fileOTrafdic.type = 'output'
                job.addFile(fileOTrafdic)

            if j.task.type == 'test production' or j.task.type == 'mass production' or j.task.type == 'technical production' or j.task.type == 'DDD filtering':
                fileOtestevtdump = FileSpec()
                fileOtestevtdump.lfn = "testevtdump.raw"
                fileOtestevtdump.destinationDBlock = job.destinationDBlock
                fileOtestevtdump.destinationSE = job.destinationSE
                fileOtestevtdump.dataset = job.destinationDBlock
                fileOtestevtdump.type = 'output'
                job.addFile(fileOtestevtdump)

            s, o = Client.submitJobs([job], srvID=aSrvID)
            logger.info(s)
            for x in o:
                logger.info("PandaID=%s" % x[0])
                if x[0] != 0 and x[0] != 'NULL':
                    j_update = Job.objects.get(id=j.id)
                    j_update.panda_id = x[0]
                    j_update.status = 'sent'
                    j_update.attempt = j_update.attempt + 1
                    j_update.date_updated = timezone.now()
                    try:
                        j_update.save()
                        logger.info('Job %s with PandaID %s updated at %s' % (j.id, x[0], timezone.now()))
                        if j_update.task.status == 'send':
                            logger.info(
                                'Going to update status of task %s from send to running' % j_update.task.name)
                            t_update = Task.objects.get(id=j_update.task.id)
                            t_update.status = 'running'
                            t_update.date_updated = timezone.now()
                            try:
                                t_update.save()
                                logger.info('Task %s updated' % t_update.name)
                            except IntegrityError as e:
                                logger.exception(
                                    'Unique together catched, was not saved')
                            except DatabaseError as e:
                                logger.exception(
                                    'Something went wrong while saving: %s' % e.message)
                    except IntegrityError as e:
                        logger.exception(
                            'Unique together catched, was not saved')
                    except DatabaseError as e:
                        logger.exception(
                            'Something went wrong while saving: %s' % e.message)
                else:
                    logger.info('Job %s was not added to PanDA' % j.id)
            i += 1
    logger.info('done')
datasetName = 'panda.destDB.%s' % commands.getoutput('uuidgen') destName = 'local' job = JobSpec() job.jobDefinitionID = int(time.time()) % 10000 job.jobName = "%s" % commands.getoutput('uuidgen') # MPI transform on Titan that will run actual job job.transformation = '/lustre/atlas/proj-shared/csc108/panitkin/alicetest1/m\ pi_wrapper_alice_ppbench.py' job.destinationDBlock = datasetName job.destinationSE = destName job.currentPriority = 1000 job.prodSourceLabel = 'panda' job.computingSite = site job.jobParameters = " " job.VO = 'alice' fileOL = FileSpec() fileOL.lfn = "%s.job.log.tgz" % job.jobName fileOL.destinationDBlock = job.destinationDBlock fileOL.destinationSE = job.destinationSE fileOL.dataset = job.destinationDBlock fileOL.type = 'log' job.addFile(fileOL) s, o = Client.submitJobs([job], srvID=aSrvID) print s for x in o: print "PandaID=%s" % x[0]
#job.transformation = 'http://pandawms.org/pandawms-jobcache/lsst-trf.sh' job.transformation = 'http://pandawms.org/pandawms-jobcache/lsst-trf-phosim332.sh' job.destinationDBlock = datasetName #job.destinationSE = destName job.destinationSE = 'local' job.currentPriority = 1000 #job.prodSourceLabel = 'ptest' #job.prodSourceLabel = 'panda' #job.prodSourceLabel = 'ptest' #job.prodSourceLabel = 'test' #job.prodSourceLabel = 'ptest' ### 2014-01-27 #job.prodSourceLabel = 'user' job.prodSourceLabel = 'panda' job.computingSite = site job.jobParameters = "" job.VO = "lsst" fileOL = FileSpec() fileOL.lfn = "%s.job.log.tgz" % job.jobName fileOL.destinationDBlock = job.destinationDBlock fileOL.destinationSE = job.destinationSE fileOL.dataset = job.destinationDBlock fileOL.type = 'log' job.addFile(fileOL) s, o = Client.submitJobs([job], srvID=aSrvID) print s for x in o: print "PandaID=%s" % x[0]
def main():
    """Send every job of the hard-coded test task 'TestTaskBW' to PanDA."""
    logger.info('Getting tasks with status send and running')
    # tasks_list = Task.objects.all().filter(Q(status='send') | Q(status='running'))
    tasks_list = Task.objects.all().filter(name='TestTaskBW')
    logger.info('Got list of %s tasks' % len(tasks_list))

    for task in tasks_list:
        logger.info('Getting jobs in status defined or failed for task %s' % task)
        total = Job.objects.all().filter(task=task).count()
        # max_send_amount is a module-level cap on jobs sent per task.
        if total > 50:
            batch = Job.objects.all().filter(
                task=task).order_by('id')[:max_send_amount]
        else:
            batch = Job.objects.all().filter(
                task=task).order_by('id')[:total]
        logger.info('Got list of %s jobs' % len(batch))

        sent = 0
        for j in batch:
            if sent >= max_send_amount:
                break
            logger.info('Going to send job %s of %s task' % (j.file, j.task.name))

            # PanDA will not move output data; the pilot places it
            # (based on schedconfig).
            datasetName = 'panda.destDB.%s' % commands.getoutput('uuidgen')
            destName = 'local'

            job = JobSpec()
            job.taskID = j.task.id
            job.jobDefinitionID = 0
            job.jobName = 'hello world'
            job.transformation = j.task.type  # payload (can be URL as well)
            job.destinationDBlock = datasetName
            job.destinationSE = destName
            job.currentPriority = 2000
            job.prodSourceLabel = 'test'
            job.computingSite = site
            job.attemptNr = 1
            job.maxAttempt = 5
            job.sourceSite = 'BW_COMPASS_MCORE'
            job.VO = 'local'
            # Logs and all files generated during execution are placed in the
            # log archive (except output files).
            job.jobParameters = 'python /u/sciteam/petrosya/panda/hello.py'

            log_spec = FileSpec()
            log_spec.lfn = "log.job.log.tgz"
            log_spec.destinationDBlock = job.destinationDBlock
            log_spec.destinationSE = job.destinationSE
            log_spec.dataset = job.destinationDBlock
            log_spec.type = 'log'
            job.addFile(log_spec)

            s, o = Client.submitJobs([job], srvID=aSrvID)
            logger.info(s)
            logger.info(o)
            sent += 1
    logger.info('done')
def send_merging_job(task, files_list, merge_chunk_number):
    """Submit one PanDA job that merges the per-chunk mDST files of a run.

    Builds the list of chunk files from ``files_list``, assembles a shell
    command that copies them in and runs phast in merge mode, submits the
    job, and records the PanDA ID on every merged Job record.

    Args:
        task: the production Task the chunks belong to.
        files_list: Job records of the chunks to merge (same run).
        merge_chunk_number: index of this merge batch within the run.
    """
    logger.info(
        'Going to send merging job for task %s run number %s and merge chunk number %s'
        % (task, files_list[0].run_number, merge_chunk_number))
    input_files = ''
    input_files_copy = ''
    # Accumulate the chunk file names and the shell snippet that stages each
    # of them into the working directory (cp on Blue Waters, xrdcp from EOS
    # elsewhere).
    for j in files_list:
        TMPMDSTFILE = 'mDST-%(runNumber)s-%(runChunk)s-%(prodSlt)s-%(phastVer)s.root' % {
            'runNumber': j.run_number,
            'runChunk': j.chunk_number,
            'prodSlt': j.task.prodslt,
            'phastVer': j.task.phastver
        }
        input_files += ' ' + TMPMDSTFILE
        if j.task.site == 'BW_COMPASS_MCORE':
            input_files_copy += ' cp /scratch/sciteam/criedl/projectdata/' + task.path + task.soft + '/mDST.chunks/' + TMPMDSTFILE + ' .;'
        else:
            input_files_copy += ' xrdcp -N -f root://eoscompass.cern.ch//eos/experiment/compass/' + task.path + task.soft + '/mDST.chunks/' + TMPMDSTFILE + ' .;'
    # NOTE(review): from here on 'j' is the last element of files_list
    # (loop-variable leak); all chunks share the same run, so run_number is
    # the same for each — confirm that assumption holds for callers.
    datasetName = '%(prodNameOnly)s.%(runNumber)s-%(prodSlt)s-%(phastVer)s-merging-mdst' % {
        'prodNameOnly': task.production,
        'runNumber': j.run_number,
        'prodSlt': task.prodslt,
        'phastVer': task.phastver
    }
    logger.info(datasetName)
    # PanDA will not try to move output data, data will be placed by pilot
    # (based on schedconfig)
    destName = 'local'
    MERGEDHISTFILE = '%(runNumber)s-%(prodSlt)s-%(phastVer)s.root' % {
        'runNumber': j.run_number,
        'prodSlt': task.prodslt,
        'phastVer': task.phastver
    }
    # Batches after the first get the merge-chunk index appended.
    if format(merge_chunk_number, '03d') != '000':
        MERGEDHISTFILE = MERGEDHISTFILE + '.' + format(merge_chunk_number, '03d')
    logger.info(MERGEDHISTFILE)
    MERGEDMDSTFILE = 'mDST-%(runNumber)s-%(prodSlt)s-%(phastVer)s.root' % {
        'runNumber': j.run_number,
        'prodSlt': task.prodslt,
        'phastVer': task.phastver
    }
    if format(merge_chunk_number, '03d') != '000':
        MERGEDMDSTFILE = MERGEDMDSTFILE + '.' + format(merge_chunk_number, '03d')
    logger.info(MERGEDMDSTFILE)
    TMPHISTFILE = 'merge-%(runNumber)s-ch%(mergeChunkNumber)s.root' % {
        'runNumber': j.run_number,
        'mergeChunkNumber': format(merge_chunk_number, '03d')
    }
    logger.info(TMPHISTFILE)
    PRODSOFT = task.soft
    ProdPathAndName = task.home + task.path + task.soft

    job = JobSpec()
    job.VO = 'vo.compass.cern.ch'
    job.taskID = task.id
    job.jobDefinitionID = 0
    job.jobName = '%(prodNameOnly)s-merge-mdst-%(runNumber)s-ch%(mergeChunkNumber)s' % {
        'prodNameOnly': task.production,
        'runNumber': j.run_number,
        'mergeChunkNumber': format(merge_chunk_number, '03d')
    }
    job.transformation = 'merging mdst'  # payload (can be URL as well)
    job.destinationDBlock = datasetName
    job.destinationSE = destName
    job.currentPriority = 5000
    job.prodSourceLabel = 'prod_test'
    job.computingSite = task.site
    job.attemptNr = j.attempt_merging_mdst + 1
    job.maxAttempt = j.task.max_attempts
    if j.status_merging_mdst == 'failed':
        job.parentID = j.panda_id_merging_mdst
    # Shell command run by the pilot: stage chunks in, merge with phast,
    # keep compressed stdout for upload.
    if j.task.site == 'BW_COMPASS_MCORE':
        job.jobParameters = 'ppwd=$(pwd);ppwd=$(pwd);export COMPASS_SW_PREFIX=/scratch/sciteam/criedl/projectdata/;export COMPASS_SW_PATH=%(prodPath)s;export COMPASS_PROD_NAME=%(prodName)s;export prodSlt=%(prodSlt)s;export MERGEDHISTFILE=%(MERGEDHISTFILE)s;export MERGEDMDSTFILE=%(MERGEDMDSTFILE)s;export TMPHISTFILE=%(TMPHISTFILE)s;export PRODSOFT=%(PRODSOFT)s;coralpath=%(ProdPathAndName)s/coral;cd -P $coralpath;export coralpathsetup=$coralpath"/setup.sh";source $coralpathsetup;cd $ppwd;%(input_files_copy)sexport PHAST_mDST_MAX_SIZE=6000000000;$CORAL/../phast/phast -m -o %(MERGEDMDSTFILE)s %(input_files)s;cp payload_stderr.txt payload_stderr.out;cp payload_stdout.txt payload_stdout.out;gzip payload_stdout.out;' % {
            'MERGEDHISTFILE': MERGEDHISTFILE,
            'MERGEDMDSTFILE': MERGEDMDSTFILE,
            'PRODSOFT': PRODSOFT,
            'input_files_copy': input_files_copy,
            'input_files': input_files,
            'ProdPathAndName': ProdPathAndName,
            'prodPath': task.path,
            'prodName': task.production,
            'prodSlt': task.prodslt,
            'TMPHISTFILE': TMPHISTFILE
        }
    else:
        job.jobParameters = 'export EOS_MGM_URL=root://eoscompass.cern.ch;ppwd=$(pwd);ppwd=$(pwd);export COMPASS_SW_PREFIX=/eos/experiment/compass/;export COMPASS_SW_PATH=%(prodPath)s;export COMPASS_PROD_NAME=%(prodName)s;export prodSlt=%(prodSlt)s;export MERGEDHISTFILE=%(MERGEDHISTFILE)s;export MERGEDMDSTFILE=%(MERGEDMDSTFILE)s;export TMPHISTFILE=%(TMPHISTFILE)s;export PRODSOFT=%(PRODSOFT)s;coralpath=%(ProdPathAndName)s/coral;cd -P $coralpath;export coralpathsetup=$coralpath"/setup.sh";source $coralpathsetup;cd $ppwd;%(input_files_copy)sexport PHAST_mDST_MAX_SIZE=6000000000;$CORAL/../phast/phast -m -o %(MERGEDMDSTFILE)s %(input_files)s;cp payload_stderr.txt payload_stderr.out;cp payload_stdout.txt payload_stdout.out;gzip payload_stdout.out;' % {
            'MERGEDHISTFILE': MERGEDHISTFILE,
            'MERGEDMDSTFILE': MERGEDMDSTFILE,
            'PRODSOFT': PRODSOFT,
            'input_files_copy': input_files_copy,
            'input_files': input_files,
            'ProdPathAndName': ProdPathAndName,
            'prodPath': task.path,
            'prodName': task.production,
            'prodSlt': task.prodslt,
            'TMPHISTFILE': TMPHISTFILE
        }

    # Attached files: job log, merged mDST, compressed stdout.
    fileOLog = FileSpec()
    fileOLog.lfn = "%s.job.log.tgz" % (job.jobName)
    fileOLog.destinationDBlock = job.destinationDBlock
    fileOLog.destinationSE = job.destinationSE
    fileOLog.dataset = job.destinationDBlock
    fileOLog.type = 'log'
    job.addFile(fileOLog)

    fileOmDST = FileSpec()
    fileOmDST.lfn = "%s" % (MERGEDMDSTFILE)
    fileOmDST.destinationDBlock = job.destinationDBlock
    fileOmDST.destinationSE = job.destinationSE
    fileOmDST.dataset = job.destinationDBlock
    fileOmDST.type = 'output'
    job.addFile(fileOmDST)

    fileOstdout = FileSpec()
    fileOstdout.lfn = "payload_stdout.out.gz"
    fileOstdout.destinationDBlock = job.destinationDBlock
    fileOstdout.destinationSE = job.destinationSE
    fileOstdout.dataset = job.destinationDBlock
    fileOstdout.type = 'output'
    job.addFile(fileOstdout)

    # fileOstderr = FileSpec()
    # fileOstderr.lfn = "payload_stderr.txt"
    # fileOstderr.destinationDBlock = job.destinationDBlock
    # fileOstderr.destinationSE = job.destinationSE
    # fileOstderr.dataset = job.destinationDBlock
    # fileOstderr.type = 'output'
    # job.addFile(fileOstderr)

    s, o = Client.submitJobs([job], srvID=aSrvID)
    logger.info(s)
    for x in o:
        logger.info("PandaID=%s" % x[0])
        if x[0] != 0 and x[0] != 'NULL':
            # Record the merge-job PanDA ID on every chunk that was merged.
            for j in files_list:
                j_update = Job.objects.get(id=j.id)
                j_update.panda_id_merging_mdst = x[0]
                j_update.status_merging_mdst = 'sent'
                j_update.attempt_merging_mdst = j_update.attempt_merging_mdst + 1
                j_update.chunk_number_merging_mdst = merge_chunk_number
                # NOTE(review): 'today' is not defined in this function —
                # presumably a module-level value; confirm it exists.
                j_update.date_updated = today
                try:
                    j_update.save()
                    logger.info('Job %s with PandaID %s updated' % (j.id, x[0]))
                except IntegrityError as e:
                    logger.exception('Unique together catched, was not saved')
                except DatabaseError as e:
                    logger.exception('Something went wrong while saving: %s' % e.message)
        else:
            logger.info('Job %s was not added to PanDA' % j.id)
def send_merging_job(task, files_list, merge_chunk_number):
    """Submit one PanDA job that concatenates per-chunk event-dump files.

    Builds a shell command that stages each chunk's evtdump file and
    ``cat``s them into a single merged dump, submits the job, and records
    the PanDA ID on every merged Job record.

    Args:
        task: the production Task the chunks belong to.
        files_list: Job records of the chunks to merge (same run).
        merge_chunk_number: index of this merge batch within the run.
    """
    logger.info(
        'Going to send merging job for task %s run number %s and merge chunk number %s'
        % (task, files_list[0].run_number, merge_chunk_number))
    input_files_copy = ''
    input_files_rm = ''  # NOTE(review): assigned but never used below
    # Accumulate the shell snippet that stages each chunk's dump file
    # ($dumpspath is exported by the job command below).
    for j in files_list:
        TMPDUMPFILE = 'evtdump%(prodSlt)s-%(runChunk)s-%(runNumber)s.raw' % {
            'runNumber': j.run_number,
            'runChunk': j.chunk_number,
            'prodSlt': j.task.prodslt
        }
        if j.task.site == 'BW_COMPASS_MCORE':
            input_files_copy += ' cp $dumpspath/' + TMPDUMPFILE + ' .;'
        else:
            input_files_copy += ' xrdcp -N -f $dumpspath/' + TMPDUMPFILE + ' .;'
    # NOTE(review): from here on 'j' is the last element of files_list
    # (loop-variable leak); all chunks are expected to share one run number.
    datasetName = '%(prodNameOnly)s.%(runNumber)s-%(prodSlt)s-%(phastVer)s-merging-dump' % {
        'prodNameOnly': task.production,
        'runNumber': j.run_number,
        'prodSlt': task.prodslt,
        'phastVer': task.phastver
    }
    logger.info(datasetName)
    # PanDA will not try to move output data, data will be placed by pilot
    # (based on schedconfig)
    destName = 'local'
    MERGEDDUMPFILE = 'evtdump%(prodSlt)s-%(runNumber)s.raw' % {
        'runNumber': j.run_number,
        'prodSlt': task.prodslt
    }
    # Batches after the first get the merge-chunk index appended.
    if format(merge_chunk_number, '03d') != '000':
        MERGEDDUMPFILE = MERGEDDUMPFILE + '.' + format(merge_chunk_number, '03d')
    logger.info(MERGEDDUMPFILE)
    PRODSOFT = task.soft
    ProdPathAndName = task.home + task.path + task.soft
    # Source location of the per-chunk dumps (local scratch on Blue Waters,
    # EOS via xrootd elsewhere).
    if j.task.site == 'BW_COMPASS_MCORE':
        dumpsPath = '/scratch/sciteam/criedl/projectdata/' + task.path + task.soft + '/evtdump/slot' + str(
            task.prodslt)
    else:
        dumpsPath = 'root://eoscompass.cern.ch//eos/experiment/compass/' + task.path + task.soft + '/evtdump/slot' + str(
            task.prodslt)

    job = JobSpec()
    job.VO = 'vo.compass.cern.ch'
    job.taskID = task.id
    job.jobDefinitionID = 0
    job.jobName = '%(prodNameOnly)s-merge-dump-%(runNumber)s-ch%(mergeChunkNumber)s' % {
        'prodNameOnly': task.production,
        'runNumber': j.run_number,
        'mergeChunkNumber': format(merge_chunk_number, '03d')
    }
    job.transformation = 'merging dump'  # payload (can be URL as well)
    job.destinationDBlock = datasetName
    job.destinationSE = destName
    job.currentPriority = 5000
    job.prodSourceLabel = 'prod_test'
    job.computingSite = task.site
    job.attemptNr = j.attempt_merging_evntdmp + 1
    job.maxAttempt = j.task.max_attempts
    if j.status_merging_evntdmp == 'failed':
        job.parentID = j.panda_id_merging_evntdmp
    # Shell command run by the pilot: stage dumps in, concatenate, clean up.
    if j.task.site == 'BW_COMPASS_MCORE':
        job.jobParameters = 'ppwd=$(pwd);ppwd=$(pwd);export COMPASS_SW_PREFIX=/scratch/sciteam/criedl/projectdata/;export COMPASS_SW_PATH=%(prodPath)s;export COMPASS_PROD_NAME=%(prodName)s;export prodSlt=%(prodSlt)s;export MERGEDDUMPFILE=%(MERGEDDUMPFILE)s;export dumpspath=%(dumpsPath)s;export PRODSOFT=%(PRODSOFT)s;%(input_files_copy)scat evtdump%(prodSlt)s-*-*.raw > %(MERGEDDUMPFILE)s;rm evtdump%(prodSlt)s-*-*.raw;' % {
            'MERGEDDUMPFILE': MERGEDDUMPFILE,
            'dumpsPath': dumpsPath,
            'PRODSOFT': PRODSOFT,
            'input_files_copy': input_files_copy,
            'ProdPathAndName': ProdPathAndName,
            'prodPath': task.path,
            'prodName': task.production,
            'prodSlt': task.prodslt
        }
    else:
        job.jobParameters = 'export EOS_MGM_URL=root://eoscompass.cern.ch;ppwd=$(pwd);ppwd=$(pwd);export COMPASS_SW_PREFIX=/eos/experiment/compass/;export COMPASS_SW_PATH=%(prodPath)s;export COMPASS_PROD_NAME=%(prodName)s;export prodSlt=%(prodSlt)s;export MERGEDDUMPFILE=%(MERGEDDUMPFILE)s;export dumpspath=%(dumpsPath)s;export PRODSOFT=%(PRODSOFT)s;%(input_files_copy)scat evtdump%(prodSlt)s-*-*.raw > %(MERGEDDUMPFILE)s;rm evtdump%(prodSlt)s-*-*.raw;' % {
            'MERGEDDUMPFILE': MERGEDDUMPFILE,
            'dumpsPath': dumpsPath,
            'PRODSOFT': PRODSOFT,
            'input_files_copy': input_files_copy,
            'ProdPathAndName': ProdPathAndName,
            'prodPath': task.path,
            'prodName': task.production,
            'prodSlt': task.prodslt
        }

    # Attached files: job log and the merged dump.
    fileOLog = FileSpec()
    fileOLog.lfn = "%s.job.log.tgz" % (job.jobName)
    fileOLog.destinationDBlock = job.destinationDBlock
    fileOLog.destinationSE = job.destinationSE
    fileOLog.dataset = job.destinationDBlock
    fileOLog.type = 'log'
    job.addFile(fileOLog)

    fileOdump = FileSpec()
    fileOdump.lfn = "%s" % (MERGEDDUMPFILE)
    fileOdump.destinationDBlock = job.destinationDBlock
    fileOdump.destinationSE = job.destinationSE
    fileOdump.dataset = job.destinationDBlock
    fileOdump.type = 'output'
    job.addFile(fileOdump)

    # fileOstdout = FileSpec()
    # fileOstdout.lfn = "payload_stdout.txt"
    # fileOstdout.destinationDBlock = job.destinationDBlock
    # fileOstdout.destinationSE = job.destinationSE
    # fileOstdout.dataset = job.destinationDBlock
    # fileOstdout.type = 'output'
    # job.addFile(fileOstdout)

    # fileOstderr = FileSpec()
    # fileOstderr.lfn = "payload_stderr.txt"
    # fileOstderr.destinationDBlock = job.destinationDBlock
    # fileOstderr.destinationSE = job.destinationSE
    # fileOstderr.dataset = job.destinationDBlock
    # fileOstderr.type = 'output'
    # job.addFile(fileOstderr)

    s, o = Client.submitJobs([job], srvID=aSrvID)
    logger.info(s)
    for x in o:
        logger.info("PandaID=%s" % x[0])
        if x[0] != 0 and x[0] != 'NULL':
            # Record the merge-job PanDA ID on every chunk that was merged.
            for j in files_list:
                j_update = Job.objects.get(id=j.id)
                j_update.panda_id_merging_evntdmp = x[0]
                j_update.status_merging_evntdmp = 'sent'
                j_update.attempt_merging_evntdmp = j_update.attempt_merging_evntdmp + 1
                j_update.chunk_number_merging_evntdmp = merge_chunk_number
                # NOTE(review): 'today' is not defined in this function —
                # presumably a module-level value; confirm it exists.
                j_update.date_updated = today
                try:
                    j_update.save()
                    logger.info('Job %s with PandaID %s updated' % (j.id, x[0]))
                except IntegrityError as e:
                    logger.exception('Unique together catched, was not saved')
                except DatabaseError as e:
                    logger.exception('Something went wrong while saving: %s' % e.message)
        else:
            logger.info('Job %s was not added to PanDA' % j.id)
datasetName = 'panda.destDB.%s' % commands.getoutput('uuidgen') destName = 'local' job = JobSpec() job.jobDefinitionID = int(time.time()) % 10000 job.jobName = "%s" % commands.getoutput('uuidgen') # MPI transform on Titan that will run actual job job.transformation = '/lustre/atlas/proj-shared/csc108/transforms/mpi_wrapper_alice_A01alicegeo.py' job.destinationDBlock = datasetName job.destinationSE = destName job.currentPriority = 1000 job.prodSourceLabel = 'panda' job.computingSite = site job.jobParameters = " " job.VO = 'alice' fileOL = FileSpec() fileOL.lfn = "%s.job.log.tgz" % job.jobName fileOL.destinationDBlock = job.destinationDBlock fileOL.destinationSE = job.destinationSE fileOL.dataset = job.destinationDBlock fileOL.type = 'log' job.addFile(fileOL) s,o = Client.submitJobs([job],srvID=aSrvID) print s for x in o: print "PandaID=%s" % x[0]