def main():
    # Assumed to be provided at module level (not shown in this listing): logger,
    # aSrvID, Client, JobSpec, FileSpec, the Task/Job Django models, Q, timezone,
    # IntegrityError, DatabaseError, commands, os and random.
    logger.info('Getting tasks with status send and running')
    tasks_list = Task.objects.all().filter(Q(status='send') | Q(status='running'))
    #tasks_list = Task.objects.all().filter(name='dvcs2017align7_mu-')
    logger.info('Got list of %s tasks' % len(tasks_list))

    cdbServerArr = ['compassvm23.cern.ch', 'compassvm24.cern.ch']
    cdbServer = cdbServerArr[0]

    for t in tasks_list:
        max_send_amount = 1000

        logger.info('Getting jobs in status staged or failed for task %s' % t)
        jobs_list_count = Job.objects.all().filter(task=t).filter(
            attempt__lt=t.max_attempts).filter(
                Q(status='staged') | Q(status='failed')).count()
        if jobs_list_count > 50:
            jobs_list = Job.objects.all().filter(task=t).filter(
                attempt__lt=t.max_attempts).filter(
                    Q(status='staged') | Q(status='failed')).order_by(
                        '-number_of_events')[:max_send_amount]
        else:
            jobs_list = Job.objects.all().filter(task=t).filter(
                attempt__lt=t.max_attempts).filter(
                    Q(status='staged') | Q(status='failed')).order_by(
                        '-number_of_events')[:jobs_list_count]
        logger.info('Got list of %s jobs' % len(jobs_list))
        # jobs_list = Job.objects.all().filter(task=t).filter(file='/castor/cern.ch/compass/data/2017/raw/W04/cdr12116-278485.raw')

        i = 0
        for j in jobs_list:
            if j.attempt >= j.task.max_attempts:
                logger.info('Number of retry attempts has been reached for job %s of task %s' % (j.file, j.task.name))
                continue
            if i >= max_send_amount:  # was 'i >', which sent one job more than the cap
                break
            logger.info('Job %s of %s' % (i, max_send_amount))
            logger.info('Going to send job %s of %s task' % (j.file, j.task.name))

            umark = commands.getoutput('uuidgen')
            datasetName = 'panda.destDB.%s' % umark
            destName = 'local'  # PanDA will not try to move output data, data will be placed by pilot (based on schedconfig)

            # Names of the files the payload is expected to produce.
            TMPRAWFILE = j.file[j.file.rfind('/') + 1:]
            logger.info(TMPRAWFILE)
            TMPMDSTFILE = 'mDST-%(runNumber)s-%(runChunk)s-%(prodSlt)s-%(phastVer)s.root' % {
                'input_file': j.file,
                'runNumber': j.run_number,
                'runChunk': j.chunk_number,
                'prodSlt': j.task.prodslt,
                'phastVer': j.task.phastver
            }
            logger.info(TMPMDSTFILE)
            TMPHISTFILE = '%(runNumber)s-%(runChunk)s-%(prodSlt)s.root' % {
                'runNumber': j.run_number,
                'runChunk': j.chunk_number,
                'prodSlt': j.task.prodslt
            }
            logger.info(TMPHISTFILE)
            TMPRICHFILE = 'gfile_%(runNumber)s-%(runChunk)s.gfile' % {
                'runNumber': j.run_number,
                'runChunk': j.chunk_number
            }
            logger.info(TMPRICHFILE)
            EVTDUMPFILE = 'evtdump%(prodSlt)s-%(runChunk)s-%(runNumber)s.raw' % {
                'prodSlt': j.task.prodslt,
                'runNumber': j.run_number,
                'runChunk': j.chunk_number
            }
            logger.info(EVTDUMPFILE)
            STDOUTFILE = '%(prodNameOnly)s.%(runNumber)s-%(runChunk)s-%(prodSlt)s.stdout' % {
                'prodNameOnly': j.task.production,
                'runNumber': j.run_number,
                'runChunk': j.chunk_number,
                'prodSlt': j.task.prodslt
            }
            logger.info(STDOUTFILE)
            STDERRFILE = '%(prodNameOnly)s.%(runNumber)s-%(runChunk)s-%(prodSlt)s.stderr' % {
                'prodNameOnly': j.task.production,
                'runNumber': j.run_number,
                'runChunk': j.chunk_number,
                'prodSlt': j.task.prodslt
            }
            logger.info(STDERRFILE)
            PRODSOFT = j.task.soft
            logger.info(PRODSOFT)
            ProdPathAndName = j.task.home + j.task.path + j.task.soft

            job = JobSpec()
            job.VO = 'vo.compass.cern.ch'
            job.taskID = j.task.id
            job.jobDefinitionID = 0
            job.jobName = '%(prodName)s-%(fileYear)s--%(runNumber)s-%(runChunk)s-%(prodSlt)s-%(phastVer)s' % {
                'prodName': j.task.production,
                'fileYear': j.task.year,
                'runNumber': j.run_number,
                'runChunk': j.chunk_number,
                'prodSlt': j.task.prodslt,
                'phastVer': j.task.phastver
            }
            job.transformation = j.task.type  # payload (can be URL as well)
            job.destinationDBlock = datasetName
            job.destinationSE = destName
            job.currentPriority = 2000
            if j.task.type == 'DDD filtering':
                job.currentPriority = 1000
            job.prodSourceLabel = 'prod_test'
            job.computingSite = j.task.site
            job.attemptNr = j.attempt + 1
            job.maxAttempt = j.task.max_attempts
            if j.status == 'failed':
                job.parentID = j.panda_id
            head, tail = os.path.split(j.file)
            cdbServer = cdbServerArr[random.randrange(len(cdbServerArr))]

            # logs, and all files generated during execution will be placed in log (except output file)
            #job.jobParameters='source /afs/cern.ch/project/eos/installation/compass/etc/setup.sh;export EOS_MGM_URL=root://eoscompass.cern.ch;export PATH=/afs/cern.ch/project/eos/installation/compass/bin:$PATH;ppwd=$(pwd);echo $ppwd;export TMPMDSTFILE=%(TMPMDSTFILE)s;export TMPHISTFILE=%(TMPHISTFILE)s;export TMPRICHFILE=%(TMPRICHFILE)s;coralpath=%(ProdPathAndName)s/coral;echo $coralpath;cd -P $coralpath;export coralpathsetup=$coralpath"/setup.sh";echo $coralpathsetup;source $coralpathsetup;cd $ppwd;$CORAL/../phast/coral/coral.exe %(ProdPathAndName)s/template.opt;xrdcp -np $ppwd/%(TMPMDSTFILE)s xroot://eoscompass.cern.ch//eos/compass/%(prodName)s/mDST/%(TMPMDSTFILE)s;xrdcp -np $ppwd/%(TMPHISTFILE)s xroot://eoscompass.cern.ch//eos/compass/%(prodName)s/histos/%(TMPHISTFILE)s;metadataxml=$(ls metadata-*);echo $metadataxml;cp $metadataxml $metadataxml.PAYLOAD;' % {'TMPMDSTFILE': TMPMDSTFILE, 'TMPHISTFILE': TMPHISTFILE, 'TMPRICHFILE': TMPRICHFILE, 'input_file': input_file, 'ProdPathAndName': ProdPathAndName, 'prodName': prodName}

            if j.task.type in ('test production', 'mass production', 'technical production'):
                if j.task.site == 'BW_COMPASS_MCORE':
                    job.jobParameters = 'ppwd=$(pwd);export COMPASS_SW_PREFIX=/scratch/sciteam/criedl/projectdata/;export COMPASS_SW_PATH=%(prodPath)s;export COMPASS_PROD_NAME=%(prodName)s;export TMPRAWFILE=%(TMPRAWFILE)s;export TMPMDSTFILE=%(TMPMDSTFILE)s;export TMPHISTFILE=%(TMPHISTFILE)s;export TMPRICHFILE=%(TMPRICHFILE)s;export prodSlt=%(prodSlt)s;export EVTDUMPFILE=%(EVTDUMPFILE)s;export PRODSOFT=%(PRODSOFT)s;cp %(input_file)s .;coralpath=%(ProdPathAndName)s/coral;cd -P $coralpath;export coralpathsetup=$coralpath"/setup.sh";source $coralpathsetup;cd $ppwd;$CORAL/../phast/coral/coral.exe %(ProdPathAndName)s/%(template)s;if [ ! -s testevtdump.raw ]; then echo "PanDA message: the file is empty">testevtdump.raw; fi;cp payload_stderr.txt payload_stderr.out;cp payload_stdout.txt payload_stdout.out;gzip payload_stderr.out;gzip payload_stdout.out;rm %(tail)s' % {
                        'TMPRAWFILE': TMPRAWFILE,
                        'TMPMDSTFILE': TMPMDSTFILE,
                        'TMPHISTFILE': TMPHISTFILE,
                        'TMPRICHFILE': TMPRICHFILE,
                        'PRODSOFT': PRODSOFT,
                        'input_file': j.file,
                        'ProdPathAndName': ProdPathAndName,
                        'prodPath': j.task.path,
                        'prodName': j.task.production,
                        'template': j.task.template,
                        'tail': tail,
                        'prodSlt': j.task.prodslt,
                        'EVTDUMPFILE': EVTDUMPFILE,
                        'STDOUTFILE': STDOUTFILE,
                        'STDERRFILE': STDERRFILE
                    }
                else:
                    job.jobParameters = 'export EOS_MGM_URL=root://eoscompass.cern.ch;ppwd=$(pwd);export COMPASS_SW_PREFIX=/eos/experiment/compass/;export COMPASS_SW_PATH=%(prodPath)s;export COMPASS_PROD_NAME=%(prodName)s;export TMPRAWFILE=%(TMPRAWFILE)s;export TMPMDSTFILE=%(TMPMDSTFILE)s;export TMPHISTFILE=%(TMPHISTFILE)s;export TMPRICHFILE=%(TMPRICHFILE)s;export prodSlt=%(prodSlt)s;export EVTDUMPFILE=%(EVTDUMPFILE)s;export PRODSOFT=%(PRODSOFT)s;xrdcp -N -f root://castorpublic.cern.ch/%(input_file)s\?svcClass=compasscdr .;coralpath=%(ProdPathAndName)s/coral;cd -P $coralpath;export coralpathsetup=$coralpath"/setup.sh";source $coralpathsetup;cd $ppwd;export CDBSERVER=%(cdbServer)s;$CORAL/../phast/coral/coral.exe %(ProdPathAndName)s/%(template)s;if [ ! -s testevtdump.raw ]; then echo "PanDA message: the file is empty">testevtdump.raw; fi;cp payload_stderr.txt payload_stderr.out;cp payload_stdout.txt payload_stdout.out;gzip payload_stderr.out;gzip payload_stdout.out;rm %(tail)s' % {
                        'TMPRAWFILE': TMPRAWFILE,
                        'TMPMDSTFILE': TMPMDSTFILE,
                        'TMPHISTFILE': TMPHISTFILE,
                        'TMPRICHFILE': TMPRICHFILE,
                        'PRODSOFT': PRODSOFT,
                        'input_file': j.file,
                        'ProdPathAndName': ProdPathAndName,
                        'prodPath': j.task.path,
                        'prodName': j.task.production,
                        'template': j.task.template,
                        'tail': tail,
                        'prodSlt': j.task.prodslt,
                        'EVTDUMPFILE': EVTDUMPFILE,
                        'STDOUTFILE': STDOUTFILE,
                        'STDERRFILE': STDERRFILE,
                        'cdbServer': cdbServer
                    }
            if j.task.type == 'DDD filtering':
                job.jobParameters = 'export EOS_MGM_URL=root://eoscompass.cern.ch;ppwd=$(pwd);export COMPASS_SW_PREFIX=/eos/experiment/compass/;export COMPASS_SW_PATH=%(prodPath)s;export COMPASS_PROD_NAME=%(prodName)s;export TMPRAWFILE=%(TMPRAWFILE)s;export TMPMDSTFILE=%(TMPMDSTFILE)s;export TMPHISTFILE=%(TMPHISTFILE)s;export TMPRICHFILE=%(TMPRICHFILE)s;export prodSlt=%(prodSlt)s;export EVTDUMPFILE=%(EVTDUMPFILE)s;export PRODSOFT=%(PRODSOFT)s;xrdcp -N -f root://castorpublic.cern.ch/%(input_file)s\?svcClass=compasscdr .;coralpath=%(ProdPathAndName)s/coral;cd -P $coralpath;export coralpathsetup=$coralpath"/setup.sh";source $coralpathsetup;cd $ppwd;$CORAL/src/DaqDataDecoding/examples/how-to/ddd --filter-CAL --out=testevtdump.raw %(TMPRAWFILE)s;if [ ! -s testevtdump.raw ]; then echo "PanDA message: the file is empty">testevtdump.raw; fi;cp payload_stderr.txt payload_stderr.out;cp payload_stdout.txt payload_stdout.out;gzip payload_stderr.out;gzip payload_stdout.out;rm %(tail)s' % {
                    'TMPRAWFILE': TMPRAWFILE,
                    'TMPMDSTFILE': TMPMDSTFILE,
                    'TMPHISTFILE': TMPHISTFILE,
                    'TMPRICHFILE': TMPRICHFILE,
                    'PRODSOFT': PRODSOFT,
                    'input_file': j.file,
                    'ProdPathAndName': ProdPathAndName,
                    'prodPath': j.task.path,
                    'prodName': j.task.production,
                    'template': j.task.template,
                    'tail': tail,
                    'prodSlt': j.task.prodslt,
                    'EVTDUMPFILE': EVTDUMPFILE,
                    'STDOUTFILE': STDOUTFILE,
                    'STDERRFILE': STDERRFILE
                }

            # fileIRaw = FileSpec()
            # fileIRaw.lfn = "%s" % (input_file)
            # fileIRaw.destinationDBlock = job.destinationDBlock
            # fileIRaw.destinationSE = job.destinationSE
            # fileIRaw.dataset = job.destinationDBlock
            # fileIRaw.type = 'input'
            # job.addFile(fileIRaw)

            fileOstdout = FileSpec()
            fileOstdout.lfn = "payload_stdout.out.gz"
            fileOstdout.destinationDBlock = job.destinationDBlock
            fileOstdout.destinationSE = job.destinationSE
            fileOstdout.dataset = job.destinationDBlock
            fileOstdout.type = 'output'
            job.addFile(fileOstdout)

            fileOstderr = FileSpec()
            fileOstderr.lfn = "payload_stderr.out.gz"
            fileOstderr.destinationDBlock = job.destinationDBlock
            fileOstderr.destinationSE = job.destinationSE
            fileOstderr.dataset = job.destinationDBlock
            fileOstderr.type = 'output'
            job.addFile(fileOstderr)

            fileOLog = FileSpec()
            fileOLog.lfn = "%(prodName)s-%(runNumber)s-%(runChunk)s-%(prodSlt)s-%(phastVer)s.job.log.tgz" % {
                'prodName': j.task.production,
                'runNumber': j.run_number,
                'runChunk': j.chunk_number,
                'prodSlt': j.task.prodslt,
                'phastVer': j.task.phastver
            }
            fileOLog.destinationDBlock = job.destinationDBlock
            fileOLog.destinationSE = job.destinationSE
            fileOLog.dataset = job.destinationDBlock
            fileOLog.type = 'log'
            job.addFile(fileOLog)

            if j.task.type in ('test production', 'mass production', 'technical production'):
                fileOmDST = FileSpec()
                fileOmDST.lfn = "%s" % (TMPMDSTFILE)
                fileOmDST.destinationDBlock = job.destinationDBlock
                fileOmDST.destinationSE = job.destinationSE
                fileOmDST.dataset = job.destinationDBlock
                fileOmDST.type = 'output'
                job.addFile(fileOmDST)

                fileOTrafdic = FileSpec()
                fileOTrafdic.lfn = "%s" % (TMPHISTFILE)
                fileOTrafdic.destinationDBlock = job.destinationDBlock
                fileOTrafdic.destinationSE = job.destinationSE
                fileOTrafdic.dataset = job.destinationDBlock
                fileOTrafdic.type = 'output'
                job.addFile(fileOTrafdic)

            if j.task.type in ('test production', 'mass production', 'technical production', 'DDD filtering'):
                fileOtestevtdump = FileSpec()
                fileOtestevtdump.lfn = "testevtdump.raw"
                fileOtestevtdump.destinationDBlock = job.destinationDBlock
                fileOtestevtdump.destinationSE = job.destinationSE
                fileOtestevtdump.dataset = job.destinationDBlock
                fileOtestevtdump.type = 'output'
                job.addFile(fileOtestevtdump)

            s, o = Client.submitJobs([job], srvID=aSrvID)
            logger.info(s)
            for x in o:
                logger.info("PandaID=%s" % x[0])
                if x[0] != 0 and x[0] != 'NULL':
                    j_update = Job.objects.get(id=j.id)
                    j_update.panda_id = x[0]
                    j_update.status = 'sent'
                    j_update.attempt = j_update.attempt + 1
                    j_update.date_updated = timezone.now()
                    try:
                        j_update.save()
                        logger.info('Job %s with PandaID %s updated at %s' % (j.id, x[0], timezone.now()))
                        if j_update.task.status == 'send':
                            logger.info('Going to update status of task %s from send to running' % j_update.task.name)
                            t_update = Task.objects.get(id=j_update.task.id)
                            t_update.status = 'running'
                            t_update.date_updated = timezone.now()
                            try:
                                t_update.save()
                                logger.info('Task %s updated' % t_update.name)
                            except IntegrityError as e:
                                logger.exception('Unique together constraint violated, task was not saved')
                            except DatabaseError as e:
                                logger.exception('Something went wrong while saving: %s' % e.message)
                    except IntegrityError as e:
                        logger.exception('Unique together constraint violated, job was not saved')
                    except DatabaseError as e:
                        logger.exception('Something went wrong while saving: %s' % e.message)
                else:
                    logger.info('Job %s was not added to PanDA' % j.id)
            i += 1

    logger.info('done')
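# The send loop above enforces two limits: a per-job retry cap
# (attempt < max_attempts) and a per-task batch cap (max_send_amount).
# Below is a minimal, self-contained sketch of that selection logic, with
# plain dicts standing in for the Django Job rows; the helper name and the
# dict keys are illustrative only, not part of the production code.

def select_jobs_to_send(jobs, max_attempts, max_send_amount=1000):
    """Return at most max_send_amount jobs that still have retries left."""
    eligible = [j for j in jobs
                if j['attempt'] < max_attempts
                and j['status'] in ('staged', 'failed')]
    # Biggest chunks first, mirroring order_by('-number_of_events') above.
    eligible.sort(key=lambda j: j['number_of_events'], reverse=True)
    return eligible[:max_send_amount]

# Example: a job that has exhausted its attempts is filtered out.
_jobs = [{'attempt': 0, 'status': 'staged', 'number_of_events': 200},
         {'attempt': 3, 'status': 'failed', 'number_of_events': 900}]
assert len(select_jobs_to_send(_jobs, max_attempts=3)) == 1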
def createJobSpec(self, task, outdataset, job, jobset, jobdef, site, jobname, lfnhanger, allsites, jobid):
    """Create a spec for one job

    :arg TaskWorker.DataObject.Task task: the task to work on
    :arg str outdataset: the output dataset name where all the produced files will be placed
    :arg WMCore.DataStructs.Job job: the abstract job
    :arg int jobset: the PanDA jobset corresponding to the current task
    :arg int jobdef: the PanDA jobdef where to append the current jobs --- not used
    :arg str site: the brokered site where to run the jobs
    :arg str jobname: the job name
    :arg str lfnhanger: the random string to be added in the output file name
    :arg list str allsites: all possible sites where the job can potentially run
    :arg int jobid: incremental job number
    :return: the spec object."""
    pandajob = JobSpec()
    ## always setting a job definition ID
    pandajob.jobDefinitionID = jobdef if jobdef else -1
    ## always setting a job set ID
    pandajob.jobsetID = jobset if jobset else -1
    pandajob.jobName = jobname
    pandajob.prodUserID = task['tm_user_dn']
    pandajob.destinationDBlock = outdataset
    pandajob.prodDBlock = task['tm_input_dataset']
    pandajob.prodSourceLabel = 'user'
    pandajob.computingSite = site
    pandajob.cloud = getSite(pandajob.computingSite)
    pandajob.destinationSE = 'local'
    pandajob.transformation = task['tm_transformation']
    ## need to initialize this
    pandajob.metadata = ''

    def outFileSpec(of=None, log=False):
        """Local routine to create a FileSpec for a job output/log file

        :arg str of: output file base name
        :return: FileSpec object for the output file."""
        outfile = FileSpec()
        if log:
            outfile.lfn = "job.log_%d_%s.tgz" % (jobid, lfnhanger)
            outfile.type = 'log'
        else:
            outfile.lfn = '%s_%d_%s%s' % (os.path.splitext(of)[0], jobid, lfnhanger, os.path.splitext(of)[1])
            outfile.type = 'output'
        outfile.destinationDBlock = pandajob.destinationDBlock
        outfile.destinationSE = task['tm_asyncdest']
        outfile.dataset = pandajob.destinationDBlock
        return outfile

    alloutfiles = []
    outjobpar = {}
    outfilestring = ''
    # the three output-file collections are handled identically
    for outputfile in task['tm_outfiles'] + task['tm_tfile_outfiles'] + task['tm_edm_outfiles']:
        outfilestring += '%s,' % outputfile
        filespec = outFileSpec(outputfile)
        alloutfiles.append(filespec)
        #pandajob.addFile(filespec)
        outjobpar[outputfile] = filespec.lfn
    outfilestring = outfilestring[:-1]  # strip the trailing comma (note: not used further)

    infiles = []
    for inputfile in job['input_files']:
        infiles.append(inputfile['lfn'])

    pandajob.jobParameters = '-a %s ' % task['tm_user_sandbox']
    pandajob.jobParameters += '--sourceURL %s ' % task['tm_cache_url']
    pandajob.jobParameters += '--jobNumber=%s ' % jobid
    pandajob.jobParameters += '--cmsswVersion=%s ' % task['tm_job_sw']
    pandajob.jobParameters += '--scramArch=%s ' % task['tm_job_arch']
    pandajob.jobParameters += '--inputFile=\'%s\' ' % json.dumps(infiles)
    self.jobParametersSetting(pandajob, job, self.jobtypeMapper[task['tm_job_type']])
    pandajob.jobParameters += '-o "%s" ' % str(outjobpar)
    pandajob.jobParameters += '--dbs_url=%s ' % task['tm_dbs_url']
    pandajob.jobParameters += '--publish_dbs_url=%s ' % task['tm_publish_dbs_url']
    pandajob.jobParameters += '--publishFiles=%s ' % ('True' if task['tm_publication'] == 'T' else 'False')
    pandajob.jobParameters += '--saveLogs=%s ' % ('True' if task['tm_save_logs'] == 'T' else 'False')
    pandajob.jobParameters += '--availableSites=\'%s\' ' % json.dumps(allsites)
    pandajob.jobParameters += '--group=%s ' % (task['tm_user_group'] if task['tm_user_group'] else '')
    pandajob.jobParameters += '--role=%s ' % (task['tm_user_role'] if task['tm_user_role'] else '')

    self.logger.info(type(task['tm_user_infiles']))
    self.logger.info(task['tm_user_infiles'])
    if task['tm_user_infiles']:
        addinfilestring = ''
        for addinfile in task['tm_user_infiles']:
            addinfilestring += '%s,' % addinfile
        pandajob.jobParameters += '--userFiles=%s ' % (addinfilestring[:-1])

    pandajob.jobName = '%s' % task['tm_taskname']  # needed by ASO and Dashboard

    if 'panda_oldjobid' in job and job['panda_oldjobid']:
        pandajob.parentID = job['panda_oldjobid']

    pandajob.addFile(outFileSpec(log=True))
    for filetoadd in alloutfiles:
        pandajob.addFile(filetoadd)

    return pandajob
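# outFileSpec above embeds the job number and the random lfnhanger into each
# output LFN. A standalone sketch of that naming rule (os.path.splitext splits
# off the extension, the suffixes go in between); the helper name and sample
# values are illustrative only:

import os

def output_lfn(basename, jobid, lfnhanger):
    root, ext = os.path.splitext(basename)
    return '%s_%d_%s%s' % (root, jobid, lfnhanger, ext)

assert output_lfn('histo.root', 7, 'abc123') == 'histo_7_abc123.root'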
def main():
    # NOTE: this test variant relies on module-level settings not shown in this
    # listing: max_send_amount (1000 in the production sender above) and site
    # (the PanDA computing site to submit to).
    logger.info('Getting tasks with status send and running')
    # tasks_list = Task.objects.all().filter(Q(status='send') | Q(status='running'))
    tasks_list = Task.objects.all().filter(name='dvcs2016P09t2r13v1_mu+')
    logger.info('Got list of %s tasks' % len(tasks_list))

    for t in tasks_list:
        logger.info('Getting jobs in status defined or failed for task %s' % t)
        jobs_list_count = Job.objects.all().filter(task=t).count()
        if jobs_list_count > 50:
            jobs_list = Job.objects.all().filter(task=t).order_by('id')[:max_send_amount]
        else:
            jobs_list = Job.objects.all().filter(task=t).order_by('id')[:jobs_list_count]
        logger.info('Got list of %s jobs' % len(jobs_list))

        i = 0
        for j in jobs_list:
            if i >= max_send_amount:
                break
            logger.info('Going to send job %s of %s task' % (j.file, j.task.name))

            umark = commands.getoutput('uuidgen')
            datasetName = 'panda.destDB.%s' % umark
            destName = 'COMPASSPRODDISK'  # PanDA will not try to move output data, data will be placed by pilot (based on schedconfig)

            TMPRAWFILE = j.file[j.file.rfind('/') + 1:]
            logger.info(TMPRAWFILE)
            TMPMDSTFILE = 'mDST-%(runNumber)s-%(runChunk)s-%(prodSlt)s-%(phastVer)s.root' % {
                'input_file': j.file,
                'runNumber': j.run_number,
                'runChunk': j.chunk_number,
                'prodSlt': j.task.prodslt,
                'phastVer': j.task.phastver
            }
            logger.info(TMPMDSTFILE)
            TMPHISTFILE = '%(runNumber)s-%(runChunk)s-%(prodSlt)s.root' % {
                'runNumber': j.run_number,
                'runChunk': j.chunk_number,
                'prodSlt': j.task.prodslt
            }
            logger.info(TMPHISTFILE)
            TMPRICHFILE = 'gfile_%(runNumber)s-%(runChunk)s.gfile' % {
                'runNumber': j.run_number,
                'runChunk': j.chunk_number
            }
            logger.info(TMPRICHFILE)
            EVTDUMPFILE = 'evtdump%(prodSlt)s-%(runChunk)s-%(runNumber)s.raw' % {
                'prodSlt': j.task.prodslt,
                'runNumber': j.run_number,
                'runChunk': j.chunk_number
            }
            logger.info(EVTDUMPFILE)
            STDOUTFILE = '%(prodNameOnly)s.%(runNumber)s-%(runChunk)s-%(prodSlt)s.stdout' % {
                'prodNameOnly': j.task.soft,
                'runNumber': j.run_number,
                'runChunk': j.chunk_number,
                'prodSlt': j.task.prodslt
            }
            logger.info(STDOUTFILE)
            STDERRFILE = '%(prodNameOnly)s.%(runNumber)s-%(runChunk)s-%(prodSlt)s.stderr' % {
                'prodNameOnly': j.task.soft,
                'runNumber': j.run_number,
                'runChunk': j.chunk_number,
                'prodSlt': j.task.prodslt
            }
            logger.info(STDERRFILE)

            try:
                file_year = j.file.split('/')[5]
                logger.info(file_year)
            except IndexError:  # was a bare except; paths look like /castor/cern.ch/compass/data/<year>/...
                logger.error('Error while splitting file to get year')
                sys.exit(1)

            ProdPathAndName = j.task.home + j.task.path + j.task.soft

            job = JobSpec()
            job.taskID = j.task.id
            job.jobDefinitionID = 0
            job.jobName = '%(prodName)s-%(fileYear)s--%(runNumber)s-%(runChunk)s-%(prodSlt)s-%(phastVer)s' % {
                'prodName': j.task.soft,
                'fileYear': file_year,
                'runNumber': j.run_number,
                'runChunk': j.chunk_number,
                'prodSlt': j.task.prodslt,
                'phastVer': j.task.phastver
            }
            job.transformation = j.task.type  # payload (can be URL as well)
            job.destinationDBlock = datasetName
            job.destinationSE = destName
            job.currentPriority = 2000
            job.prodSourceLabel = 'prod_test'
            job.computingSite = site
            job.attemptNr = j.attempt + 1
            job.maxAttempt = j.task.max_attempts
            if j.status == 'failed':
                job.parentID = j.panda_id
            head, tail = os.path.split(j.file)
            # job.transferType = 'direct'
            job.sourceSite = 'CERN_COMPASS_PROD'

            # logs, and all files generated during execution will be placed in log (except output file)
            #job.jobParameters='source /afs/cern.ch/project/eos/installation/compass/etc/setup.sh;export EOS_MGM_URL=root://eoscompass.cern.ch;export PATH=/afs/cern.ch/project/eos/installation/compass/bin:$PATH;ppwd=$(pwd);echo $ppwd;export TMPMDSTFILE=%(TMPMDSTFILE)s;export TMPHISTFILE=%(TMPHISTFILE)s;export TMPRICHFILE=%(TMPRICHFILE)s;coralpath=%(ProdPathAndName)s/coral;echo $coralpath;cd -P $coralpath;export coralpathsetup=$coralpath"/setup.sh";echo $coralpathsetup;source $coralpathsetup;cd $ppwd;$CORAL/../phast/coral/coral.exe %(ProdPathAndName)s/template.opt;xrdcp -np $ppwd/%(TMPMDSTFILE)s xroot://eoscompass.cern.ch//eos/compass/%(prodName)s/mDST/%(TMPMDSTFILE)s;xrdcp -np $ppwd/%(TMPHISTFILE)s xroot://eoscompass.cern.ch//eos/compass/%(prodName)s/histos/%(TMPHISTFILE)s;metadataxml=$(ls metadata-*);echo $metadataxml;cp $metadataxml $metadataxml.PAYLOAD;' % {'TMPMDSTFILE': TMPMDSTFILE, 'TMPHISTFILE': TMPHISTFILE, 'TMPRICHFILE': TMPRICHFILE, 'input_file': input_file, 'ProdPathAndName': ProdPathAndName, 'prodName': prodName}
            job.jobParameters = 'export EOS_MGM_URL=root://eoscompass.cern.ch;ppwd=$(pwd);export COMPASS_SW_PREFIX=/eos/experiment/compass/;export COMPASS_SW_PATH=%(prodPath)s;export COMPASS_PROD_NAME=%(prodName)s;export TMPRAWFILE=%(TMPRAWFILE)s;export TMPMDSTFILE=%(TMPMDSTFILE)s;export TMPHISTFILE=%(TMPHISTFILE)s;export TMPRICHFILE=%(TMPRICHFILE)s;export prodSlt=%(prodSlt)s;export EVTDUMPFILE=%(EVTDUMPFILE)s;xrdcp -N -f root://castorpublic.cern.ch/%(input_file)s\?svcClass=compasscdr .;coralpath=%(ProdPathAndName)s/coral;cd -P $coralpath;export coralpathsetup=$coralpath"/setup.sh";source $coralpathsetup;cd $ppwd;$CORAL/../phast/coral/coral.exe %(ProdPathAndName)s/%(template)s;if [ ! -s testevtdump.raw ]; then echo "PanDA message: the file is empty">testevtdump.raw; fi;cp payload_stderr.txt payload_stderr.out;cp payload_stdout.txt payload_stdout.out;rm %(tail)s' % {
                'TMPRAWFILE': TMPRAWFILE,
                'TMPMDSTFILE': TMPMDSTFILE,
                'TMPHISTFILE': TMPHISTFILE,
                'TMPRICHFILE': TMPRICHFILE,
                'input_file': j.file,
                'ProdPathAndName': ProdPathAndName,
                'prodPath': j.task.path,
                'prodName': j.task.soft,
                'template': j.task.template,
                'tail': tail,
                'prodSlt': j.task.prodslt,
                'EVTDUMPFILE': EVTDUMPFILE,
                'STDOUTFILE': STDOUTFILE,
                'STDERRFILE': STDERRFILE
            }

            # Input file registered with stub metadata (hardcoded GUID, size and
            # checksum) -- this is a test variant.
            fileIRaw = FileSpec()
            fileIRaw.lfn = "%s" % (j.file)
            fileIRaw.GUID = '5874a461-61d3-4543-8f34-6fd7a4624e78'
            fileIRaw.fsize = 1073753368
            fileIRaw.checksum = '671608be'
            fileIRaw.destinationDBlock = job.destinationDBlock
            fileIRaw.destinationSE = job.destinationSE
            fileIRaw.dataset = job.destinationDBlock
            fileIRaw.type = 'input'
            job.addFile(fileIRaw)

            fileOstdout = FileSpec()
            fileOstdout.lfn = "payload_stdout.txt"
            fileOstdout.destinationDBlock = job.destinationDBlock
            fileOstdout.destinationSE = job.destinationSE
            fileOstdout.dataset = job.destinationDBlock
            fileOstdout.type = 'output'
            job.addFile(fileOstdout)

            fileOstderr = FileSpec()
            fileOstderr.lfn = "payload_stderr.txt"
            fileOstderr.destinationDBlock = job.destinationDBlock
            fileOstderr.destinationSE = job.destinationSE
            fileOstderr.dataset = job.destinationDBlock
            fileOstderr.type = 'output'
            job.addFile(fileOstderr)

            fileOLog = FileSpec()
            fileOLog.lfn = "%(prodName)s-%(runNumber)s-%(runChunk)s-%(prodSlt)s-%(phastVer)s.job.log.tgz" % {
                'prodName': j.task.soft,
                'runNumber': j.run_number,
                'runChunk': j.chunk_number,
                'prodSlt': j.task.prodslt,
                'phastVer': j.task.phastver
            }
            fileOLog.destinationDBlock = job.destinationDBlock
            fileOLog.destinationSE = job.destinationSE
            fileOLog.dataset = job.destinationDBlock
            fileOLog.type = 'log'
            job.addFile(fileOLog)

            fileOmDST = FileSpec()
            fileOmDST.lfn = "%s" % (TMPMDSTFILE)
            fileOmDST.destinationDBlock = job.destinationDBlock
            fileOmDST.destinationSE = job.destinationSE
            fileOmDST.dataset = job.destinationDBlock
            fileOmDST.type = 'output'
            job.addFile(fileOmDST)

            fileOTrafdic = FileSpec()
            fileOTrafdic.lfn = "%s" % (TMPHISTFILE)
            fileOTrafdic.destinationDBlock = job.destinationDBlock
            fileOTrafdic.destinationSE = job.destinationSE
            fileOTrafdic.dataset = job.destinationDBlock
            fileOTrafdic.type = 'output'
            job.addFile(fileOTrafdic)

            fileOtestevtdump = FileSpec()
            fileOtestevtdump.lfn = "testevtdump.raw"
            fileOtestevtdump.destinationDBlock = job.destinationDBlock
            fileOtestevtdump.destinationSE = job.destinationSE
            fileOtestevtdump.dataset = job.destinationDBlock
            fileOtestevtdump.type = 'output'
            job.addFile(fileOtestevtdump)

            s, o = Client.submitJobs([job], srvID=aSrvID)
            logger.info(s)
            logger.info(o)
            # for x in o:
            #     logger.info("PandaID=%s" % x[0])
            #     today = datetime.datetime.today()
            #
            #     if x[0] != 0 and x[0] != 'NULL':
            #         j_update = Job.objects.get(id=j.id)
            #         j_update.panda_id = x[0]
            #         j_update.status = 'sent'
            #         j_update.attempt = j_update.attempt + 1
            #         j_update.date_updated = today
            #
            #         try:
            #             j_update.save()
            #             logger.info('Job %s with PandaID %s updated at %s' % (j.id, x[0], today))
            #         except IntegrityError as e:
            #             logger.exception('Unique together catched, was not saved')
            #         except DatabaseError as e:
            #             logger.exception('Something went wrong while saving: %s' % e.message)
            #     else:
            #         logger.info('Job %s was not added to PanDA' % j.id)
            i += 1

    logger.info('done')
def send_merging_job(task, files_list, merge_chunk_number):
    logger.info('Going to send merging job for task %s run number %s and merge chunk number %s'
                % (task, files_list[0].run_number, merge_chunk_number))

    # Build the phast argument list and the copy commands for all mDST chunks
    # going into this merge.
    input_files = ''
    input_files_copy = ''
    for j in files_list:
        TMPMDSTFILE = 'mDST-%(runNumber)s-%(runChunk)s-%(prodSlt)s-%(phastVer)s.root' % {
            'runNumber': j.run_number,
            'runChunk': j.chunk_number,
            'prodSlt': j.task.prodslt,
            'phastVer': j.task.phastver
        }
        input_files += ' ' + TMPMDSTFILE
        if j.task.site == 'BW_COMPASS_MCORE':
            input_files_copy += ' cp /scratch/sciteam/criedl/projectdata/' + task.path + task.soft + '/mDST.chunks/' + TMPMDSTFILE + ' .;'
        else:
            input_files_copy += ' xrdcp -N -f root://eoscompass.cern.ch//eos/experiment/compass/' + task.path + task.soft + '/mDST.chunks/' + TMPMDSTFILE + ' .;'

    # From here on, j is the last chunk of the loop above; run-level values
    # (run_number, merging attempt counters) are taken from it.
    datasetName = '%(prodNameOnly)s.%(runNumber)s-%(prodSlt)s-%(phastVer)s-merging-mdst' % {
        'prodNameOnly': task.production,
        'runNumber': j.run_number,
        'prodSlt': task.prodslt,
        'phastVer': task.phastver
    }
    logger.info(datasetName)
    destName = 'local'  # PanDA will not try to move output data, data will be placed by pilot (based on schedconfig)

    MERGEDHISTFILE = '%(runNumber)s-%(prodSlt)s-%(phastVer)s.root' % {
        'runNumber': j.run_number,
        'prodSlt': task.prodslt,
        'phastVer': task.phastver
    }
    if format(merge_chunk_number, '03d') != '000':
        MERGEDHISTFILE = MERGEDHISTFILE + '.' + format(merge_chunk_number, '03d')
    logger.info(MERGEDHISTFILE)

    MERGEDMDSTFILE = 'mDST-%(runNumber)s-%(prodSlt)s-%(phastVer)s.root' % {
        'runNumber': j.run_number,
        'prodSlt': task.prodslt,
        'phastVer': task.phastver
    }
    if format(merge_chunk_number, '03d') != '000':
        MERGEDMDSTFILE = MERGEDMDSTFILE + '.' + format(merge_chunk_number, '03d')
    logger.info(MERGEDMDSTFILE)

    TMPHISTFILE = 'merge-%(runNumber)s-ch%(mergeChunkNumber)s.root' % {
        'runNumber': j.run_number,
        'mergeChunkNumber': format(merge_chunk_number, '03d')
    }
    logger.info(TMPHISTFILE)

    PRODSOFT = task.soft
    ProdPathAndName = task.home + task.path + task.soft

    job = JobSpec()
    job.VO = 'vo.compass.cern.ch'
    job.taskID = task.id
    job.jobDefinitionID = 0
    job.jobName = '%(prodNameOnly)s-merge-mdst-%(runNumber)s-ch%(mergeChunkNumber)s' % {
        'prodNameOnly': task.production,
        'runNumber': j.run_number,
        'mergeChunkNumber': format(merge_chunk_number, '03d')
    }
    job.transformation = 'merging mdst'  # payload (can be URL as well)
    job.destinationDBlock = datasetName
    job.destinationSE = destName
    job.currentPriority = 5000
    job.prodSourceLabel = 'prod_test'
    job.computingSite = task.site
    job.attemptNr = j.attempt_merging_mdst + 1
    job.maxAttempt = j.task.max_attempts
    if j.status_merging_mdst == 'failed':
        job.parentID = j.panda_id_merging_mdst

    if j.task.site == 'BW_COMPASS_MCORE':
        job.jobParameters = 'ppwd=$(pwd);ppwd=$(pwd);export COMPASS_SW_PREFIX=/scratch/sciteam/criedl/projectdata/;export COMPASS_SW_PATH=%(prodPath)s;export COMPASS_PROD_NAME=%(prodName)s;export prodSlt=%(prodSlt)s;export MERGEDHISTFILE=%(MERGEDHISTFILE)s;export MERGEDMDSTFILE=%(MERGEDMDSTFILE)s;export TMPHISTFILE=%(TMPHISTFILE)s;export PRODSOFT=%(PRODSOFT)s;coralpath=%(ProdPathAndName)s/coral;cd -P $coralpath;export coralpathsetup=$coralpath"/setup.sh";source $coralpathsetup;cd $ppwd;%(input_files_copy)sexport PHAST_mDST_MAX_SIZE=6000000000;$CORAL/../phast/phast -m -o %(MERGEDMDSTFILE)s %(input_files)s;cp payload_stderr.txt payload_stderr.out;cp payload_stdout.txt payload_stdout.out;gzip payload_stdout.out;' % {
            'MERGEDHISTFILE': MERGEDHISTFILE,
            'MERGEDMDSTFILE': MERGEDMDSTFILE,
            'PRODSOFT': PRODSOFT,
            'input_files_copy': input_files_copy,
            'input_files': input_files,
            'ProdPathAndName': ProdPathAndName,
            'prodPath': task.path,
            'prodName': task.production,
            'prodSlt': task.prodslt,
            'TMPHISTFILE': TMPHISTFILE
        }
    else:
        job.jobParameters = 'export EOS_MGM_URL=root://eoscompass.cern.ch;ppwd=$(pwd);ppwd=$(pwd);export COMPASS_SW_PREFIX=/eos/experiment/compass/;export COMPASS_SW_PATH=%(prodPath)s;export COMPASS_PROD_NAME=%(prodName)s;export prodSlt=%(prodSlt)s;export MERGEDHISTFILE=%(MERGEDHISTFILE)s;export MERGEDMDSTFILE=%(MERGEDMDSTFILE)s;export TMPHISTFILE=%(TMPHISTFILE)s;export PRODSOFT=%(PRODSOFT)s;coralpath=%(ProdPathAndName)s/coral;cd -P $coralpath;export coralpathsetup=$coralpath"/setup.sh";source $coralpathsetup;cd $ppwd;%(input_files_copy)sexport PHAST_mDST_MAX_SIZE=6000000000;$CORAL/../phast/phast -m -o %(MERGEDMDSTFILE)s %(input_files)s;cp payload_stderr.txt payload_stderr.out;cp payload_stdout.txt payload_stdout.out;gzip payload_stdout.out;' % {
            'MERGEDHISTFILE': MERGEDHISTFILE,
            'MERGEDMDSTFILE': MERGEDMDSTFILE,
            'PRODSOFT': PRODSOFT,
            'input_files_copy': input_files_copy,
            'input_files': input_files,
            'ProdPathAndName': ProdPathAndName,
            'prodPath': task.path,
            'prodName': task.production,
            'prodSlt': task.prodslt,
            'TMPHISTFILE': TMPHISTFILE
        }

    fileOLog = FileSpec()
    fileOLog.lfn = "%s.job.log.tgz" % (job.jobName)
    fileOLog.destinationDBlock = job.destinationDBlock
    fileOLog.destinationSE = job.destinationSE
    fileOLog.dataset = job.destinationDBlock
    fileOLog.type = 'log'
    job.addFile(fileOLog)

    fileOmDST = FileSpec()
    fileOmDST.lfn = "%s" % (MERGEDMDSTFILE)
    fileOmDST.destinationDBlock = job.destinationDBlock
    fileOmDST.destinationSE = job.destinationSE
    fileOmDST.dataset = job.destinationDBlock
    fileOmDST.type = 'output'
    job.addFile(fileOmDST)

    fileOstdout = FileSpec()
    fileOstdout.lfn = "payload_stdout.out.gz"
    fileOstdout.destinationDBlock = job.destinationDBlock
    fileOstdout.destinationSE = job.destinationSE
    fileOstdout.dataset = job.destinationDBlock
    fileOstdout.type = 'output'
    job.addFile(fileOstdout)

    # fileOstderr = FileSpec()
    # fileOstderr.lfn = "payload_stderr.txt"
    # fileOstderr.destinationDBlock = job.destinationDBlock
    # fileOstderr.destinationSE = job.destinationSE
    # fileOstderr.dataset = job.destinationDBlock
    # fileOstderr.type = 'output'
    # job.addFile(fileOstderr)

    s, o = Client.submitJobs([job], srvID=aSrvID)
    logger.info(s)
    today = timezone.now()  # the original referenced an undefined 'today'; set it explicitly (assumes django.utils.timezone is imported)
    for x in o:
        logger.info("PandaID=%s" % x[0])
        if x[0] != 0 and x[0] != 'NULL':
            for j in files_list:
                j_update = Job.objects.get(id=j.id)
                j_update.panda_id_merging_mdst = x[0]
                j_update.status_merging_mdst = 'sent'
                j_update.attempt_merging_mdst = j_update.attempt_merging_mdst + 1
                j_update.chunk_number_merging_mdst = merge_chunk_number
                j_update.date_updated = today
                try:
                    j_update.save()
                    logger.info('Job %s with PandaID %s updated' % (j.id, x[0]))
                except IntegrityError as e:
                    logger.exception('Unique together constraint violated, was not saved')
                except DatabaseError as e:
                    logger.exception('Something went wrong while saving: %s' % e.message)
        else:
            logger.info('Job %s was not added to PanDA' % j.id)
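# Merged outputs keep the bare name for merge chunk 000 and get a '.NNN'
# suffix otherwise, as implemented above with format(merge_chunk_number, '03d').
# A standalone sketch of that rule; the helper name and the sample run/slot/
# phast values are illustrative only:

def merged_name(base, merge_chunk_number):
    suffix = format(merge_chunk_number, '03d')
    return base if suffix == '000' else base + '.' + suffix

assert merged_name('mDST-275000-2-8.root', 0) == 'mDST-275000-2-8.root'
assert merged_name('mDST-275000-2-8.root', 2) == 'mDST-275000-2-8.root.002'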
def send_merging_job(task, files_list, merge_chunk_number):
    logger.info('Going to send merging job for task %s run number %s and merge chunk number %s'
                % (task, files_list[0].run_number, merge_chunk_number))

    input_files_copy = ''
    input_files_rm = ''  # note: never used below
    for j in files_list:
        TMPDUMPFILE = 'evtdump%(prodSlt)s-%(runChunk)s-%(runNumber)s.raw' % {
            'runNumber': j.run_number,
            'runChunk': j.chunk_number,
            'prodSlt': j.task.prodslt
        }
        if j.task.site == 'BW_COMPASS_MCORE':
            input_files_copy += ' cp $dumpspath/' + TMPDUMPFILE + ' .;'
        else:
            input_files_copy += ' xrdcp -N -f $dumpspath/' + TMPDUMPFILE + ' .;'

    # From here on, j is the last chunk of the loop above; run-level values
    # (run_number, merging attempt counters) are taken from it.
    datasetName = '%(prodNameOnly)s.%(runNumber)s-%(prodSlt)s-%(phastVer)s-merging-dump' % {
        'prodNameOnly': task.production,
        'runNumber': j.run_number,
        'prodSlt': task.prodslt,
        'phastVer': task.phastver
    }
    logger.info(datasetName)
    destName = 'local'  # PanDA will not try to move output data, data will be placed by pilot (based on schedconfig)

    MERGEDDUMPFILE = 'evtdump%(prodSlt)s-%(runNumber)s.raw' % {
        'runNumber': j.run_number,
        'prodSlt': task.prodslt
    }
    if format(merge_chunk_number, '03d') != '000':
        MERGEDDUMPFILE = MERGEDDUMPFILE + '.' + format(merge_chunk_number, '03d')
    logger.info(MERGEDDUMPFILE)

    PRODSOFT = task.soft
    ProdPathAndName = task.home + task.path + task.soft
    if j.task.site == 'BW_COMPASS_MCORE':
        dumpsPath = '/scratch/sciteam/criedl/projectdata/' + task.path + task.soft + '/evtdump/slot' + str(task.prodslt)
    else:
        dumpsPath = 'root://eoscompass.cern.ch//eos/experiment/compass/' + task.path + task.soft + '/evtdump/slot' + str(task.prodslt)

    job = JobSpec()
    job.VO = 'vo.compass.cern.ch'
    job.taskID = task.id
    job.jobDefinitionID = 0
    job.jobName = '%(prodNameOnly)s-merge-dump-%(runNumber)s-ch%(mergeChunkNumber)s' % {
        'prodNameOnly': task.production,
        'runNumber': j.run_number,
        'mergeChunkNumber': format(merge_chunk_number, '03d')
    }
    job.transformation = 'merging dump'  # payload (can be URL as well)
    job.destinationDBlock = datasetName
    job.destinationSE = destName
    job.currentPriority = 5000
    job.prodSourceLabel = 'prod_test'
    job.computingSite = task.site
    job.attemptNr = j.attempt_merging_evntdmp + 1
    job.maxAttempt = j.task.max_attempts
    if j.status_merging_evntdmp == 'failed':
        job.parentID = j.panda_id_merging_evntdmp

    if j.task.site == 'BW_COMPASS_MCORE':
        job.jobParameters = 'ppwd=$(pwd);ppwd=$(pwd);export COMPASS_SW_PREFIX=/scratch/sciteam/criedl/projectdata/;export COMPASS_SW_PATH=%(prodPath)s;export COMPASS_PROD_NAME=%(prodName)s;export prodSlt=%(prodSlt)s;export MERGEDDUMPFILE=%(MERGEDDUMPFILE)s;export dumpspath=%(dumpsPath)s;export PRODSOFT=%(PRODSOFT)s;%(input_files_copy)scat evtdump%(prodSlt)s-*-*.raw > %(MERGEDDUMPFILE)s;rm evtdump%(prodSlt)s-*-*.raw;' % {
            'MERGEDDUMPFILE': MERGEDDUMPFILE,
            'dumpsPath': dumpsPath,
            'PRODSOFT': PRODSOFT,
            'input_files_copy': input_files_copy,
            'ProdPathAndName': ProdPathAndName,
            'prodPath': task.path,
            'prodName': task.production,
            'prodSlt': task.prodslt
        }
    else:
        job.jobParameters = 'export EOS_MGM_URL=root://eoscompass.cern.ch;ppwd=$(pwd);ppwd=$(pwd);export COMPASS_SW_PREFIX=/eos/experiment/compass/;export COMPASS_SW_PATH=%(prodPath)s;export COMPASS_PROD_NAME=%(prodName)s;export prodSlt=%(prodSlt)s;export MERGEDDUMPFILE=%(MERGEDDUMPFILE)s;export dumpspath=%(dumpsPath)s;export PRODSOFT=%(PRODSOFT)s;%(input_files_copy)scat evtdump%(prodSlt)s-*-*.raw > %(MERGEDDUMPFILE)s;rm evtdump%(prodSlt)s-*-*.raw;' % {
            'MERGEDDUMPFILE': MERGEDDUMPFILE,
            'dumpsPath': dumpsPath,
            'PRODSOFT': PRODSOFT,
            'input_files_copy': input_files_copy,
            'ProdPathAndName': ProdPathAndName,
            'prodPath': task.path,
            'prodName': task.production,
            'prodSlt': task.prodslt
        }

    fileOLog = FileSpec()
    fileOLog.lfn = "%s.job.log.tgz" % (job.jobName)
    fileOLog.destinationDBlock = job.destinationDBlock
    fileOLog.destinationSE = job.destinationSE
    fileOLog.dataset = job.destinationDBlock
    fileOLog.type = 'log'
    job.addFile(fileOLog)

    fileOdump = FileSpec()
    fileOdump.lfn = "%s" % (MERGEDDUMPFILE)
    fileOdump.destinationDBlock = job.destinationDBlock
    fileOdump.destinationSE = job.destinationSE
    fileOdump.dataset = job.destinationDBlock
    fileOdump.type = 'output'
    job.addFile(fileOdump)

    # fileOstdout = FileSpec()
    # fileOstdout.lfn = "payload_stdout.txt"
    # fileOstdout.destinationDBlock = job.destinationDBlock
    # fileOstdout.destinationSE = job.destinationSE
    # fileOstdout.dataset = job.destinationDBlock
    # fileOstdout.type = 'output'
    # job.addFile(fileOstdout)

    # fileOstderr = FileSpec()
    # fileOstderr.lfn = "payload_stderr.txt"
    # fileOstderr.destinationDBlock = job.destinationDBlock
    # fileOstderr.destinationSE = job.destinationSE
    # fileOstderr.dataset = job.destinationDBlock
    # fileOstderr.type = 'output'
    # job.addFile(fileOstderr)

    s, o = Client.submitJobs([job], srvID=aSrvID)
    logger.info(s)
    today = timezone.now()  # the original referenced an undefined 'today'; set it explicitly (assumes django.utils.timezone is imported)
    for x in o:
        logger.info("PandaID=%s" % x[0])
        if x[0] != 0 and x[0] != 'NULL':
            for j in files_list:
                j_update = Job.objects.get(id=j.id)
                j_update.panda_id_merging_evntdmp = x[0]
                j_update.status_merging_evntdmp = 'sent'
                j_update.attempt_merging_evntdmp = j_update.attempt_merging_evntdmp + 1
                j_update.chunk_number_merging_evntdmp = merge_chunk_number
                j_update.date_updated = today
                try:
                    j_update.save()
                    logger.info('Job %s with PandaID %s updated' % (j.id, x[0]))
                except IntegrityError as e:
                    logger.exception('Unique together constraint violated, was not saved')
                except DatabaseError as e:
                    logger.exception('Something went wrong while saving: %s' % e.message)
        else:
            logger.info('Job %s was not added to PanDA' % j.id)
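# A hypothetical driver for the two merging senders above: split the finished
# chunks of one run into fixed-size merge groups and submit one merging job per
# group. The batch size and the driver itself are assumptions about how
# send_merging_job is meant to be invoked; only the send_merging_job signature
# is taken from the definitions above.

def send_all_merging_jobs(task, finished_chunks, chunks_per_merge=50):
    """Submit one merging job per group of chunks (merge chunk 0, 1, 2, ...)."""
    for merge_chunk_number, start in enumerate(
            range(0, len(finished_chunks), chunks_per_merge)):
        send_merging_job(task,
                         finished_chunks[start:start + chunks_per_merge],
                         merge_chunk_number)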