def defineEvgen16Job(self, i):
    """Define an Evgen16 job based on predefined values and randomly generated names"""
    job = JobSpec()
    job.computingSite = self.__site
    job.cloud = self.__cloud

    job.jobDefinitionID = int(time.time()) % 10000
    job.jobName = "%s_%d" % (uuid.uuid1(), i)
    job.AtlasRelease = 'Atlas-16.6.2'
    job.homepackage = 'AtlasProduction/16.6.2.1'
    job.transformation = 'Evgen_trf.py'
    job.destinationDBlock = self.__datasetName
    job.destinationSE = self.__destName
    job.currentPriority = 10000
    job.prodSourceLabel = 'test'
    job.cmtConfig = 'i686-slc5-gcc43-opt'

    # Output file
    fileO = FileSpec()
    fileO.lfn = "%s.evgen.pool.root" % job.jobName
    fileO.destinationDBlock = job.destinationDBlock
    fileO.destinationSE = job.destinationSE
    fileO.dataset = job.destinationDBlock
    fileO.destinationDBlockToken = 'ATLASDATADISK'
    fileO.type = 'output'
    job.addFile(fileO)

    # Log file
    fileL = FileSpec()
    fileL.lfn = "%s.job.log.tgz" % job.jobName
    fileL.destinationDBlock = job.destinationDBlock
    fileL.destinationSE = job.destinationSE
    fileL.dataset = job.destinationDBlock
    fileL.destinationDBlockToken = 'ATLASDATADISK'
    fileL.type = 'log'
    job.addFile(fileL)

    job.jobParameters = "2760 105048 19901 101 200 MC10.105048.PythiaB_ccmu3mu1X.py %s NONE NONE NONE MC10JobOpts-latest-test.tar.gz" % fileO.lfn
    return job
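# Hedged usage sketch, not part of the original snippet: it assumes `tester` is an
# instance of the class that defines defineEvgen16Job() above, and that the PanDA
# Client.submitJobs() call is importable (it is used the same way in later snippets).
# Shown only to illustrate how such JobSpecs would be submitted.
jobs = [tester.defineEvgen16Job(i) for i in range(2)]
s, o = Client.submitJobs(jobs)
for x in o:
    print("PandaID=%s" % x[0])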
def run(self):
    for i in range(1):
        prodDBlock = 'rome.004201.evgen.ZeeJimmy'
        destinationDBlock = 'pandatest.000123.test.simul'
        destinationSE = 'BNL_SE'
        jobs = []
        #for i in range(self.interval):
        for i in range(2):
            job = JobSpec()
            job.jobDefinitionID = self.jobDefinitionID
            job.AtlasRelease = 'Atlas-11.0.1'
            job.prodDBlock = prodDBlock
            job.destinationDBlock = destinationDBlock
            job.destinationSE = destinationSE
            job.currentPriority = i

            lfnI = 'rome.004201.evgen.ZeeJimmy._00001.pool.root'
            file = FileSpec()
            file.lfn = lfnI
            file.type = 'input'
            file.prodDBlock = prodDBlock
            file.dataset = prodDBlock
            job.addFile(file)

            lfnO = '%s.pool.root.1' % commands.getoutput('uuidgen')
            file = FileSpec()
            file.lfn = lfnO
            file.type = 'output'
            file.destinationDBlock = destinationDBlock
            file.dataset = destinationDBlock
            file.destinationSE = destinationSE
            job.addFile(file)

            job.homepackage = 'JobTransforms-11-00-01-01'
            job.transformation = 'share/rome.g4sim.standard.trf'
            job.jobParameters = '%s %s 1 2 14268' % (lfnI, lfnO)
            jobs.append(job)
        self.taskbuffer.storeJobs(jobs, None)
        time.sleep(self.interval)
def outFileSpec(of=None, log=False):
    """Local routine to create a FileSpec for a job output/log file.

    :arg str of: output file base name
    :return: FileSpec object for the output file."""
    outfile = FileSpec()
    if log:
        outfile.lfn = "job.log_%d_%s.tgz" % (jobid, lfnhanger)
        outfile.type = 'log'
    else:
        outfile.lfn = '%s_%d_%s%s' % (os.path.splitext(of)[0], jobid, lfnhanger,
                                      os.path.splitext(of)[1])
        outfile.type = 'output'
    outfile.destinationDBlock = pandajob.destinationDBlock
    outfile.destinationSE = task['tm_asyncdest']
    outfile.dataset = pandajob.destinationDBlock
    return outfile
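# Hedged usage sketch, not part of the original snippet: outFileSpec() closes over
# jobid, lfnhanger, pandajob and task from its enclosing scope, so a caller in that
# scope could attach output and log files like this. `outfiles` is an assumed name
# for the list of output file base names.
for of in outfiles:
    pandajob.addFile(outFileSpec(of))
pandajob.addFile(outFileSpec(log=True))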
def createJobSpec(nodes, walltime, command, jobName, outputFile=None):
    transformation = '#json#'
    # strip the trailing newline that uuidgen output carries
    datasetName = 'panda.destDB.%s' % subprocess.check_output('uuidgen').strip()
    destName = 'local'
    prodSourceLabel = 'user'
    currentPriority = 1000

    job = JobSpec()
    job.jobDefinitionID = int(time.time()) % 10000
    job.jobName = jobName
    job.VO = VO
    job.transformation = transformation
    job.destinationDBlock = datasetName
    job.destinationSE = destName
    job.currentPriority = currentPriority
    job.prodSourceLabel = prodSourceLabel
    job.computingSite = QUEUE_NAME
    job.cmtConfig = json.dumps({'name': job.jobName, 'next': None})

    lqcd_command = {
        "nodes": nodes,
        "walltime": walltime,
        "name": job.jobName,
        "command": command
    }
    if outputFile:
        lqcd_command['outputFile'] = outputFile
    job.jobParameters = json.dumps(lqcd_command)

    fileOL = FileSpec()
    fileOL.lfn = "%s.job.log.tgz" % job.jobName.strip()
    fileOL.destinationDBlock = job.destinationDBlock
    fileOL.destinationSE = job.destinationSE
    fileOL.dataset = job.destinationDBlock
    fileOL.type = 'log'
    job.addFile(fileOL)
    #job.cmtConfig = None

    return job
def createJob(self, name, nodes, walltime, command, inputs=None, queuename=None):
    job = JobSpec()
    job.jobDefinitionID = int(time.time()) % 10000
    job.jobName = "%s" % commands.getoutput('uuidgen')
    job.VO = self.vo
    job.transformation = self.transformation
    job.destinationDBlock = self.datasetName
    job.destinationSE = self.destName
    job.currentPriority = self.currentPriority
    job.prodSourceLabel = self.prodSourceLabel
    job.computingSite = self.site if queuename is None else queuename

    lqcd_command = {
        "nodes": nodes,
        "walltime": walltime,
        "name": name,
        "command": command
    }
    job.jobParameters = json.dumps(lqcd_command)

    fileOL = FileSpec()
    fileOL.lfn = "%s.job.log.tgz" % job.jobName
    fileOL.destinationDBlock = job.destinationDBlock
    fileOL.destinationSE = job.destinationSE
    fileOL.dataset = job.destinationDBlock
    fileOL.type = 'log'
    job.addFile(fileOL)

    job.cmtConfig = inputs
    return job
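# Hedged usage sketch, not part of the original snippet: `submitter` stands for an
# instance of the class defining createJob(), and Client.submitJobs() is the
# submission call used by the other snippets in this section.
job = submitter.createJob('test-job', nodes=1, walltime='01:00:00', command='echo hello')
s, o = Client.submitJobs([job])
print(s, o)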
for i in range(1):
    job = JobSpec()
    job.jobDefinitionID = int(time.time()) % 10000
    job.jobName = "%s_%d" % (commands.getoutput('uuidgen'), i)
    job.AtlasRelease = 'Atlas-17.0.5'
    job.homepackage = 'AtlasProduction/17.0.5.6'
    job.transformation = 'Evgen_trf.py'
    job.destinationDBlock = datasetName
    job.destinationSE = destName
    job.currentPriority = 10000
    job.prodSourceLabel = 'test'
    job.computingSite = site
    job.cloud = cloud
    job.cmtConfig = 'i686-slc5-gcc43-opt'

    file = FileSpec()
    file.lfn = "%s.evgen.pool.root" % job.jobName
    file.destinationDBlock = job.destinationDBlock
    file.destinationSE = job.destinationSE
    file.dataset = job.destinationDBlock
    file.destinationDBlockToken = 'ATLASDATADISK'
    file.type = 'output'
    job.addFile(file)

    fileOL = FileSpec()
    fileOL.lfn = "%s.job.log.tgz" % job.jobName
    fileOL.destinationDBlock = job.destinationDBlock
    fileOL.destinationSE = job.destinationSE
    fileOL.dataset = job.destinationDBlock
    fileOL.destinationDBlockToken = 'ATLASDATADISK'
    fileOL.type = 'log'
import getpass
passwd = getpass.getpass()

pool = DBProxyPool('adbpro.usatlas.bnl.gov', passwd, 2)
proxy = pool.getProxy()

import sys
import commands

job1 = JobSpec()
job1.PandaID = 'NULL'
job1.jobStatus = 'unknown'
job1.computingSite = "aaa"

f11 = FileSpec()
f11.lfn = 'in1.pool.root'
f11.type = 'input'
job1.addFile(f11)

f12 = FileSpec()
f12.lfn = 'out1.pool.root'
f12.type = 'output'
job1.addFile(f12)

job2 = JobSpec()
job2.PandaID = 'NULL'
job2.jobStatus = 'unknown'
job2.computingSite = "bbb"

f21 = FileSpec()
f21.lfn = 'in2.pool.root'
f21.type = 'input'
# append pacball to arg
if pacFile != None:
    argStr += "--pacball %s " % pacFile

job = JobSpec()
job.jobDefinitionID = int(time.time()) % 10000
job.jobName = "%s_%s" % (siteName, commands.getoutput('uuidgen'))
job.transformation = 'http://www.usatlas.bnl.gov/svn/panda/apps/sw/installAtlasSW'
job.destinationDBlock = 'panda.%s' % job.jobName
job.currentPriority = 10000
job.prodSourceLabel = 'software'
job.computingSite = siteName
job.cloud = 'US'

fileOL = FileSpec()
fileOL.lfn = "%s.job.log.tgz" % job.jobName
fileOL.destinationDBlock = job.destinationDBlock
fileOL.dataset = job.destinationDBlock
fileOL.type = 'log'
job.addFile(fileOL)

# pacball
if pacDS != None:
    job.prodDBlock = pacDS
    fileP = FileSpec()
    fileP.dataset = pacDS
    fileP.prodDBlock = pacDS
    fileP.lfn = pacFile
    fileP.type = 'input'
    job.addFile(fileP)
job.jobDefinitionID = int(time.time()) % 10000
job.jobName = "%s_%d" % (commands.getoutput('uuidgen'), index)
job.AtlasRelease = 'Atlas-17.0.5'
job.homepackage = 'AtlasProduction/17.0.5.6'
job.transformation = 'AtlasG4_trf.py'
job.destinationDBlock = datasetName
job.computingSite = site
job.prodDBlock = prodDBlock
job.prodSourceLabel = 'test'
job.processingType = 'test'
job.currentPriority = 10000
job.cloud = cloud
job.cmtConfig = 'i686-slc5-gcc43-opt'

fileI = FileSpec()
fileI.dataset = job.prodDBlock
fileI.prodDBlock = job.prodDBlock
fileI.lfn = lfn
fileI.type = 'input'
job.addFile(fileI)

fileD = FileSpec()
fileD.dataset = 'ddo.000001.Atlas.Ideal.DBRelease.v170602'
fileD.prodDBlock = fileD.dataset
fileD.lfn = 'DBRelease-17.6.2.tar.gz'
fileD.type = 'input'
job.addFile(fileD)

fileOA = FileSpec()
fileOA.lfn = "%s.HITS.pool.root" % job.jobName
def run(self, data):
    datasetName = 'panda:panda.destDB.%s' % commands.getoutput('uuidgen')
    destName = 'ANALY_RRC-KI-HPC'
    site = 'ANALY_RRC-KI-HPC'
    scope = config['DEFAULT_SCOPE']

    distributive = data['distributive']
    release = data['release']
    parameters = data['parameters']
    input_type = data['input_type']
    input_params = data['input_params']
    input_files = data['input_files']
    output_type = data['output_type']
    output_params = data['output_params']
    output_files = data['output_files']
    jobid = data['jobid']
    _logger.debug('Jobid: ' + str(jobid))

    job = JobSpec()
    job.jobDefinitionID = int(time.time()) % 10000
    job.jobName = commands.getoutput('uuidgen')
    job.transformation = config['DEFAULT_TRF']
    job.destinationDBlock = datasetName
    job.destinationSE = destName
    job.currentPriority = 1000
    job.prodSourceLabel = 'user'
    job.computingSite = site
    job.cloud = 'RU'
    job.prodDBlock = "%s:%s.%s" % (scope, scope, job.jobName)
    job.jobParameters = '%s %s "%s"' % (release, distributive, parameters)

    params = {}
    _logger.debug('MoveData')
    ec = 0
    ec, uploaded_input_files = movedata(
        params=params,
        fileList=input_files,
        fromType=input_type,
        fromParams=input_params,
        toType='hpc',
        toParams={'dest': '/' + re.sub(':', '/', job.prodDBlock)})
    if ec != 0:
        _logger.error('Move data error: ' + ec[1])
        return

    for file in uploaded_input_files:
        fileIT = FileSpec()
        fileIT.lfn = file
        fileIT.dataset = job.prodDBlock
        fileIT.prodDBlock = job.prodDBlock
        fileIT.type = 'input'
        fileIT.scope = scope
        fileIT.status = 'ready'
        fileIT.GUID = commands.getoutput('uuidgen')
        job.addFile(fileIT)

    for file in output_files:
        fileOT = FileSpec()
        fileOT.lfn = file
        fileOT.destinationDBlock = job.prodDBlock
        fileOT.destinationSE = job.destinationSE
        fileOT.dataset = job.prodDBlock
        fileOT.type = 'output'
        fileOT.scope = scope
        fileOT.GUID = commands.getoutput('uuidgen')
        job.addFile(fileOT)

    fileOL = FileSpec()
    fileOL.lfn = "%s.log.tgz" % job.jobName
    fileOL.destinationDBlock = job.destinationDBlock
    fileOL.destinationSE = job.destinationSE
    fileOL.dataset = job.destinationDBlock
    fileOL.type = 'log'
    fileOL.scope = 'panda'
    job.addFile(fileOL)

    self.jobList.append(job)

    # submitJob
    o = self.submitJobs(self.jobList)
    x = o[0]

    # update PandaID
    conn = MySQLdb.connect(
        host=self.dbhost,
        db=self.dbname,
        # port=self.dbport,
        connect_timeout=self.dbtimeout,
        user=self.dbuser,
        passwd=self.dbpasswd)
    cur = conn.cursor()
    try:
        varDict = {}
        PandaID = int(x[0])
        varDict['id'] = jobid
        varDict['pandaId'] = PandaID
        sql = "UPDATE %s SET %s.pandaId=%s WHERE %s.id=%s" % (
            self.table_jobs, self.table_jobs, varDict['pandaId'],
            self.table_jobs, varDict['id'])
        cur.execute(sql, varDict)
    except:
        _logger.error('SENDJOB: Incorrect server response')
    try:
        conn.commit()
        return True
    except:
        _logger.error("commit error")
        return False
for i in range(1):
    for lfn in files.keys():
        job = JobSpec()
        job.jobDefinitionID = int(time.time()) % 10000
        job.jobName = commands.getoutput('uuidgen')
        job.AtlasRelease = 'Atlas-13.0.35'
        job.homepackage = 'AtlasPoint1/13.0.35.1'
        job.transformation = 'csc_cosmics_trf.py'
        job.destinationDBlock = datasetName
        job.cloud = cloud
        job.computingSite = site
        job.prodDBlock = files[lfn]
        job.prodSourceLabel = 'test'
        job.currentPriority = 1001

        fileI = FileSpec()
        fileI.dataset = job.prodDBlock
        fileI.prodDBlock = job.prodDBlock
        fileI.lfn = lfn
        fileI.type = 'input'
        job.addFile(fileI)

        fileD = FileSpec()
        fileD.dataset = 'ddo.000001.Atlas.Ideal.DBRelease.v030101'
        fileD.prodDBlock = 'ddo.000001.Atlas.Ideal.DBRelease.v030101'
        fileD.lfn = 'DBRelease-3.1.1.tar.gz'
        fileD.type = 'input'
        # job.addFile(fileD)

        fileO1 = FileSpec()
        fileO1.lfn = "%s.ESD.pool.root" % job.jobName
def main(): logger.info('Getting tasks with status send and running') # tasks_list = Task.objects.all().filter(Q(status='send') | Q(status='running')) tasks_list = Task.objects.all().filter(name='dvcs2016P09t2r13v1_mu+') logger.info('Got list of %s tasks' % len(tasks_list)) for t in tasks_list: logger.info('Getting jobs in status defined or failed for task %s' % t) jobs_list_count = Job.objects.all().filter(task=t).count() if jobs_list_count > 50: jobs_list = Job.objects.all().filter( task=t).order_by('id')[:max_send_amount] else: jobs_list = Job.objects.all().filter( task=t).order_by('id')[:jobs_list_count] logger.info('Got list of %s jobs' % len(jobs_list)) i = 0 for j in jobs_list: if i >= max_send_amount: break logger.info('Going to send job %s of %s task' % (j.file, j.task.name)) umark = commands.getoutput('uuidgen') datasetName = 'panda.destDB.%s' % umark destName = 'COMPASSPRODDISK' # PanDA will not try to move output data, data will be placed by pilot (based on schedconfig) TMPRAWFILE = j.file[j.file.rfind('/') + 1:] logger.info(TMPRAWFILE) TMPMDSTFILE = 'mDST-%(runNumber)s-%(runChunk)s-%(prodSlt)s-%(phastVer)s.root' % { 'input_file': j.file, 'runNumber': j.run_number, 'runChunk': j.chunk_number, 'prodSlt': j.task.prodslt, 'phastVer': j.task.phastver } logger.info(TMPMDSTFILE) TMPHISTFILE = '%(runNumber)s-%(runChunk)s-%(prodSlt)s.root' % { 'runNumber': j.run_number, 'runChunk': j.chunk_number, 'prodSlt': j.task.prodslt } logger.info(TMPHISTFILE) TMPRICHFILE = 'gfile_%(runNumber)s-%(runChunk)s.gfile' % { 'runNumber': j.run_number, 'runChunk': j.chunk_number } logger.info(TMPRICHFILE) EVTDUMPFILE = 'evtdump%(prodSlt)s-%(runChunk)s-%(runNumber)s.raw' % { 'prodSlt': j.task.prodslt, 'runNumber': j.run_number, 'runChunk': j.chunk_number } logger.info(EVTDUMPFILE) STDOUTFILE = '%(prodNameOnly)s.%(runNumber)s-%(runChunk)s-%(prodSlt)s.stdout' % { 'prodNameOnly': j.task.soft, 'runNumber': j.run_number, 'runChunk': j.chunk_number, 'prodSlt': j.task.prodslt } logger.info(STDOUTFILE) STDERRFILE = '%(prodNameOnly)s.%(runNumber)s-%(runChunk)s-%(prodSlt)s.stderr' % { 'prodNameOnly': j.task.soft, 'runNumber': j.run_number, 'runChunk': j.chunk_number, 'prodSlt': j.task.prodslt } logger.info(STDERRFILE) try: file_year = j.file.split('/')[5] logger.info(file_year) except: logger.error('Error while splitting file to get year') sys.exit(1) ProdPathAndName = j.task.home + j.task.path + j.task.soft job = JobSpec() job.taskID = j.task.id job.jobDefinitionID = 0 job.jobName = '%(prodName)s-%(fileYear)s--%(runNumber)s-%(runChunk)s-%(prodSlt)s-%(phastVer)s' % { 'prodName': j.task.soft, 'fileYear': file_year, 'runNumber': j.run_number, 'runChunk': j.chunk_number, 'prodSlt': j.task.prodslt, 'phastVer': j.task.phastver } job.transformation = j.task.type # payload (can be URL as well) job.destinationDBlock = datasetName job.destinationSE = destName job.currentPriority = 2000 job.prodSourceLabel = 'prod_test' job.computingSite = site job.attemptNr = j.attempt + 1 job.maxAttempt = j.task.max_attempts if j.status == 'failed': job.parentID = j.panda_id head, tail = os.path.split(j.file) # job.transferType = 'direct' job.sourceSite = 'CERN_COMPASS_PROD' # logs, and all files generated during execution will be placed in log (except output file) #job.jobParameters='source /afs/cern.ch/project/eos/installation/compass/etc/setup.sh;export EOS_MGM_URL=root://eoscompass.cern.ch;export PATH=/afs/cern.ch/project/eos/installation/compass/bin:$PATH;ppwd=$(pwd);echo $ppwd;export TMPMDSTFILE=%(TMPMDSTFILE)s;export 
TMPHISTFILE=%(TMPHISTFILE)s;export TMPRICHFILE=%(TMPRICHFILE)s;coralpath=%(ProdPathAndName)s/coral;echo $coralpath;cd -P $coralpath;export coralpathsetup=$coralpath"/setup.sh";echo $coralpathsetup;source $coralpathsetup;cd $ppwd;$CORAL/../phast/coral/coral.exe %(ProdPathAndName)s/template.opt;xrdcp -np $ppwd/%(TMPMDSTFILE)s xroot://eoscompass.cern.ch//eos/compass/%(prodName)s/mDST/%(TMPMDSTFILE)s;xrdcp -np $ppwd/%(TMPHISTFILE)s xroot://eoscompass.cern.ch//eos/compass/%(prodName)s/histos/%(TMPHISTFILE)s;metadataxml=$(ls metadata-*);echo $metadataxml;cp $metadataxml $metadataxml.PAYLOAD;' % {'TMPMDSTFILE': TMPMDSTFILE, 'TMPHISTFILE': TMPHISTFILE, 'TMPRICHFILE': TMPRICHFILE, 'input_file': input_file, 'ProdPathAndName': ProdPathAndName, 'prodName': prodName} job.jobParameters = 'export EOS_MGM_URL=root://eoscompass.cern.ch;ppwd=$(pwd);export COMPASS_SW_PREFIX=/eos/experiment/compass/;export COMPASS_SW_PATH=%(prodPath)s;export COMPASS_PROD_NAME=%(prodName)s;export TMPRAWFILE=%(TMPRAWFILE)s;export TMPMDSTFILE=%(TMPMDSTFILE)s;export TMPHISTFILE=%(TMPHISTFILE)s;export TMPRICHFILE=%(TMPRICHFILE)s;export prodSlt=%(prodSlt)s;export EVTDUMPFILE=%(EVTDUMPFILE)s;xrdcp -N -f root://castorpublic.cern.ch/%(input_file)s\?svcClass=compasscdr .;coralpath=%(ProdPathAndName)s/coral;cd -P $coralpath;export coralpathsetup=$coralpath"/setup.sh";source $coralpathsetup;cd $ppwd;$CORAL/../phast/coral/coral.exe %(ProdPathAndName)s/%(template)s;if [ ! -s testevtdump.raw ]; then echo "PanDA message: the file is empty">testevtdump.raw; fi;cp payload_stderr.txt payload_stderr.out;cp payload_stdout.txt payload_stdout.out;rm %(tail)s' % { 'TMPRAWFILE': TMPRAWFILE, 'TMPMDSTFILE': TMPMDSTFILE, 'TMPHISTFILE': TMPHISTFILE, 'TMPRICHFILE': TMPRICHFILE, 'input_file': j.file, 'ProdPathAndName': ProdPathAndName, 'prodPath': j.task.path, 'prodName': j.task.soft, 'template': j.task.template, 'tail': tail, 'prodSlt': j.task.prodslt, 'EVTDUMPFILE': EVTDUMPFILE, 'STDOUTFILE': STDOUTFILE, 'STDERRFILE': STDERRFILE } fileIRaw = FileSpec() fileIRaw.lfn = "%s" % (j.file) fileIRaw.GUID = '5874a461-61d3-4543-8f34-6fd7a4624e78' fileIRaw.fsize = 1073753368 fileIRaw.checksum = '671608be' fileIRaw.destinationDBlock = job.destinationDBlock fileIRaw.destinationSE = job.destinationSE fileIRaw.dataset = job.destinationDBlock fileIRaw.type = 'input' job.addFile(fileIRaw) fileOstdout = FileSpec() fileOstdout.lfn = "payload_stdout.txt" fileOstdout.destinationDBlock = job.destinationDBlock fileOstdout.destinationSE = job.destinationSE fileOstdout.dataset = job.destinationDBlock fileOstdout.type = 'output' job.addFile(fileOstdout) fileOstderr = FileSpec() fileOstderr.lfn = "payload_stderr.txt" fileOstderr.destinationDBlock = job.destinationDBlock fileOstderr.destinationSE = job.destinationSE fileOstderr.dataset = job.destinationDBlock fileOstderr.type = 'output' job.addFile(fileOstderr) fileOLog = FileSpec() fileOLog.lfn = "%(prodName)s-%(runNumber)s-%(runChunk)s-%(prodSlt)s-%(phastVer)s.job.log.tgz" % { 'prodName': j.task.soft, 'runNumber': j.run_number, 'runChunk': j.chunk_number, 'prodSlt': j.task.prodslt, 'phastVer': j.task.phastver } fileOLog.destinationDBlock = job.destinationDBlock fileOLog.destinationSE = job.destinationSE fileOLog.dataset = job.destinationDBlock fileOLog.type = 'log' job.addFile(fileOLog) fileOmDST = FileSpec() fileOmDST.lfn = "%s" % (TMPMDSTFILE) fileOmDST.destinationDBlock = job.destinationDBlock fileOmDST.destinationSE = job.destinationSE fileOmDST.dataset = job.destinationDBlock fileOmDST.type = 'output' job.addFile(fileOmDST) 
fileOTrafdic = FileSpec()
fileOTrafdic.lfn = "%s" % (TMPHISTFILE)
fileOTrafdic.destinationDBlock = job.destinationDBlock
fileOTrafdic.destinationSE = job.destinationSE
fileOTrafdic.dataset = job.destinationDBlock
fileOTrafdic.type = 'output'
job.addFile(fileOTrafdic)

fileOtestevtdump = FileSpec()
fileOtestevtdump.lfn = "testevtdump.raw"
fileOtestevtdump.destinationDBlock = job.destinationDBlock
fileOtestevtdump.destinationSE = job.destinationSE
fileOtestevtdump.dataset = job.destinationDBlock
fileOtestevtdump.type = 'output'
job.addFile(fileOtestevtdump)

s, o = Client.submitJobs([job], srvID=aSrvID)
logger.info(s)
logger.info(o)
# for x in o:
#     logger.info("PandaID=%s" % x[0])
#     today = datetime.datetime.today()
#
#     if x[0] != 0 and x[0] != 'NULL':
#         j_update = Job.objects.get(id=j.id)
#         j_update.panda_id = x[0]
#         j_update.status = 'sent'
#         j_update.attempt = j_update.attempt + 1
#         j_update.date_updated = today
#
#         try:
#             j_update.save()
#             logger.info('Job %s with PandaID %s updated at %s' % (j.id, x[0], today))
#         except IntegrityError as e:
#             logger.exception('Unique together catched, was not saved')
#         except DatabaseError as e:
#             logger.exception('Something went wrong while saving: %s' % e.message)
#     else:
#         logger.info('Job %s was not added to PanDA' % j.id)

i += 1

logger.info('done')
def prepare(self, app, appsubconfig, appmasterconfig, jobmasterconfig):
    '''prepare the subjob specific configuration'''

    from pandatools import Client
    from taskbuffer.JobSpec import JobSpec
    from taskbuffer.FileSpec import FileSpec

    job = app._getParent()
    logger.debug('AthenaPandaRTHandler prepare called for %s',
                 job.getFQID('.'))

    # in case of a simple job get the dataset content, otherwise subjobs are filled by the splitter
    if job.inputdata and not job._getRoot().subjobs:
        if not job.inputdata.names:
            contents = job.inputdata.get_contents(overlap=False, size=True)
            for ds in contents.keys():
                for f in contents[ds]:
                    job.inputdata.guids.append(f[0])
                    job.inputdata.names.append(f[1][0])
                    job.inputdata.sizes.append(f[1][1])
                    job.inputdata.checksums.append(f[1][2])
                    job.inputdata.scopes.append(f[1][3])

    site = job._getRoot().backend.site
    job.backend.site = site
    job.backend.actualCE = site
    cloud = job._getRoot().backend.requirements.cloud
    job.backend.requirements.cloud = cloud

    # if no outputdata are given
    if not job.outputdata:
        job.outputdata = DQ2OutputDataset()
        job.outputdata.datasetname = job._getRoot().outputdata.datasetname
    #if not job.outputdata.datasetname:
    else:
        job.outputdata.datasetname = job._getRoot().outputdata.datasetname

    if not job.outputdata.datasetname:
        raise ApplicationConfigurationError(
            None, 'DQ2OutputDataset has no datasetname')

    jspec = JobSpec()
    jspec.jobDefinitionID = job._getRoot().id
    jspec.jobName = commands.getoutput('uuidgen 2> /dev/null')
    jspec.transformation = '%s/runGen-00-00-02' % Client.baseURLSUB
    if job.inputdata:
        jspec.prodDBlock = job.inputdata.dataset[0]
    else:
        jspec.prodDBlock = 'NULL'
    jspec.destinationDBlock = job.outputdata.datasetname
    if job.outputdata.location:
        if not job._getRoot().subjobs or job.id == 0:
            logger.warning(
                'You have specified outputdata.location. Note that Panda may not support writing to a user-defined output location.'
            )
        jspec.destinationSE = job.outputdata.location
    else:
        jspec.destinationSE = site
    jspec.prodSourceLabel = configPanda['prodSourceLabelRun']
    jspec.processingType = configPanda['processingType']
    jspec.assignedPriority = configPanda['assignedPriorityRun']
    jspec.cloud = cloud
    # memory
    if job.backend.requirements.memory != -1:
        jspec.minRamCount = job.backend.requirements.memory
    # cputime
    if job.backend.requirements.cputime != -1:
        jspec.maxCpuCount = job.backend.requirements.cputime
    jspec.computingSite = site

    # library (source files)
    if job.backend.libds:
        flib = FileSpec()
        flib.lfn = self.fileBO.lfn
        flib.GUID = self.fileBO.GUID
        flib.type = 'input'
        flib.status = self.fileBO.status
        flib.dataset = self.fileBO.destinationDBlock
        flib.dispatchDBlock = self.fileBO.destinationDBlock
        jspec.addFile(flib)
    elif job.backend.bexec:
        flib = FileSpec()
        flib.lfn = self.library
        flib.type = 'input'
        flib.dataset = self.libDataset
        flib.dispatchDBlock = self.libDataset
        jspec.addFile(flib)

    # input files FIXME: many more input types
    if job.inputdata:
        for guid, lfn, size, checksum, scope in zip(
                job.inputdata.guids, job.inputdata.names, job.inputdata.sizes,
                job.inputdata.checksums, job.inputdata.scopes):
            finp = FileSpec()
            finp.lfn = lfn
            finp.GUID = guid
            finp.scope = scope
            # finp.fsize =
            # finp.md5sum =
            finp.dataset = job.inputdata.dataset[0]
            finp.prodDBlock = job.inputdata.dataset[0]
            finp.dispatchDBlock = job.inputdata.dataset[0]
            finp.type = 'input'
            finp.status = 'ready'
            jspec.addFile(finp)

    # output files
    # outMap = {}

    #FIXME: if options.outMeta != []:
    self.rundirectory = "."
    # log files
    flog = FileSpec()
    flog.lfn = '%s._$PANDAID.log.tgz' % job.outputdata.datasetname
    flog.type = 'log'
    flog.dataset = job.outputdata.datasetname
    flog.destinationDBlock = job.outputdata.datasetname
    flog.destinationSE = job.backend.site
    jspec.addFile(flog)

    # job parameters
    param = ''

    # source URL
    matchURL = re.search("(http.*://[^/]+)/", Client.baseURLCSRVSSL)
    srcURL = ""
    if matchURL != None:
        srcURL = matchURL.group(1)
        param += " --sourceURL %s " % srcURL

    param += '-r "%s" ' % self.rundirectory

    exe_name = job.application.exe
    if job.backend.bexec == '':
        if hasattr(job.application.exe, "name"):
            exe_name = os.path.basename(job.application.exe.name)

        # set jobO parameter
        if job.application.args:
            param += ' -j "" -p "%s %s" ' % (
                exe_name, urllib.quote(" ".join(job.application.args)))
        else:
            param += ' -j "" -p "%s" ' % exe_name

        if self.inputsandbox:
            param += ' -a %s ' % self.inputsandbox
    else:
        param += '-l %s ' % self.library
        param += '-j "" -p "%s %s" ' % (
            exe_name, urllib.quote(" ".join(job.application.args)))

    if job.inputdata:
        param += '-i "%s" ' % job.inputdata.names

    # fill outfiles
    outfiles = {}
    for f in self.extOutFile:
        tarnum = 1
        if f.find('*') != -1:
            # archive *
            outfiles[f] = "outputbox%i.%s.%s.tar.gz" % (
                tarnum, job.getFQID('.'), time.strftime("%Y%m%d%H%M%S"))
            tarnum += 1
        else:
            outfiles[f] = "%s.%s.%s" % (f, job.getFQID('.'),
                                        time.strftime("%Y%m%d%H%M%S"))

        fout = FileSpec()
        fout.lfn = outfiles[f]
        fout.type = 'output'
        fout.dataset = job.outputdata.datasetname
        fout.destinationDBlock = job.outputdata.datasetname
        fout.destinationSE = job.backend.site
        jspec.addFile(fout)

    param += '-o "%s" ' % (
        outfiles
    )  # must be double quotes, because python prints strings in 'single quotes'

    for file in jspec.Files:
        if file.type in ['output', 'log'] and configPanda['chirpconfig']:
            file.dispatchDBlockToken = configPanda['chirpconfig']
            logger.debug('chirp file %s', file)

    jspec.jobParameters = param

    return jspec
#job.homepackage = 'AtlasProduction/20.20.8.4'
job.transformation = 'Reco_tf.py'
job.destinationDBlock = 'panda.destDB.%s' % commands.getoutput('uuidgen')
job.destinationSE = 'AGLT2_TEST'
job.prodDBlock = 'user.mlassnig:user.mlassnig.pilot.test.single.hits'
job.currentPriority = 1000
#job.prodSourceLabel = 'ptest'
job.prodSourceLabel = 'user'
job.computingSite = site
job.cloud = cloud
job.cmtConfig = 'x86_64-slc6-gcc48-opt'
job.specialHandling = 'ddm:rucio'
#job.transferType = 'direct'

ifile = 'HITS.06828093._000096.pool.root.1'
fileI = FileSpec()
fileI.GUID = 'AC5B3759-B606-BA42-8681-4BD86455AE02'
fileI.checksum = 'ad:5d000974'
fileI.dataset = 'user.mlassnig:user.mlassnig.pilot.test.single.hits'
fileI.fsize = 94834717
fileI.lfn = ifile
fileI.prodDBlock = job.prodDBlock
fileI.scope = 'mc15_13TeV'
fileI.type = 'input'
job.addFile(fileI)

ofile = 'RDO_%s.root' % commands.getoutput('uuidgen')
fileO = FileSpec()
fileO.dataset = job.destinationDBlock
fileO.destinationDBlock = job.destinationDBlock
fileO.destinationSE = job.destinationSE
job.jobName = "%s_%d" % (commands.getoutput('uuidgen'), index) job.AtlasRelease = 'Atlas-14.1.0\nAtlas-14.1.0' job.homepackage = 'AtlasProduction/14.1.0.3\nAtlasProduction/14.1.0.3' job.transformation = 'csc_digi_trf.py\ncsc_reco_trf.py' job.destinationDBlock = datasetName job.computingSite = site job.prodDBlock = 'valid1.005200.T1_McAtNlo_Jimmy.simul.HITS.e322_s429_tid022081' job.prodSourceLabel = 'test' job.currentPriority = 10000 job.cloud = 'US' for lfn in ['HITS.022081._00001.pool.root', 'HITS.022081._00002.pool.root']: fileI = FileSpec() fileI.dataset = job.prodDBlock fileI.prodDBlock = job.prodDBlock fileI.lfn = lfn fileI.type = 'input' job.addFile(fileI) fileD1 = FileSpec() fileD1.dataset = 'ddo.000001.Atlas.Ideal.DBRelease.v050001' fileD1.prodDBlock = fileD1.dataset fileD1.lfn = 'DBRelease-5.0.1.tar.gz' fileD1.type = 'input' job.addFile(fileD1) fileD2 = FileSpec() fileD2.dataset = 'ddo.000001.Atlas.Ideal.DBRelease.v050101'
def main():
    logger.info('Getting tasks with status send and running')
    # tasks_list = Task.objects.all().filter(Q(status='send') | Q(status='running'))
    tasks_list = Task.objects.all().filter(name='TestTaskBW')
    logger.info('Got list of %s tasks' % len(tasks_list))

    for t in tasks_list:
        logger.info('Getting jobs in status defined or failed for task %s' % t)
        jobs_list_count = Job.objects.all().filter(task=t).count()
        if jobs_list_count > 50:
            jobs_list = Job.objects.all().filter(
                task=t).order_by('id')[:max_send_amount]
        else:
            jobs_list = Job.objects.all().filter(
                task=t).order_by('id')[:jobs_list_count]
        logger.info('Got list of %s jobs' % len(jobs_list))

        i = 0
        for j in jobs_list:
            if i >= max_send_amount:
                break

            logger.info('Going to send job %s of %s task' %
                        (j.file, j.task.name))

            umark = commands.getoutput('uuidgen')
            datasetName = 'panda.destDB.%s' % umark
            destName = 'local'  # PanDA will not try to move output data, data will be placed by pilot (based on schedconfig)

            job = JobSpec()
            job.taskID = j.task.id
            job.jobDefinitionID = 0
            job.jobName = 'hello world'
            job.transformation = j.task.type  # payload (can be URL as well)
            job.destinationDBlock = datasetName
            job.destinationSE = destName
            job.currentPriority = 2000
            job.prodSourceLabel = 'test'
            job.computingSite = site
            job.attemptNr = 1
            job.maxAttempt = 5
            job.sourceSite = 'BW_COMPASS_MCORE'
            job.VO = 'local'

            # logs, and all files generated during execution will be placed in log (except output file)
            job.jobParameters = 'python /u/sciteam/petrosya/panda/hello.py'

            fileOLog = FileSpec()
            fileOLog.lfn = "log.job.log.tgz"
            fileOLog.destinationDBlock = job.destinationDBlock
            fileOLog.destinationSE = job.destinationSE
            fileOLog.dataset = job.destinationDBlock
            fileOLog.type = 'log'
            job.addFile(fileOLog)

            s, o = Client.submitJobs([job], srvID=aSrvID)
            logger.info(s)
            logger.info(o)
            # for x in o:
            #     logger.info("PandaID=%s" % x[0])
            #     today = datetime.datetime.today()
            #
            #     if x[0] != 0 and x[0] != 'NULL':
            #         j_update = Job.objects.get(id=j.id)
            #         j_update.panda_id = x[0]
            #         j_update.status = 'sent'
            #         j_update.attempt = j_update.attempt + 1
            #         j_update.date_updated = today
            #
            #         try:
            #             j_update.save()
            #             logger.info('Job %s with PandaID %s updated at %s' % (j.id, x[0], today))
            #         except IntegrityError as e:
            #             logger.exception('Unique together catched, was not saved')
            #         except DatabaseError as e:
            #             logger.exception('Something went wrong while saving: %s' % e.message)
            #     else:
            #         logger.info('Job %s was not added to PanDA' % j.id)

            i += 1

    logger.info('done')
def send_merging_job(task, files_list, merge_chunk_number): logger.info( 'Going to send merging job for task %s run number %s and merge chunk number %s' % (task, files_list[0].run_number, merge_chunk_number)) input_files_copy = '' input_files_rm = '' for j in files_list: TMPDUMPFILE = 'evtdump%(prodSlt)s-%(runChunk)s-%(runNumber)s.raw' % { 'runNumber': j.run_number, 'runChunk': j.chunk_number, 'prodSlt': j.task.prodslt } if j.task.site == 'BW_COMPASS_MCORE': input_files_copy += ' cp $dumpspath/' + TMPDUMPFILE + ' .;' else: input_files_copy += ' xrdcp -N -f $dumpspath/' + TMPDUMPFILE + ' .;' datasetName = '%(prodNameOnly)s.%(runNumber)s-%(prodSlt)s-%(phastVer)s-merging-dump' % { 'prodNameOnly': task.production, 'runNumber': j.run_number, 'prodSlt': task.prodslt, 'phastVer': task.phastver } logger.info(datasetName) destName = 'local' # PanDA will not try to move output data, data will be placed by pilot (based on schedconfig) MERGEDDUMPFILE = 'evtdump%(prodSlt)s-%(runNumber)s.raw' % { 'runNumber': j.run_number, 'prodSlt': task.prodslt } if format(merge_chunk_number, '03d') != '000': MERGEDDUMPFILE = MERGEDDUMPFILE + '.' + format(merge_chunk_number, '03d') logger.info(MERGEDDUMPFILE) PRODSOFT = task.soft ProdPathAndName = task.home + task.path + task.soft if j.task.site == 'BW_COMPASS_MCORE': dumpsPath = '/scratch/sciteam/criedl/projectdata/' + task.path + task.soft + '/evtdump/slot' + str( task.prodslt) else: dumpsPath = 'root://eoscompass.cern.ch//eos/experiment/compass/' + task.path + task.soft + '/evtdump/slot' + str( task.prodslt) job = JobSpec() job.VO = 'vo.compass.cern.ch' job.taskID = task.id job.jobDefinitionID = 0 job.jobName = '%(prodNameOnly)s-merge-dump-%(runNumber)s-ch%(mergeChunkNumber)s' % { 'prodNameOnly': task.production, 'runNumber': j.run_number, 'mergeChunkNumber': format(merge_chunk_number, '03d') } job.transformation = 'merging dump' # payload (can be URL as well) job.destinationDBlock = datasetName job.destinationSE = destName job.currentPriority = 5000 job.prodSourceLabel = 'prod_test' job.computingSite = task.site job.attemptNr = j.attempt_merging_evntdmp + 1 job.maxAttempt = j.task.max_attempts if j.status_merging_evntdmp == 'failed': job.parentID = j.panda_id_merging_evntdmp if j.task.site == 'BW_COMPASS_MCORE': job.jobParameters = 'ppwd=$(pwd);ppwd=$(pwd);export COMPASS_SW_PREFIX=/scratch/sciteam/criedl/projectdata/;export COMPASS_SW_PATH=%(prodPath)s;export COMPASS_PROD_NAME=%(prodName)s;export prodSlt=%(prodSlt)s;export MERGEDDUMPFILE=%(MERGEDDUMPFILE)s;export dumpspath=%(dumpsPath)s;export PRODSOFT=%(PRODSOFT)s;%(input_files_copy)scat evtdump%(prodSlt)s-*-*.raw > %(MERGEDDUMPFILE)s;rm evtdump%(prodSlt)s-*-*.raw;' % { 'MERGEDDUMPFILE': MERGEDDUMPFILE, 'dumpsPath': dumpsPath, 'PRODSOFT': PRODSOFT, 'input_files_copy': input_files_copy, 'ProdPathAndName': ProdPathAndName, 'prodPath': task.path, 'prodName': task.production, 'prodSlt': task.prodslt } else: job.jobParameters = 'export EOS_MGM_URL=root://eoscompass.cern.ch;ppwd=$(pwd);ppwd=$(pwd);export COMPASS_SW_PREFIX=/eos/experiment/compass/;export COMPASS_SW_PATH=%(prodPath)s;export COMPASS_PROD_NAME=%(prodName)s;export prodSlt=%(prodSlt)s;export MERGEDDUMPFILE=%(MERGEDDUMPFILE)s;export dumpspath=%(dumpsPath)s;export PRODSOFT=%(PRODSOFT)s;%(input_files_copy)scat evtdump%(prodSlt)s-*-*.raw > %(MERGEDDUMPFILE)s;rm evtdump%(prodSlt)s-*-*.raw;' % { 'MERGEDDUMPFILE': MERGEDDUMPFILE, 'dumpsPath': dumpsPath, 'PRODSOFT': PRODSOFT, 'input_files_copy': input_files_copy, 'ProdPathAndName': ProdPathAndName, 'prodPath': 
            task.path, 'prodName': task.production, 'prodSlt': task.prodslt
        }

    fileOLog = FileSpec()
    fileOLog.lfn = "%s.job.log.tgz" % (job.jobName)
    fileOLog.destinationDBlock = job.destinationDBlock
    fileOLog.destinationSE = job.destinationSE
    fileOLog.dataset = job.destinationDBlock
    fileOLog.type = 'log'
    job.addFile(fileOLog)

    fileOdump = FileSpec()
    fileOdump.lfn = "%s" % (MERGEDDUMPFILE)
    fileOdump.destinationDBlock = job.destinationDBlock
    fileOdump.destinationSE = job.destinationSE
    fileOdump.dataset = job.destinationDBlock
    fileOdump.type = 'output'
    job.addFile(fileOdump)

    # fileOstdout = FileSpec()
    # fileOstdout.lfn = "payload_stdout.txt"
    # fileOstdout.destinationDBlock = job.destinationDBlock
    # fileOstdout.destinationSE = job.destinationSE
    # fileOstdout.dataset = job.destinationDBlock
    # fileOstdout.type = 'output'
    # job.addFile(fileOstdout)

    # fileOstderr = FileSpec()
    # fileOstderr.lfn = "payload_stderr.txt"
    # fileOstderr.destinationDBlock = job.destinationDBlock
    # fileOstderr.destinationSE = job.destinationSE
    # fileOstderr.dataset = job.destinationDBlock
    # fileOstderr.type = 'output'
    # job.addFile(fileOstderr)

    s, o = Client.submitJobs([job], srvID=aSrvID)
    logger.info(s)
    for x in o:
        logger.info("PandaID=%s" % x[0])
        if x[0] != 0 and x[0] != 'NULL':
            for j in files_list:
                j_update = Job.objects.get(id=j.id)
                j_update.panda_id_merging_evntdmp = x[0]
                j_update.status_merging_evntdmp = 'sent'
                j_update.attempt_merging_evntdmp = j_update.attempt_merging_evntdmp + 1
                j_update.chunk_number_merging_evntdmp = merge_chunk_number
                j_update.date_updated = today
                try:
                    j_update.save()
                    logger.info('Job %s with PandaID %s updated' % (j.id, x[0]))
                except IntegrityError as e:
                    logger.exception('Unique together catched, was not saved')
                except DatabaseError as e:
                    logger.exception('Something went wrong while saving: %s' %
                                     e.message)
        else:
            logger.info('Job %s was not added to PanDA' % j.id)
    'tail': tail,
    'prodSlt': j.task.prodSlt,
    'EVTDUMPFILE': EVTDUMPFILE,
    'STDOUTFILE': STDOUTFILE,
    'STDERRFILE': STDERRFILE
}

# fileIRaw = FileSpec()
# fileIRaw.lfn = "%s" % (input_file)
# fileIRaw.destinationDBlock = job.destinationDBlock
# fileIRaw.destinationSE = job.destinationSE
# fileIRaw.dataset = job.destinationDBlock
# fileIRaw.type = 'input'
# job.addFile(fileIRaw)

fileOstdout = FileSpec()
fileOstdout.lfn = "payload_stdout.txt"
fileOstdout.destinationDBlock = job.destinationDBlock
fileOstdout.destinationSE = job.destinationSE
fileOstdout.dataset = job.destinationDBlock
fileOstdout.type = 'output'
job.addFile(fileOstdout)

fileOstderr = FileSpec()
fileOstderr.lfn = "payload_stderr.txt"
fileOstderr.destinationDBlock = job.destinationDBlock
fileOstderr.destinationSE = job.destinationSE
fileOstderr.dataset = job.destinationDBlock
fileOstderr.type = 'output'
job.addFile(fileOstderr)
def send_job(jobid, siteid):
    _logger.debug('Jobid: ' + str(jobid))

    site = sites_.get(siteid)

    job = jobs_.get(int(jobid))
    cont = job.container
    files_catalog = cont.files

    fscope = getScope(job.owner.username)
    datasetName = '{}:{}'.format(fscope, cont.guid)

    distributive = job.distr.name
    release = job.distr.release

    # Prepare runScript
    parameters = job.distr.command
    parameters = parameters.replace("$COMMAND$", job.params)
    parameters = parameters.replace("$USERNAME$", job.owner.username)
    parameters = parameters.replace("$WORKINGGROUP$", job.owner.working_group)

    # Prepare metadata
    metadata = dict(user=job.owner.username)

    # Prepare PanDA Object
    pandajob = JobSpec()
    pandajob.jobDefinitionID = int(time.time()) % 10000
    pandajob.jobName = cont.guid
    pandajob.transformation = client_config.DEFAULT_TRF
    pandajob.destinationDBlock = datasetName
    pandajob.destinationSE = site.se
    pandajob.currentPriority = 1000
    pandajob.prodSourceLabel = 'user'
    pandajob.computingSite = site.ce
    pandajob.cloud = 'RU'
    pandajob.VO = 'atlas'
    pandajob.prodDBlock = "%s:%s" % (fscope, pandajob.jobName)
    pandajob.coreCount = job.corecount
    pandajob.metadata = json.dumps(metadata)
    #pandajob.workingGroup = job.owner.working_group

    if site.encode_commands:
        # It requires script wrapper on cluster side
        pandajob.jobParameters = '%s %s %s "%s"' % (cont.guid, release,
                                                    distributive, parameters)
    else:
        pandajob.jobParameters = parameters

    has_input = False
    for fcc in files_catalog:
        if fcc.type == 'input':
            f = fcc.file
            guid = f.guid
            fileIT = FileSpec()
            fileIT.lfn = f.lfn
            fileIT.dataset = pandajob.prodDBlock
            fileIT.prodDBlock = pandajob.prodDBlock
            fileIT.type = 'input'
            fileIT.scope = fscope
            fileIT.status = 'ready'
            fileIT.GUID = guid
            pandajob.addFile(fileIT)
            has_input = True
        if fcc.type == 'output':
            f = fcc.file
            fileOT = FileSpec()
            fileOT.lfn = f.lfn
            fileOT.destinationDBlock = pandajob.prodDBlock
            fileOT.destinationSE = pandajob.destinationSE
            fileOT.dataset = pandajob.prodDBlock
            fileOT.type = 'output'
            fileOT.scope = fscope
            fileOT.GUID = f.guid
            pandajob.addFile(fileOT)

            # Save replica meta
            fc.new_replica(f, site)

    if not has_input:
        # Add fake input
        fileIT = FileSpec()
        fileIT.lfn = "fake.input"
        fileIT.dataset = pandajob.prodDBlock
        fileIT.prodDBlock = pandajob.prodDBlock
        fileIT.type = 'input'
        fileIT.scope = fscope
        fileIT.status = 'ready'
        fileIT.GUID = "fake.guid"
        pandajob.addFile(fileIT)

    # Prepare log file
    fileOL = FileSpec()
    fileOL.lfn = "%s.log.tgz" % pandajob.jobName
    fileOL.destinationDBlock = pandajob.destinationDBlock
    fileOL.destinationSE = pandajob.destinationSE
    fileOL.dataset = '{}:logs'.format(fscope)
    fileOL.type = 'log'
    fileOL.scope = 'panda'
    pandajob.addFile(fileOL)

    # Save log meta
    log = File()
    log.scope = fscope
    log.lfn = fileOL.lfn
    log.guid = getGUID(log.scope, log.lfn)
    log.type = 'log'
    log.status = 'defined'
    files_.save(log)

    # Save replica meta
    fc.new_replica(log, site)

    # Register file in container
    fc.reg_file_in_cont(log, cont, 'log')

    # Submit job
    o = submitJobs([pandajob])
    x = o[0]
    try:
        # update PandaID
        PandaID = int(x[0])
        job.pandaid = PandaID
        job.ce = site.ce
    except:
        job.status = 'submit_error'
    jobs_.save(job)
    return 0
def prepare(self, app, appsubconfig, appmasterconfig, jobmasterconfig): """Prepare the specific aspec of each subjob. Returns: subjobconfig list of objects understood by backends.""" from pandatools import Client from pandatools import AthenaUtils from taskbuffer.JobSpec import JobSpec from taskbuffer.FileSpec import FileSpec from GangaAtlas.Lib.ATLASDataset.DQ2Dataset import dq2_set_dataset_lifetime from GangaPanda.Lib.Panda.Panda import refreshPandaSpecs # make sure we have the correct siteType refreshPandaSpecs() job = app._getParent() masterjob = job._getRoot() logger.debug('ProdTransPandaRTHandler prepare called for %s', job.getFQID('.')) job.backend.actualCE = job.backend.site job.backend.requirements.cloud = Client.PandaSites[ job.backend.site]['cloud'] # check that the site is in a submit-able status if not job.splitter or job.splitter._name != 'DQ2JobSplitter': allowed_sites = job.backend.list_ddm_sites() try: outDsLocation = Client.PandaSites[job.backend.site]['ddm'] tmpDsExist = False if (configPanda['processingType'].startswith('gangarobot') or configPanda['processingType'].startswith('hammercloud')): #if Client.getDatasets(job.outputdata.datasetname): if getDatasets(job.outputdata.datasetname): tmpDsExist = True logger.info('Re-using output dataset %s' % job.outputdata.datasetname) if not configPanda[ 'specialHandling'] == 'ddm:rucio' and not configPanda[ 'processingType'].startswith( 'gangarobot' ) and not configPanda['processingType'].startswith( 'hammercloud') and not configPanda[ 'processingType'].startswith('rucio_test'): Client.addDataset(job.outputdata.datasetname, False, location=outDsLocation, allowProdDisk=True, dsExist=tmpDsExist) logger.info('Output dataset %s registered at %s' % (job.outputdata.datasetname, outDsLocation)) dq2_set_dataset_lifetime(job.outputdata.datasetname, outDsLocation) except exceptions.SystemExit: raise BackendError( 'Panda', 'Exception in adding dataset %s: %s %s' % (job.outputdata.datasetname, sys.exc_info()[0], sys.exc_info()[1])) # JobSpec. jspec = JobSpec() jspec.currentPriority = app.priority jspec.jobDefinitionID = masterjob.id jspec.jobName = commands.getoutput('uuidgen 2> /dev/null') jspec.coreCount = app.core_count jspec.AtlasRelease = 'Atlas-%s' % app.atlas_release jspec.homepackage = app.home_package jspec.transformation = app.transformation jspec.destinationDBlock = job.outputdata.datasetname if job.outputdata.location: jspec.destinationSE = job.outputdata.location else: jspec.destinationSE = job.backend.site if job.inputdata: jspec.prodDBlock = job.inputdata.dataset[0] else: jspec.prodDBlock = 'NULL' if app.prod_source_label: jspec.prodSourceLabel = app.prod_source_label else: jspec.prodSourceLabel = configPanda['prodSourceLabelRun'] jspec.processingType = configPanda['processingType'] jspec.specialHandling = configPanda['specialHandling'] jspec.computingSite = job.backend.site jspec.cloud = job.backend.requirements.cloud jspec.cmtConfig = app.atlas_cmtconfig if app.dbrelease == 'LATEST': try: latest_dbrelease = getLatestDBReleaseCaching() except: from pandatools import Client latest_dbrelease = Client.getLatestDBRelease() m = re.search('(.*):DBRelease-(.*)\.tar\.gz', latest_dbrelease) if m: self.dbrelease_dataset = m.group(1) self.dbrelease = m.group(2) else: raise ApplicationConfigurationError( None, "Error retrieving LATEST DBRelease. Try setting application.dbrelease manually." 
) else: self.dbrelease_dataset = app.dbrelease_dataset self.dbrelease = app.dbrelease jspec.jobParameters = app.job_parameters if self.dbrelease: if self.dbrelease == 'current': jspec.jobParameters += ' --DBRelease=current' else: if jspec.transformation.endswith( "_tf.py") or jspec.transformation.endswith("_tf"): jspec.jobParameters += ' --DBRelease=DBRelease-%s.tar.gz' % ( self.dbrelease, ) else: jspec.jobParameters += ' DBRelease=DBRelease-%s.tar.gz' % ( self.dbrelease, ) dbspec = FileSpec() dbspec.lfn = 'DBRelease-%s.tar.gz' % self.dbrelease dbspec.dataset = self.dbrelease_dataset dbspec.prodDBlock = jspec.prodDBlock dbspec.type = 'input' jspec.addFile(dbspec) if job.inputdata: m = re.search('(.*)\.(.*)\.(.*)\.(.*)\.(.*)\.(.*)', job.inputdata.dataset[0]) if not m: logger.error("Error retrieving run number from dataset name") #raise ApplicationConfigurationError(None, "Error retrieving run number from dataset name") runnumber = 105200 else: runnumber = int(m.group(2)) if jspec.transformation.endswith( "_tf.py") or jspec.transformation.endswith("_tf"): jspec.jobParameters += ' --runNumber %d' % runnumber else: jspec.jobParameters += ' RunNumber=%d' % runnumber # Output files. randomized_lfns = [] ilfn = 0 for lfn, lfntype in zip(app.output_files, app.output_type): ofspec = FileSpec() if app.randomize_lfns: randomized_lfn = lfn + ( '.%s.%d.%s' % (job.backend.site, int(time.time()), commands.getoutput('uuidgen 2> /dev/null')[:4])) else: randomized_lfn = lfn ofspec.lfn = randomized_lfn randomized_lfns.append(randomized_lfn) ofspec.destinationDBlock = jspec.destinationDBlock ofspec.destinationSE = jspec.destinationSE ofspec.dataset = jspec.destinationDBlock ofspec.type = 'output' jspec.addFile(ofspec) if jspec.transformation.endswith( "_tf.py") or jspec.transformation.endswith("_tf"): jspec.jobParameters += ' --output%sFile %s' % ( lfntype, randomized_lfns[ilfn]) else: jspec.jobParameters += ' output%sFile=%s' % ( lfntype, randomized_lfns[ilfn]) ilfn = ilfn + 1 # Input files. if job.inputdata: for guid, lfn, size, checksum, scope in zip( job.inputdata.guids, job.inputdata.names, job.inputdata.sizes, job.inputdata.checksums, job.inputdata.scopes): ifspec = FileSpec() ifspec.lfn = lfn ifspec.GUID = guid ifspec.fsize = size ifspec.md5sum = checksum ifspec.scope = scope ifspec.dataset = jspec.prodDBlock ifspec.prodDBlock = jspec.prodDBlock ifspec.type = 'input' jspec.addFile(ifspec) if app.input_type: itype = app.input_type else: itype = m.group(5) if jspec.transformation.endswith( "_tf.py") or jspec.transformation.endswith("_tf"): jspec.jobParameters += ' --input%sFile %s' % (itype, ','.join( job.inputdata.names)) else: jspec.jobParameters += ' input%sFile=%s' % (itype, ','.join( job.inputdata.names)) # Log files. lfspec = FileSpec() lfspec.lfn = '%s.job.log.tgz' % jspec.jobName lfspec.destinationDBlock = jspec.destinationDBlock lfspec.destinationSE = jspec.destinationSE lfspec.dataset = jspec.destinationDBlock lfspec.type = 'log' jspec.addFile(lfspec) return jspec
def main(): logger.info('Getting tasks with status send and running') tasks_list = Task.objects.all().filter( Q(status='send') | Q(status='running')) #tasks_list = Task.objects.all().filter(name='dvcs2017align7_mu-') logger.info('Got list of %s tasks' % len(tasks_list)) cdbServerArr = ['compassvm23.cern.ch', 'compassvm24.cern.ch'] cdbServer = cdbServerArr[0] for t in tasks_list: max_send_amount = 1000 logger.info('Getting jobs in status staged or failed for task %s' % t) jobs_list_count = Job.objects.all().filter(task=t).filter( attempt__lt=t.max_attempts).filter( Q(status='staged') | Q(status='failed')).count() if jobs_list_count > 50: jobs_list = Job.objects.all().filter(task=t).filter( attempt__lt=t.max_attempts).filter( Q(status='staged') | Q(status='failed')).order_by( '-number_of_events')[:max_send_amount] else: jobs_list = Job.objects.all().filter(task=t).filter( attempt__lt=t.max_attempts).filter( Q(status='staged') | Q(status='failed')).order_by( '-number_of_events')[:jobs_list_count] logger.info('Got list of %s jobs' % len(jobs_list)) # jobs_list = Job.objects.all().filter(task=t).filter(file='/castor/cern.ch/compass/data/2017/raw/W04/cdr12116-278485.raw') i = 0 for j in jobs_list: if j.attempt >= j.task.max_attempts: logger.info( 'Number of retry attempts has reached for job %s of task %s' % (j.file, j.task.name)) continue if i > max_send_amount: break logger.info('Job %s of %s' % (i, max_send_amount)) logger.info('Going to send job %s of %s task' % (j.file, j.task.name)) umark = commands.getoutput('uuidgen') datasetName = 'panda.destDB.%s' % umark destName = 'local' # PanDA will not try to move output data, data will be placed by pilot (based on schedconfig) TMPRAWFILE = j.file[j.file.rfind('/') + 1:] logger.info(TMPRAWFILE) TMPMDSTFILE = 'mDST-%(runNumber)s-%(runChunk)s-%(prodSlt)s-%(phastVer)s.root' % { 'input_file': j.file, 'runNumber': j.run_number, 'runChunk': j.chunk_number, 'prodSlt': j.task.prodslt, 'phastVer': j.task.phastver } logger.info(TMPMDSTFILE) TMPHISTFILE = '%(runNumber)s-%(runChunk)s-%(prodSlt)s.root' % { 'runNumber': j.run_number, 'runChunk': j.chunk_number, 'prodSlt': j.task.prodslt } logger.info(TMPHISTFILE) TMPRICHFILE = 'gfile_%(runNumber)s-%(runChunk)s.gfile' % { 'runNumber': j.run_number, 'runChunk': j.chunk_number } logger.info(TMPRICHFILE) EVTDUMPFILE = 'evtdump%(prodSlt)s-%(runChunk)s-%(runNumber)s.raw' % { 'prodSlt': j.task.prodslt, 'runNumber': j.run_number, 'runChunk': j.chunk_number } logger.info(EVTDUMPFILE) STDOUTFILE = '%(prodNameOnly)s.%(runNumber)s-%(runChunk)s-%(prodSlt)s.stdout' % { 'prodNameOnly': j.task.production, 'runNumber': j.run_number, 'runChunk': j.chunk_number, 'prodSlt': j.task.prodslt } logger.info(STDOUTFILE) STDERRFILE = '%(prodNameOnly)s.%(runNumber)s-%(runChunk)s-%(prodSlt)s.stderr' % { 'prodNameOnly': j.task.production, 'runNumber': j.run_number, 'runChunk': j.chunk_number, 'prodSlt': j.task.prodslt } logger.info(STDERRFILE) PRODSOFT = j.task.soft logger.info(PRODSOFT) ProdPathAndName = j.task.home + j.task.path + j.task.soft job = JobSpec() job.VO = 'vo.compass.cern.ch' job.taskID = j.task.id job.jobDefinitionID = 0 job.jobName = '%(prodName)s-%(fileYear)s--%(runNumber)s-%(runChunk)s-%(prodSlt)s-%(phastVer)s' % { 'prodName': j.task.production, 'fileYear': j.task.year, 'runNumber': j.run_number, 'runChunk': j.chunk_number, 'prodSlt': j.task.prodslt, 'phastVer': j.task.phastver } job.transformation = j.task.type # payload (can be URL as well) job.destinationDBlock = datasetName job.destinationSE = destName 
job.currentPriority = 2000 if j.task.type == 'DDD filtering': job.currentPriority = 1000 job.prodSourceLabel = 'prod_test' job.computingSite = j.task.site job.attemptNr = j.attempt + 1 job.maxAttempt = j.task.max_attempts if j.status == 'failed': job.parentID = j.panda_id head, tail = os.path.split(j.file) cdbServer = cdbServerArr[random.randrange(len(cdbServerArr))] # logs, and all files generated during execution will be placed in log (except output file) #job.jobParameters='source /afs/cern.ch/project/eos/installation/compass/etc/setup.sh;export EOS_MGM_URL=root://eoscompass.cern.ch;export PATH=/afs/cern.ch/project/eos/installation/compass/bin:$PATH;ppwd=$(pwd);echo $ppwd;export TMPMDSTFILE=%(TMPMDSTFILE)s;export TMPHISTFILE=%(TMPHISTFILE)s;export TMPRICHFILE=%(TMPRICHFILE)s;coralpath=%(ProdPathAndName)s/coral;echo $coralpath;cd -P $coralpath;export coralpathsetup=$coralpath"/setup.sh";echo $coralpathsetup;source $coralpathsetup;cd $ppwd;$CORAL/../phast/coral/coral.exe %(ProdPathAndName)s/template.opt;xrdcp -np $ppwd/%(TMPMDSTFILE)s xroot://eoscompass.cern.ch//eos/compass/%(prodName)s/mDST/%(TMPMDSTFILE)s;xrdcp -np $ppwd/%(TMPHISTFILE)s xroot://eoscompass.cern.ch//eos/compass/%(prodName)s/histos/%(TMPHISTFILE)s;metadataxml=$(ls metadata-*);echo $metadataxml;cp $metadataxml $metadataxml.PAYLOAD;' % {'TMPMDSTFILE': TMPMDSTFILE, 'TMPHISTFILE': TMPHISTFILE, 'TMPRICHFILE': TMPRICHFILE, 'input_file': input_file, 'ProdPathAndName': ProdPathAndName, 'prodName': prodName} if j.task.type == 'test production' or j.task.type == 'mass production' or j.task.type == 'technical production': if j.task.site == 'BW_COMPASS_MCORE': job.jobParameters = 'ppwd=$(pwd);export COMPASS_SW_PREFIX=/scratch/sciteam/criedl/projectdata/;export COMPASS_SW_PATH=%(prodPath)s;export COMPASS_PROD_NAME=%(prodName)s;export TMPRAWFILE=%(TMPRAWFILE)s;export TMPMDSTFILE=%(TMPMDSTFILE)s;export TMPHISTFILE=%(TMPHISTFILE)s;export TMPRICHFILE=%(TMPRICHFILE)s;export prodSlt=%(prodSlt)s;export EVTDUMPFILE=%(EVTDUMPFILE)s;export PRODSOFT=%(PRODSOFT)s;cp %(input_file)s .;coralpath=%(ProdPathAndName)s/coral;cd -P $coralpath;export coralpathsetup=$coralpath"/setup.sh";source $coralpathsetup;cd $ppwd;$CORAL/../phast/coral/coral.exe %(ProdPathAndName)s/%(template)s;if [ ! 
-s testevtdump.raw ]; then echo "PanDA message: the file is empty">testevtdump.raw; fi;cp payload_stderr.txt payload_stderr.out;cp payload_stdout.txt payload_stdout.out;gzip payload_stderr.out;gzip payload_stdout.out;rm %(tail)s' % { 'TMPRAWFILE': TMPRAWFILE, 'TMPMDSTFILE': TMPMDSTFILE, 'TMPHISTFILE': TMPHISTFILE, 'TMPRICHFILE': TMPRICHFILE, 'PRODSOFT': PRODSOFT, 'input_file': j.file, 'ProdPathAndName': ProdPathAndName, 'prodPath': j.task.path, 'prodName': j.task.production, 'template': j.task.template, 'tail': tail, 'prodSlt': j.task.prodslt, 'EVTDUMPFILE': EVTDUMPFILE, 'STDOUTFILE': STDOUTFILE, 'STDERRFILE': STDERRFILE } else: job.jobParameters = 'export EOS_MGM_URL=root://eoscompass.cern.ch;ppwd=$(pwd);export COMPASS_SW_PREFIX=/eos/experiment/compass/;export COMPASS_SW_PATH=%(prodPath)s;export COMPASS_PROD_NAME=%(prodName)s;export TMPRAWFILE=%(TMPRAWFILE)s;export TMPMDSTFILE=%(TMPMDSTFILE)s;export TMPHISTFILE=%(TMPHISTFILE)s;export TMPRICHFILE=%(TMPRICHFILE)s;export prodSlt=%(prodSlt)s;export EVTDUMPFILE=%(EVTDUMPFILE)s;export PRODSOFT=%(PRODSOFT)s;xrdcp -N -f root://castorpublic.cern.ch/%(input_file)s\?svcClass=compasscdr .;coralpath=%(ProdPathAndName)s/coral;cd -P $coralpath;export coralpathsetup=$coralpath"/setup.sh";source $coralpathsetup;cd $ppwd;export CDBSERVER=%(cdbServer)s;$CORAL/../phast/coral/coral.exe %(ProdPathAndName)s/%(template)s;if [ ! -s testevtdump.raw ]; then echo "PanDA message: the file is empty">testevtdump.raw; fi;cp payload_stderr.txt payload_stderr.out;cp payload_stdout.txt payload_stdout.out;gzip payload_stderr.out;gzip payload_stdout.out;rm %(tail)s' % { 'TMPRAWFILE': TMPRAWFILE, 'TMPMDSTFILE': TMPMDSTFILE, 'TMPHISTFILE': TMPHISTFILE, 'TMPRICHFILE': TMPRICHFILE, 'PRODSOFT': PRODSOFT, 'input_file': j.file, 'ProdPathAndName': ProdPathAndName, 'prodPath': j.task.path, 'prodName': j.task.production, 'template': j.task.template, 'tail': tail, 'prodSlt': j.task.prodslt, 'EVTDUMPFILE': EVTDUMPFILE, 'STDOUTFILE': STDOUTFILE, 'STDERRFILE': STDERRFILE, 'cdbServer': cdbServer } if j.task.type == 'DDD filtering': job.jobParameters = 'export EOS_MGM_URL=root://eoscompass.cern.ch;ppwd=$(pwd);export COMPASS_SW_PREFIX=/eos/experiment/compass/;export COMPASS_SW_PATH=%(prodPath)s;export COMPASS_PROD_NAME=%(prodName)s;export TMPRAWFILE=%(TMPRAWFILE)s;export TMPMDSTFILE=%(TMPMDSTFILE)s;export TMPHISTFILE=%(TMPHISTFILE)s;export TMPRICHFILE=%(TMPRICHFILE)s;export prodSlt=%(prodSlt)s;export EVTDUMPFILE=%(EVTDUMPFILE)s;export PRODSOFT=%(PRODSOFT)s;xrdcp -N -f root://castorpublic.cern.ch/%(input_file)s\?svcClass=compasscdr .;coralpath=%(ProdPathAndName)s/coral;cd -P $coralpath;export coralpathsetup=$coralpath"/setup.sh";source $coralpathsetup;cd $ppwd;$CORAL/src/DaqDataDecoding/examples/how-to/ddd --filter-CAL --out=testevtdump.raw %(TMPRAWFILE)s;if [ ! 
-s testevtdump.raw ]; then echo "PanDA message: the file is empty">testevtdump.raw; fi;cp payload_stderr.txt payload_stderr.out;cp payload_stdout.txt payload_stdout.out;gzip payload_stderr.out;gzip payload_stdout.out;rm %(tail)s' % { 'TMPRAWFILE': TMPRAWFILE, 'TMPMDSTFILE': TMPMDSTFILE, 'TMPHISTFILE': TMPHISTFILE, 'TMPRICHFILE': TMPRICHFILE, 'PRODSOFT': PRODSOFT, 'input_file': j.file, 'ProdPathAndName': ProdPathAndName, 'prodPath': j.task.path, 'prodName': j.task.production, 'template': j.task.template, 'tail': tail, 'prodSlt': j.task.prodslt, 'EVTDUMPFILE': EVTDUMPFILE, 'STDOUTFILE': STDOUTFILE, 'STDERRFILE': STDERRFILE } # fileIRaw = FileSpec() # fileIRaw.lfn = "%s" % (input_file) # fileIRaw.destinationDBlock = job.destinationDBlock # fileIRaw.destinationSE = job.destinationSE # fileIRaw.dataset = job.destinationDBlock # fileIRaw.type = 'input' # job.addFile(fileIRaw) fileOstdout = FileSpec() fileOstdout.lfn = "payload_stdout.out.gz" fileOstdout.destinationDBlock = job.destinationDBlock fileOstdout.destinationSE = job.destinationSE fileOstdout.dataset = job.destinationDBlock fileOstdout.type = 'output' job.addFile(fileOstdout) fileOstderr = FileSpec() fileOstderr.lfn = "payload_stderr.out.gz" fileOstderr.destinationDBlock = job.destinationDBlock fileOstderr.destinationSE = job.destinationSE fileOstderr.dataset = job.destinationDBlock fileOstderr.type = 'output' job.addFile(fileOstderr) fileOLog = FileSpec() fileOLog.lfn = "%(prodName)s-%(runNumber)s-%(runChunk)s-%(prodSlt)s-%(phastVer)s.job.log.tgz" % { 'prodName': j.task.production, 'runNumber': j.run_number, 'runChunk': j.chunk_number, 'prodSlt': j.task.prodslt, 'phastVer': j.task.phastver } fileOLog.destinationDBlock = job.destinationDBlock fileOLog.destinationSE = job.destinationSE fileOLog.dataset = job.destinationDBlock fileOLog.type = 'log' job.addFile(fileOLog) if j.task.type == 'test production' or j.task.type == 'mass production' or j.task.type == 'technical production': fileOmDST = FileSpec() fileOmDST.lfn = "%s" % (TMPMDSTFILE) fileOmDST.destinationDBlock = job.destinationDBlock fileOmDST.destinationSE = job.destinationSE fileOmDST.dataset = job.destinationDBlock fileOmDST.type = 'output' job.addFile(fileOmDST) fileOTrafdic = FileSpec() fileOTrafdic.lfn = "%s" % (TMPHISTFILE) fileOTrafdic.destinationDBlock = job.destinationDBlock fileOTrafdic.destinationSE = job.destinationSE fileOTrafdic.dataset = job.destinationDBlock fileOTrafdic.type = 'output' job.addFile(fileOTrafdic) if j.task.type == 'test production' or j.task.type == 'mass production' or j.task.type == 'technical production' or j.task.type == 'DDD filtering': fileOtestevtdump = FileSpec() fileOtestevtdump.lfn = "testevtdump.raw" fileOtestevtdump.destinationDBlock = job.destinationDBlock fileOtestevtdump.destinationSE = job.destinationSE fileOtestevtdump.dataset = job.destinationDBlock fileOtestevtdump.type = 'output' job.addFile(fileOtestevtdump) s, o = Client.submitJobs([job], srvID=aSrvID) logger.info(s) for x in o: logger.info("PandaID=%s" % x[0]) if x[0] != 0 and x[0] != 'NULL': j_update = Job.objects.get(id=j.id) j_update.panda_id = x[0] j_update.status = 'sent' j_update.attempt = j_update.attempt + 1 j_update.date_updated = timezone.now() try: j_update.save() logger.info('Job %s with PandaID %s updated at %s' % (j.id, x[0], timezone.now())) if j_update.task.status == 'send': logger.info( 'Going to update status of task %s from send to running' % j_update.task.name) t_update = Task.objects.get(id=j_update.task.id) t_update.status = 'running' 
t_update.date_updated = timezone.now() try: t_update.save() logger.info('Task %s updated' % t_update.name) except IntegrityError as e: logger.exception( 'Unique together constraint caught, was not saved') except DatabaseError as e: logger.exception( 'Something went wrong while saving: %s' % e.message) except IntegrityError as e: logger.exception( 'Unique together constraint caught, was not saved') except DatabaseError as e: logger.exception( 'Something went wrong while saving: %s' % e.message) else: logger.info('Job %s was not added to PanDA' % j.id) i += 1 logger.info('done')
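The Client.submitJobs call used above returns a status code plus, for each submitted JobSpec, a tuple whose first element is the assigned PandaID (0 or 'NULL' when the job was rejected). As a minimal sketch, assuming the same Client, logger and aSrvID objects are in scope, a hypothetical helper that isolates this submit-and-check step could look like the following (submit_and_collect is not part of the original code):

def submit_and_collect(jobs, srv_id=aSrvID):
    """Submit a list of JobSpecs and return the PandaIDs that were actually assigned."""
    status, results = Client.submitJobs(jobs, srvID=srv_id)
    logger.info('submitJobs returned status %s' % status)
    panda_ids = []
    for entry in results:
        panda_id = entry[0]
        if panda_id not in (0, 'NULL'):
            panda_ids.append(panda_id)
        else:
            logger.info('a job was rejected by PanDA')
    return panda_ids

The database bookkeeping (setting panda_id, status and attempt on the Job model) would then operate on the returned list instead of being interleaved with the submission code.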
job = JobSpec() job.jobDefinitionID = int(time.time()) % 10000 job.jobName = "%s_%d" % (commands.getoutput('uuidgen'), index) job.AtlasRelease = 'Atlas-13.0.40' job.homepackage = 'AtlasProduction/13.0.40.3' job.transformation = 'csc_simul_trf.py' job.destinationDBlock = datasetName job.destinationSE = destName job.computingSite = site job.prodDBlock = 'valid1.005001.pythia_minbias.evgen.EVNT.e306_tid019128' job.prodSourceLabel = 'test' job.currentPriority = 10000 job.cloud = 'IT' fileI = FileSpec() fileI.dataset = job.prodDBlock fileI.prodDBlock = job.prodDBlock fileI.lfn = lfn fileI.type = 'input' job.addFile(fileI) fileD = FileSpec() fileD.dataset = 'ddo.000001.Atlas.Ideal.DBRelease.v040701' fileD.prodDBlock = 'ddo.000001.Atlas.Ideal.DBRelease.v030101' fileD.lfn = 'DBRelease-4.7.1.tar.gz' fileD.type = 'input' job.addFile(fileD) fileOE = FileSpec() fileOE.lfn = "%s.HITS.pool.root" % job.jobName
def master_prepare(self, app, appconfig): '''Prepare the master job''' from pandatools import Client from taskbuffer.JobSpec import JobSpec from taskbuffer.FileSpec import FileSpec job = app._getParent() logger.debug('ExecutablePandaRTHandler master_prepare called for %s', job.getFQID('.')) # set chirp variables if configPanda['chirpconfig'] or configPanda['chirpserver']: setChirpVariables() # Pack inputsandbox inputsandbox = 'sources.%s.tar' % commands.getoutput( 'uuidgen 2> /dev/null') inpw = job.getInputWorkspace() # add user script to inputsandbox if hasattr(job.application.exe, "name"): if not job.application.exe in job.inputsandbox: job.inputsandbox.append(job.application.exe) for fname in [f.name for f in job.inputsandbox]: fname.rstrip(os.sep) path = fname[:fname.rfind(os.sep)] f = fname[fname.rfind(os.sep) + 1:] rc, output = commands.getstatusoutput( 'tar rf %s -C %s %s' % (inpw.getPath(inputsandbox), path, f)) if rc: logger.error('Packing inputsandbox failed with status %d', rc) logger.error(output) raise ApplicationConfigurationError( None, 'Packing inputsandbox failed.') if len(job.inputsandbox) > 0: rc, output = commands.getstatusoutput('gzip %s' % (inpw.getPath(inputsandbox))) if rc: logger.error('Packing inputsandbox failed with status %d', rc) logger.error(output) raise ApplicationConfigurationError( None, 'Packing inputsandbox failed.') inputsandbox += ".gz" else: inputsandbox = None # Upload Inputsandbox if inputsandbox: logger.debug('Uploading source tarball ...') uploadSources(inpw.getPath(), os.path.basename(inputsandbox)) self.inputsandbox = inputsandbox else: self.inputsandbox = None # input dataset if job.inputdata: if job.inputdata._name != 'DQ2Dataset': raise ApplicationConfigurationError( None, 'PANDA application supports only DQ2Datasets') # run brokerage here if not splitting if not job.splitter: from GangaPanda.Lib.Panda.Panda import runPandaBrokerage runPandaBrokerage(job) elif job.splitter._name not in [ 'DQ2JobSplitter', 'ArgSplitter', 'ArgSplitterTask' ]: raise ApplicationConfigurationError( None, 'Panda splitter must be DQ2JobSplitter or ArgSplitter') if job.backend.site == 'AUTO': raise ApplicationConfigurationError( None, 'site is still AUTO after brokerage!') # output dataset if job.outputdata: if job.outputdata._name != 'DQ2OutputDataset': raise ApplicationConfigurationError( None, 'Panda backend supports only DQ2OutputDataset') else: logger.info('Adding missing DQ2OutputDataset') job.outputdata = DQ2OutputDataset() job.outputdata.datasetname, outlfn = dq2outputdatasetname( job.outputdata.datasetname, job.id, job.outputdata.isGroupDS, job.outputdata.groupname) self.outDsLocation = Client.PandaSites[job.backend.site]['ddm'] try: Client.addDataset(job.outputdata.datasetname, False, location=self.outDsLocation) logger.info('Output dataset %s registered at %s' % (job.outputdata.datasetname, self.outDsLocation)) dq2_set_dataset_lifetime(job.outputdata.datasetname, location=self.outDsLocation) except exceptions.SystemExit: raise BackendError( 'Panda', 'Exception in Client.addDataset %s: %s %s' % (job.outputdata.datasetname, sys.exc_info()[0], sys.exc_info()[1])) # handle the libds if job.backend.libds: self.libDataset = job.backend.libds self.fileBO = getLibFileSpecFromLibDS(self.libDataset) self.library = self.fileBO.lfn elif job.backend.bexec: self.libDataset = job.outputdata.datasetname + '.lib' self.library = '%s.tgz' % self.libDataset try: Client.addDataset(self.libDataset, False, location=self.outDsLocation) dq2_set_dataset_lifetime(self.libDataset, 
location=self.outDsLocation) logger.info('Lib dataset %s registered at %s' % (self.libDataset, self.outDsLocation)) except exceptions.SystemExit: raise BackendError( 'Panda', 'Exception in Client.addDataset %s: %s %s' % (self.libDataset, sys.exc_info()[0], sys.exc_info()[1])) # collect extOutFiles self.extOutFile = [] for tmpName in job.outputdata.outputdata: if tmpName != '': self.extOutFile.append(tmpName) for tmpName in job.outputsandbox: if tmpName != '': self.extOutFile.append(tmpName) for tmpName in job.backend.extOutFile: if tmpName != '': self.extOutFile.append(tmpName) # create build job if job.backend.bexec != '': jspec = JobSpec() jspec.jobDefinitionID = job.id jspec.jobName = commands.getoutput('uuidgen 2> /dev/null') jspec.transformation = '%s/buildGen-00-00-01' % Client.baseURLSUB if Client.isDQ2free(job.backend.site): jspec.destinationDBlock = '%s/%s' % ( job.outputdata.datasetname, self.libDataset) jspec.destinationSE = 'local' else: jspec.destinationDBlock = self.libDataset jspec.destinationSE = job.backend.site jspec.prodSourceLabel = configPanda['prodSourceLabelBuild'] jspec.processingType = configPanda['processingType'] jspec.assignedPriority = configPanda['assignedPriorityBuild'] jspec.computingSite = job.backend.site jspec.cloud = job.backend.requirements.cloud jspec.jobParameters = '-o %s' % (self.library) if self.inputsandbox: jspec.jobParameters += ' -i %s' % (self.inputsandbox) else: raise ApplicationConfigurationError( None, 'Executable on Panda with build job defined, but inputsandbox is empty!' ) matchURL = re.search('(http.*://[^/]+)/', Client.baseURLCSRVSSL) if matchURL: jspec.jobParameters += ' --sourceURL %s ' % matchURL.group(1) if job.backend.bexec != '': jspec.jobParameters += ' --bexec "%s" ' % urllib.quote( job.backend.bexec) jspec.jobParameters += ' -r %s ' % '.' fout = FileSpec() fout.lfn = self.library fout.type = 'output' fout.dataset = self.libDataset fout.destinationDBlock = self.libDataset jspec.addFile(fout) flog = FileSpec() flog.lfn = '%s.log.tgz' % self.libDataset flog.type = 'log' flog.dataset = self.libDataset flog.destinationDBlock = self.libDataset jspec.addFile(flog) return jspec else: return None
def master_prepare(self, app, appmasterconfig): # PandaTools from pandatools import Client from pandatools import AthenaUtils from taskbuffer.JobSpec import JobSpec from taskbuffer.FileSpec import FileSpec job = app._getParent() logger.debug('AthenaMCPandaRTHandler master_prepare called for %s', job.getFQID('.')) usertag = configDQ2['usertag'] #usertag='user09' nickname = getNickname(allowMissingNickname=True) self.libDataset = '%s.%s.ganga.%s_%d.lib._%06d' % ( usertag, nickname, commands.getoutput('hostname').split('.')[0], int(time.time()), job.id) # self.userprefix='%s.%s.ganga' % (usertag,gridProxy.identity()) sources = 'sources.%s.tar.gz' % commands.getoutput( 'uuidgen 2> /dev/null') self.library = '%s.lib.tgz' % self.libDataset # check DBRelease # if job.backend.dbRelease != '' and job.backend.dbRelease.find(':') == -1: # raise ApplicationConfigurationError(None,"ERROR : invalid argument for backend.dbRelease. Must be 'DatasetName:FileName'") # unpack library logger.debug('Creating source tarball ...') tmpdir = '/tmp/%s' % commands.getoutput('uuidgen 2> /dev/null') os.mkdir(tmpdir) inputbox = [] if os.path.exists(app.transform_archive): # must add a condition on size. inputbox += [File(app.transform_archive)] if app.evgen_job_option: self.evgen_job_option = app.evgen_job_option if os.path.exists(app.evgen_job_option): # locally modified job option file to add to the input sand box inputbox += [File(app.evgen_job_option)] self.evgen_job_option = app.evgen_job_option.split("/")[-1] # add input sandbox files if (job.inputsandbox): for file in job.inputsandbox: inputbox += [file] # add option files for extFile in job.backend.extOutFile: try: shutil.copy(extFile, tmpdir) except IOError: os.makedirs(tmpdir) shutil.copy(extFile, tmpdir) # fill the archive for opt_file in inputbox: try: shutil.copy(opt_file.name, tmpdir) except IOError: os.makedirs(tmpdir) shutil.copy(opt_file.name, tmpdir) # now tar it up again inpw = job.getInputWorkspace() rc, output = commands.getstatusoutput('tar czf %s -C %s .' % (inpw.getPath(sources), tmpdir)) if rc: logger.error('Packing sources failed with status %d', rc) logger.error(output) raise ApplicationConfigurationError(None, 'Packing sources failed.') shutil.rmtree(tmpdir) # upload sources logger.debug('Uploading source tarball ...') try: cwd = os.getcwd() os.chdir(inpw.getPath()) rc, output = Client.putFile(sources) if output != 'True': logger.error('Uploading sources %s failed. Status = %d', sources, rc) logger.error(output) raise ApplicationConfigurationError( None, 'Uploading archive failed') finally: os.chdir(cwd) # Use Panda's brokerage ## if job.inputdata and len(app.sites)>0: ## # update cloud, use inputdata's ## from dq2.info.TiersOfATLAS import whichCloud,ToACache ## inclouds=[] ## for site in app.sites: ## cloudSite=whichCloud(app.sites[0]) ## if cloudSite not in inclouds: ## inclouds.append(cloudSite) ## # now converting inclouds content into proper brokering stuff. ## outclouds=[] ## for cloudSite in inclouds: ## for cloudID, eachCloud in ToACache.dbcloud.iteritems(): ## if cloudSite==eachCloud: ## cloud=cloudID ## outclouds.append(cloud) ## break ## print outclouds ## # finally, matching with user's wishes ## if len(outclouds)>0: ## if not job.backend.requirements.cloud: # no user wish, update ## job.backend.requirements.cloud=outclouds[0] ## else: ## try: ## assert job.backend.requirements.cloud in outclouds ## except: ## raise ApplicationConfigurationError(None,'Input dataset not available in target cloud %s. 
Please try any of the following %s' % (job.backend.requirements.cloud, str(outclouds))) from GangaPanda.Lib.Panda.Panda import runPandaBrokerage runPandaBrokerage(job) if job.backend.site == 'AUTO': raise ApplicationConfigurationError( None, 'site is still AUTO after brokerage!') # output dataset preparation and registration try: outDsLocation = Client.PandaSites[job.backend.site]['ddm'] except: raise ApplicationConfigurationError( None, "Could not extract output dataset location from job.backend.site value: %s. Aborting" % job.backend.site) if not app.dryrun: for outtype in app.outputpaths.keys(): dset = string.replace(app.outputpaths[outtype], "/", ".") dset = dset[1:] # dataset registration must be done only once. print "registering output dataset %s at %s" % (dset, outDsLocation) try: Client.addDataset(dset, False, location=outDsLocation) dq2_set_dataset_lifetime(dset, location=outDsLocation) except: raise ApplicationConfigurationError( None, "Fail to create output dataset %s. Aborting" % dset) # extend registration to build job lib dataset: print "registering output dataset %s at %s" % (self.libDataset, outDsLocation) try: Client.addDataset(self.libDataset, False, location=outDsLocation) dq2_set_dataset_lifetime(self.libDataset, outDsLocation) except: raise ApplicationConfigurationError( None, "Fail to create output dataset %s. Aborting" % self.libDataset) ### cacheVer = "-AtlasProduction_" + str(app.prod_release) logger.debug("master job submit?") self.outsite = job.backend.site if app.se_name and app.se_name != "none" and not self.outsite: self.outsite = app.se_name # create build job jspec = JobSpec() jspec.jobDefinitionID = job.id jspec.jobName = commands.getoutput('uuidgen 2> /dev/null') jspec.AtlasRelease = 'Atlas-%s' % app.atlas_rel jspec.homepackage = 'AnalysisTransforms' + cacheVer #+nightVer jspec.transformation = '%s/buildJob-00-00-03' % Client.baseURLSUB # common base to Athena and AthenaMC jobs: buildJob is a pilot job which takes care of all inputs for the real jobs (in prepare() jspec.destinationDBlock = self.libDataset jspec.destinationSE = job.backend.site jspec.prodSourceLabel = 'panda' jspec.assignedPriority = 2000 jspec.computingSite = job.backend.site jspec.cloud = job.backend.requirements.cloud # jspec.jobParameters = self.args not known yet jspec.jobParameters = '-o %s' % (self.library) if app.userarea: print app.userarea jspec.jobParameters += ' -i %s' % (os.path.basename(app.userarea)) else: jspec.jobParameters += ' -i %s' % (sources) jspec.cmtConfig = AthenaUtils.getCmtConfig(athenaVer=app.atlas_rel) matchURL = re.search('(http.*://[^/]+)/', Client.baseURLSSL) if matchURL: jspec.jobParameters += ' --sourceURL %s' % matchURL.group(1) fout = FileSpec() fout.lfn = self.library fout.type = 'output' fout.dataset = self.libDataset fout.destinationDBlock = self.libDataset jspec.addFile(fout) flog = FileSpec() flog.lfn = '%s.log.tgz' % self.libDataset flog.type = 'log' flog.dataset = self.libDataset flog.destinationDBlock = self.libDataset jspec.addFile(flog) #print "MASTER JOB DETAILS:",jspec.jobParameters return jspec
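The dataset names registered in master_prepare above are derived from the Athena output paths by replacing '/' with '.' and stripping the leading separator. A small illustration with a made-up path (the real values come from app.outputpaths):

# hypothetical output path; only the string manipulation is shown
outputpath = '/user09.someuser.ganga.mytask.EVNT/'
dset = outputpath.replace('/', '.')   # '.user09.someuser.ganga.mytask.EVNT.'
dset = dset[1:]                       # drop the leading '.' as master_prepare does

The per-subjob handler further below trims both ends (dset[1:-1]) so that the registered name carries no stray dots.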
job.prodSourceLabel = 'managed' # priority job.assignedPriority = priority job.currentPriority = priority # CPU, memory,disk ### FIXME # attempt number ### FIXME # input files if iDataset != 'NULL': # remove _tidXXX pat = re.sub('_tid\d+$', '', iDataset) # search m = re.search('(' + pat + '\S+)', line) if m != None: file = FileSpec() file.lfn = m.group(1) file.type = 'input' file.dataset = iDataset file.prodDBlock = iDataset job.addFile(file) # DB release for i, lpar in enumerate(lparams): if lpar == 'DBRelease': file = FileSpec() file.lfn = "%s-%s.tgz" % (lpar, vparams[i]) file.type = 'input' file.dataset = iDataset file.prodDBlock = iDataset job.addFile(file) break
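To see what the _tid stripping above buys, here is a hedged illustration with made-up values (the real iDataset and line come from the production request being parsed); presumably the per-file LFNs quoted in the line do not carry the task-ID suffix that the dataset name does.

import re

iDataset = 'valid1.005001.pythia_minbias.evgen.EVNT.e306_tid019128'   # made-up example
pat = re.sub(r'_tid\d+$', '', iDataset)
# pat == 'valid1.005001.pythia_minbias.evgen.EVNT.e306'
# note: the dataset name is used verbatim as a regex, so its dots match any character
line = 'inputfile valid1.005001.pythia_minbias.evgen.EVNT.e306._00001.pool.root'  # made-up
m = re.search('(' + pat + r'\S+)', line)
if m is not None:
    lfn = m.group(1)   # 'valid1.005001.pythia_minbias.evgen.EVNT.e306._00001.pool.root'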
#job.transformation = 'http://pandawms.org/pandawms-jobcache/lsst-trf.sh' job.transformation = 'http://pandawms.org/pandawms-jobcache/lsst-trf-phosim332.sh' job.destinationDBlock = datasetName #job.destinationSE = destName job.destinationSE = 'local' job.currentPriority = 1000 # previously tried prodSourceLabel values: 'ptest', 'panda', 'test', 'user' (2014-01-27) job.prodSourceLabel = 'panda' job.computingSite = site job.jobParameters = "" job.VO = "lsst" fileOL = FileSpec() fileOL.lfn = "%s.job.log.tgz" % job.jobName fileOL.destinationDBlock = job.destinationDBlock fileOL.destinationSE = job.destinationSE fileOL.dataset = job.destinationDBlock fileOL.type = 'log' job.addFile(fileOL) s, o = Client.submitJobs([job], srvID=aSrvID) print s for x in o: print "PandaID=%s" % x[0]
destName = 'ANALY_BNL_ATLAS_1' job = JobSpec() job.jobDefinitionID = 1 job.jobName = commands.getoutput('uuidgen') job.AtlasRelease = 'Atlas-12.0.2' job.homepackage = 'AnalysisTransforms' job.transformation = 'https://gridui01.usatlas.bnl.gov:24443/dav/test/runAthena2' job.destinationDBlock = datasetName job.destinationSE = destName job.currentPriority = 3000 job.prodSourceLabel = 'user' job.computingSite = site job.prodDBlock = 'testIdeal_06.005001.pythia_minbias.recon.AOD.v12000103' fileOL = FileSpec() fileOL.lfn = "%s.job.log.tgz" % commands.getoutput('uuidgen') fileOL.destinationDBlock = job.destinationDBlock fileOL.destinationSE = job.destinationSE fileOL.dataset = job.destinationDBlock fileOL.type = 'log' job.addFile(fileOL) fileOZ = FileSpec() fileOZ.lfn = "AANT.%s.root" % commands.getoutput('uuidgen') fileOZ.destinationDBlock = job.destinationDBlock fileOZ.destinationSE = job.destinationSE fileOZ.dataset = job.destinationDBlock fileOZ.type = 'output' job.addFile(fileOZ)
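Most snippets in this file repeat the same FileSpec boilerplate for log and output files: only the LFN varies, while destinationDBlock, destinationSE and dataset all mirror the job's destination. A minimal sketch of a hypothetical helper capturing that pattern (attach_output is not part of the original scripts; it assumes FileSpec is imported as elsewhere in this file):

def attach_output(job, lfn, ftype='output'):
    """Create a FileSpec of the given type, point it at the job's destination and add it."""
    f = FileSpec()
    f.lfn = lfn
    f.type = ftype                       # 'output' or 'log'
    f.destinationDBlock = job.destinationDBlock
    f.destinationSE = job.destinationSE
    f.dataset = job.destinationDBlock
    job.addFile(f)
    return f

With such a helper the two blocks above would reduce to attach_output(job, "%s.job.log.tgz" % commands.getoutput('uuidgen'), 'log') and attach_output(job, "AANT.%s.root" % commands.getoutput('uuidgen')).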
def send_merging_job(task, files_list, merge_chunk_number): logger.info( 'Going to send merging job for task %s run number %s and merge chunk number %s' % (task, files_list[0].run_number, merge_chunk_number)) input_files = '' input_files_copy = '' for j in files_list: TMPMDSTFILE = 'mDST-%(runNumber)s-%(runChunk)s-%(prodSlt)s-%(phastVer)s.root' % { 'runNumber': j.run_number, 'runChunk': j.chunk_number, 'prodSlt': j.task.prodslt, 'phastVer': j.task.phastver } input_files += ' ' + TMPMDSTFILE if j.task.site == 'BW_COMPASS_MCORE': input_files_copy += ' cp /scratch/sciteam/criedl/projectdata/' + task.path + task.soft + '/mDST.chunks/' + TMPMDSTFILE + ' .;' else: input_files_copy += ' xrdcp -N -f root://eoscompass.cern.ch//eos/experiment/compass/' + task.path + task.soft + '/mDST.chunks/' + TMPMDSTFILE + ' .;' datasetName = '%(prodNameOnly)s.%(runNumber)s-%(prodSlt)s-%(phastVer)s-merging-mdst' % { 'prodNameOnly': task.production, 'runNumber': j.run_number, 'prodSlt': task.prodslt, 'phastVer': task.phastver } logger.info(datasetName) destName = 'local' # PanDA will not try to move output data, data will be placed by pilot (based on schedconfig) MERGEDHISTFILE = '%(runNumber)s-%(prodSlt)s-%(phastVer)s.root' % { 'runNumber': j.run_number, 'prodSlt': task.prodslt, 'phastVer': task.phastver } if format(merge_chunk_number, '03d') != '000': MERGEDHISTFILE = MERGEDHISTFILE + '.' + format(merge_chunk_number, '03d') logger.info(MERGEDHISTFILE) MERGEDMDSTFILE = 'mDST-%(runNumber)s-%(prodSlt)s-%(phastVer)s.root' % { 'runNumber': j.run_number, 'prodSlt': task.prodslt, 'phastVer': task.phastver } if format(merge_chunk_number, '03d') != '000': MERGEDMDSTFILE = MERGEDMDSTFILE + '.' + format(merge_chunk_number, '03d') logger.info(MERGEDMDSTFILE) TMPHISTFILE = 'merge-%(runNumber)s-ch%(mergeChunkNumber)s.root' % { 'runNumber': j.run_number, 'mergeChunkNumber': format(merge_chunk_number, '03d') } logger.info(TMPHISTFILE) PRODSOFT = task.soft ProdPathAndName = task.home + task.path + task.soft job = JobSpec() job.VO = 'vo.compass.cern.ch' job.taskID = task.id job.jobDefinitionID = 0 job.jobName = '%(prodNameOnly)s-merge-mdst-%(runNumber)s-ch%(mergeChunkNumber)s' % { 'prodNameOnly': task.production, 'runNumber': j.run_number, 'mergeChunkNumber': format(merge_chunk_number, '03d') } job.transformation = 'merging mdst' # payload (can be URL as well) job.destinationDBlock = datasetName job.destinationSE = destName job.currentPriority = 5000 job.prodSourceLabel = 'prod_test' job.computingSite = task.site job.attemptNr = j.attempt_merging_mdst + 1 job.maxAttempt = j.task.max_attempts if j.status_merging_mdst == 'failed': job.parentID = j.panda_id_merging_mdst if j.task.site == 'BW_COMPASS_MCORE': job.jobParameters = 'ppwd=$(pwd);ppwd=$(pwd);export COMPASS_SW_PREFIX=/scratch/sciteam/criedl/projectdata/;export COMPASS_SW_PATH=%(prodPath)s;export COMPASS_PROD_NAME=%(prodName)s;export prodSlt=%(prodSlt)s;export MERGEDHISTFILE=%(MERGEDHISTFILE)s;export MERGEDMDSTFILE=%(MERGEDMDSTFILE)s;export TMPHISTFILE=%(TMPHISTFILE)s;export PRODSOFT=%(PRODSOFT)s;coralpath=%(ProdPathAndName)s/coral;cd -P $coralpath;export coralpathsetup=$coralpath"/setup.sh";source $coralpathsetup;cd $ppwd;%(input_files_copy)sexport PHAST_mDST_MAX_SIZE=6000000000;$CORAL/../phast/phast -m -o %(MERGEDMDSTFILE)s %(input_files)s;cp payload_stderr.txt payload_stderr.out;cp payload_stdout.txt payload_stdout.out;gzip payload_stdout.out;' % { 'MERGEDHISTFILE': MERGEDHISTFILE, 'MERGEDMDSTFILE': MERGEDMDSTFILE, 'PRODSOFT': PRODSOFT, 'input_files_copy': 
input_files_copy, 'input_files': input_files, 'ProdPathAndName': ProdPathAndName, 'prodPath': task.path, 'prodName': task.production, 'prodSlt': task.prodslt, 'TMPHISTFILE': TMPHISTFILE } else: job.jobParameters = 'export EOS_MGM_URL=root://eoscompass.cern.ch;ppwd=$(pwd);ppwd=$(pwd);export COMPASS_SW_PREFIX=/eos/experiment/compass/;export COMPASS_SW_PATH=%(prodPath)s;export COMPASS_PROD_NAME=%(prodName)s;export prodSlt=%(prodSlt)s;export MERGEDHISTFILE=%(MERGEDHISTFILE)s;export MERGEDMDSTFILE=%(MERGEDMDSTFILE)s;export TMPHISTFILE=%(TMPHISTFILE)s;export PRODSOFT=%(PRODSOFT)s;coralpath=%(ProdPathAndName)s/coral;cd -P $coralpath;export coralpathsetup=$coralpath"/setup.sh";source $coralpathsetup;cd $ppwd;%(input_files_copy)sexport PHAST_mDST_MAX_SIZE=6000000000;$CORAL/../phast/phast -m -o %(MERGEDMDSTFILE)s %(input_files)s;cp payload_stderr.txt payload_stderr.out;cp payload_stdout.txt payload_stdout.out;gzip payload_stdout.out;' % { 'MERGEDHISTFILE': MERGEDHISTFILE, 'MERGEDMDSTFILE': MERGEDMDSTFILE, 'PRODSOFT': PRODSOFT, 'input_files_copy': input_files_copy, 'input_files': input_files, 'ProdPathAndName': ProdPathAndName, 'prodPath': task.path, 'prodName': task.production, 'prodSlt': task.prodslt, 'TMPHISTFILE': TMPHISTFILE } fileOLog = FileSpec() fileOLog.lfn = "%s.job.log.tgz" % (job.jobName) fileOLog.destinationDBlock = job.destinationDBlock fileOLog.destinationSE = job.destinationSE fileOLog.dataset = job.destinationDBlock fileOLog.type = 'log' job.addFile(fileOLog) fileOmDST = FileSpec() fileOmDST.lfn = "%s" % (MERGEDMDSTFILE) fileOmDST.destinationDBlock = job.destinationDBlock fileOmDST.destinationSE = job.destinationSE fileOmDST.dataset = job.destinationDBlock fileOmDST.type = 'output' job.addFile(fileOmDST) fileOstdout = FileSpec() fileOstdout.lfn = "payload_stdout.out.gz" fileOstdout.destinationDBlock = job.destinationDBlock fileOstdout.destinationSE = job.destinationSE fileOstdout.dataset = job.destinationDBlock fileOstdout.type = 'output' job.addFile(fileOstdout) # fileOstderr = FileSpec() # fileOstderr.lfn = "payload_stderr.txt" # fileOstderr.destinationDBlock = job.destinationDBlock # fileOstderr.destinationSE = job.destinationSE # fileOstderr.dataset = job.destinationDBlock # fileOstderr.type = 'output' # job.addFile(fileOstderr) s, o = Client.submitJobs([job], srvID=aSrvID) logger.info(s) for x in o: logger.info("PandaID=%s" % x[0]) if x[0] != 0 and x[0] != 'NULL': for j in files_list: j_update = Job.objects.get(id=j.id) j_update.panda_id_merging_mdst = x[0] j_update.status_merging_mdst = 'sent' j_update.attempt_merging_mdst = j_update.attempt_merging_mdst + 1 j_update.chunk_number_merging_mdst = merge_chunk_number j_update.date_updated = today try: j_update.save() logger.info('Job %s with PandaID %s updated' % (j.id, x[0])) except IntegrityError as e: logger.exception('Unique together catched, was not saved') except DatabaseError as e: logger.exception('Something went wrong while saving: %s' % e.message) else: logger.info('Job %s was not added to PanDA' % j.id)
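The merged mDST and histogram names in send_merging_job acquire a zero-padded chunk suffix only for merge chunks after the first one. A small illustration with made-up run and production parameters (the real values come from the Job and Task records):

run_number, prodslt, phastver = 274508, 5, 8   # hypothetical values
merged = 'mDST-%(runNumber)s-%(prodSlt)s-%(phastVer)s.root' % {
    'runNumber': run_number, 'prodSlt': prodslt, 'phastVer': phastver}
for merge_chunk_number in (0, 1, 12):
    name = merged
    if format(merge_chunk_number, '03d') != '000':
        name += '.' + format(merge_chunk_number, '03d')
    # chunk 0  -> 'mDST-274508-5-8.root'
    # chunk 1  -> 'mDST-274508-5-8.root.001'
    # chunk 12 -> 'mDST-274508-5-8.root.012'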
lfnListE = [] for i in range(2): job = JobSpec() job.jobDefinitionID = int(time.time()) % 10000 job.jobName = commands.getoutput('uuidgen') job.AtlasRelease = 'Atlas-11.0.3' job.homepackage = 'JobTransforms-11-00-03-03' job.transformation = 'share/csc.evgen.trf' job.destinationDBlock = datasetName job.destinationSE = destName job.currentPriority = 1000 job.prodSourceLabel = 'test' job.computingSite = site file = FileSpec() file.lfn = "%s.evgen.pool.root" % commands.getoutput('uuidgen') lfnListE.append(file.lfn) file.lfn += ('.%d' % (i + 1)) file.destinationDBlock = job.destinationDBlock file.destinationSE = job.destinationSE file.dataset = job.destinationDBlock file.type = 'output' job.addFile(file) fileOL = FileSpec() fileOL.lfn = "%s.job.log.tgz" % commands.getoutput('uuidgen') fileOL.destinationDBlock = job.destinationDBlock fileOL.destinationSE = job.destinationSE fileOL.dataset = job.destinationDBlock fileOL.type = 'log'
def prepare(self, app, appconfig, appmasterconfig, jobmasterconfig): '''prepare the subjob specific configuration''' # PandaTools from pandatools import Client from pandatools import AthenaUtils from taskbuffer.JobSpec import JobSpec from taskbuffer.FileSpec import FileSpec job = app._getParent() logger.debug('AthenaMCPandaRTHandler prepare called for %s', job.getFQID('.')) try: assert self.outsite except: logger.error("outsite not set. Aborting") raise Exception() job.backend.site = self.outsite job.backend.actualCE = self.outsite cloud = job._getRoot().backend.requirements.cloud job.backend.requirements.cloud = cloud # now just filling the job from AthenaMC data jspec = JobSpec() jspec.jobDefinitionID = job._getRoot().id jspec.jobName = commands.getoutput('uuidgen 2> /dev/null') jspec.AtlasRelease = 'Atlas-%s' % app.atlas_rel if app.transform_archive: jspec.homepackage = 'AnalysisTransforms' + app.transform_archive elif app.prod_release: jspec.homepackage = 'AnalysisTransforms-AtlasProduction_' + str( app.prod_release) jspec.transformation = '%s/runAthena-00-00-11' % Client.baseURLSUB #---->???? prodDBlock and destinationDBlock when facing several input / output datasets? jspec.prodDBlock = 'NULL' if job.inputdata and len( app.inputfiles) > 0 and app.inputfiles[0] in app.dsetmap: jspec.prodDBlock = app.dsetmap[app.inputfiles[0]] # How to specify jspec.destinationDBlock when more than one type of output is available? Panda prod jobs seem to specify only the last output dataset outdset = "" for type in ["EVNT", "RDO", "HITS", "AOD", "ESD", "NTUP"]: if type in app.outputpaths.keys(): outdset = string.replace(app.outputpaths[type], "/", ".") outdset = outdset[1:-1] break if not outdset: try: assert len(app.outputpaths.keys()) > 0 except: logger.error( "app.outputpaths is empty: check your output datasets") raise type = app.outputpaths.keys()[0] outdset = string.replace(app.outputpaths[type], "/", ".") outdset = outdset[1:-1] jspec.destinationDBlock = outdset jspec.destinationSE = self.outsite jspec.prodSourceLabel = 'user' jspec.assignedPriority = 1000 jspec.cloud = cloud # memory if job.backend.requirements.memory != -1: jspec.minRamCount = job.backend.requirements.memory jspec.computingSite = self.outsite jspec.cmtConfig = AthenaUtils.getCmtConfig(athenaVer=app.atlas_rel) # library (source files) flib = FileSpec() flib.lfn = self.library # flib.GUID = flib.type = 'input' # flib.status = flib.dataset = self.libDataset flib.dispatchDBlock = self.libDataset jspec.addFile(flib) # input files FIXME: many more input types for lfn in app.inputfiles: useguid = app.turls[lfn].replace("guid:", "") finp = FileSpec() finp.lfn = lfn finp.GUID = useguid finp.dataset = app.dsetmap[lfn] finp.prodDBlock = app.dsetmap[lfn] finp.prodDBlockToken = 'local' finp.dispatchDBlock = app.dsetmap[lfn] finp.type = 'input' finp.status = 'ready' jspec.addFile(finp) # add dbfiles if any: for lfn in app.dbfiles: useguid = app.dbturls[lfn].replace("guid:", "") finp = FileSpec() finp.lfn = lfn finp.GUID = useguid finp.dataset = app.dsetmap[lfn] finp.prodDBlock = app.dsetmap[lfn] finp.prodDBlockToken = 'local' finp.dispatchDBlock = app.dsetmap[lfn] finp.type = 'input' finp.status = 'ready' jspec.addFile(finp) # then minbias files for lfn in app.mbfiles: useguid = app.minbias_turls[lfn].replace("guid:", "") finp = FileSpec() finp.lfn = lfn finp.GUID = useguid finp.dataset = app.dsetmap[lfn] finp.prodDBlock = app.dsetmap[lfn] finp.prodDBlockToken = 'local' finp.dispatchDBlock = app.dsetmap[lfn] finp.type = 'input' finp.status 
= 'ready' jspec.addFile(finp) # then cavern files for lfn in app.cavernfiles: useguid = app.cavern_turls[lfn].replace("guid:", "") finp = FileSpec() finp.lfn = lfn finp.GUID = useguid finp.dataset = app.dsetmap[lfn] finp.prodDBlock = app.dsetmap[lfn] finp.prodDBlockToken = 'local' finp.dispatchDBlock = app.dsetmap[lfn] finp.type = 'input' finp.status = 'ready' jspec.addFile(finp) # output files( this includes the logfiles) # Output files jidtag = "" job = app._getParent() # Returns job or subjob object if job._getRoot().subjobs: jidtag = job._getRoot().id else: jidtag = "%d" % job.id outfiles = app.subjobsOutfiles[job.id] pandaOutfiles = {} for type in outfiles.keys(): pandaOutfiles[type] = outfiles[type] + "." + str(jidtag) if type == "LOG": pandaOutfiles[type] += ".tgz" #print pandaOutfiles for outtype in pandaOutfiles.keys(): fout = FileSpec() dset = string.replace(app.outputpaths[outtype], "/", ".") dset = dset[1:-1] fout.dataset = dset fout.lfn = pandaOutfiles[outtype] fout.type = 'output' # fout.destinationDBlock = jspec.destinationDBlock fout.destinationDBlock = fout.dataset fout.destinationSE = jspec.destinationSE if outtype == 'LOG': fout.type = 'log' fout.destinationDBlock = fout.dataset fout.destinationSE = job.backend.site jspec.addFile(fout) # job parameters param = '-l %s ' % self.library # user tarball. # use corruption checker if job.backend.requirements.corCheck: param += '--corCheck ' # disable to skip missing files if job.backend.requirements.notSkipMissing: param += '--notSkipMissing ' # transform parameters # need to update arglist with final output file name... newArgs = [] if app.mode == "evgen": app.args[3] = app.args[3] + " -t " if app.verbosity: app.args[3] = app.args[3] + " -l %s " % app.verbosity for arg in app.args[3:]: for type in outfiles.keys(): if arg.find(outfiles[type]) > -1: arg = arg.replace(outfiles[type], pandaOutfiles[type]) newArgs.append(arg) arglist = string.join(newArgs, " ") # print "Arglist:",arglist param += ' -r ./ ' param += ' -j "%s"' % urllib.quote(arglist) allinfiles = app.inputfiles + app.dbfiles # Input files. param += ' -i "%s" ' % allinfiles if len(app.mbfiles) > 0: param += ' -m "%s" ' % app.mbfiles if len(app.cavernfiles) > 0: param += ' -n "%s" ' % app.cavernfiles # param += '-m "[]" ' #%minList FIXME # param += '-n "[]" ' #%cavList FIXME del pandaOutfiles[ "LOG"] # logfiles do not appear in IROOT block, and this one is not needed anymore... param += ' -o "{\'IROOT\':%s }"' % str(pandaOutfiles.items()) # source URL matchURL = re.search("(http.*://[^/]+)/", Client.baseURLSSL) if matchURL != None: param += " --sourceURL %s " % matchURL.group(1) param += " --trf" jspec.jobParameters = param jspec.metadata = "--trf \"%s\"" % arglist #print "SUBJOB DETAILS:",jspec.values() if app.dryrun: print "job.application.dryrun activated, printing out job parameters" print jspec.values() return return jspec
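The -o argument assembled above serializes the output-type-to-LFN mapping (minus the log file) as a quoted Python literal under the IROOT key. A hedged sketch with made-up entries showing what the transform receives (the real mapping comes from app.subjobsOutfiles):

# hypothetical outputs for one subjob, after the per-subjob suffix has been appended
pandaOutfiles = {'EVNT': 'mc.evgen.pool.root.42', 'NTUP': 'mc.ntup.root.42'}
param = ' -o "{\'IROOT\':%s }"' % str(list(pandaOutfiles.items()))
# ->  -o "{'IROOT':[('EVNT', 'mc.evgen.pool.root.42'), ('NTUP', 'mc.ntup.root.42')] }"
# (the original, Python 2-era handler calls str() on .items() directly; tuple order may vary)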