Example #1
 def convertToJobFileSpec(self,
                          datasetSpec,
                          setType=None,
                          useEventService=False):
     """Convert this JEDI file spec into a JobFileSpec for PanDA job submission."""
     jobFileSpec = JobFileSpec()
     jobFileSpec.fileID = self.fileID
     jobFileSpec.datasetID = datasetSpec.datasetID
     jobFileSpec.jediTaskID = datasetSpec.jediTaskID
     jobFileSpec.lfn = self.lfn
     jobFileSpec.GUID = self.GUID
     if setType is None:
         jobFileSpec.type = self.type
     else:
         jobFileSpec.type = setType
     jobFileSpec.scope = self.scope
     jobFileSpec.fsize = self.fsize
     jobFileSpec.checksum = self.checksum
     jobFileSpec.attemptNr = self.attemptNr
     # dataset attributes
     # (note: datasetSpec has already been dereferenced above, so this guard
     # is effectively always true when reached)
     if datasetSpec is not None:
         # dataset
         if datasetSpec.containerName not in [None, '']:
             jobFileSpec.dataset = datasetSpec.containerName
         else:
             jobFileSpec.dataset = datasetSpec.datasetName
         inputTypes = datasetSpec.getInputTypes()
         if self.type in inputTypes or setType in inputTypes:
             # prodDBlock
             jobFileSpec.prodDBlock = datasetSpec.datasetName
             # storage token
             if datasetSpec.storageToken not in ['', None]:
                 jobFileSpec.dispatchDBlockToken = datasetSpec.storageToken
         else:
             # destinationDBlock
             jobFileSpec.destinationDBlock = datasetSpec.datasetName
             # storage token
             if datasetSpec.storageToken not in ['', None]:
                 jobFileSpec.destinationDBlockToken = datasetSpec.storageToken.split('/')[0]
             # destination
             if datasetSpec.destination not in ['', None]:
                 jobFileSpec.destinationSE = datasetSpec.destination
             # set prodDBlockToken for Event Service
             if useEventService and datasetSpec.getObjectStore() is not None:
                 jobFileSpec.prodDBlockToken = 'objectstore^{0}'.format(datasetSpec.getObjectStore())
             # allow no output
             if datasetSpec.isAllowedNoOutput():
                 jobFileSpec.allowNoOutput()
     # return
     return jobFileSpec
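
The branch on getInputTypes() is the heart of the conversion: input-type datasets populate prodDBlock and keep the storage token whole as dispatchDBlockToken, while output-side datasets populate destinationDBlock and keep only the segment before the first '/' as destinationDBlockToken. A minimal, self-contained sketch of that token handling, using an illustrative token value rather than a real JEDI DatasetSpec:

# Illustrative storage token; real values come from DatasetSpec.storageToken.
storage_token = 'ATLASDATADISK/sub'

# Input side: the token is used as-is for dispatchDBlockToken.
dispatch_token = storage_token

# Output side: only the leading segment becomes destinationDBlockToken.
destination_token = storage_token.split('/')[0]

print(dispatch_token)     # ATLASDATADISK/sub
print(destination_token)  # ATLASDATADISK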
Example #2
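This excerpt assumes its surrounding script context: imports of os, sys, and the (Python 2) commands module; the PanDA client pieces Client, JobSpec, and FileSpec; Django models Task and Job; a configured logger; and module-level site, aSrvID, and max_send_amount values.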
def main():
    logger.info('Getting tasks with status send and running')
    #    tasks_list = Task.objects.all().filter(Q(status='send') | Q(status='running'))
    # the status-based query above is commented out; select a single hard-coded task instead
    tasks_list = Task.objects.all().filter(name='dvcs2016P09t2r13v1_mu+')
    logger.info('Got list of %s tasks' % len(tasks_list))

    for t in tasks_list:
        logger.info('Getting jobs in status defined or failed for task %s' % t)
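        # (despite the log message, the query below applies no status filter)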
        jobs_list_count = Job.objects.all().filter(task=t).count()
        if jobs_list_count > 50:
            jobs_list = Job.objects.all().filter(
                task=t).order_by('id')[:max_send_amount]
        else:
            jobs_list = Job.objects.all().filter(
                task=t).order_by('id')[:jobs_list_count]
        logger.info('Got list of %s jobs' % len(jobs_list))

        i = 0
        for j in jobs_list:
            if i >= max_send_amount:
                break

            logger.info('Going to send job %s of %s task' %
                        (j.file, j.task.name))

            umark = commands.getoutput('uuidgen')
            datasetName = 'panda.destDB.%s' % umark
            destName = 'COMPASSPRODDISK'  # PanDA will not try to move output data, data will be placed by pilot (based on schedconfig)
            TMPRAWFILE = j.file[j.file.rfind('/') + 1:]
            logger.info(TMPRAWFILE)
            TMPMDSTFILE = 'mDST-%(runNumber)s-%(runChunk)s-%(prodSlt)s-%(phastVer)s.root' % {
                'input_file': j.file,
                'runNumber': j.run_number,
                'runChunk': j.chunk_number,
                'prodSlt': j.task.prodslt,
                'phastVer': j.task.phastver
            }
            logger.info(TMPMDSTFILE)
            TMPHISTFILE = '%(runNumber)s-%(runChunk)s-%(prodSlt)s.root' % {
                'runNumber': j.run_number,
                'runChunk': j.chunk_number,
                'prodSlt': j.task.prodslt
            }
            logger.info(TMPHISTFILE)
            TMPRICHFILE = 'gfile_%(runNumber)s-%(runChunk)s.gfile' % {
                'runNumber': j.run_number,
                'runChunk': j.chunk_number
            }
            logger.info(TMPRICHFILE)
            EVTDUMPFILE = 'evtdump%(prodSlt)s-%(runChunk)s-%(runNumber)s.raw' % {
                'prodSlt': j.task.prodslt,
                'runNumber': j.run_number,
                'runChunk': j.chunk_number
            }
            logger.info(EVTDUMPFILE)
            STDOUTFILE = '%(prodNameOnly)s.%(runNumber)s-%(runChunk)s-%(prodSlt)s.stdout' % {
                'prodNameOnly': j.task.soft,
                'runNumber': j.run_number,
                'runChunk': j.chunk_number,
                'prodSlt': j.task.prodslt
            }
            logger.info(STDOUTFILE)
            STDERRFILE = '%(prodNameOnly)s.%(runNumber)s-%(runChunk)s-%(prodSlt)s.stderr' % {
                'prodNameOnly': j.task.soft,
                'runNumber': j.run_number,
                'runChunk': j.chunk_number,
                'prodSlt': j.task.prodslt
            }
            logger.info(STDERRFILE)
            try:
                file_year = j.file.split('/')[5]
                logger.info(file_year)
            except IndexError:
                logger.error('Error while splitting file to get year')
                sys.exit(1)

            ProdPathAndName = j.task.home + j.task.path + j.task.soft

            job = JobSpec()
            job.taskID = j.task.id
            job.jobDefinitionID = 0
            job.jobName = '%(prodName)s-%(fileYear)s--%(runNumber)s-%(runChunk)s-%(prodSlt)s-%(phastVer)s' % {
                'prodName': j.task.soft,
                'fileYear': file_year,
                'runNumber': j.run_number,
                'runChunk': j.chunk_number,
                'prodSlt': j.task.prodslt,
                'phastVer': j.task.phastver
            }
            job.transformation = j.task.type  # payload (can be URL as well)
            job.destinationDBlock = datasetName
            job.destinationSE = destName
            job.currentPriority = 2000
            job.prodSourceLabel = 'prod_test'
            job.computingSite = site
            job.attemptNr = j.attempt + 1
            job.maxAttempt = j.task.max_attempts
            if j.status == 'failed':
                job.parentID = j.panda_id
            head, tail = os.path.split(j.file)
            #            job.transferType = 'direct'
            job.sourceSite = 'CERN_COMPASS_PROD'

            # logs, and all files generated during execution will be placed in log (except output file)
            #job.jobParameters='source /afs/cern.ch/project/eos/installation/compass/etc/setup.sh;export EOS_MGM_URL=root://eoscompass.cern.ch;export PATH=/afs/cern.ch/project/eos/installation/compass/bin:$PATH;ppwd=$(pwd);echo $ppwd;export TMPMDSTFILE=%(TMPMDSTFILE)s;export TMPHISTFILE=%(TMPHISTFILE)s;export TMPRICHFILE=%(TMPRICHFILE)s;coralpath=%(ProdPathAndName)s/coral;echo $coralpath;cd -P $coralpath;export coralpathsetup=$coralpath"/setup.sh";echo $coralpathsetup;source $coralpathsetup;cd $ppwd;$CORAL/../phast/coral/coral.exe %(ProdPathAndName)s/template.opt;xrdcp -np $ppwd/%(TMPMDSTFILE)s xroot://eoscompass.cern.ch//eos/compass/%(prodName)s/mDST/%(TMPMDSTFILE)s;xrdcp -np $ppwd/%(TMPHISTFILE)s xroot://eoscompass.cern.ch//eos/compass/%(prodName)s/histos/%(TMPHISTFILE)s;metadataxml=$(ls metadata-*);echo $metadataxml;cp $metadataxml $metadataxml.PAYLOAD;' % {'TMPMDSTFILE': TMPMDSTFILE, 'TMPHISTFILE': TMPHISTFILE, 'TMPRICHFILE': TMPRICHFILE, 'input_file': input_file, 'ProdPathAndName': ProdPathAndName, 'prodName': prodName}
            job.jobParameters = 'export EOS_MGM_URL=root://eoscompass.cern.ch;ppwd=$(pwd);export COMPASS_SW_PREFIX=/eos/experiment/compass/;export COMPASS_SW_PATH=%(prodPath)s;export COMPASS_PROD_NAME=%(prodName)s;export TMPRAWFILE=%(TMPRAWFILE)s;export TMPMDSTFILE=%(TMPMDSTFILE)s;export TMPHISTFILE=%(TMPHISTFILE)s;export TMPRICHFILE=%(TMPRICHFILE)s;export prodSlt=%(prodSlt)s;export EVTDUMPFILE=%(EVTDUMPFILE)s;xrdcp -N -f root://castorpublic.cern.ch/%(input_file)s\?svcClass=compasscdr .;coralpath=%(ProdPathAndName)s/coral;cd -P $coralpath;export coralpathsetup=$coralpath"/setup.sh";source $coralpathsetup;cd $ppwd;$CORAL/../phast/coral/coral.exe %(ProdPathAndName)s/%(template)s;if [ ! -s testevtdump.raw ]; then echo "PanDA message: the file is empty">testevtdump.raw; fi;cp payload_stderr.txt payload_stderr.out;cp payload_stdout.txt payload_stdout.out;rm %(tail)s' % {
                'TMPRAWFILE': TMPRAWFILE,
                'TMPMDSTFILE': TMPMDSTFILE,
                'TMPHISTFILE': TMPHISTFILE,
                'TMPRICHFILE': TMPRICHFILE,
                'input_file': j.file,
                'ProdPathAndName': ProdPathAndName,
                'prodPath': j.task.path,
                'prodName': j.task.soft,
                'template': j.task.template,
                'tail': tail,
                'prodSlt': j.task.prodslt,
                'EVTDUMPFILE': EVTDUMPFILE,
                'STDOUTFILE': STDOUTFILE,
                'STDERRFILE': STDERRFILE
            }
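            # Note: the mapping above includes STDOUTFILE and STDERRFILE keys
            # (and TMPMDSTFILE's mapping earlier included 'input_file') that the
            # template string never references; %-formatting ignores extra keys.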

            fileIRaw = FileSpec()
            fileIRaw.lfn = "%s" % (j.file)
            fileIRaw.GUID = '5874a461-61d3-4543-8f34-6fd7a4624e78'
            fileIRaw.fsize = 1073753368
            fileIRaw.checksum = '671608be'
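            # GUID, fsize, and checksum are hardcoded here, presumably as
            # placeholder metadata for the raw input file rather than values
            # looked up per file.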
            fileIRaw.destinationDBlock = job.destinationDBlock
            fileIRaw.destinationSE = job.destinationSE
            fileIRaw.dataset = job.destinationDBlock
            fileIRaw.type = 'input'
            job.addFile(fileIRaw)

            fileOstdout = FileSpec()
            fileOstdout.lfn = "payload_stdout.txt"
            fileOstdout.destinationDBlock = job.destinationDBlock
            fileOstdout.destinationSE = job.destinationSE
            fileOstdout.dataset = job.destinationDBlock
            fileOstdout.type = 'output'
            job.addFile(fileOstdout)

            fileOstderr = FileSpec()
            fileOstderr.lfn = "payload_stderr.txt"
            fileOstderr.destinationDBlock = job.destinationDBlock
            fileOstderr.destinationSE = job.destinationSE
            fileOstderr.dataset = job.destinationDBlock
            fileOstderr.type = 'output'
            job.addFile(fileOstderr)

            fileOLog = FileSpec()
            fileOLog.lfn = "%(prodName)s-%(runNumber)s-%(runChunk)s-%(prodSlt)s-%(phastVer)s.job.log.tgz" % {
                'prodName': j.task.soft,
                'runNumber': j.run_number,
                'runChunk': j.chunk_number,
                'prodSlt': j.task.prodslt,
                'phastVer': j.task.phastver
            }
            fileOLog.destinationDBlock = job.destinationDBlock
            fileOLog.destinationSE = job.destinationSE
            fileOLog.dataset = job.destinationDBlock
            fileOLog.type = 'log'
            job.addFile(fileOLog)

            fileOmDST = FileSpec()
            fileOmDST.lfn = "%s" % (TMPMDSTFILE)
            fileOmDST.destinationDBlock = job.destinationDBlock
            fileOmDST.destinationSE = job.destinationSE
            fileOmDST.dataset = job.destinationDBlock
            fileOmDST.type = 'output'
            job.addFile(fileOmDST)

            fileOTrafdic = FileSpec()
            fileOTrafdic.lfn = "%s" % (TMPHISTFILE)
            fileOTrafdic.destinationDBlock = job.destinationDBlock
            fileOTrafdic.destinationSE = job.destinationSE
            fileOTrafdic.dataset = job.destinationDBlock
            fileOTrafdic.type = 'output'
            job.addFile(fileOTrafdic)

            fileOtestevtdump = FileSpec()
            fileOtestevtdump.lfn = "testevtdump.raw"
            fileOtestevtdump.destinationDBlock = job.destinationDBlock
            fileOtestevtdump.destinationSE = job.destinationSE
            fileOtestevtdump.dataset = job.destinationDBlock
            fileOtestevtdump.type = 'output'
            job.addFile(fileOtestevtdump)

            s, o = Client.submitJobs([job], srvID=aSrvID)
            logger.info(s)
            logger.info(o)
            #             for x in o:
            #                 logger.info("PandaID=%s" % x[0])
            #                 today = datetime.datetime.today()
            #
            #                 if x[0] != 0 and x[0] != 'NULL':
            #                     j_update = Job.objects.get(id=j.id)
            #                     j_update.panda_id = x[0]
            #                     j_update.status = 'sent'
            #                     j_update.attempt = j_update.attempt + 1
            #                     j_update.date_updated = today
            #
            #                     try:
            #                         j_update.save()
            #                         logger.info('Job %s with PandaID %s updated at %s' % (j.id, x[0], today))
            #                     except IntegrityError as e:
            #                         logger.exception('Unique together catched, was not saved')
            #                     except DatabaseError as e:
            #                         logger.exception('Something went wrong while saving: %s' % e.message)
            #                 else:
            #                     logger.info('Job %s was not added to PanDA' % j.id)
            i += 1

    logger.info('done')
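
A portability note on this example: the commands module used for uuidgen exists only in Python 2. On Python 3, subprocess.getoutput is the drop-in equivalent, or the shell call can be skipped entirely:

# Python 3 equivalents for commands.getoutput('uuidgen'); the 'commands'
# module was removed in Python 3.
import subprocess
umark = subprocess.getoutput('uuidgen')

# Or avoid the shell and generate the UUID directly:
import uuid
umark = str(uuid.uuid4())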

Example #3
This fragment configures and registers files for a Rucio-enabled user test job; it assumes a JobSpec instance (job) plus site and cloud variables defined earlier in its script.
job.destinationDBlock = 'panda.destDB.%s' % commands.getoutput('uuidgen')
job.destinationSE = 'AGLT2_TEST'
job.prodDBlock = 'user.mlassnig:user.mlassnig.pilot.test.single.hits'
job.currentPriority = 1000
#job.prodSourceLabel   = 'ptest'
job.prodSourceLabel = 'user'
job.computingSite = site
job.cloud = cloud
job.cmtConfig = 'x86_64-slc6-gcc48-opt'
job.specialHandling = 'ddm:rucio'
#job.transferType      = 'direct'

ifile = 'HITS.06828093._000096.pool.root.1'
fileI = FileSpec()
fileI.GUID = 'AC5B3759-B606-BA42-8681-4BD86455AE02'
fileI.checksum = 'ad:5d000974'
fileI.dataset = 'user.mlassnig:user.mlassnig.pilot.test.single.hits'
fileI.fsize = 94834717
fileI.lfn = ifile
fileI.prodDBlock = job.prodDBlock
fileI.scope = 'mc15_13TeV'
fileI.type = 'input'
job.addFile(fileI)

ofile = 'RDO_%s.root' % commands.getoutput('uuidgen')
fileO = FileSpec()
fileO.dataset = job.destinationDBlock
fileO.destinationDBlock = job.destinationDBlock
fileO.destinationSE = job.destinationSE
fileO.lfn = ofile
fileO.type = 'output'
job.addFile(fileO)
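
From here the fragment would presumably continue the way Example #2 does, by submitting the assembled job; a sketch of that continuation (Client and aSrvID as in Example #2, not part of the original fragment):

# Hypothetical continuation mirroring the submit pattern in Example #2.
s, o = Client.submitJobs([job], srvID=aSrvID)
print(s)
print(o)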