Example #1
    def __submit(self):
        s, o = Client.submitJobs(self.__joblist, srvID=self.__aSrvID)
        #print "S: %s" % s
        #print "O: %s" % o
        #panda_ids = json.loads(o)
        for x in o:
            print "PandaID=%s" % x[0]
Example #2
    def submitJobs(self, jobList):
        print 'Submit jobs'
        _logger.debug('Submit jobs')

        s, o = Client.submitJobs(jobList)
        _logger.debug("---------------------")
        _logger.debug(s)

        for x in o:
            _logger.debug("PandaID=%s" % x[0])
        return o
Example #3
	def __submit(self, name):
		# Looks up a job by name, submits it, and returns its PanDA ID
		# (-1 if the name is unknown).
		if name is None or name not in self.__joblist:
			return -1
		s,o = Client.submitJobs([self.__joblist[name][0]],srvID=self.__aSrvID)
		#print s
		#print o
		for x in o:
			print "PandaID=%s" % x[0]
			return x[0]
Example #4
def submitJobs(jobList):
    print 'Submit jobs'
    _logger.debug('Submit jobs')
    _logger.debug(str(jobList))
    s,o = Client.submitJobs(jobList)
    _logger.debug(o)
    _logger.debug(s)
    _logger.debug("---------------------")

    for x in o:
        _logger.debug("PandaID=%s" % x[0])
    return o
Example #5
    def generateJobs(self):

        for i in range(self.__nJobs):
            job = self.defineEvgen16Job(i)
            self.__jobList.append({'jobSpec': job, 'jobID': None})

        status, output = Client.submitJobs([job['jobSpec'] for job in self.__jobList]) #Return from submitJobs: ret.append((job.PandaID,job.jobDefinitionID,{'jobsetID':job.jobsetID}))

        assert status == 0, "Submission of jobs finished with status: %s" % status

        assert len(self.__jobList) == len(output), "Not all jobs seem to have been submitted properly"

        for job, ids in zip(self.__jobList, output):
            jobID = ids[0]
            job['jobID'] = jobID
            print("Generated job PandaID = %s" %jobID)

        return
Example #6
    def generateJobs(self):

        for i in range(self.__nJobs):
            job = self.defineEvgen16Job(i)
            self.__jobList.append({'jobSpec': job, 'jobID': None})

        status, output = Client.submitJobs(
            [job['jobSpec'] for job in self.__jobList]
        )  #Return from submitJobs: ret.append((job.PandaID,job.jobDefinitionID,{'jobsetID':job.jobsetID}))

        assert status == 0, "Submission of jobs finished with status: %s" % status

        assert len(self.__jobList) == len(
            output), "Not all jobs seem to have been submitted properly"

        for job, ids in zip(self.__jobList, output):
            jobID = ids[0]
            job['jobID'] = jobID
            _logger.info("Generated job PandaID = %s" % jobID)

        return
Example #7
job.prodDBlock        = 'pandatest.000003.dd.input'
job.destinationDBlock = 'panda.destDB.%s' % commands.getoutput('/usr/bin/uuidgen')
job.destinationSE     = 'BNL_SE'

ids = {'pandatest.000003.dd.input._00028.junk':'6c19e1fc-ee8c-4bae-bd4c-c9e5c73aca27',
       'pandatest.000003.dd.input._00033.junk':'98f79ba1-1793-4253-aac7-bdf90a51d1ee',
       'pandatest.000003.dd.input._00039.junk':'33660dd5-7cef-422a-a7fc-6c24cb10deb1'}
for lfn in ids.keys():
    file = FileSpec()
    file.lfn = lfn
    file.GUID = ids[file.lfn]
    file.dataset = 'pandatest.000003.dd.input'
    file.type = 'input'
    job.addFile(file)

s,o = Client.submitJobs([job])
print "---------------------"
print s
print o
print "---------------------"
s,o = Client.getJobStatus([4934, 4766, 4767, 4768, 4769])
print s
if s == 0:
    for job in o:
        if job is None:
            continue
        print job.PandaID
        for file in job.Files:
            print file.lfn,file.type
print "---------------------"
s,o = Client.queryPandaIDs([0])
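Example #7 stops before using the queryPandaIDs result. A hedged sketch of the follow-up, assuming the call keeps the same (status, result-list) convention as the other calls and returns one entry per queried job definition:

    s, o = Client.queryPandaIDs([0])
    if s == 0:
        for panda_id in o:  # entries assumed to be PandaIDs or 'NULL'
            print "PandaID=%s" % panda_id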
Example #8
job.transformation = 'http://pandawms.org/pandawms-jobcache/lsst-trf-phosim332.sh'
job.destinationDBlock = datasetName
#job.destinationSE     = destName
job.destinationSE     = 'local' 
job.currentPriority   = 1000
#job.prodSourceLabel   = 'ptest'
#job.prodSourceLabel = 'panda'
#job.prodSourceLabel = 'ptest'
#job.prodSourceLabel = 'test'
#job.prodSourceLabel = 'ptest'
### 2014-01-27
#job.prodSourceLabel = 'user'
job.prodSourceLabel = 'panda'
job.computingSite     = site
job.jobParameters = ""
job.VO = "lsst"

fileOL = FileSpec()
fileOL.lfn = "%s.job.log.tgz" % job.jobName
fileOL.destinationDBlock = job.destinationDBlock
fileOL.destinationSE     = job.destinationSE
fileOL.dataset           = job.destinationDBlock
fileOL.type = 'log'
job.addFile(fileOL)


s,o = Client.submitJobs([job],srvID=aSrvID)
print s
for x in o:
    print "PandaID=%s" % x[0]
Example #9
def main():
    logger.info('Getting tasks with status send and running')
    tasks_list = Task.objects.all().filter(
        Q(status='send') | Q(status='running'))
    #tasks_list = Task.objects.all().filter(name='dvcs2017align7_mu-')
    logger.info('Got list of %s tasks' % len(tasks_list))

    cdbServerArr = ['compassvm23.cern.ch', 'compassvm24.cern.ch']
    cdbServer = cdbServerArr[0]

    for t in tasks_list:
        max_send_amount = 1000

        logger.info('Getting jobs in status staged or failed for task %s' % t)
        jobs_list_count = Job.objects.all().filter(task=t).filter(
            attempt__lt=t.max_attempts).filter(
                Q(status='staged') | Q(status='failed')).count()
        if jobs_list_count > 50:
            jobs_list = Job.objects.all().filter(task=t).filter(
                attempt__lt=t.max_attempts).filter(
                    Q(status='staged') | Q(status='failed')).order_by(
                        '-number_of_events')[:max_send_amount]
        else:
            jobs_list = Job.objects.all().filter(task=t).filter(
                attempt__lt=t.max_attempts).filter(
                    Q(status='staged') | Q(status='failed')).order_by(
                        '-number_of_events')[:jobs_list_count]
        logger.info('Got list of %s jobs' % len(jobs_list))

        #    jobs_list = Job.objects.all().filter(task=t).filter(file='/castor/cern.ch/compass/data/2017/raw/W04/cdr12116-278485.raw')

        i = 0
        for j in jobs_list:
            if j.attempt >= j.task.max_attempts:
                logger.info(
                    'Maximum number of retry attempts reached for job %s of task %s'
                    % (j.file, j.task.name))
                continue

            if i > max_send_amount:
                break

            logger.info('Job %s of %s' % (i, max_send_amount))
            logger.info('Going to send job %s of %s task' %
                        (j.file, j.task.name))

            umark = commands.getoutput('uuidgen')
            datasetName = 'panda.destDB.%s' % umark
            destName = 'local'  # PanDA will not try to move output data, data will be placed by pilot (based on schedconfig)
            TMPRAWFILE = j.file[j.file.rfind('/') + 1:]
            logger.info(TMPRAWFILE)
            TMPMDSTFILE = 'mDST-%(runNumber)s-%(runChunk)s-%(prodSlt)s-%(phastVer)s.root' % {
                'input_file': j.file,
                'runNumber': j.run_number,
                'runChunk': j.chunk_number,
                'prodSlt': j.task.prodslt,
                'phastVer': j.task.phastver
            }
            logger.info(TMPMDSTFILE)
            TMPHISTFILE = '%(runNumber)s-%(runChunk)s-%(prodSlt)s.root' % {
                'runNumber': j.run_number,
                'runChunk': j.chunk_number,
                'prodSlt': j.task.prodslt
            }
            logger.info(TMPHISTFILE)
            TMPRICHFILE = 'gfile_%(runNumber)s-%(runChunk)s.gfile' % {
                'runNumber': j.run_number,
                'runChunk': j.chunk_number
            }
            logger.info(TMPRICHFILE)
            EVTDUMPFILE = 'evtdump%(prodSlt)s-%(runChunk)s-%(runNumber)s.raw' % {
                'prodSlt': j.task.prodslt,
                'runNumber': j.run_number,
                'runChunk': j.chunk_number
            }
            logger.info(EVTDUMPFILE)
            STDOUTFILE = '%(prodNameOnly)s.%(runNumber)s-%(runChunk)s-%(prodSlt)s.stdout' % {
                'prodNameOnly': j.task.production,
                'runNumber': j.run_number,
                'runChunk': j.chunk_number,
                'prodSlt': j.task.prodslt
            }
            logger.info(STDOUTFILE)
            STDERRFILE = '%(prodNameOnly)s.%(runNumber)s-%(runChunk)s-%(prodSlt)s.stderr' % {
                'prodNameOnly': j.task.production,
                'runNumber': j.run_number,
                'runChunk': j.chunk_number,
                'prodSlt': j.task.prodslt
            }
            logger.info(STDERRFILE)
            PRODSOFT = j.task.soft
            logger.info(PRODSOFT)

            ProdPathAndName = j.task.home + j.task.path + j.task.soft

            job = JobSpec()
            job.VO = 'vo.compass.cern.ch'
            job.taskID = j.task.id
            job.jobDefinitionID = 0
            job.jobName = '%(prodName)s-%(fileYear)s--%(runNumber)s-%(runChunk)s-%(prodSlt)s-%(phastVer)s' % {
                'prodName': j.task.production,
                'fileYear': j.task.year,
                'runNumber': j.run_number,
                'runChunk': j.chunk_number,
                'prodSlt': j.task.prodslt,
                'phastVer': j.task.phastver
            }
            job.transformation = j.task.type  # payload (can be URL as well)
            job.destinationDBlock = datasetName
            job.destinationSE = destName
            job.currentPriority = 2000
            if j.task.type == 'DDD filtering':
                job.currentPriority = 1000
            job.prodSourceLabel = 'prod_test'
            job.computingSite = j.task.site
            job.attemptNr = j.attempt + 1
            job.maxAttempt = j.task.max_attempts
            if j.status == 'failed':
                job.parentID = j.panda_id
            head, tail = os.path.split(j.file)

            cdbServer = cdbServerArr[random.randrange(len(cdbServerArr))]

            # logs, and all files generated during execution will be placed in log (except output file)
            #job.jobParameters='source /afs/cern.ch/project/eos/installation/compass/etc/setup.sh;export EOS_MGM_URL=root://eoscompass.cern.ch;export PATH=/afs/cern.ch/project/eos/installation/compass/bin:$PATH;ppwd=$(pwd);echo $ppwd;export TMPMDSTFILE=%(TMPMDSTFILE)s;export TMPHISTFILE=%(TMPHISTFILE)s;export TMPRICHFILE=%(TMPRICHFILE)s;coralpath=%(ProdPathAndName)s/coral;echo $coralpath;cd -P $coralpath;export coralpathsetup=$coralpath"/setup.sh";echo $coralpathsetup;source $coralpathsetup;cd $ppwd;$CORAL/../phast/coral/coral.exe %(ProdPathAndName)s/template.opt;xrdcp -np $ppwd/%(TMPMDSTFILE)s xroot://eoscompass.cern.ch//eos/compass/%(prodName)s/mDST/%(TMPMDSTFILE)s;xrdcp -np $ppwd/%(TMPHISTFILE)s xroot://eoscompass.cern.ch//eos/compass/%(prodName)s/histos/%(TMPHISTFILE)s;metadataxml=$(ls metadata-*);echo $metadataxml;cp $metadataxml $metadataxml.PAYLOAD;' % {'TMPMDSTFILE': TMPMDSTFILE, 'TMPHISTFILE': TMPHISTFILE, 'TMPRICHFILE': TMPRICHFILE, 'input_file': input_file, 'ProdPathAndName': ProdPathAndName, 'prodName': prodName}
            if j.task.type == 'test production' or j.task.type == 'mass production' or j.task.type == 'technical production':
                if j.task.site == 'BW_COMPASS_MCORE':
                    job.jobParameters = 'ppwd=$(pwd);export COMPASS_SW_PREFIX=/scratch/sciteam/criedl/projectdata/;export COMPASS_SW_PATH=%(prodPath)s;export COMPASS_PROD_NAME=%(prodName)s;export TMPRAWFILE=%(TMPRAWFILE)s;export TMPMDSTFILE=%(TMPMDSTFILE)s;export TMPHISTFILE=%(TMPHISTFILE)s;export TMPRICHFILE=%(TMPRICHFILE)s;export prodSlt=%(prodSlt)s;export EVTDUMPFILE=%(EVTDUMPFILE)s;export PRODSOFT=%(PRODSOFT)s;cp %(input_file)s .;coralpath=%(ProdPathAndName)s/coral;cd -P $coralpath;export coralpathsetup=$coralpath"/setup.sh";source $coralpathsetup;cd $ppwd;$CORAL/../phast/coral/coral.exe %(ProdPathAndName)s/%(template)s;if [ ! -s testevtdump.raw ]; then echo "PanDA message: the file is empty">testevtdump.raw; fi;cp payload_stderr.txt payload_stderr.out;cp payload_stdout.txt payload_stdout.out;gzip payload_stderr.out;gzip payload_stdout.out;rm %(tail)s' % {
                        'TMPRAWFILE': TMPRAWFILE,
                        'TMPMDSTFILE': TMPMDSTFILE,
                        'TMPHISTFILE': TMPHISTFILE,
                        'TMPRICHFILE': TMPRICHFILE,
                        'PRODSOFT': PRODSOFT,
                        'input_file': j.file,
                        'ProdPathAndName': ProdPathAndName,
                        'prodPath': j.task.path,
                        'prodName': j.task.production,
                        'template': j.task.template,
                        'tail': tail,
                        'prodSlt': j.task.prodslt,
                        'EVTDUMPFILE': EVTDUMPFILE,
                        'STDOUTFILE': STDOUTFILE,
                        'STDERRFILE': STDERRFILE
                    }
                else:
                    job.jobParameters = 'export EOS_MGM_URL=root://eoscompass.cern.ch;ppwd=$(pwd);export COMPASS_SW_PREFIX=/eos/experiment/compass/;export COMPASS_SW_PATH=%(prodPath)s;export COMPASS_PROD_NAME=%(prodName)s;export TMPRAWFILE=%(TMPRAWFILE)s;export TMPMDSTFILE=%(TMPMDSTFILE)s;export TMPHISTFILE=%(TMPHISTFILE)s;export TMPRICHFILE=%(TMPRICHFILE)s;export prodSlt=%(prodSlt)s;export EVTDUMPFILE=%(EVTDUMPFILE)s;export PRODSOFT=%(PRODSOFT)s;xrdcp -N -f root://castorpublic.cern.ch/%(input_file)s\?svcClass=compasscdr .;coralpath=%(ProdPathAndName)s/coral;cd -P $coralpath;export coralpathsetup=$coralpath"/setup.sh";source $coralpathsetup;cd $ppwd;export CDBSERVER=%(cdbServer)s;$CORAL/../phast/coral/coral.exe %(ProdPathAndName)s/%(template)s;if [ ! -s testevtdump.raw ]; then echo "PanDA message: the file is empty">testevtdump.raw; fi;cp payload_stderr.txt payload_stderr.out;cp payload_stdout.txt payload_stdout.out;gzip payload_stderr.out;gzip payload_stdout.out;rm %(tail)s' % {
                        'TMPRAWFILE': TMPRAWFILE,
                        'TMPMDSTFILE': TMPMDSTFILE,
                        'TMPHISTFILE': TMPHISTFILE,
                        'TMPRICHFILE': TMPRICHFILE,
                        'PRODSOFT': PRODSOFT,
                        'input_file': j.file,
                        'ProdPathAndName': ProdPathAndName,
                        'prodPath': j.task.path,
                        'prodName': j.task.production,
                        'template': j.task.template,
                        'tail': tail,
                        'prodSlt': j.task.prodslt,
                        'EVTDUMPFILE': EVTDUMPFILE,
                        'STDOUTFILE': STDOUTFILE,
                        'STDERRFILE': STDERRFILE,
                        'cdbServer': cdbServer
                    }
            if j.task.type == 'DDD filtering':
                job.jobParameters = 'export EOS_MGM_URL=root://eoscompass.cern.ch;ppwd=$(pwd);export COMPASS_SW_PREFIX=/eos/experiment/compass/;export COMPASS_SW_PATH=%(prodPath)s;export COMPASS_PROD_NAME=%(prodName)s;export TMPRAWFILE=%(TMPRAWFILE)s;export TMPMDSTFILE=%(TMPMDSTFILE)s;export TMPHISTFILE=%(TMPHISTFILE)s;export TMPRICHFILE=%(TMPRICHFILE)s;export prodSlt=%(prodSlt)s;export EVTDUMPFILE=%(EVTDUMPFILE)s;export PRODSOFT=%(PRODSOFT)s;xrdcp -N -f root://castorpublic.cern.ch/%(input_file)s\?svcClass=compasscdr .;coralpath=%(ProdPathAndName)s/coral;cd -P $coralpath;export coralpathsetup=$coralpath"/setup.sh";source $coralpathsetup;cd $ppwd;$CORAL/src/DaqDataDecoding/examples/how-to/ddd --filter-CAL --out=testevtdump.raw %(TMPRAWFILE)s;if [ ! -s testevtdump.raw ]; then echo "PanDA message: the file is empty">testevtdump.raw; fi;cp payload_stderr.txt payload_stderr.out;cp payload_stdout.txt payload_stdout.out;gzip payload_stderr.out;gzip payload_stdout.out;rm %(tail)s' % {
                    'TMPRAWFILE': TMPRAWFILE,
                    'TMPMDSTFILE': TMPMDSTFILE,
                    'TMPHISTFILE': TMPHISTFILE,
                    'TMPRICHFILE': TMPRICHFILE,
                    'PRODSOFT': PRODSOFT,
                    'input_file': j.file,
                    'ProdPathAndName': ProdPathAndName,
                    'prodPath': j.task.path,
                    'prodName': j.task.production,
                    'template': j.task.template,
                    'tail': tail,
                    'prodSlt': j.task.prodslt,
                    'EVTDUMPFILE': EVTDUMPFILE,
                    'STDOUTFILE': STDOUTFILE,
                    'STDERRFILE': STDERRFILE
                }

            #     fileIRaw = FileSpec()
            #     fileIRaw.lfn = "%s" % (input_file)
            #     fileIRaw.destinationDBlock = job.destinationDBlock
            #     fileIRaw.destinationSE     = job.destinationSE
            #     fileIRaw.dataset           = job.destinationDBlock
            #     fileIRaw.type = 'input'
            #     job.addFile(fileIRaw)

            fileOstdout = FileSpec()
            fileOstdout.lfn = "payload_stdout.out.gz"
            fileOstdout.destinationDBlock = job.destinationDBlock
            fileOstdout.destinationSE = job.destinationSE
            fileOstdout.dataset = job.destinationDBlock
            fileOstdout.type = 'output'
            job.addFile(fileOstdout)

            fileOstderr = FileSpec()
            fileOstderr.lfn = "payload_stderr.out.gz"
            fileOstderr.destinationDBlock = job.destinationDBlock
            fileOstderr.destinationSE = job.destinationSE
            fileOstderr.dataset = job.destinationDBlock
            fileOstderr.type = 'output'
            job.addFile(fileOstderr)

            fileOLog = FileSpec()
            fileOLog.lfn = "%(prodName)s-%(runNumber)s-%(runChunk)s-%(prodSlt)s-%(phastVer)s.job.log.tgz" % {
                'prodName': j.task.production,
                'runNumber': j.run_number,
                'runChunk': j.chunk_number,
                'prodSlt': j.task.prodslt,
                'phastVer': j.task.phastver
            }
            fileOLog.destinationDBlock = job.destinationDBlock
            fileOLog.destinationSE = job.destinationSE
            fileOLog.dataset = job.destinationDBlock
            fileOLog.type = 'log'
            job.addFile(fileOLog)

            if j.task.type == 'test production' or j.task.type == 'mass production' or j.task.type == 'technical production':
                fileOmDST = FileSpec()
                fileOmDST.lfn = "%s" % (TMPMDSTFILE)
                fileOmDST.destinationDBlock = job.destinationDBlock
                fileOmDST.destinationSE = job.destinationSE
                fileOmDST.dataset = job.destinationDBlock
                fileOmDST.type = 'output'
                job.addFile(fileOmDST)

                fileOTrafdic = FileSpec()
                fileOTrafdic.lfn = "%s" % (TMPHISTFILE)
                fileOTrafdic.destinationDBlock = job.destinationDBlock
                fileOTrafdic.destinationSE = job.destinationSE
                fileOTrafdic.dataset = job.destinationDBlock
                fileOTrafdic.type = 'output'
                job.addFile(fileOTrafdic)

            if j.task.type == 'test production' or j.task.type == 'mass production' or j.task.type == 'technical production' or j.task.type == 'DDD filtering':
                fileOtestevtdump = FileSpec()
                fileOtestevtdump.lfn = "testevtdump.raw"
                fileOtestevtdump.destinationDBlock = job.destinationDBlock
                fileOtestevtdump.destinationSE = job.destinationSE
                fileOtestevtdump.dataset = job.destinationDBlock
                fileOtestevtdump.type = 'output'
                job.addFile(fileOtestevtdump)

            s, o = Client.submitJobs([job], srvID=aSrvID)
            logger.info(s)
            for x in o:
                logger.info("PandaID=%s" % x[0])
                if x[0] != 0 and x[0] != 'NULL':
                    j_update = Job.objects.get(id=j.id)
                    j_update.panda_id = x[0]
                    j_update.status = 'sent'
                    j_update.attempt = j_update.attempt + 1
                    j_update.date_updated = timezone.now()

                    try:
                        j_update.save()
                        logger.info('Job %s with PandaID %s updated at %s' %
                                    (j.id, x[0], timezone.now()))

                        if j_update.task.status == 'send':
                            logger.info(
                                'Going to update status of task %s from send to running'
                                % j_update.task.name)
                            t_update = Task.objects.get(id=j_update.task.id)
                            t_update.status = 'running'
                            t_update.date_updated = timezone.now()

                            try:
                                t_update.save()
                                logger.info('Task %s updated' % t_update.name)
                            except IntegrityError as e:
                                logger.exception(
                                    'Unique together constraint caught, was not saved')
                            except DatabaseError as e:
                                logger.exception(
                                    'Something went wrong while saving: %s' %
                                    e.message)

                    except IntegrityError as e:
                        logger.exception(
                            'Unique together constraint caught, was not saved')
                    except DatabaseError as e:
                        logger.exception(
                            'Something went wrong while saving: %s' %
                            e.message)
                else:
                    logger.info('Job %s was not added to PanDA' % j.id)
            i += 1

    logger.info('done')
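Examples #9, #10, #14, and #15 all repeat the same guard that a returned PandaID is real before touching the database. A small helper capturing that pattern (the function name is mine, not from the source):

    def accepted_panda_id(row):
        # A result row is assumed to be (PandaID, jobDefinitionID, {...});
        # 0 or 'NULL' marks a job the server did not accept.
        panda_id = row[0]
        return None if panda_id in (0, 'NULL') else panda_id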
Example #10
def send_merging_job(task, files_list, merge_chunk_number):
    logger.info(
        'Going to send merging job for task %s run number %s and merge chunk number %s'
        % (task, files_list[0].run_number, merge_chunk_number))

    input_files_copy = ''
    input_files_rm = ''
    for j in files_list:
        TMPDUMPFILE = 'evtdump%(prodSlt)s-%(runChunk)s-%(runNumber)s.raw' % {
            'runNumber': j.run_number,
            'runChunk': j.chunk_number,
            'prodSlt': j.task.prodslt
        }
        if j.task.site == 'BW_COMPASS_MCORE':
            input_files_copy += ' cp $dumpspath/' + TMPDUMPFILE + ' .;'
        else:
            input_files_copy += ' xrdcp -N -f $dumpspath/' + TMPDUMPFILE + ' .;'

    datasetName = '%(prodNameOnly)s.%(runNumber)s-%(prodSlt)s-%(phastVer)s-merging-dump' % {
        'prodNameOnly': task.production,
        'runNumber': j.run_number,
        'prodSlt': task.prodslt,
        'phastVer': task.phastver
    }
    logger.info(datasetName)
    destName = 'local'  # PanDA will not try to move output data, data will be placed by pilot (based on schedconfig)
    MERGEDDUMPFILE = 'evtdump%(prodSlt)s-%(runNumber)s.raw' % {
        'runNumber': j.run_number,
        'prodSlt': task.prodslt
    }
    if format(merge_chunk_number, '03d') != '000':
        MERGEDDUMPFILE = MERGEDDUMPFILE + '.' + format(merge_chunk_number,
                                                       '03d')
    logger.info(MERGEDDUMPFILE)
    PRODSOFT = task.soft
    ProdPathAndName = task.home + task.path + task.soft
    if j.task.site == 'BW_COMPASS_MCORE':
        dumpsPath = '/scratch/sciteam/criedl/projectdata/' + task.path + task.soft + '/evtdump/slot' + str(
            task.prodslt)
    else:
        dumpsPath = 'root://eoscompass.cern.ch//eos/experiment/compass/' + task.path + task.soft + '/evtdump/slot' + str(
            task.prodslt)

    job = JobSpec()
    job.VO = 'vo.compass.cern.ch'
    job.taskID = task.id
    job.jobDefinitionID = 0
    job.jobName = '%(prodNameOnly)s-merge-dump-%(runNumber)s-ch%(mergeChunkNumber)s' % {
        'prodNameOnly': task.production,
        'runNumber': j.run_number,
        'mergeChunkNumber': format(merge_chunk_number, '03d')
    }
    job.transformation = 'merging dump'  # payload (can be URL as well)
    job.destinationDBlock = datasetName
    job.destinationSE = destName
    job.currentPriority = 5000
    job.prodSourceLabel = 'prod_test'
    job.computingSite = task.site
    job.attemptNr = j.attempt_merging_evntdmp + 1
    job.maxAttempt = j.task.max_attempts
    if j.status_merging_evntdmp == 'failed':
        job.parentID = j.panda_id_merging_evntdmp

    if j.task.site == 'BW_COMPASS_MCORE':
        job.jobParameters = 'ppwd=$(pwd);ppwd=$(pwd);export COMPASS_SW_PREFIX=/scratch/sciteam/criedl/projectdata/;export COMPASS_SW_PATH=%(prodPath)s;export COMPASS_PROD_NAME=%(prodName)s;export prodSlt=%(prodSlt)s;export MERGEDDUMPFILE=%(MERGEDDUMPFILE)s;export dumpspath=%(dumpsPath)s;export PRODSOFT=%(PRODSOFT)s;%(input_files_copy)scat evtdump%(prodSlt)s-*-*.raw > %(MERGEDDUMPFILE)s;rm evtdump%(prodSlt)s-*-*.raw;' % {
            'MERGEDDUMPFILE': MERGEDDUMPFILE,
            'dumpsPath': dumpsPath,
            'PRODSOFT': PRODSOFT,
            'input_files_copy': input_files_copy,
            'ProdPathAndName': ProdPathAndName,
            'prodPath': task.path,
            'prodName': task.production,
            'prodSlt': task.prodslt
        }
    else:
        job.jobParameters = 'export EOS_MGM_URL=root://eoscompass.cern.ch;ppwd=$(pwd);ppwd=$(pwd);export COMPASS_SW_PREFIX=/eos/experiment/compass/;export COMPASS_SW_PATH=%(prodPath)s;export COMPASS_PROD_NAME=%(prodName)s;export prodSlt=%(prodSlt)s;export MERGEDDUMPFILE=%(MERGEDDUMPFILE)s;export dumpspath=%(dumpsPath)s;export PRODSOFT=%(PRODSOFT)s;%(input_files_copy)scat evtdump%(prodSlt)s-*-*.raw > %(MERGEDDUMPFILE)s;rm evtdump%(prodSlt)s-*-*.raw;' % {
            'MERGEDDUMPFILE': MERGEDDUMPFILE,
            'dumpsPath': dumpsPath,
            'PRODSOFT': PRODSOFT,
            'input_files_copy': input_files_copy,
            'ProdPathAndName': ProdPathAndName,
            'prodPath': task.path,
            'prodName': task.production,
            'prodSlt': task.prodslt
        }

    fileOLog = FileSpec()
    fileOLog.lfn = "%s.job.log.tgz" % (job.jobName)
    fileOLog.destinationDBlock = job.destinationDBlock
    fileOLog.destinationSE = job.destinationSE
    fileOLog.dataset = job.destinationDBlock
    fileOLog.type = 'log'
    job.addFile(fileOLog)

    fileOdump = FileSpec()
    fileOdump.lfn = "%s" % (MERGEDDUMPFILE)
    fileOdump.destinationDBlock = job.destinationDBlock
    fileOdump.destinationSE = job.destinationSE
    fileOdump.dataset = job.destinationDBlock
    fileOdump.type = 'output'
    job.addFile(fileOdump)

    #     fileOstdout = FileSpec()
    #     fileOstdout.lfn = "payload_stdout.txt"
    #     fileOstdout.destinationDBlock = job.destinationDBlock
    #     fileOstdout.destinationSE     = job.destinationSE
    #     fileOstdout.dataset           = job.destinationDBlock
    #     fileOstdout.type = 'output'
    #     job.addFile(fileOstdout)

    #     fileOstderr = FileSpec()
    #     fileOstderr.lfn = "payload_stderr.txt"
    #     fileOstderr.destinationDBlock = job.destinationDBlock
    #     fileOstderr.destinationSE     = job.destinationSE
    #     fileOstderr.dataset           = job.destinationDBlock
    #     fileOstderr.type = 'output'
    #     job.addFile(fileOstderr)

    s, o = Client.submitJobs([job], srvID=aSrvID)
    logger.info(s)
    for x in o:
        logger.info("PandaID=%s" % x[0])
        if x[0] != 0 and x[0] != 'NULL':
            for j in files_list:
                j_update = Job.objects.get(id=j.id)
                j_update.panda_id_merging_evntdmp = x[0]
                j_update.status_merging_evntdmp = 'sent'
                j_update.attempt_merging_evntdmp = j_update.attempt_merging_evntdmp + 1
                j_update.chunk_number_merging_evntdmp = merge_chunk_number
                j_update.date_updated = today

                try:
                    j_update.save()
                    logger.info('Job %s with PandaID %s updated' %
                                (j.id, x[0]))
                except IntegrityError as e:
                    logger.exception('Unique together constraint caught, was not saved')
                except DatabaseError as e:
                    logger.exception('Something went wrong while saving: %s' %
                                     e.message)
        else:
            logger.info('Job %s was not added to PanDA' % j.id)
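A hypothetical call site for send_merging_job: the task variable, the 'staged' value of status_merging_evntdmp, and the chunk size of 8 are all assumptions for illustration, not taken from the source.

    CHUNK = 8  # illustrative chunk size, not from the source
    staged = list(Job.objects.all().filter(task=task, status_merging_evntdmp='staged'))
    for n in range(0, len(staged), CHUNK):
        send_merging_job(task, staged[n:n + CHUNK], n // CHUNK)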
Example #11
ids = {
    'pandatest.000003.dd.input._00028.junk':
    '6c19e1fc-ee8c-4bae-bd4c-c9e5c73aca27',
    'pandatest.000003.dd.input._00033.junk':
    '98f79ba1-1793-4253-aac7-bdf90a51d1ee',
    'pandatest.000003.dd.input._00039.junk':
    '33660dd5-7cef-422a-a7fc-6c24cb10deb1'
}
for lfn in ids.keys():
    file = FileSpec()
    file.lfn = lfn
    file.GUID = ids[file.lfn]
    file.dataset = 'pandatest.000003.dd.input'
    file.type = 'input'
    job.addFile(file)

s, o = Client.submitJobs([job])
print "---------------------"
print s
print o
print "---------------------"
s, o = Client.getJobStatus([4934, 4766, 4767, 4768, 4769])
print s
if s == 0:
    for job in o:
        if job is None:
            continue
        print job.PandaID
        for file in job.Files:
            print file.lfn, file.type
print "---------------------"
s, o = Client.queryPandaIDs([0])
Example #12
def main():
    logger.info('Getting tasks with status send and running')
    #    tasks_list = Task.objects.all().filter(Q(status='send') | Q(status='running'))
    tasks_list = Task.objects.all().filter(name='dvcs2016P09t2r13v1_mu+')
    logger.info('Got list of %s tasks' % len(tasks_list))

    for t in tasks_list:
        logger.info('Getting jobs in status defined or failed for task %s' % t)
        jobs_list_count = Job.objects.all().filter(task=t).count()
        if jobs_list_count > 50:
            jobs_list = Job.objects.all().filter(
                task=t).order_by('id')[:max_send_amount]
        else:
            jobs_list = Job.objects.all().filter(
                task=t).order_by('id')[:jobs_list_count]
        logger.info('Got list of %s jobs' % len(jobs_list))

        i = 0
        for j in jobs_list:
            if i >= max_send_amount:
                break

            logger.info('Going to send job %s of %s task' %
                        (j.file, j.task.name))

            umark = commands.getoutput('uuidgen')
            datasetName = 'panda.destDB.%s' % umark
            destName = 'COMPASSPRODDISK'  # PanDA will not try to move output data, data will be placed by pilot (based on schedconfig)
            TMPRAWFILE = j.file[j.file.rfind('/') + 1:]
            logger.info(TMPRAWFILE)
            TMPMDSTFILE = 'mDST-%(runNumber)s-%(runChunk)s-%(prodSlt)s-%(phastVer)s.root' % {
                'input_file': j.file,
                'runNumber': j.run_number,
                'runChunk': j.chunk_number,
                'prodSlt': j.task.prodslt,
                'phastVer': j.task.phastver
            }
            logger.info(TMPMDSTFILE)
            TMPHISTFILE = '%(runNumber)s-%(runChunk)s-%(prodSlt)s.root' % {
                'runNumber': j.run_number,
                'runChunk': j.chunk_number,
                'prodSlt': j.task.prodslt
            }
            logger.info(TMPHISTFILE)
            TMPRICHFILE = 'gfile_%(runNumber)s-%(runChunk)s.gfile' % {
                'runNumber': j.run_number,
                'runChunk': j.chunk_number
            }
            logger.info(TMPRICHFILE)
            EVTDUMPFILE = 'evtdump%(prodSlt)s-%(runChunk)s-%(runNumber)s.raw' % {
                'prodSlt': j.task.prodslt,
                'runNumber': j.run_number,
                'runChunk': j.chunk_number
            }
            logger.info(EVTDUMPFILE)
            STDOUTFILE = '%(prodNameOnly)s.%(runNumber)s-%(runChunk)s-%(prodSlt)s.stdout' % {
                'prodNameOnly': j.task.soft,
                'runNumber': j.run_number,
                'runChunk': j.chunk_number,
                'prodSlt': j.task.prodslt
            }
            logger.info(STDOUTFILE)
            STDERRFILE = '%(prodNameOnly)s.%(runNumber)s-%(runChunk)s-%(prodSlt)s.stderr' % {
                'prodNameOnly': j.task.soft,
                'runNumber': j.run_number,
                'runChunk': j.chunk_number,
                'prodSlt': j.task.prodslt
            }
            logger.info(STDERRFILE)
            try:
                file_year = j.file.split('/')[5]
                logger.info(file_year)
            except Exception:
                logger.error('Error while splitting file to get year')
                sys.exit(1)

            ProdPathAndName = j.task.home + j.task.path + j.task.soft

            job = JobSpec()
            job.taskID = j.task.id
            job.jobDefinitionID = 0
            job.jobName = '%(prodName)s-%(fileYear)s--%(runNumber)s-%(runChunk)s-%(prodSlt)s-%(phastVer)s' % {
                'prodName': j.task.soft,
                'fileYear': file_year,
                'runNumber': j.run_number,
                'runChunk': j.chunk_number,
                'prodSlt': j.task.prodslt,
                'phastVer': j.task.phastver
            }
            job.transformation = j.task.type  # payload (can be URL as well)
            job.destinationDBlock = datasetName
            job.destinationSE = destName
            job.currentPriority = 2000
            job.prodSourceLabel = 'prod_test'
            job.computingSite = site
            job.attemptNr = j.attempt + 1
            job.maxAttempt = j.task.max_attempts
            if j.status == 'failed':
                job.parentID = j.panda_id
            head, tail = os.path.split(j.file)
            #            job.transferType = 'direct'
            job.sourceSite = 'CERN_COMPASS_PROD'

            # logs, and all files generated during execution will be placed in log (except output file)
            #job.jobParameters='source /afs/cern.ch/project/eos/installation/compass/etc/setup.sh;export EOS_MGM_URL=root://eoscompass.cern.ch;export PATH=/afs/cern.ch/project/eos/installation/compass/bin:$PATH;ppwd=$(pwd);echo $ppwd;export TMPMDSTFILE=%(TMPMDSTFILE)s;export TMPHISTFILE=%(TMPHISTFILE)s;export TMPRICHFILE=%(TMPRICHFILE)s;coralpath=%(ProdPathAndName)s/coral;echo $coralpath;cd -P $coralpath;export coralpathsetup=$coralpath"/setup.sh";echo $coralpathsetup;source $coralpathsetup;cd $ppwd;$CORAL/../phast/coral/coral.exe %(ProdPathAndName)s/template.opt;xrdcp -np $ppwd/%(TMPMDSTFILE)s xroot://eoscompass.cern.ch//eos/compass/%(prodName)s/mDST/%(TMPMDSTFILE)s;xrdcp -np $ppwd/%(TMPHISTFILE)s xroot://eoscompass.cern.ch//eos/compass/%(prodName)s/histos/%(TMPHISTFILE)s;metadataxml=$(ls metadata-*);echo $metadataxml;cp $metadataxml $metadataxml.PAYLOAD;' % {'TMPMDSTFILE': TMPMDSTFILE, 'TMPHISTFILE': TMPHISTFILE, 'TMPRICHFILE': TMPRICHFILE, 'input_file': input_file, 'ProdPathAndName': ProdPathAndName, 'prodName': prodName}
            job.jobParameters = 'export EOS_MGM_URL=root://eoscompass.cern.ch;ppwd=$(pwd);export COMPASS_SW_PREFIX=/eos/experiment/compass/;export COMPASS_SW_PATH=%(prodPath)s;export COMPASS_PROD_NAME=%(prodName)s;export TMPRAWFILE=%(TMPRAWFILE)s;export TMPMDSTFILE=%(TMPMDSTFILE)s;export TMPHISTFILE=%(TMPHISTFILE)s;export TMPRICHFILE=%(TMPRICHFILE)s;export prodSlt=%(prodSlt)s;export EVTDUMPFILE=%(EVTDUMPFILE)s;xrdcp -N -f root://castorpublic.cern.ch/%(input_file)s\?svcClass=compasscdr .;coralpath=%(ProdPathAndName)s/coral;cd -P $coralpath;export coralpathsetup=$coralpath"/setup.sh";source $coralpathsetup;cd $ppwd;$CORAL/../phast/coral/coral.exe %(ProdPathAndName)s/%(template)s;if [ ! -s testevtdump.raw ]; then echo "PanDA message: the file is empty">testevtdump.raw; fi;cp payload_stderr.txt payload_stderr.out;cp payload_stdout.txt payload_stdout.out;rm %(tail)s' % {
                'TMPRAWFILE': TMPRAWFILE,
                'TMPMDSTFILE': TMPMDSTFILE,
                'TMPHISTFILE': TMPHISTFILE,
                'TMPRICHFILE': TMPRICHFILE,
                'input_file': j.file,
                'ProdPathAndName': ProdPathAndName,
                'prodPath': j.task.path,
                'prodName': j.task.soft,
                'template': j.task.template,
                'tail': tail,
                'prodSlt': j.task.prodslt,
                'EVTDUMPFILE': EVTDUMPFILE,
                'STDOUTFILE': STDOUTFILE,
                'STDERRFILE': STDERRFILE
            }

            fileIRaw = FileSpec()
            fileIRaw.lfn = "%s" % (j.file)
            fileIRaw.GUID = '5874a461-61d3-4543-8f34-6fd7a4624e78'
            fileIRaw.fsize = 1073753368
            fileIRaw.checksum = '671608be'
            fileIRaw.destinationDBlock = job.destinationDBlock
            fileIRaw.destinationSE = job.destinationSE
            fileIRaw.dataset = job.destinationDBlock
            fileIRaw.type = 'input'
            job.addFile(fileIRaw)

            fileOstdout = FileSpec()
            fileOstdout.lfn = "payload_stdout.txt"
            fileOstdout.destinationDBlock = job.destinationDBlock
            fileOstdout.destinationSE = job.destinationSE
            fileOstdout.dataset = job.destinationDBlock
            fileOstdout.type = 'output'
            job.addFile(fileOstdout)

            fileOstderr = FileSpec()
            fileOstderr.lfn = "payload_stderr.txt"
            fileOstderr.destinationDBlock = job.destinationDBlock
            fileOstderr.destinationSE = job.destinationSE
            fileOstderr.dataset = job.destinationDBlock
            fileOstderr.type = 'output'
            job.addFile(fileOstderr)

            fileOLog = FileSpec()
            fileOLog.lfn = "%(prodName)s-%(runNumber)s-%(runChunk)s-%(prodSlt)s-%(phastVer)s.job.log.tgz" % {
                'prodName': j.task.soft,
                'runNumber': j.run_number,
                'runChunk': j.chunk_number,
                'prodSlt': j.task.prodslt,
                'phastVer': j.task.phastver
            }
            fileOLog.destinationDBlock = job.destinationDBlock
            fileOLog.destinationSE = job.destinationSE
            fileOLog.dataset = job.destinationDBlock
            fileOLog.type = 'log'
            job.addFile(fileOLog)

            fileOmDST = FileSpec()
            fileOmDST.lfn = "%s" % (TMPMDSTFILE)
            fileOmDST.destinationDBlock = job.destinationDBlock
            fileOmDST.destinationSE = job.destinationSE
            fileOmDST.dataset = job.destinationDBlock
            fileOmDST.type = 'output'
            job.addFile(fileOmDST)

            fileOTrafdic = FileSpec()
            fileOTrafdic.lfn = "%s" % (TMPHISTFILE)
            fileOTrafdic.destinationDBlock = job.destinationDBlock
            fileOTrafdic.destinationSE = job.destinationSE
            fileOTrafdic.dataset = job.destinationDBlock
            fileOTrafdic.type = 'output'
            job.addFile(fileOTrafdic)

            fileOtestevtdump = FileSpec()
            fileOtestevtdump.lfn = "testevtdump.raw"
            fileOtestevtdump.destinationDBlock = job.destinationDBlock
            fileOtestevtdump.destinationSE = job.destinationSE
            fileOtestevtdump.dataset = job.destinationDBlock
            fileOtestevtdump.type = 'output'
            job.addFile(fileOtestevtdump)

            s, o = Client.submitJobs([job], srvID=aSrvID)
            logger.info(s)
            logger.info(o)
            #             for x in o:
            #                 logger.info("PandaID=%s" % x[0])
            #                 today = datetime.datetime.today()
            #
            #                 if x[0] != 0 and x[0] != 'NULL':
            #                     j_update = Job.objects.get(id=j.id)
            #                     j_update.panda_id = x[0]
            #                     j_update.status = 'sent'
            #                     j_update.attempt = j_update.attempt + 1
            #                     j_update.date_updated = today
            #
            #                     try:
            #                         j_update.save()
            #                         logger.info('Job %s with PandaID %s updated at %s' % (j.id, x[0], today))
            #                     except IntegrityError as e:
            #                         logger.exception('Unique together catched, was not saved')
            #                     except DatabaseError as e:
            #                         logger.exception('Something went wrong while saving: %s' % e.message)
            #                 else:
            #                     logger.info('Job %s was not added to PanDA' % j.id)
            i += 1

    logger.info('done')
Example #13
def main():
    logger.info('Getting tasks with status send and running')
    #    tasks_list = Task.objects.all().filter(Q(status='send') | Q(status='running'))
    tasks_list = Task.objects.all().filter(name='TestTaskBW')
    logger.info('Got list of %s tasks' % len(tasks_list))

    for t in tasks_list:
        logger.info('Getting jobs in status defined or failed for task %s' % t)
        jobs_list_count = Job.objects.all().filter(task=t).count()
        if jobs_list_count > 50:
            jobs_list = Job.objects.all().filter(
                task=t).order_by('id')[:max_send_amount]
        else:
            jobs_list = Job.objects.all().filter(
                task=t).order_by('id')[:jobs_list_count]
        logger.info('Got list of %s jobs' % len(jobs_list))

        i = 0
        for j in jobs_list:
            if i >= max_send_amount:
                break

            logger.info('Going to send job %s of %s task' %
                        (j.file, j.task.name))

            umark = commands.getoutput('uuidgen')
            datasetName = 'panda.destDB.%s' % umark
            destName = 'local'  # PanDA will not try to move output data, data will be placed by pilot (based on schedconfig)

            job = JobSpec()
            job.taskID = j.task.id
            job.jobDefinitionID = 0
            job.jobName = 'hello world'
            job.transformation = j.task.type  # payload (can be URL as well)
            job.destinationDBlock = datasetName
            job.destinationSE = destName
            job.currentPriority = 2000
            job.prodSourceLabel = 'test'
            job.computingSite = site
            job.attemptNr = 1
            job.maxAttempt = 5
            job.sourceSite = 'BW_COMPASS_MCORE'
            job.VO = 'local'

            # logs, and all files generated during execution will be placed in log (except output file)
            job.jobParameters = 'python /u/sciteam/petrosya/panda/hello.py'

            fileOLog = FileSpec()
            fileOLog.lfn = "log.job.log.tgz"
            fileOLog.destinationDBlock = job.destinationDBlock
            fileOLog.destinationSE = job.destinationSE
            fileOLog.dataset = job.destinationDBlock
            fileOLog.type = 'log'
            job.addFile(fileOLog)

            s, o = Client.submitJobs([job], srvID=aSrvID)
            logger.info(s)
            logger.info(o)
            #             for x in o:
            #                 logger.info("PandaID=%s" % x[0])
            #                 today = datetime.datetime.today()
            #
            #                 if x[0] != 0 and x[0] != 'NULL':
            #                     j_update = Job.objects.get(id=j.id)
            #                     j_update.panda_id = x[0]
            #                     j_update.status = 'sent'
            #                     j_update.attempt = j_update.attempt + 1
            #                     j_update.date_updated = today
            #
            #                     try:
            #                         j_update.save()
            #                         logger.info('Job %s with PandaID %s updated at %s' % (j.id, x[0], today))
            #                     except IntegrityError as e:
            #                         logger.exception('Unique together catched, was not saved')
            #                     except DatabaseError as e:
            #                         logger.exception('Something went wrong while saving: %s' % e.message)
            #                 else:
            #                     logger.info('Job %s was not added to PanDA' % j.id)
            i += 1

    logger.info('done')
Example #14
        job.addFile(fileOTrafdic)

        fileOtestevtdump = FileSpec()
        fileOtestevtdump.lfn = "testevtdump.raw"
        fileOtestevtdump.destinationDBlock = job.destinationDBlock
        fileOtestevtdump.destinationSE = job.destinationSE
        fileOtestevtdump.dataset = job.destinationDBlock
        fileOtestevtdump.type = 'output'
        job.addFile(fileOtestevtdump)

        jobs.append(job)
        i += 1

    if len(jobs) > 0:
        print 'Submitting %s jobs' % len(jobs)
        s, o = Client.submitJobs(jobs, srvID=aSrvID)
        print s
        for x in o:
            print "PandaID=%s" % x[0]
            today = datetime.datetime.today()

            if x[0] != 0 and x[0] != 'NULL':
                j_update = Job.objects.get(id=j.id)
                j_update.panda_id = x[0]
                j_update.status = 'sent'
                j_update.attempt = j_update.attempt + 1
                j_update.date_updated = today

                try:
                    j_update.save()
                    print 'Job %s with PandaID %s updated at %s' % (j.id, x[0],
                                                                    today)
Example #15
def send_merging_job(task, files_list, merge_chunk_number):
    logger.info(
        'Going to send merging job for task %s run number %s and merge chunk number %s'
        % (task, files_list[0].run_number, merge_chunk_number))

    input_files = ''
    input_files_copy = ''
    for j in files_list:
        TMPMDSTFILE = 'mDST-%(runNumber)s-%(runChunk)s-%(prodSlt)s-%(phastVer)s.root' % {
            'runNumber': j.run_number,
            'runChunk': j.chunk_number,
            'prodSlt': j.task.prodslt,
            'phastVer': j.task.phastver
        }
        input_files += ' ' + TMPMDSTFILE
        if j.task.site == 'BW_COMPASS_MCORE':
            input_files_copy += ' cp /scratch/sciteam/criedl/projectdata/' + task.path + task.soft + '/mDST.chunks/' + TMPMDSTFILE + ' .;'
        else:
            input_files_copy += ' xrdcp -N -f root://eoscompass.cern.ch//eos/experiment/compass/' + task.path + task.soft + '/mDST.chunks/' + TMPMDSTFILE + ' .;'

    datasetName = '%(prodNameOnly)s.%(runNumber)s-%(prodSlt)s-%(phastVer)s-merging-mdst' % {
        'prodNameOnly': task.production,
        'runNumber': j.run_number,
        'prodSlt': task.prodslt,
        'phastVer': task.phastver
    }
    logger.info(datasetName)
    destName = 'local'  # PanDA will not try to move output data, data will be placed by pilot (based on schedconfig)
    MERGEDHISTFILE = '%(runNumber)s-%(prodSlt)s-%(phastVer)s.root' % {
        'runNumber': j.run_number,
        'prodSlt': task.prodslt,
        'phastVer': task.phastver
    }
    if format(merge_chunk_number, '03d') != '000':
        MERGEDHISTFILE = MERGEDHISTFILE + '.' + format(merge_chunk_number,
                                                       '03d')
    logger.info(MERGEDHISTFILE)
    MERGEDMDSTFILE = 'mDST-%(runNumber)s-%(prodSlt)s-%(phastVer)s.root' % {
        'runNumber': j.run_number,
        'prodSlt': task.prodslt,
        'phastVer': task.phastver
    }
    if format(merge_chunk_number, '03d') != '000':
        MERGEDMDSTFILE = MERGEDMDSTFILE + '.' + format(merge_chunk_number,
                                                       '03d')
    logger.info(MERGEDMDSTFILE)
    TMPHISTFILE = 'merge-%(runNumber)s-ch%(mergeChunkNumber)s.root' % {
        'runNumber': j.run_number,
        'mergeChunkNumber': format(merge_chunk_number, '03d')
    }
    logger.info(TMPHISTFILE)
    PRODSOFT = task.soft
    ProdPathAndName = task.home + task.path + task.soft

    job = JobSpec()
    job.VO = 'vo.compass.cern.ch'
    job.taskID = task.id
    job.jobDefinitionID = 0
    job.jobName = '%(prodNameOnly)s-merge-mdst-%(runNumber)s-ch%(mergeChunkNumber)s' % {
        'prodNameOnly': task.production,
        'runNumber': j.run_number,
        'mergeChunkNumber': format(merge_chunk_number, '03d')
    }
    job.transformation = 'merging mdst'  # payload (can be URL as well)
    job.destinationDBlock = datasetName
    job.destinationSE = destName
    job.currentPriority = 5000
    job.prodSourceLabel = 'prod_test'
    job.computingSite = task.site
    job.attemptNr = j.attempt_merging_mdst + 1
    job.maxAttempt = j.task.max_attempts
    if j.status_merging_mdst == 'failed':
        job.parentID = j.panda_id_merging_mdst

    if j.task.site == 'BW_COMPASS_MCORE':
        job.jobParameters = 'ppwd=$(pwd);ppwd=$(pwd);export COMPASS_SW_PREFIX=/scratch/sciteam/criedl/projectdata/;export COMPASS_SW_PATH=%(prodPath)s;export COMPASS_PROD_NAME=%(prodName)s;export prodSlt=%(prodSlt)s;export MERGEDHISTFILE=%(MERGEDHISTFILE)s;export MERGEDMDSTFILE=%(MERGEDMDSTFILE)s;export TMPHISTFILE=%(TMPHISTFILE)s;export PRODSOFT=%(PRODSOFT)s;coralpath=%(ProdPathAndName)s/coral;cd -P $coralpath;export coralpathsetup=$coralpath"/setup.sh";source $coralpathsetup;cd $ppwd;%(input_files_copy)sexport PHAST_mDST_MAX_SIZE=6000000000;$CORAL/../phast/phast -m -o %(MERGEDMDSTFILE)s %(input_files)s;cp payload_stderr.txt payload_stderr.out;cp payload_stdout.txt payload_stdout.out;gzip payload_stdout.out;' % {
            'MERGEDHISTFILE': MERGEDHISTFILE,
            'MERGEDMDSTFILE': MERGEDMDSTFILE,
            'PRODSOFT': PRODSOFT,
            'input_files_copy': input_files_copy,
            'input_files': input_files,
            'ProdPathAndName': ProdPathAndName,
            'prodPath': task.path,
            'prodName': task.production,
            'prodSlt': task.prodslt,
            'TMPHISTFILE': TMPHISTFILE
        }
    else:
        job.jobParameters = 'export EOS_MGM_URL=root://eoscompass.cern.ch;ppwd=$(pwd);ppwd=$(pwd);export COMPASS_SW_PREFIX=/eos/experiment/compass/;export COMPASS_SW_PATH=%(prodPath)s;export COMPASS_PROD_NAME=%(prodName)s;export prodSlt=%(prodSlt)s;export MERGEDHISTFILE=%(MERGEDHISTFILE)s;export MERGEDMDSTFILE=%(MERGEDMDSTFILE)s;export TMPHISTFILE=%(TMPHISTFILE)s;export PRODSOFT=%(PRODSOFT)s;coralpath=%(ProdPathAndName)s/coral;cd -P $coralpath;export coralpathsetup=$coralpath"/setup.sh";source $coralpathsetup;cd $ppwd;%(input_files_copy)sexport PHAST_mDST_MAX_SIZE=6000000000;$CORAL/../phast/phast -m -o %(MERGEDMDSTFILE)s %(input_files)s;cp payload_stderr.txt payload_stderr.out;cp payload_stdout.txt payload_stdout.out;gzip payload_stdout.out;' % {
            'MERGEDHISTFILE': MERGEDHISTFILE,
            'MERGEDMDSTFILE': MERGEDMDSTFILE,
            'PRODSOFT': PRODSOFT,
            'input_files_copy': input_files_copy,
            'input_files': input_files,
            'ProdPathAndName': ProdPathAndName,
            'prodPath': task.path,
            'prodName': task.production,
            'prodSlt': task.prodslt,
            'TMPHISTFILE': TMPHISTFILE
        }

    fileOLog = FileSpec()
    fileOLog.lfn = "%s.job.log.tgz" % (job.jobName)
    fileOLog.destinationDBlock = job.destinationDBlock
    fileOLog.destinationSE = job.destinationSE
    fileOLog.dataset = job.destinationDBlock
    fileOLog.type = 'log'
    job.addFile(fileOLog)

    fileOmDST = FileSpec()
    fileOmDST.lfn = "%s" % (MERGEDMDSTFILE)
    fileOmDST.destinationDBlock = job.destinationDBlock
    fileOmDST.destinationSE = job.destinationSE
    fileOmDST.dataset = job.destinationDBlock
    fileOmDST.type = 'output'
    job.addFile(fileOmDST)

    fileOstdout = FileSpec()
    fileOstdout.lfn = "payload_stdout.out.gz"
    fileOstdout.destinationDBlock = job.destinationDBlock
    fileOstdout.destinationSE = job.destinationSE
    fileOstdout.dataset = job.destinationDBlock
    fileOstdout.type = 'output'
    job.addFile(fileOstdout)

    #     fileOstderr = FileSpec()
    #     fileOstderr.lfn = "payload_stderr.txt"
    #     fileOstderr.destinationDBlock = job.destinationDBlock
    #     fileOstderr.destinationSE     = job.destinationSE
    #     fileOstderr.dataset           = job.destinationDBlock
    #     fileOstderr.type = 'output'
    #     job.addFile(fileOstderr)

    s, o = Client.submitJobs([job], srvID=aSrvID)
    logger.info(s)
    for x in o:
        logger.info("PandaID=%s" % x[0])
        if x[0] != 0 and x[0] != 'NULL':
            for j in files_list:
                j_update = Job.objects.get(id=j.id)
                j_update.panda_id_merging_mdst = x[0]
                j_update.status_merging_mdst = 'sent'
                j_update.attempt_merging_mdst = j_update.attempt_merging_mdst + 1
                j_update.chunk_number_merging_mdst = merge_chunk_number
                j_update.date_updated = today

                try:
                    j_update.save()
                    logger.info('Job %s with PandaID %s updated' %
                                (j.id, x[0]))
                except IntegrityError as e:
                    logger.exception('Unique together constraint caught, was not saved')
                except DatabaseError as e:
                    logger.exception('Something went wrong while saving: %s' %
                                     e.message)
        else:
            logger.info('Job %s was not added to PanDA' % j.id)
Example #16
    fileD.type = 'input'
    job.addFile(fileD)

    fileOE = FileSpec()
    fileOE.lfn = "%s.HITS.pool.root" % job.jobName
    fileOE.destinationDBlock = job.destinationDBlock
    fileOE.destinationSE = job.destinationSE
    fileOE.dataset = job.destinationDBlock
    fileOE.destinationDBlockToken = 'ATLASDATADISK'
    fileOE.type = 'output'
    job.addFile(fileOE)

    fileOL = FileSpec()
    fileOL.lfn = "%s.job.log.tgz" % job.jobName
    fileOL.destinationDBlock = job.destinationDBlock
    fileOL.destinationSE = job.destinationSE
    fileOL.dataset = job.destinationDBlock
    fileOL.destinationDBlockToken = 'ATLASDATADISK'
    fileOL.type = 'log'
    job.addFile(fileOL)

    job.jobParameters = "%s %s NONE 1 3250 55866 ATLAS-CSC-02-01-00 55866 55866 QGSP_EMV None %s DEFAULT" % \
                        (fileI.lfn, fileOE.lfn, fileD.lfn)
    jobList.append(job)

s, o = Client.submitJobs(jobList)
print "---------------------"
print s
for x in o:
    print "PandaID=%s" % x[0]
Example #17
    file.dataset = job.destinationDBlock
    file.type = 'output'
    job.addFile(file)

    fileOL = FileSpec()
    fileOL.lfn = "%s.job.log.tgz" % commands.getoutput('uuidgen')
    fileOL.destinationDBlock = job.destinationDBlock
    fileOL.destinationSE = job.destinationSE
    fileOL.dataset = job.destinationDBlock
    fileOL.type = 'log'
    job.addFile(fileOL)

    job.jobParameters = "5056 %s NONE 81000 9000 10 DC3.005056.PythiaPhotonJet2.py NONE" % file.lfn
    jobListE.append(job)

s, o = Client.submitJobs(jobListE)
print "---------------------"
print s
for x in o:
    print "PandaID=%s" % x[0]

time.sleep(20)

datasetNameS = 'panda.simu.%s' % commands.getoutput('uuidgen')

jobListS = []

for lfn in lfnListE:
    job = JobSpec()
    job.jobDefinitionID = int(time.time()) % 10000
    job.jobName = commands.getoutput('uuidgen')
Example #18
    job.currentPriority   = 1000
    job.prodSourceLabel   = 'test'
#    job.prodSourceLabel   = 'cloudtest'
    job.computingSite     = site
    
    file = FileSpec()
    file.lfn = "%s.evgen.pool.root" % job.jobName
    file.destinationDBlock = job.destinationDBlock
    file.destinationSE     = job.destinationSE
    file.dataset           = job.destinationDBlock
    file.type = 'output'
    job.addFile(file)
    
    fileOL = FileSpec()
    fileOL.lfn = "%s.job.log.tgz" % job.jobName
    fileOL.destinationDBlock = job.destinationDBlock
    fileOL.destinationSE     = job.destinationSE
    fileOL.dataset           = job.destinationDBlock
    fileOL.type = 'log'
    job.addFile(fileOL)
    
    job.jobParameters="8072 0 5000 1 DC3.008072.JimmyPhotonJet1.py %s NONE NONE NONE" % file.lfn
    jobList.append(job)

for i in range(1):  # loop count of 1 submits once; raise it to resubmit the same list
    s,o = Client.submitJobs(jobList)
    print "---------------------"
    print s
    for x in o:
        print "PandaID=%s" % x[0]
Example #19
#job.transformation    = 'http://pandawms.org/pandawms-jobcache/lsst-trf.sh'
job.transformation = 'http://pandawms.org/pandawms-jobcache/lsst-trf-phosim332.sh'
job.destinationDBlock = datasetName
#job.destinationSE     = destName
job.destinationSE = 'local'
job.currentPriority = 1000
#job.prodSourceLabel   = 'ptest'
#job.prodSourceLabel = 'panda'
#job.prodSourceLabel = 'ptest'
#job.prodSourceLabel = 'test'
#job.prodSourceLabel = 'ptest'
### 2014-01-27
#job.prodSourceLabel = 'user'
job.prodSourceLabel = 'panda'
job.computingSite = site
job.jobParameters = ""
job.VO = "lsst"

fileOL = FileSpec()
fileOL.lfn = "%s.job.log.tgz" % job.jobName
fileOL.destinationDBlock = job.destinationDBlock
fileOL.destinationSE = job.destinationSE
fileOL.dataset = job.destinationDBlock
fileOL.type = 'log'
job.addFile(fileOL)

s, o = Client.submitJobs([job], srvID=aSrvID)
print s
for x in o:
    print "PandaID=%s" % x[0]