Example #1
import sys
import time
import uuid

import pandaserver.userinterface.Client as Client
from pandaserver.taskbuffer.JobSpec import JobSpec
from pandaserver.taskbuffer.FileSpec import FileSpec

# optional command-line argument selecting the computing site
if len(sys.argv) > 1:
    site = sys.argv[1]
else:
    site = None

datasetName = 'panda.destDB.%s' % str(uuid.uuid4())
destName = None

jobList = []

for i in range(1):
    job = JobSpec()
    job.jobDefinitionID = int(time.time()) % 10000
    job.jobName = "%s_%d" % (str(uuid.uuid4()), i)
    job.AtlasRelease = 'Atlas-14.1.0'
    job.homepackage = 'AtlasProduction/14.1.0.3'
    job.transformation = 'csc_evgen_trf.py'
    job.destinationDBlock = datasetName
    job.destinationSE = destName
    job.currentPriority = 100
    job.prodSourceLabel = 'test'
    job.computingSite = site
    job.cloud = 'US'
    job.cmtConfig = 'i686-slc4-gcc34-opt'

    # output file; the attributes below mirror the standard PanDA evgen test pattern
    file = FileSpec()
    file.lfn = "%s.evgen.pool.root" % job.jobName
    file.destinationDBlock = job.destinationDBlock
    file.destinationSE = job.destinationSE
    file.dataset = job.destinationDBlock
    file.type = 'output'
    job.addFile(file)

    jobList.append(job)

s, o = Client.submitJobs(jobList)
print(s)
print(o)
Example #2
import sys
import time
import uuid

import pandaserver.userinterface.Client as Client
from pandaserver.taskbuffer.JobSpec import JobSpec
from pandaserver.taskbuffer.FileSpec import FileSpec

aSrvID = None

# pick up an optional '-s <server ID>' argument and strip it from argv
for idx, argv in enumerate(sys.argv):
    if argv == '-s':
        aSrvID = sys.argv[idx + 1]
        sys.argv = sys.argv[:idx]
        break

site = sys.argv[1]

datasetName = 'panda.destDB.%s' % str(uuid.uuid4())
destName = 'local'

job = JobSpec()
job.jobDefinitionID = int(time.time()) % 10000
job.jobName = "%s" % str(uuid.uuid4())
# MPI transform on Titan that will run the actual job
job.transformation = '/lustre/atlas/proj-shared/csc108/panitkin/alicetest1/mpi_wrapper_alice_ppbench.py'

job.destinationDBlock = datasetName
job.destinationSE = destName
job.currentPriority = 1000
job.prodSourceLabel = 'panda'
job.computingSite = site
job.jobParameters = " "
job.VO = 'alice'

# log file; the attributes below mirror the standard PanDA test-script pattern,
# and the optional server ID is forwarded at submission
fileOL = FileSpec()
fileOL.lfn = "%s.job.log.tgz" % job.jobName
fileOL.destinationDBlock = job.destinationDBlock
fileOL.destinationSE = job.destinationSE
fileOL.dataset = job.destinationDBlock
fileOL.type = 'log'
job.addFile(fileOL)

s, o = Client.submitJobs([job], srvID=aSrvID)
print(s)
print(o)
Example #3
if PIPELINE_PROCESSINSTANCE is not None and PIPELINE_TASK is not None \
        and PIPELINE_EXECUTIONNUMBER is not None \
        and PIPELINE_STREAM is not None:
    jobName = 'job.%(PIPELINE_PROCESSINSTANCE)s.%(PIPELINE_TASK)s.%(PIPELINE_EXECUTIONNUMBER)s.%(prodUserName)s.%(PIPELINE_STREAM)s' % {
        'prodUserName': str(prodUserName),
        'PIPELINE_TASK': str(PIPELINE_TASK),
        'PIPELINE_EXECUTIONNUMBER': str(PIPELINE_EXECUTIONNUMBER),
        'PIPELINE_STREAM': str(PIPELINE_STREAM),
        'PIPELINE_PROCESSINSTANCE': str(PIPELINE_PROCESSINSTANCE),
    }
else:
    jobName = "%s" % str(uuid.uuid4())

if PIPELINE_STREAM is not None:
    jobDefinitionID = PIPELINE_STREAM
else:
    jobDefinitionID = int(time.time()) % 10000
job = JobSpec()
job.jobDefinitionID = jobDefinitionID
job.jobName = jobName
job.transformation = 'http://pandawms.org/pandawms-jobcache/lsst-trf.sh'
job.destinationDBlock = datasetName
job.destinationSE = 'local'
job.currentPriority = 1000
job.prodSourceLabel = 'panda'
job.jobParameters = ' --lsstJobParams="%s" ' % lsstJobParams
if prodUserName is not None:
    job.prodUserName = prodUserName
else:
    job.prodUserName = prodUserNameDefault
if PIPELINE_PROCESSINSTANCE is not None:
    job.taskID = PIPELINE_PROCESSINSTANCE
if PIPELINE_EXECUTIONNUMBER is not None:
    # assumed mapping: the pipeline execution number becomes the job attempt number
    job.attemptNr = PIPELINE_EXECUTIONNUMBER
Example #4
import sys
import time
import uuid
import pandaserver.userinterface.Client as Client
from pandaserver.taskbuffer.JobSpec import JobSpec
from pandaserver.taskbuffer.FileSpec import FileSpec

site = sys.argv[1]
cloud = sys.argv[2]

datasetName = 'panda.destDB.%s' % str(uuid.uuid4())
destName = None

jobList = []

for i in range(1):
    job = JobSpec()
    job.jobDefinitionID = int(time.time()) % 10000
    job.jobName = "%s_%d" % (str(uuid.uuid4()), i)
    job.AtlasRelease = 'Atlas-17.0.5'
    job.homepackage = 'AtlasProduction/17.0.5.6'
    job.transformation = 'Evgen_trf.py'
    job.destinationDBlock = datasetName
    job.destinationSE = destName
    job.currentPriority = 10000
    job.prodSourceLabel = 'test'
    job.computingSite = site
    job.cloud = cloud
    job.cmtConfig = 'i686-slc5-gcc43-opt'

    # output file; attributes mirror the standard PanDA evgen test pattern
    file = FileSpec()
    file.lfn = "%s.evgen.pool.root" % job.jobName
    file.destinationDBlock = job.destinationDBlock
    file.destinationSE = job.destinationSE
    file.dataset = job.destinationDBlock
    file.type = 'output'
    job.addFile(file)

    jobList.append(job)

s, o = Client.submitJobs(jobList)
print(s)
print(o)
Example #5
 def run(self):
     try:
         self.putLog('start %s' % self.evpFileName)
         # lock evp file
         self.evpFile = open(self.evpFileName)
         try:
             fcntl.flock(self.evpFile.fileno(),
                         fcntl.LOCK_EX | fcntl.LOCK_NB)
         except Exception:
              # release: the evp file is already locked by another process
             self.putLog("cannot lock %s" % self.evpFileName)
             self.evpFile.close()
             return True
         # options
         runEvtList = []
         eventPickDataType = ''
         eventPickStreamName = ''
         eventPickDS = []
         eventPickAmiTag = ''
         eventPickNumSites = 1
         inputFileList = []
         tagDsList = []
         tagQuery = ''
         tagStreamRef = ''
         skipDaTRI = False
         runEvtGuidMap = {}
         ei_api = ''
         # read evp file
         for tmpLine in self.evpFile:
             tmpMatch = re.search('^([^=]+)=(.+)$', tmpLine)
             # check format
             if tmpMatch is None:
                 continue
             tmpItems = tmpMatch.groups()
             if tmpItems[0] == 'runEvent':
                 # get run and event number
                 tmpRunEvt = tmpItems[1].split(',')
                 if len(tmpRunEvt) == 2:
                     runEvtList.append(tmpRunEvt)
             elif tmpItems[0] == 'eventPickDataType':
                 # data type
                 eventPickDataType = tmpItems[1]
             elif tmpItems[0] == 'eventPickStreamName':
                 # stream name
                 eventPickStreamName = tmpItems[1]
             elif tmpItems[0] == 'eventPickDS':
                 # dataset pattern
                 eventPickDS = tmpItems[1].split(',')
             elif tmpItems[0] == 'eventPickAmiTag':
                 # AMI tag
                 eventPickAmiTag = tmpItems[1]
             elif tmpItems[0] == 'eventPickNumSites':
                 # the number of sites where datasets are distributed
                 try:
                     eventPickNumSites = int(tmpItems[1])
                 except Exception:
                     pass
             elif tmpItems[0] == 'userName':
                 # user name
                 self.userDN = tmpItems[1]
                 self.putLog("user=%s" % self.userDN)
             elif tmpItems[0] == 'userTaskName':
                 # user task name
                 self.userTaskName = tmpItems[1]
             elif tmpItems[0] == 'userDatasetName':
                 # user dataset name
                 self.userDatasetName = tmpItems[1]
             elif tmpItems[0] == 'lockedBy':
                 # client name
                 self.lockedBy = tmpItems[1]
             elif tmpItems[0] == 'creationTime':
                 # creation time
                 self.creationTime = tmpItems[1]
             elif tmpItems[0] == 'params':
                 # parameters
                 self.params = tmpItems[1]
             elif tmpItems[0] == 'ei_api':
                 # ei api parameter for MC
                 ei_api = tmpItems[1]
             elif tmpItems[0] == 'inputFileList':
                 # input file list
                 inputFileList = tmpItems[1].split(',')
                 try:
                     inputFileList.remove('')
                 except Exception:
                     pass
             elif tmpItems[0] == 'tagDS':
                 # TAG dataset
                 tagDsList = tmpItems[1].split(',')
             elif tmpItems[0] == 'tagQuery':
                 # query for TAG
                 tagQuery = tmpItems[1]
             elif tmpItems[0] == 'tagStreamRef':
                 # StreamRef for TAG
                 tagStreamRef = tmpItems[1]
                 if not tagStreamRef.endswith('_ref'):
                     tagStreamRef += '_ref'
             elif tmpItems[0] == 'runEvtGuidMap':
                 # GUIDs
                 try:
                     runEvtGuidMap = eval(tmpItems[1])
                 except Exception:
                     pass
         # extract task name
         if self.userTaskName == '' and self.params != '':
             try:
                 tmpMatch = re.search('--outDS(=| ) *([^ ]+)', self.params)
                 if tmpMatch is not None:
                     self.userTaskName = tmpMatch.group(2)
                     if not self.userTaskName.endswith('/'):
                         self.userTaskName += '/'
             except Exception:
                 pass
         # suppress DaTRI
         if self.params != '':
             if '--eventPickSkipDaTRI' in self.params:
                 skipDaTRI = True
         # get compact user name
         compactDN = self.taskBuffer.cleanUserID(self.userDN)
         # get jediTaskID
         self.jediTaskID = self.taskBuffer.getTaskIDwithTaskNameJEDI(
             compactDN, self.userTaskName)
         # get prodSourceLabel
         self.prodSourceLabel, self.job_label = self.taskBuffer.getProdSourceLabelwithTaskID(
             self.jediTaskID)
         # convert run/event list to dataset/file list
         tmpRet, locationMap, allFiles = self.pd2p.convertEvtRunToDatasets(
             runEvtList, eventPickDataType, eventPickStreamName,
             eventPickDS, eventPickAmiTag, self.userDN, runEvtGuidMap,
             ei_api)
         if not tmpRet:
             if 'isFatal' in locationMap and locationMap['isFatal'] is True:
                 self.ignoreError = False
             self.endWithError(
                 'Failed to convert the run/event list to a dataset/file list'
             )
             return False
         # use only files in the list
         if inputFileList != []:
             tmpAllFiles = []
             for tmpFile in allFiles:
                 if tmpFile['lfn'] in inputFileList:
                     tmpAllFiles.append(tmpFile)
             allFiles = tmpAllFiles
         # remove redundant CN from DN
         tmpDN = self.userDN
         tmpDN = re.sub('/CN=limited proxy', '', tmpDN)
         tmpDN = re.sub('(/CN=proxy)+$', '', tmpDN)
         # make dataset container
         tmpRet = self.pd2p.registerDatasetContainerWithDatasets(
             self.userDatasetName,
             allFiles,
             locationMap,
             nSites=eventPickNumSites,
             owner=tmpDN)
         if not tmpRet:
             self.endWithError('Failed to make a dataset container %s' %
                               self.userDatasetName)
             return False
         # skip DaTRI
         if skipDaTRI:
             # successfully terminated
             self.putLog("skip DaTRI")
             # update task
             self.taskBuffer.updateTaskModTimeJEDI(self.jediTaskID)
         else:
             # get candidates
             tmpRet, candidateMaps = self.pd2p.getCandidates(
                 self.userDatasetName,
                 self.prodSourceLabel,
                 self.job_label,
                 checkUsedFile=False,
                 useHidden=True)
             if not tmpRet:
                 self.endWithError(
                     'Failed to find candidate for destination')
                 return False
             # collect all candidates
             allCandidates = []
             for tmpDS in candidateMaps:
                 tmpDsVal = candidateMaps[tmpDS]
                 for tmpCloud in tmpDsVal:
                     tmpCloudVal = tmpDsVal[tmpCloud]
                     for tmpSiteName in tmpCloudVal[0]:
                         if tmpSiteName not in allCandidates:
                             allCandidates.append(tmpSiteName)
             if allCandidates == []:
                 self.endWithError('No candidate for destination')
                 return False
             # get list of dataset (container) names
             if eventPickNumSites > 1:
                 # decompose container to transfer datasets separately
                 tmpRet, tmpOut = self.pd2p.getListDatasetReplicasInContainer(
                     self.userDatasetName)
                 if not tmpRet:
                     self.endWithError('Failed to get replicas in %s' %
                                       self.userDatasetName)
                     return False
                 userDatasetNameList = list(tmpOut)
             else:
                 # transfer container at once
                 userDatasetNameList = [self.userDatasetName]
             # loop over all datasets
             sitesUsed = []
             for tmpUserDatasetName in userDatasetNameList:
                 # get size of dataset container
                 tmpRet, totalInputSize = rucioAPI.getDatasetSize(
                     tmpUserDatasetName)
                 if not tmpRet:
                     self.endWithError(
                         'Failed to get the size of {0} with {1}'.format(
                             tmpUserDatasetName, totalInputSize))
                     return False
                 # run brokerage
                 tmpJob = JobSpec()
                 tmpJob.AtlasRelease = ''
                 self.putLog("run brokerage for %s" % tmpDS)
                 pandaserver.brokerage.broker.schedule(
                     [tmpJob],
                     self.taskBuffer,
                     self.siteMapper,
                     True,
                     allCandidates,
                     True,
                     datasetSize=totalInputSize)
                 if tmpJob.computingSite.startswith('ERROR'):
                     self.endWithError('brokerage failed with %s' %
                                       tmpJob.computingSite)
                     return False
                 self.putLog("site -> %s" % tmpJob.computingSite)
                 # send transfer request
                 try:
                     tmpDN = rucioAPI.parse_dn(tmpDN)
                     tmpStatus, userInfo = rucioAPI.finger(tmpDN)
                     if not tmpStatus:
                         raise RuntimeError(
                             'user info not found for {0} with {1}'.format(
                                 tmpDN, userInfo))
                     tmpDN = userInfo['nickname']
                     tmpSiteSpec = self.siteMapper.getSite(
                         tmpJob.computingSite)
                     scope_input, scope_output = select_scope(
                         tmpSiteSpec, JobUtils.ANALY_PS, JobUtils.ANALY_PS)
                     tmpDQ2ID = tmpSiteSpec.ddm_input[scope_input]
                     tmpMsg = "%s ds=%s site=%s id=%s" % (
                         'registerDatasetLocation for DaTRI ',
                         tmpUserDatasetName, tmpDQ2ID, tmpDN)
                     self.putLog(tmpMsg)
                      rucioAPI.registerDatasetLocation(
                          tmpUserDatasetName, [tmpDQ2ID],
                         lifetime=14,
                         owner=tmpDN,
                         activity="User Subscriptions")
                     self.putLog('OK')
                 except Exception:
                     errType, errValue = sys.exc_info()[:2]
                     tmpStr = 'Failed to send transfer request : %s %s' % (
                         errType, errValue)
                      tmpStr = tmpStr.strip()
                     tmpStr += traceback.format_exc()
                     self.endWithError(tmpStr)
                     return False
                 # list of sites already used
                 sitesUsed.append(tmpJob.computingSite)
                 self.putLog("used %s sites" % len(sitesUsed))
                 # set candidates
                 if len(sitesUsed) >= eventPickNumSites:
                     # reset candidates to limit the number of sites
                     allCandidates = sitesUsed
                     sitesUsed = []
                 else:
                     # remove site
                     allCandidates.remove(tmpJob.computingSite)
             # send email notification for success
             tmpMsg = 'A transfer request was successfully sent to Rucio.\n'
             tmpMsg += 'Your task will get started once transfer is completed.'
             self.sendEmail(True, tmpMsg)
         try:
             # unlock and delete evp file
             fcntl.flock(self.evpFile.fileno(), fcntl.LOCK_UN)
             self.evpFile.close()
             os.remove(self.evpFileName)
         except Exception:
             pass
         # successfully terminated
         self.putLog("end %s" % self.evpFileName)
         return True
     except Exception:
         errType, errValue = sys.exc_info()[:2]
         self.endWithError('Got exception %s:%s %s' %
                           (errType, errValue, traceback.format_exc()))
         return False
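
The run method above reads its event-picking request from a plain key=value file. For reference, a minimal evp file that this parser would accept could look like the following sketch (all values are illustrative assumptions; the keys are the ones handled in the loop above):

runEvent=358031,1234567
runEvent=358031,7654321
eventPickDataType=AOD
eventPickStreamName=physics_Main
eventPickDS=data18_13TeV.%.AOD%
eventPickNumSites=1
userName=/DC=ch/DC=cern/OU=Users/CN=jdoe
userTaskName=user.jdoe.pickTest/
lockedBy=pathena
params=--outDS user.jdoe.pickTest/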
Example #6
    def run(self):
        try:
            while True:
                _logger.debug('%s start' % self.pandaID)
                # query job
                job = self.taskBuffer.peekJobs([self.pandaID],
                                               fromDefined=False,
                                               fromArchived=False,
                                               fromWaiting=False)[0]
                # check job status (peekJobs returns None when the job is not
                # found, so test before touching job attributes)
                if job is None:
                    _logger.debug('%s escape : not found' % self.pandaID)
                    return
                _logger.debug('%s in %s' % (self.pandaID, job.jobStatus))
                if job.jobStatus not in [
                        'running', 'sent', 'starting', 'holding', 'stagein',
                        'stageout'
                ]:
                    if job.jobStatus == 'transferring' and (
                            job.prodSourceLabel in ['user', 'panda']
                            or job.jobSubStatus not in [None, 'NULL', '']):
                        pass
                    else:
                        _logger.debug('%s escape : %s' %
                                      (self.pandaID, job.jobStatus))
                        return
                # time limit
                timeLimit = datetime.datetime.utcnow() - datetime.timedelta(
                    minutes=self.sleepTime)
                if job.modificationTime < timeLimit or (
                        job.endTime != 'NULL' and job.endTime < timeLimit):
                    _logger.debug(
                        '%s %s lastmod:%s endtime:%s' %
                        (job.PandaID, job.jobStatus, str(
                            job.modificationTime), str(job.endTime)))
                    destDBList = []
                    if job.jobStatus == 'sent':
                        # sent job didn't receive reply from pilot within 30 min
                        job.jobDispatcherErrorCode = ErrorCode.EC_SendError
                        job.jobDispatcherErrorDiag = "Sent job didn't receive reply from pilot within 30 min"
                    elif job.exeErrorDiag == 'NULL' and job.pilotErrorDiag == 'NULL':
                        # lost heartbeat
                        if job.jobDispatcherErrorDiag == 'NULL':
                            if job.endTime == 'NULL':
                                # normal lost heartbeat
                                job.jobDispatcherErrorCode = ErrorCode.EC_Watcher
                                job.jobDispatcherErrorDiag = 'lost heartbeat : %s' % str(
                                    job.modificationTime)
                            else:
                                if job.jobStatus == 'holding':
                                    job.jobDispatcherErrorCode = ErrorCode.EC_Holding
                                elif job.jobStatus == 'transferring':
                                    job.jobDispatcherErrorCode = ErrorCode.EC_Transferring
                                else:
                                    job.jobDispatcherErrorCode = ErrorCode.EC_Timeout
                                job.jobDispatcherErrorDiag = 'timeout in {0} : last heartbeat at {1}'.format(
                                    job.jobStatus, str(job.endTime))
                            # get worker
                            workerSpecs = self.taskBuffer.getWorkersForJob(
                                job.PandaID)
                            if len(workerSpecs) > 0:
                                workerSpec = workerSpecs[0]
                                if workerSpec.status in [
                                        'finished', 'failed', 'cancelled',
                                        'missed'
                                ]:
                                    job.supErrorCode = SupErrors.error_codes[
                                        'WORKER_ALREADY_DONE']
                                    job.supErrorDiag = 'worker already {0} at {1} with {2}'.format(
                                        workerSpec.status,
                                        str(workerSpec.endTime),
                                        workerSpec.diagMessage)
                                    job.supErrorDiag = JobSpec.truncateStringAttr(
                                        'supErrorDiag', job.supErrorDiag)
                    else:
                        # job recovery failed
                        job.jobDispatcherErrorCode = ErrorCode.EC_Recovery
                        job.jobDispatcherErrorDiag = 'job recovery failed for %s hours' % (
                            self.sleepTime / 60)
                    # set job status
                    job.jobStatus = 'failed'
                    # set endTime for lost heartbeat
                    if job.endTime == 'NULL':
                        # normal lost heartbeat
                        job.endTime = job.modificationTime
                    # set files status
                    for file in job.Files:
                        if file.type == 'output' or file.type == 'log':
                            file.status = 'failed'
                            if file.destinationDBlock not in destDBList:
                                destDBList.append(file.destinationDBlock)
                    # event service
                    if EventServiceUtils.isEventServiceJob(
                            job
                    ) and not EventServiceUtils.isJobCloningJob(job):
                        eventStat = self.taskBuffer.getEventStat(
                            job.jediTaskID, job.PandaID)
                        # set sub status when no successful events
                        if EventServiceUtils.ST_finished not in eventStat:
                            job.jobSubStatus = 'es_heartbeat'
                    # update job
                    self.taskBuffer.updateJobs([job], False)
                    # start closer
                    if job.jobStatus == 'failed':

                        source = 'jobDispatcherErrorCode'
                        error_code = job.jobDispatcherErrorCode
                        error_diag = job.jobDispatcherErrorDiag

                        try:
                            _logger.debug(
                                "Watcher will call apply_retrial_rules")
                            retryModule.apply_retrial_rules(
                                self.taskBuffer, job.PandaID, source,
                                error_code, error_diag, job.attemptNr)
                            _logger.debug("apply_retrial_rules is back")
                        except Exception as e:
                            _logger.debug(
                                "apply_retrial_rules excepted and needs to be investigated (%s): %s"
                                % (e, traceback.format_exc()))

                        # updateJobs was successful and it failed a job with taskBufferErrorCode
                        try:

                            _logger.debug("Watcher.run will peek the job")
                            job_tmp = self.taskBuffer.peekJobs(
                                [job.PandaID],
                                fromDefined=False,
                                fromArchived=True,
                                fromWaiting=False)[0]
                            if job_tmp.taskBufferErrorCode:
                                source = 'taskBufferErrorCode'
                                error_code = job_tmp.taskBufferErrorCode
                                error_diag = job_tmp.taskBufferErrorDiag
                                _logger.debug(
                                    "Watcher.run 2 will call apply_retrial_rules"
                                )
                                retryModule.apply_retrial_rules(
                                    self.taskBuffer, job_tmp.PandaID, source,
                                    error_code, error_diag, job_tmp.attemptNr)
                                _logger.debug("apply_retrial_rules 2 is back")
                        except IndexError:
                            pass
                        except Exception as e:
                            _logger.error(
                                "apply_retrial_rules 2 excepted and needs to be investigated (%s): %s"
                                % (e, traceback.format_exc()))

                        cThr = Closer(self.taskBuffer, destDBList, job)
                        cThr.start()
                        cThr.join()
                    _logger.debug('%s end' % job.PandaID)
                    return
                # single action
                if self.single:
                    return
                # sleep
                time.sleep(60 * self.sleepTime)
        except Exception:
            errType, errValue, traceBack = sys.exc_info()
            _logger.error("run() : %s %s" % (errType, errValue))
            return
Example #7
    def defineEvgen16Job(self, i):
        """Define an Evgen16 job based on predefined values and randomly generated names
        """

        job = JobSpec()
        job.computingSite = self.__site
        job.cloud = self.__cloud

        job.jobDefinitionID = int(time.time()) % 10000
        job.jobName = "%s_%d" % (uuid.uuid1(), i)
        job.AtlasRelease = 'Atlas-16.6.2'
        job.homepackage = 'AtlasProduction/16.6.2.1'
        job.transformation = 'Evgen_trf.py'
        job.destinationDBlock = self.__datasetName
        job.destinationSE = self.__destName
        job.currentPriority = 10000
        job.prodSourceLabel = 'test'
        job.cmtConfig = 'i686-slc5-gcc43-opt'

        # Output file
        fileO = FileSpec()
        fileO.lfn = "%s.evgen.pool.root" % job.jobName
        fileO.destinationDBlock = job.destinationDBlock
        fileO.destinationSE = job.destinationSE
        fileO.dataset = job.destinationDBlock
        fileO.destinationDBlockToken = 'ATLASDATADISK'
        fileO.type = 'output'
        job.addFile(fileO)

        # Log file
        fileL = FileSpec()
        fileL.lfn = "%s.job.log.tgz" % job.jobName
        fileL.destinationDBlock = job.destinationDBlock
        fileL.destinationSE = job.destinationSE
        fileL.dataset = job.destinationDBlock
        fileL.destinationDBlockToken = 'ATLASDATADISK'
        fileL.type = 'log'
        job.addFile(fileL)

        job.jobParameters = "2760 105048 19901 101 200 MC10.105048.PythiaB_ccmu3mu1X.py %s NONE NONE NONE MC10JobOpts-latest-test.tar.gz" % fileO.lfn
        return job
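
A job built by defineEvgen16Job would normally be submitted through the PanDA client API. As a minimal sketch (the tester instance and the loop count are assumptions, not part of the original class):

import pandaserver.userinterface.Client as Client

# 'tester' is a hypothetical instance of the class defining defineEvgen16Job
jobList = [tester.defineEvgen16Job(i) for i in range(2)]
s, o = Client.submitJobs(jobList)
print(s)
print(o)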
Example #8
files = {
    'misal1_mc12.005802.JF17_pythia_jet_filter.digit.RDO.v12000601_tid008610._11615.pool.root.1':
    None,
    #'misal1_mc12.005802.JF17_pythia_jet_filter.digit.RDO.v12000601_tid008610._11639.pool.root.1':None,
    #'misal1_mc12.005200.T1_McAtNlo_Jimmy.digit.RDO.v12000601_tid007554._03634.pool.root.1':None,
    #'misal1_mc12.005200.T1_McAtNlo_Jimmy.digit.RDO.v12000601_tid007554._03248.pool.root.1':None,
}

jobList = []

index = 0
for lfn in files:
    index += 1
    job = JobSpec()
    job.jobDefinitionID = int(time.time()) % 10000
    job.jobName = "%s_%d" % (str(uuid.uuid4()), index)
    job.AtlasRelease = 'Atlas-12.0.6'
    job.homepackage = 'AtlasProduction/12.0.6.4'
    job.transformation = 'csc_reco_trf.py'
    job.destinationDBlock = datasetName
    job.destinationSE = destName
    job.computingSite = site
    #job.prodDBlock        = 'misal1_mc12.005200.T1_McAtNlo_Jimmy.digit.RDO.v12000601_tid007554'
    job.prodDBlock = 'misal1_mc12.005802.JF17_pythia_jet_filter.digit.RDO.v12000601_tid008610'
    job.cloud = 'US'

    job.prodSourceLabel = 'test'
    job.currentPriority = 10000
    job.cmtConfig = 'i686-slc4-gcc34-opt'
Example #9
# pick up an optional '-s <server ID>' argument, as in Example #2
aSrvID = None
for idx, argv in enumerate(sys.argv):
    if argv == '-s':
        aSrvID = sys.argv[idx + 1]
        sys.argv = sys.argv[:idx]
        break

#site = sys.argv[1]
site = 'ANALY_BNL-LSST'  #orig
#site = 'BNL-LSST'
#site = 'SWT2_CPB-LSST'
#site = 'UTA_SWT2-LSST'
#site = 'ANALY_SWT2_CPB-LSST'

datasetName = 'panda.user.jschovan.lsst.%s' % str(uuid.uuid4())
destName = None

job = JobSpec()
job.jobDefinitionID = int(time.time()) % 10000
job.jobName = "%s" % str(uuid.uuid4())
### job.transformation    = 'http://www.usatlas.bnl.gov/~wenaus/lsst-trf/lsst-trf.sh'
#job.transformation    = 'http://pandawms.org/pandawms-jobcache/lsst-trf.sh'
job.transformation = 'http://pandawms.org/pandawms-jobcache/lsst-trf-phosim332.sh'
job.destinationDBlock = datasetName
#job.destinationSE     = destName
job.destinationSE = 'local'
job.currentPriority = 1000
#job.prodSourceLabel = 'ptest'
#job.prodSourceLabel = 'panda'
#job.prodSourceLabel = 'test'
### 2014-01-27
Example #10
    cloud      = sys.argv[2]
    prodDBlock = sys.argv[3]
    inputFile  = sys.argv[4]

datasetName = 'panda.destDB.%s' % str(uuid.uuid4())

files = {
    inputFile:None,
    }

jobList = []

index = 0
for lfn in files:
    index += 1
    job = JobSpec()
    job.jobDefinitionID   = int(time.time()) % 10000
    job.jobName           = "%s_%d" % (str(uuid.uuid4()),index)
    job.AtlasRelease      = 'Atlas-17.0.5'
    job.homepackage       = 'AtlasProduction/17.0.5.6'
    job.transformation    = 'AtlasG4_trf.py'
    job.destinationDBlock = datasetName
    job.computingSite     = site
    job.prodDBlock        = prodDBlock
    
    job.prodSourceLabel   = 'test'
    job.processingType    = 'test'
    job.currentPriority   = 10000
    job.cloud             = cloud
    job.cmtConfig         = 'i686-slc5-gcc43-opt'
Example #11
cloud = 'US'

datasetName = 'panda.destDB.%s' % str(uuid.uuid4())
destName = 'BNL_ATLAS_2'

files = {
    'EVNT.023986._00001.pool.root.1': None,
    #'EVNT.023989._00001.pool.root.1':None,
}

jobList = []

index = 0
for lfn in files:
    index += 1
    job = JobSpec()
    job.jobDefinitionID = int(time.time()) % 10000
    job.jobName = "%s_%d" % (str(uuid.uuid4()), index)
    job.AtlasRelease = 'Atlas-14.2.20'
    job.homepackage = 'AtlasProduction/14.2.20.1'
    job.transformation = 'csc_simul_reco_trf.py'
    job.destinationDBlock = datasetName
    job.destinationSE = destName
    job.computingSite = site
    job.prodDBlock = 'mc08.105031.Jimmy_jetsJ2.evgen.EVNT.e347_tid023986'
    #job.prodDBlock        = 'mc08.105034.Jimmy_jetsJ5.evgen.EVNT.e347_tid023989'

    job.prodSourceLabel = 'test'
    job.processingType = 'test'
    job.currentPriority = 10000
    job.cloud = cloud
Example #12
if len(sys.argv) > 1:
    site = sys.argv[1]
else:
    site = None

datasetName = 'panda.destDB.%s' % str(uuid.uuid4())
destName = 'BNL_ATLAS_2'
#destName    = 'BU_ATLAS_Tier2'

files = {
    'mc11.007204.singlepart_mu4.evgen.EVNT.v11000302._00037.pool.root.1': None,
    'mc11.007204.singlepart_mu4.evgen.EVNT.v11000302._00038.pool.root.1': None,
}

jobList = []

for lfn in files:
    job = JobSpec()
    job.jobDefinitionID = int(time.time()) % 10000
    job.jobName = str(uuid.uuid4())
    job.AtlasRelease = 'Atlas-11.0.3'
    job.homepackage = 'JobTransforms-11-00-03-02'
    job.transformation = 'share/csc.simul.trf'
    job.destinationDBlock = datasetName
    job.destinationSE = destName
    job.computingSite = site
    job.prodDBlock = 'mc11.007204.singlepart_mu4.evgen.EVNT.v11000302'
    job.cmtConfig = 'i686-slc4-gcc34-opt'

    job.prodSourceLabel = 'test'
    job.currentPriority = 1000

    # input file; attributes mirror the standard PanDA test-script pattern
    fileI = FileSpec()
    fileI.dataset = job.prodDBlock
    fileI.prodDBlock = job.prodDBlock
    fileI.lfn = lfn
    fileI.type = 'input'
    job.addFile(fileI)