Example #1
File: Jedi.py Project: MannyMoo/ganga
def retrievePandaJobs(job, jIDs):
    '''
    methods for retrieving panda job ids of panda jobs given a jobDefId
    '''
    from pandatools import Client

    ick = False
    jstatus = ''
    num_pjobs = 0

    logger.debug("retrievePandaJobs jIDs=%s" % jIDs)

    # get status from Panda server
    rc, jobsStatus = Client.getFullJobStatus(jIDs, False)
    if rc:
        logger.error('Return code %d retrieving job status information.', rc)
        raise BackendError(
            'Jedi', 'Return code %d retrieving job status information.' % rc)

    for status in jobsStatus:
        if not status: continue

        jstatus = status.jobStatus
        if status.jobStatus == None:
            logger.warning('No panda jobs expected')
            job.backend.pandajobs = []

        elif status.jobStatus in [
                "defined", "activated", "running", "failed", "finished",
                "holding", "assigned"
        ]:
            logger.debug('Panda jobs are running')
            logger.debug("PandaID: %d" % status.PandaID)

            pjobj = JediPandaJob()
            pjobj.id = status.PandaID
            pjobj.url = 'http://panda.cern.ch/?job=%d' % status.PandaID
            pjobj.jobSpec = dict(zip(status._attributes, status.values()))
            for k in pjobj.jobSpec.keys():
                if type(pjobj.jobSpec[k]) not in [type(''), type(1)]:
                    pjobj.jobSpec[k] = str(pjobj.jobSpec[k])

            if pjobj not in job.backend.pandajobs:
                job.backend.pandajobs.append(pjobj)
            else:
                logger.debug("Panda job %s already exists locally" % pjobj.id)

            num_pjobs += 1
        else:
            logger.warning(
                "getFullJobStatus returned unsupported status %s for Panda job %s "
                % (status.jobStatus, status.PandaID))

        ick = True

    return (ick, jstatus, num_pjobs)
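For reference, all of these examples share the same call contract: Client.getFullJobStatus takes a list of PandaIDs plus a verbosity flag and returns a return code together with a list of job-spec objects, with None entries for jobs the server no longer knows about. A minimal sketch of that pattern, assuming pandatools is configured and a valid grid proxy exists (the PandaIDs below are placeholders):

from pandatools import Client

panda_ids = [1234567890, 1234567891]  # hypothetical PandaIDs
rc, job_specs = Client.getFullJobStatus(panda_ids, False)
if rc:
    raise RuntimeError("getFullJobStatus returned code %d" % rc)
for spec in job_specs:
    if spec is None:  # expired or unknown jobs come back as None
        continue
    print spec.PandaID, spec.jobStatus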
Example #2
 def status(self, JobID, forceUpdate=False):
     # get logger
     tmpLog = PLogger.getPandaLogger()
     # check proxy
     self.gridPassPhrase, self.vomsFQAN = PsubUtils.checkGridProxy(
         self.gridPassPhrase, False, self.verbose)
     # get job info from local repository
     job = self.getJobInfo(JobID)
     if job == None:
         # not found
         return None
     # update if needed
     if job.dbStatus != 'frozen' or forceUpdate:
         tmpLog.info("Getting status for JobID=%s ..." % JobID)
         # get status from Panda server
         status, pandaIDstatus = Client.getPandIDsWithJobID(
             JobID, verbose=self.verbose)
         if status != 0:
             tmpLog.error("Failed to get status for JobID=%s" % JobID)
             return None
         # get one job to set computingSite which may have changed due to rebrokerage
         pandaJob = None
         if pandaIDstatus != {}:
             tmpPandaIDs = pandaIDstatus.keys()
             tmpPandaIDs.sort()
             status, tmpPandaJobs = Client.getFullJobStatus(
                 tmpPandaIDs[:1], verbose=self.verbose)
             if status != 0:
                 tmpLog.error("Failed to get PandaJobs for %s" % JobID)
                 return None
             pandaJob = tmpPandaJobs[0]
         # convert to local job spec
         job = PdbUtils.convertPtoD([],
                                    pandaIDstatus,
                                    job,
                                    pandaJobForSiteID=pandaJob)
         # update DB
         try:
             PdbUtils.updateJobDB(job, self.verbose)
         except:
             tmpLog.error("Failed to update local repository for JobID=%s" %
                          JobID)
             return None
         tmpLog.info("Updated JobID=%s" % JobID)
     # return
     return job
Example #3
    if status != 0:
        print "Skip..."
        continue

    pandaJob = None
    if pandaIDstatus != {}:
        tmpPandaIDs = pandaIDstatus.keys()
        tmpPandaIDs.sort()
        nIDs = len(tmpPandaIDs)
        nIDs_step = (nIDs + 9) / 10
        for i, tmpID in enumerate(tmpPandaIDs):
            if prog_options.verbose:
                if i % nIDs_step == 0:
                    print i, '/', nIDs
            status, tmpPandaJob = Client.getFullJobStatus([tmpID],
                                                          verbose=verbose2)
            CPUTime = getattr(tmpPandaJob[0], "cpuConsumptionTime")
            startTime = getattr(tmpPandaJob[0], "startTime")
            endTime = getattr(tmpPandaJob[0], "endTime")
            parentID = getattr(tmpPandaJob[0], "parentID")
            jobsetID = getattr(tmpPandaJob[0], "jobsetID")
            if prog_options.verbose:
                print status, getattr(tmpPandaJob[0],
                                      "jobStatus"), CPUTime, startTime, endTime
            if getattr(tmpPandaJob[0],
                       "jobStatus") == "finished" and "runGen" in getattr(
                           tmpPandaJob[0], "transformation"):
                TotalCPUTime += CPUTime
                deltaT = endTime - startTime
                TotalRealTime += deltaT.days * 86400 + deltaT.seconds + deltaT.microseconds * 1e-6
                TotalJobs += 1
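The wall-time accumulation above converts a datetime.timedelta to seconds by hand; on Python 2.7 and later the same value is available as timedelta.total_seconds(). A quick self-contained check (the timestamps are placeholders):

import datetime

start = datetime.datetime(2024, 1, 1, 12, 0, 0)  # placeholder timestamps
end = datetime.datetime(2024, 1, 1, 13, 30, 15)
deltaT = end - start
manual = deltaT.days * 86400 + deltaT.seconds + deltaT.microseconds * 1e-6
assert manual == deltaT.total_seconds()  # identical on Python >= 2.7
print manual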
Example #4
    for r in res:
        ids.append(r[0])
    print 'got', len(ids), 'new pandaIDs'
    print
except cx_Oracle.DatabaseError, exc:
    error, = exc.args
    print "updatePandaWAN.py - problem in establishing connection to db"
    print "updatePandaWAN.py Oracle-Error-Code:", error.code
    print "updatePandaWAN.py Oracle-Error-Message:", error.message

print 'leaving only the first 2000 in the list.'
del ids[2000:]


from pandatools import Client
status,jobSpec = Client.getFullJobStatus(ids,False)
print 'got back from panda:', len(jobSpec)

try:
    connection = cx_Oracle.Connection(line)
    cursor = cx_Oracle.Cursor(connection)
    print 'Connection established.'
    for i in jobSpec:
        if i is None:
            print "problem..."
            continue 
        ti=i.pilotTiming.split('|')
        if len(ti)!=5:
            print i.PandaID,i.pilotTiming,ti
            #cursor.execute("DELETE FROM ATLAS_WANHCTEST.result where pandaid=:pid",{'pid':i.PandaID});
            continue
Example #5
 def retry(self,JobsetID,newSite=False,newOpts={},noSubmit=False,ignoreDuplication=False,useJobsetID=False,retryBuild=False,reproduceFiles=[],unsetRetryID=False):
     # get logger
     tmpLog = PLogger.getPandaLogger()
     # check proxy
     self.gridPassPhrase,self.vomsFQAN = PsubUtils.checkGridProxy(
         self.gridPassPhrase,
         False,
         self.verbose,
         useCache=True)
     # force update just in case
     self.status(JobsetID,True)
     # set an empty map since mutable default value is used
     if newOpts == {}:
         newOpts = {}
     # get jobset
     newJobsetID = -1
     jobList = self.getJobIDsWithSetID(JobsetID)
     if jobList == None:
         # works only for jobsetID
         if useJobsetID:
             return
         # works with jobID   
         isJobset = False
         jobList = [JobsetID]
     else:
         isJobset = True
         tmpMsg = "ID=%s is composed of JobID=" % JobsetID
         for tmpJobID in jobList:
             tmpMsg += '%s,' % tmpJobID
         tmpMsg = tmpMsg[:-1]
         tmpLog.info(tmpMsg)
     for JobID in jobList:    
         # get job info from local repository
         localJob = self.getJobInfo(JobID)
         if localJob == None:
             tmpLog.warning("JobID=%s not found in local repository. Synchronization may be needed" % JobID)            
             return None
         # for JEDI
         if localJob.isJEDI():
             status,out = Client.retryTask(
                     localJob.jediTaskID,
                     verbose=self.verbose,
                     properErrorCode=True,
                     newParams=newOpts)
             if status != 0:
                 tmpLog.error(status)
                 tmpLog.error(out)
                 tmpLog.error("Failed to retry TaskID=%s" % localJob.jediTaskID)
                 return False
             tmpStat,tmpDiag = out
             if (not tmpStat in [0,True] and newOpts == {}) or (newOpts != {} and tmpStat != 3):
                 tmpLog.error(tmpDiag)
                 tmpLog.error("Failed to retry TaskID=%s" % localJob.jediTaskID)
                 return False
             tmpLog.info(tmpDiag)
             continue
         # skip running job
         if localJob.dbStatus != 'frozen':
             tmpLog.info('Retry failed subjobs in running jobId=%s' % JobID)
             status,out = Client.retryFailedJobsInActive(JobID,verbose=self.verbose)
             if status != 0:
                 tmpLog.error(status)
                 tmpLog.error(out)
                 tmpLog.error("Failed to retry JobID=%s" % JobID)
             else:
                 job = self.status(JobID)
             if isJobset:
                 continue
             else:
                 return
         # skip already retried
         if localJob.retryID != '0':
             if isJobset:
                 tmpLog.info('Skip JobID=%s since already retried by JobID=%s JobsetID=%s' % \
                             (JobID,localJob.retryID,localJob.retryJobsetID))
                 continue
             else:
                 tmpLog.warning('This job was already retried by JobID=%s' % localJob.retryID)
                 return
         # check status of buildJob
         if not retryBuild and not localJob.buildStatus in ['','finished']:
             tmpMsgStr = 'Cannot retry since status of buildJob %s is %s (!= finished). ' \
                         % (localJob.PandaID.split(',')[0],localJob.buildStatus)
             tmpMsgStr += 'Please execute %s with the same input/output datasets (or containers). ' % localJob.jobType
             tmpMsgStr += 'It will run only on failed/cancelled/unused input files '
             tmpMsgStr += 'and append output files to the output dataset container. '
             tmpMsgStr += 'Or you may set retryBuild=True in pbook.retry() '                
             tmpLog.warning(tmpMsgStr)
             if isJobset:
                 continue
             else:
                 return
         # check opts for newSite
         if newSite or newOpts != {}:
             if not localJob.outDS.endswith('/') and not newOpts.has_key('outDS') and not newOpts.has_key('--outDS'):
                 tmpLog.warning('You need to specify --outDS in newOpts to retry at new site unless container is used as output')
                 return
         # get list of failed jobs
         pandaIDs  = localJob.PandaID.split(',')
         statusList= localJob.jobStatus.split(',')
         jobList = []
         for idx in range(len(pandaIDs)):
             # check status unless reproduce files
             if reproduceFiles == [] and not statusList[idx] in ['failed','cancelled']:
                 continue
             jobList.append(pandaIDs[idx])
         # no failed job
         if jobList == []:
             if isJobset:
                 tmpLog.info('Skip JobID=%s since no failed jobs' % JobID)                    
                 continue
             else:
                 tmpLog.info('No failed jobs to be retried for JobID=%s' % JobID)
                 return
         # get full job spec
         tmpLog.info("Retrying JobID=%s ..." % JobID)
         tmpLog.info("Getting job info")
         idxJL  = 0
         nQuery = 500
         pandaJobs = []
         while idxJL < len(jobList):
             # avoid burst query
             tmpLog.info(" %5s/%s" % (idxJL,len(jobList)))                
             status,oTmp = Client.getFullJobStatus(
                     jobList[idxJL:idxJL+nQuery],
                     verbose=self.verbose)
             if status != 0:
                 tmpLog.error(status)
                 tmpLog.error(oTmp)
                 tmpLog.error("Cannot get job info from Panda server")
                 return
             pandaJobs += oTmp
             idxJL += nQuery
             time.sleep(1)
         tmpLog.info(" %5s/%s" % (len(jobList),len(jobList)))
         # get PandaIDs to reproduce files
         if reproduceFiles != []:
             # change wildcard to .* for regexp
             reproduceFilePatt = []
             for tmpReproduceFile in reproduceFiles:
                 if '*' in tmpReproduceFile:
                     tmpReproduceFile = tmpReproduceFile.replace('*','.*')
                 reproduceFilePatt.append(tmpReproduceFile)
             # get list of jobs which produced interesting files    
             tmpJobList = []
             tmpPandaJobs = []
             for tmpPandaJob in pandaJobs:
                 # check names
                 tmpMatchFlag = False
                 for tmpFile in tmpPandaJob.Files:
                     if tmpFile.type == 'output' and tmpFile.status == 'ready':
                         for tmpReproduceFile in reproduceFilePatt:
                             # normal matching
                             if tmpReproduceFile == tmpFile.lfn:
                                 tmpMatchFlag = True
                                 break
                             # wild card
                             if '*' in tmpReproduceFile and \
                                re.search('^'+tmpReproduceFile,tmpFile.lfn) != None:
                                 tmpMatchFlag = True
                                 break
                         if tmpMatchFlag:
                             break
                 # append
                 if tmpMatchFlag:
                     tmpJobList.append(tmpPandaJob.PandaID)
                     tmpPandaJobs.append(tmpPandaJob)
             # use new list
             jobList = tmpJobList
             pandaJobs = tmpPandaJobs
             if jobList == []:
                 tmpLog.info("No jobs to reproduce files : Jobs in JobID=%s didn't produce lost files" % JobID)
                 continue
         # jobdefID
         newJobdefID = PsubUtils.readJobDefID()
         # reset some parameters
         retryJobs    = []
         retrySite    = None
         retryElement = None
         retryDestSE  = None
         outDsName    = None
         shadowList   = []
         oldLibDS     = None
         newLibDS     = None
         newLibTgz    = None
         rebroMap     = {}
         for idx in range(len(jobList)):
             job = pandaJobs[idx]
             # skip expired
             if job == None:
                 tmpLog.warning("Could not retry jobs older than 30 days : JobID=%s (PandaID=%s) expired" \
                                % (JobID,jobList[idx]))
                 return
             # skip jobs reassigned by rebrokerage
             if (job.jobStatus == 'cancelled' and job.taskBufferErrorCode in [105,'105']) or \
                    (job.jobStatus == 'failed' and job.taskBufferErrorCode in [106,'106']):
                 # extract JobIDs of reassigned jobs
                 tmpM = re.search('JobsetID=(\d+) JobID=(\d+)',job.taskBufferErrorDiag)
                 if tmpM != None:
                     tmpRebKey = (tmpM.group(1),tmpM.group(2))
                     if not rebroMap.has_key(tmpRebKey):
                         rebroMap[tmpRebKey] = 0
                     # count # of reassigned jobs
                     rebroMap[tmpRebKey] += 1
                 continue
             # get shadow list
             if (not ignoreDuplication) and outDsName == None and job.prodSourceLabel == 'user':
                 # look for dataset for log since it doesn't have suffix even when --individualOutDS is used
                 for tmpFile in job.Files:
                     if tmpFile.type == 'log':
                         outDsName = tmpFile.dataset
                         break
                 # output dataset was not found    
                 if outDsName == None:
                     tmpLog.error("Could not get output dataset name for JobID=%s (PandaID=%s)" \
                                  % (JobID,job.PandaID))
                     return
                 # get files in shadow
                 if outDsName.endswith('/'):
                     shadowList = Client.getFilesInShadowDataset(
                             outDsName,
                             Client.suffixShadow,
                             self.verbose)
                 else:
                     # disable duplication check mainly for old overlay jobs since non-signal files are wrongly skipped
                     #shadowList = Client.getFilesInShadowDatasetOld(outDsName,Client.suffixShadow,self.verbose)
                     pass
             # unify sitename
             if retrySite == None:
                 retrySite    = job.computingSite
                 retryElement = job.computingElement
                 retryDestSE  = job.destinationSE
             # reset
             job.jobStatus           = None
             job.commandToPilot      = None
             job.startTime           = None
             job.endTime             = None
             job.attemptNr           = 1+job.attemptNr
             for attr in job._attributes:
                 if attr.endswith('ErrorCode') or attr.endswith('ErrorDiag'):
                     setattr(job,attr,None)
             job.transExitCode       = None
             job.computingSite       = retrySite
             job.computingElement    = retryElement
             job.destinationSE       = retryDestSE
             job.dispatchDBlock      = None
             if not unsetRetryID:
                 job.jobExecutionID  = JobID
             job.jobDefinitionID     = newJobdefID
             job.parentID            = job.PandaID
             if job.jobsetID != ['NULL',None,-1]:
                 if not unsetRetryID:
                     job.sourceSite  = job.jobsetID
                 job.jobsetID        = newJobsetID
             skipInputList = []
             numUsedFiles = 0
             # loop over all files    
             for file in job.Files:
                 file.rowID = None
                 if file.type == 'input':
                     # protection against wrong sync which doesn't update buildStatus correctly
                     if not retryBuild and file.lfn.endswith('.lib.tgz') and file.GUID == 'NULL':
                         tmpLog.warning('GUID for %s is unknown. Cannot retry when corresponding buildJob failed' \
                                        % file.lfn)
                         return
                     if not retryBuild or not file.lfn.endswith('.lib.tgz'):
                         file.status = 'ready'
                     # set new lib dataset    
                     if retryBuild and file.lfn.endswith('.lib.tgz'):
                         if newLibTgz != None:
                             file.lfn            = newLibTgz
                             file.dataset        = newLibDS
                             file.dispatchDBlock = newLibDS
                     # check with shadow for non lib.tgz/DBR 
                     tmpDbrMatch = re.search('^DBRelease-.*\.tar\.gz$',file.lfn)
                     if tmpDbrMatch == None and not file.lfn.endswith('.lib.tgz'):
                         if file.lfn in shadowList:
                             skipInputList.append(file)
                         else:
                             numUsedFiles += 1
                 elif file.type in ('output','log'):
                     file.destinationSE = retryDestSE
                     file.destinationDBlock = re.sub('_sub\d+$','',file.destinationDBlock)
                     # add retry num
                     if file.dataset.endswith('/') or job.prodSourceLabel == 'panda':
                         oldOutDsName = file.destinationDBlock
                         retryDsPatt = '_r'
                         if reproduceFiles != []:
                             retryDsPatt = '_rp'
                         retryMatch = re.search(retryDsPatt+'(\d+)$',file.destinationDBlock)
                         if retryMatch == None:
                             file.destinationDBlock += (retryDsPatt+'1')
                         else:
                             tmpDestinationDBlock = re.sub(retryDsPatt+'(\d+)$','',file.destinationDBlock)
                             file.destinationDBlock = tmpDestinationDBlock + retryDsPatt + '%d' % (1+int(retryMatch.group(1)))
                         if job.processingType == 'usermerge':
                             job.jobParameters = job.jobParameters.replace(' %s ' % oldOutDsName,
                                                                           ' %s ' % file.destinationDBlock)
                         # use new dataset name for buildXYZ
                         if job.prodSourceLabel == 'panda':
                             if file.lfn.endswith('.lib.tgz'):
                                 # get new libDS and lib.tgz names
                                 oldLibDS  = file.dataset
                                 file.dataset = file.destinationDBlock
                                 newLibDS = file.dataset
                                 file.lfn = re.sub(oldLibDS,newLibDS,file.lfn)
                                 newLibTgz = file.lfn
                             else:
                                 file.dataset = file.destinationDBlock                                    
                     # add attempt nr
                     oldName  = file.lfn
                     if job.prodSourceLabel == 'panda' and file.lfn.endswith('.lib.tgz'):
                         continue
                     else:
                         # append attempt number at the tail 
                         file.lfn = re.sub("\.\d+$","",file.lfn)
                         file.lfn = "%s.%d" % (file.lfn,job.attemptNr)
                     newName  = file.lfn
                     # modify jobParameters
                     job.jobParameters = re.sub("'%s'" % oldName ,"'%s'" % newName,
                                                job.jobParameters)
                     # look for output in trf
             oldGenericName = re.sub('\.\d+$','',oldName)
             match = re.search(oldGenericName+'(\.\d+)*(%20|")',job.jobParameters)
                     if match != None:
                         job.jobParameters = job.jobParameters.replace(match.group(0),newName+match.group(2))
             # change lib.tgz name
             if retryBuild and newLibDS != None:
                 job.jobParameters = re.sub(oldLibDS,newLibDS,job.jobParameters)
                 # change destinationDBlock
                 if job.prodSourceLabel == 'panda':
                     job.destinationDBlock = newLibDS
             # all files are used by others
             if numUsedFiles == 0 and skipInputList != []:
                 continue
             # remove skipped files
             strSkipped = ''
             for tmpFile in skipInputList:
                 strSkipped += '%s,' % tmpFile.lfn
                 job.Files.remove(tmpFile)
             strSkipped = strSkipped[:-1]
             # modify jobpar
             if strSkipped != '':
                 optionToSkipFiles = '--skipInputByRetry'
                 if not optionToSkipFiles in job.jobParameters:
                     # just append
                     job.jobParameters += "%s=%s " % (optionToSkipFiles,strSkipped)
                 else:
                     # extract already skipped files
                      tmpMatch = re.search("(%s=[^ ]+)" % optionToSkipFiles, job.jobParameters)
                     if tmpMatch == None:
                         tmpLog.error("Failed to extract arg of %s for PandaID=%s" \
                                      % (optionToSkipFiles,job.PandaID))
                         return
                     # replace
                      job.jobParameters = re.sub(tmpMatch.group(1),"%s,%s" % (tmpMatch.group(1),strSkipped),
                                                 job.jobParameters)
             if self.verbose:
                 tmpLog.debug(job.jobParameters)
             # append
             retryJobs.append(job)
          # info on rebrokerage
         if rebroMap != {}:
             for tmpRebKey,tmpRebNumJobs in rebroMap.iteritems():
                 tmpRebSetID,tmpRebJobID = tmpRebKey
                 tmpLog.info('Skip %s jobs since JobID=%s JobsetID=%s already reassigned them to another site' % \
                             (tmpRebNumJobs,tmpRebJobID,tmpRebSetID))
             if retryJobs == []:
                 tmpLog.info("No more jobs to be retried for JobID=%s" % JobID)
                 if isJobset:
                     continue
                 else:
                     return
         # all input files were or are being used by other jobs
         if retryJobs == []:
             tmpLog.info('All input files were or are being used by other jobs for the same output. No jobs to be retried. If you need to ignore duplication check (e.g., using the same EVNT file for multiple simulation subjobs), set ignoreDuplication=True. i.e. retry(123,ignoreDuplication=True)')
             if isJobset:
                 continue
             else:
                 return
         # check voms role
         if not retryJobs[0].workingGroup in ['NULL',None,'']:
             # VOMS role was used 
             if not "--workingGroup" in job.metadata:
                 # extract voms roles from metadata
                 match =  re.search("--voms( |=)[ \"]*([^ \"]+)",job.metadata)
                 if match != None:
                     vomsRoles = match.group(2)
                 else:
                     vomsRoles = "atlas:/atlas/%s/Role=production" % retryJobs[0].workingGroup
             # regenerate proxy with VOMS roles
             try:
                 tmpLog.info("Checking proxy role to resubmit %s jobs" % retryJobs[0].workingGroup)
                 self.gridPassPhrase,self.vomsFQAN = PsubUtils.checkGridProxy(
                         self.gridPassPhrase,
                         False,
                         self.verbose,vomsRoles,
                         useCache=True)
             except:
                 tmpLog.error("Failed to generate a proxy with %s" % vomsRoles)
                 return
         # check runtime env for new site submission
         if (newSite or newOpts != {}):
             if retryJobs[0].processingType == 'pathena' or '--useAthenaPackages' in retryJobs[0].metadata:
                 from pandatools import AthenaUtils
                 stA,retA = AthenaUtils.getAthenaVer()
                 if not stA:
                     tmpLog.error("Failed to get Athena rel/cache version in current runtime env")
                     return
                 athenaVer = retA['athenaVer']
                 cacheVer  = retA['cacheVer']
                 nightVer  = retA['nightVer']
                 wrongSetup = False
                 if retryJobs[0].AtlasRelease != 'Atlas-%s' % athenaVer:
                     wrongSetup = True
                     errMsg =  "Current Athena version Atlas-%s is inconsitent with the previous submission %s. " % (athenaVer,retryJobs[0].AtlasRelease)
                 elif retryJobs[0].homepackage != 'AnalysisTransforms'+cacheVer+nightVer:
                     wrongSetup = True                        
                     errMsg =  "Current cache version %s is inconsitent with the previous submission. " % cacheVer.replace('-','').replace('_','-')
                 if wrongSetup:    
                     errMsg += 'You need to have the same runtime env as before since all job spec need to be re-created to send jobs to a new site. '
                     errMsg += 'Please setup Athena correctly and restart pbook'                        
                     tmpLog.error(errMsg)
                     return
         # test mode
         if noSubmit:
             continue
         # invoke pathena/prun to send job to new site
         if (newSite or newOpts != {}) and retryJobs[0].processingType != 'usermerge':
             # set parent jobID and jobsetID
             newOpts['provenanceID'] = retryJobs[0].jobExecutionID
             newOpts['panda_parentJobsetID'] = retryJobs[0].sourceSite
             tmpLog.info("Constructing job spec again to be sent to another site ...")
             comStat= PsubUtils.execWithModifiedParams(retryJobs,newOpts,self.verbose,newSite)
             if comStat == 0:
                 # update database
                 time.sleep(2)
                 self.sync()
             else:
                 tmpLog.error("Failed to submit jobs to Panda server")                
             return
         # register datasets
         tmpOutDsLocation = Client.PandaSites[retryJobs[-1].computingSite]['ddm']
         addedDataset = []
         shadowDSname = None
         for tmpFile in retryJobs[-1].Files:
             if tmpFile.type in ['output','log'] and tmpFile.dataset.endswith('/'):
                 # add shadow
                 """
                 removed shadow
                 if shadowDSname == None and tmpFile.type == 'log':
                     shadowDSname = "%s%s" % (tmpFile.destinationDBlock,Client.suffixShadow)
                     Client.addDataset(shadowDSname,self.verbose)
                 """    
                 # add datasets    
                 if not tmpFile.destinationDBlock in addedDataset:
                     # create dataset
                     Client.addDataset(
                             tmpFile.destinationDBlock,
                             self.verbose,
                             location=tmpOutDsLocation,
                             dsCheck=False)
                     # add to container
                     Client.addDatasetsToContainer(
                             tmpFile.dataset,
                             [tmpFile.destinationDBlock],
                             self.verbose)
                     # append
                     addedDataset.append(tmpFile.destinationDBlock)
         # register libDS
         if retryBuild and newLibDS != None:
             Client.addDataset(
                     newLibDS,
                     self.verbose,
                     location=tmpOutDsLocation,
                     dsCheck=False)
         # submit
         tmpLog.info("Submitting job ...")            
         status,out = Client.submitJobs(retryJobs,verbose=self.verbose)
         if out == None or status != 0:
             tmpLog.error(status)
             tmpLog.error(out)
             tmpLog.error("Failed to submit jobs to Panda server")
             return
         # update database
         pandaIDstatus = {}
         newJobID = None
         for items in out:
             # get newJobID
             if newJobID == None:
                 newJobID = items[1]
             # check PandaID
             PandaID = items[0]
             if PandaID == 'NULL':
                 tmpLog.error("Panda server returned wrong IDs. It may have a temporary problem")
                 return
             # set newJobsetID
             if newJobsetID in [None,-1]:
                 newJobsetID = items[2]['jobsetID']
              # dummy status
             pandaIDstatus[PandaID] = ('defined','NULL')
         # set retry ID
         if not unsetRetryID:
             localJob.retryID = newJobID
             if not newJobsetID in [None,-1,'NULL']:
                 localJob.retryJobsetID = newJobsetID
             try:
                 PdbUtils.updateJobDB(localJob,self.verbose)
             except:
                 tmpLog.error("Failed to set retryID for JobID=%s" % JobID)
                 return
          # set new parameters
         newLocalJob = PdbUtils.convertPtoD(retryJobs,pandaIDstatus)
         newLocalJob.JobID = newJobID
         if not newJobsetID in [None,-1,'NULL']:
             newLocalJob.groupID = newJobsetID
         newLocalJob.creationTime = datetime.datetime.utcnow()
         # insert to DB
         try:
             PdbUtils.insertJobDB(newLocalJob,self.verbose)
         except:
             tmpLog.error("Failed to insert JobID=%s to local repository" % newJobID)
             return
         # write new jobdefID
         PsubUtils.writeJobDefID(newJobID)
         # done
         tmpMsg = 'Done. New JobID=%s' % newJobID
         if not newJobsetID in [None,-1,'NULL']:
             tmpMsg += " JobsetID=%s" % newJobsetID
         tmpLog.info(tmpMsg)
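The retry path above fetches job specs in slices of nQuery=500 with a one-second pause between requests to avoid burst queries. The same pattern extracted as a standalone helper, a sketch assuming only Client.getFullJobStatus from pandatools:

import time
from pandatools import Client

def getFullJobStatusChunked(pandaIDs, chunkSize=500, verbose=False):
    # Query the Panda server in fixed-size slices and concatenate the
    # returned job specs, pausing between requests as the retry code does.
    specs = []
    for idx in range(0, len(pandaIDs), chunkSize):
        status, oTmp = Client.getFullJobStatus(pandaIDs[idx:idx + chunkSize],
                                               verbose=verbose)
        if status != 0:
            raise RuntimeError("getFullJobStatus failed with code %s" % status)
        specs += oTmp
        time.sleep(1)  # avoid burst queries
    return specs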
Example #6
 def status(self,JobID,forceUpdate=False):
     # get logger
     tmpLog = PLogger.getPandaLogger()
     # check proxy
     self.gridPassPhrase,self.vomsFQAN = PsubUtils.checkGridProxy(
             self.gridPassPhrase,
             False,
             self.verbose,
             useCache=True)
     # get job info from local repository
     job = self.getJobInfo(JobID)
     if job == None:
         # not found
         return None
     # update if needed
     if job.dbStatus != 'frozen' or forceUpdate:
         if not job.isJEDI():
             tmpLog.info("Getting status for JobID=%s ..." % JobID)
             # get status from Panda server
             status,pandaIDstatus = Client.getPandIDsWithJobID(JobID,verbose=self.verbose)
             if status != 0:
                 tmpLog.error("Failed to get status for ID=%s" % JobID)
                 return None
             # get one job to set computingSite which may have changed due to rebrokerage
             pandaJob = None
             if pandaIDstatus != {}:
                 tmpPandaIDs = pandaIDstatus.keys()
                 tmpPandaIDs.sort()
                 status,tmpPandaJobs = Client.getFullJobStatus(
                         tmpPandaIDs[:1],
                         verbose=self.verbose)
                 if status != 0:
                     tmpLog.error("Failed to get PandaJobs for %s" % JobID)
                     return None
                 pandaJob = tmpPandaJobs[0]
             # convert to local job spec
             job = PdbUtils.convertPtoD([],pandaIDstatus,job,pandaJobForSiteID=pandaJob)
             # check merge job generation
             status = self.setMergeJobStatus(job,forceUpdate)
             if not status:
                 return None
         else:
             tmpLog.info("Getting status for TaskID=%s ..." % job.jediTaskID)
             # get JEDI task
             status,jediTaskDict = Client.getJediTaskDetails(
                     {'jediTaskID':job.jediTaskID},
                     False,
                     True,
                     verbose=self.verbose)
             if status != 0:
                 tmpLog.error("Failed to get task details for %s" % JobID)
                 return
             # convert JEDI task
             job = PdbUtils.convertJTtoD(jediTaskDict,job)
         # update DB
         try:
             PdbUtils.updateJobDB(job,self.verbose)
         except:
             tmpLog.error("Failed to update local repository for JobID=%s" % JobID)
             return None
         if not job.isJEDI():
             tmpLog.info("Updated JobID=%s" % JobID)                        
         else:
             tmpLog.info("Updated TaskID=%s ..." % job.jediTaskID)
     # return
     return job
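A usage sketch for the status()/retry() methods shown in these examples; the bookkeeper argument stands in for whatever pbook-style object actually exposes them, and the retryBuild flag is the one from the retry() signature in Example #5:

def refreshAndRetry(bookkeeper, jobID):
    # Hypothetical driver: force a status refresh from the Panda server,
    # then retry failed subjobs once the job has frozen.
    job = bookkeeper.status(jobID, forceUpdate=True)
    if job is not None and job.dbStatus == 'frozen':
        bookkeeper.retry(jobID, retryBuild=True)
    return job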
Example #7
File: Jedi.py Project: MannyMoo/ganga
    def master_updateMonitoringInformation(jobs):
        '''Monitor jobs'''       
        from pandatools import Client

        #active_status = [ None, 'defined', 'unknown', 'assigned', 'waiting', 'activated', 'sent', 'starting', 'running', 'holding', 'transferring' ]

        submitting_status = [ ]
        active_status = [ None, 'registered', 'waiting', 'defined', 'pending', 'assigning', 'ready', 'scouting', 'running', 'holding', 'merging', 'prepared', 'aborting', 'finishing' ]
 
        inactive_status = [ 'finished', 'aborted', 'broken', 'failed', 'done' ]

        # Find jobs to be monitored
        jobdict = {}
        for job in jobs:
            # add a delay as Panda can be a little slow in sorting out a new Task
            if job.backend.id and job.backend.status in active_status and ( (datetime.datetime.utcnow() - job.time.timestamps["submitted"]).seconds > 120):
                jobdict[job.backend.id] = job 

        logger.debug("jobdict = %s" %jobdict)
        
        # Monitor active Jedi tasks
        allJobIDs = jobdict.keys()
        pandaJobIDs = {}
        for jID in allJobIDs:
            status, jediTaskDict = Client.getJediTaskDetails({'jediTaskID': jID},False,True,verbose=False)
            if status != 0:
                logger.error("Failed to get task details for %s" % jID)
                #raise BackendError('Jedi','Return code %d retrieving job status information.' % status)
                continue
            # Retrieve job
            job = jobdict[jediTaskDict['jediTaskID']]
            # Store associated Panda jobs
            if job.backend.pandajobs:
                pandaJobIDs[job.backend.id] = [pj.id for pj in job.backend.pandajobs]
            else:
                pandaJobIDs[jediTaskDict['jediTaskID']] = jediTaskDict['PandaID']
            logger.debug("jID = %s, pandaJobIDs = %s" % (jID, pandaJobIDs))

            # Fill the output data dataset list
            if 'outDS' in jediTaskDict and jediTaskDict['outDS'] != '':
                for ds in jediTaskDict['outDS'].split(','):
                    if not ds in job.outputdata.datasetList:
                        job.outputdata.datasetList.append(ds)

            # Jedi job status has changed
            if job.backend.status != jediTaskDict['status']:
                logger.debug('Job %s has changed status from %s to %s',job.getFQID('.'),job.backend.status, jediTaskDict['status'])
                job.backend.status = jediTaskDict['status']
                job.backend.reason = jediTaskDict['statistics']

                # Now update Jedi job status
                if jediTaskDict['status'] in ['registered', 'waiting', 'defined', 'pending', 'assigning', 'ready']:
                    job.updateStatus('submitted')
                elif jediTaskDict['status'] in ['scouting', 'running', 'holding', 'merging', 'prepared' ]:
                    job.updateStatus('running')
                elif jediTaskDict['status'] in ['done']:
                    job.updateStatus('completed')
                elif jediTaskDict['status'] in ['failed', 'finished']:
                    job.updateStatus('failed')
                elif jediTaskDict['status'] in [ 'aborted', 'broken', 'cancelled' ] and job.status not in ['completed','failed']:
                    job.updateStatus('killed')
                else:
                    logger.warning('Unexpected Jedi task status %s', jediTaskDict['status'])

            # Check if associated Panda jobs exist and monitor them
            if not job.backend.pandajobs:
                jdefids = pandaJobIDs[jID]
                # skip if there are no Panda jobs yet 
                if not jdefids:
                    continue
                tot_num_mjobs = 0

                do_master_update = True
                ick,status,num_mjobs = retrievePandaJobs(job, jdefids)
                logger.debug('retrievePandaJobs returns: %s %s' % (repr(ick),status))
                if not ick:
                    logger.debug('Panda job retrieval failure for Jedi task %s with PandaIds %s' % (job.backend.id, jdefids))
                    do_master_update = False

                tot_num_mjobs += num_mjobs
                logger.debug('Job %s retrieved %d Panda jobs' % (job.getFQID('.'),tot_num_mjobs) )
            # Now monitor the already attached Panda jobs
            else:
                jdefids = [ pj.id for pj in job.backend.pandajobs ] 
                rc, jobsStatus = Client.getFullJobStatus(jdefids,False)
                if rc:
                    logger.error('Return code %d retrieving job status information.',rc)
                    raise BackendError('Jedi','Return code %d retrieving job status information.' % rc)

                for status in jobsStatus:
                    if not status: continue

                    for pjob in job.backend.pandajobs:
                        if pjob.id == status.PandaID:
                            # skip if no status change
                            if pjob.status == status.jobStatus:
                                continue 
                            # Else update job record
                            pjob.jobSpec = dict(zip(status._attributes,status.values()))

                            for k in pjob.jobSpec.keys():
                                if type(pjob.jobSpec[k]) not in [type(''),type(1)]:
                                    pjob.jobSpec[k]=str(pjob.jobSpec[k])

                            logger.debug('Job %s with Panda job %s has changed status from %s to %s',job.getFQID('.'),pjob.id, pjob.status,status.jobStatus)
                            pjob.status = status.jobStatus
                            pjob.exitcode = str(status.transExitCode)
                            pjob.piloterrorcode = str(status.pilotErrorCode)
                            pjob.reason = ''
                            for k in pjob.jobSpec.keys():
                                if k.endswith('ErrorDiag') and pjob.jobSpec[k]!='NULL':
                                    pjob.reason += '%s: %s, '%(k,str(pjob.jobSpec[k]))
                            #if job.backend.jobSpec['transExitCode'] != 'NULL':
                            pjob.reason += 'transExitCode: %s'%pjob.jobSpec['transExitCode']

                            if status.jobStatus in ['defined','unknown','assigned','waiting','activated','sent']:
                                logger.debug('Panda job %s %s' % (pjob.id, status.jobStatus))
                            elif status.jobStatus in ['starting','running','holding','transferring', 'merging']:
                                logger.debug('Panda job %s %s '% (pjob.id, status.jobStatus))
                            elif status.jobStatus in ['finished']:
                                logger.debug('Panda job %s %s '% (pjob.id, status.jobStatus))
                            elif status.jobStatus == 'failed':
                                logger.debug('Panda job %s %s '% (pjob.id, status.jobStatus))
                                # check for server side retry
                                if 'taskBufferErrorDiag' in pjob.jobSpec and pjob.jobSpec['taskBufferErrorDiag'].find("PandaID=") != -1:
                                    # grab the new panda ID
                                    newPandaID = long(pjob.jobSpec['taskBufferErrorDiag'].split("=")[1])
                                    pjob.id = newPandaID
                                    pjob.status = None
                                    pjob.url = 'http://panda.cern.ch/?job=%d'%newPandaID
                            elif status.jobStatus == 'cancelled' and pjob.status not in ['completed','failed']: # bug 67716
                                logger.debug('Panda job %s cancelled'%pjob.id)
                                if 'taskBufferErrorDiag' in pjob.jobSpec and "rebrokerage" in pjob.jobSpec['taskBufferErrorDiag']:
                                    newPandaID = checkForRebrokerage(pjob.jobSpec['taskBufferErrorDiag'])
                                    logger.warning("Subjob rebrokered by Panda server. Job %d moved to %d."%(pjob.id, newPandaID))
                                    pjob.id = newPandaID
                                    pjob.status = None
                            else:
                                logger.warning('Unexpected job status %s',status.jobStatus)
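checkForRebrokerage is called above but not defined in any of these examples. A minimal sketch of what such a helper might look like, assuming (as the failed-job branch above does) that the new ID is embedded in the diagnostic as "PandaID=<digits>":

import re

def checkForRebrokerage(taskBufferErrorDiag):
    # Hypothetical parser: pull the new PandaID out of a diagnostic such as
    # "...rebrokerage... new PandaID=1234567890".
    match = re.search(r'PandaID=(\d+)', taskBufferErrorDiag)
    if match is None:
        raise ValueError("no PandaID in diagnostic: %s" % taskBufferErrorDiag)
    return long(match.group(1))  # long() to match the Python 2 code above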
Example #8
    def retry(self,
              JobsetID,
              newSite=False,
              newOpts={},
              noSubmit=False,
              ignoreDuplication=True):
        # get logger
        tmpLog = PLogger.getPandaLogger()
        # check proxy
        self.gridPassPhrase, self.vomsFQAN = PsubUtils.checkGridProxy(
            self.gridPassPhrase, False, self.verbose)
        # get jobset
        newJobsetID = -1
        jobList = self.getJobIDsWithSetID(JobsetID)
        if jobList == None:
            isJobset = False
            jobList = [JobsetID]
        else:
            isJobset = True
            tmpMsg = "JobsetID=%s is composed of JobID=" % JobsetID
            for tmpJobID in jobList:
                tmpMsg += '%s,' % tmpJobID
            tmpMsg = tmpMsg[:-1]
            tmpLog.info(tmpMsg)
        for JobID in jobList:
            # get job info from local repository
            localJob = self.getJobInfo(JobID)
            if localJob == None:
                tmpLog.warning(
                    "JobID=%s not found in local repository. Synchronization may be needed"
                    % JobID)
                return None
            # skip running job
            if localJob.dbStatus != 'frozen':
                tmpLog.warning('Cannot retry running jobs')
                if isJobset:
                    continue
                else:
                    return
            # skip already retried
            if localJob.retryID != '0':
                if isJobset:
                    tmpLog.info('Skip JobID=%s since already retried by JobID=%s JobsetID=%s' % \
                                (JobID,localJob.retryID,localJob.retryJobsetID))
                    continue
                else:
                    tmpLog.warning('This job was already retried by JobID=%s' %
                                   localJob.retryID)
                    return
            # check status of buildJob
            if not localJob.buildStatus in ['', 'finished']:
                tmpMsgStr = 'Cannot retry since status of buildJob %s is %s (!= finished). ' \
                            % (localJob.PandaID.split(',')[0],localJob.buildStatus)
                tmpMsgStr += 'Please execute %s with the same input/output datasets (or containers). ' % localJob.jobType
                tmpMsgStr += 'It will run only on failed/cancelled/unused input files '
                tmpMsgStr += 'and append output files to the output dataset container'
                tmpLog.warning(tmpMsgStr)
                if isJobset:
                    continue
                else:
                    return
            # check opts for newSite
            if newSite:
                if not localJob.outDS.endswith('/') and not newOpts.has_key(
                        'outDS') and not newOpts.has_key('--outDS'):
                    tmpLog.warning(
                        'You need to specify --outDS in newOpts to retry at new site unless container is used as output'
                    )
                    return
            # get list of failed jobs
            pandaIDs = localJob.PandaID.split(',')
            statusList = localJob.jobStatus.split(',')
            jobList = []
            for idx in range(len(pandaIDs)):
                # check status
                if not statusList[idx] in ['failed', 'cancelled']:
                    continue
                jobList.append(pandaIDs[idx])
            # no failed job
            if jobList == []:
                if isJobset:
                    tmpLog.info('Skip JobID=%s since no failed jobs' % JobID)
                    continue
                else:
                    tmpLog.info('No failed jobs to be retried for JobID=%s' %
                                JobID)
                    return
            # get full job spec
            tmpLog.info("Retrying JobID=%s ..." % JobID)
            tmpLog.info("Getting job info")
            idxJL = 0
            nQuery = 500
            pandaJobs = []
            while idxJL < len(jobList):
                # avoid burst query
                tmpLog.info(" %5s/%s" % (idxJL, len(jobList)))
                status, oTmp = Client.getFullJobStatus(jobList[idxJL:idxJL +
                                                               nQuery],
                                                       verbose=self.verbose)
                if status != 0:
                    tmpLog.error(status)
                    tmpLog.error(oTmp)
                    tmpLog.error("Cannot get job info from Panda server")
                    return
                pandaJobs += oTmp
                idxJL += nQuery
                time.sleep(1)
            tmpLog.info(" %5s/%s" % (len(jobList), len(jobList)))
            # jobdefID
            newJobdefID = PsubUtils.readJobDefID()
            # reset some parameters
            retryJobs = []
            retrySite = None
            retryElement = None
            retryDestSE = None
            outDsName = None
            shadowList = []
            for idx in range(len(jobList)):
                job = pandaJobs[idx]
                # skip expired
                if job == None:
                    tmpLog.warning("Could not retry jobs older than 30 days : JobID=%s (PandaID=%s) expired" \
                                   % (JobID,jobList[idxJob]))
                    return
                # get shadow list
                if (not ignoreDuplication) and outDsName == None \
                        and job.prodSourceLabel == 'user':
                    # look for dataset for log since it doesn't have suffix even when --individualOutDS is used
                    for tmpFile in job.Files:
                        if tmpFile.type == 'log':
                            outDsName = tmpFile.dataset
                            break
                    # output dataset was not found
                    if outDsName == None:
                        tmpLog.error("Could not get output dataset name for JobID=%s (PandaID=%s)" \
                                     % (JobID,job.PandaID))
                        return
                    # get files in shadow
                    if outDsName.endswith('/'):
                        shadowList = Client.getFilesInShadowDataset(
                            outDsName, Client.suffixShadow, self.verbose)
                    else:
                        # disable duplication check mainly for old overlay jobs since non-signal files are wrongly skipped
                        #shadowList = Client.getFilesInShadowDatasetOld(outDsName,Client.suffixShadow,self.verbose)
                        pass
                # unify sitename
                if retrySite == None:
                    retrySite = job.computingSite
                    retryElement = job.computingElement
                    retryDestSE = job.destinationSE
                # reset
                job.jobStatus = None
                job.commandToPilot = None
                job.startTime = None
                job.endTime = None
                job.attemptNr = 1 + job.attemptNr
                for attr in job._attributes:
                    if attr.endswith('ErrorCode') or attr.endswith(
                            'ErrorDiag'):
                        setattr(job, attr, None)
                job.transExitCode = None
                job.computingSite = retrySite
                job.computingElement = retryElement
                job.destinationSE = retryDestSE
                job.dispatchDBlock = None
                job.jobExecutionID = JobID
                job.jobDefinitionID = newJobdefID
                job.parentID = job.PandaID
                if not job.jobsetID in ['NULL', None, -1]:
                    job.sourceSite = job.jobsetID
                    job.jobsetID = newJobsetID
                skipInputList = []
                numUsedFiles = 0
                for file in job.Files:
                    file.rowID = None
                    if file.type == 'input':
                        # protection against wrong sync which doesn't update buildStatus correctly
                        if file.lfn.endswith(
                                '.lib.tgz') and file.GUID == 'NULL':
                            tmpLog.warning('GUID for %s is unknown. Cannot retry when corresponding buildJob failed' \
                                           % file.lfn)
                            return
                        file.status = 'ready'
                        # check with shadow for non lib.tgz/DBR
                        tmpDbrMatch = re.search('^DBRelease-.*\.tar\.gz$',
                                                file.lfn)
                        if tmpDbrMatch == None and not file.lfn.endswith(
                                '.lib.tgz'):
                            if file.lfn in shadowList:
                                skipInputList.append(file)
                            else:
                                numUsedFiles += 1
                    elif file.type in ('output', 'log'):
                        file.destinationSE = retryDestSE
                        file.destinationDBlock = re.sub(
                            '_sub\d+$', '', file.destinationDBlock)
                        # add retry num
                        if file.dataset.endswith('/'):
                            retryMatch = re.search('_r(\d+)$',
                                                   file.destinationDBlock)
                            if retryMatch == None:
                                file.destinationDBlock += '_r1'
                            else:
                                tmpDestinationDBlock = re.sub(
                                    '_r(\d+)$', '', file.destinationDBlock)
                                file.destinationDBlock = tmpDestinationDBlock + '_r%d' % (
                                    1 + int(retryMatch.group(1)))
                        # add attempt nr
                        oldName = file.lfn
                        file.lfn = re.sub("\.\d+$", "", file.lfn)
                        file.lfn = "%s.%d" % (file.lfn, job.attemptNr)
                        newName = file.lfn
                        # modify jobParameters
                        job.jobParameters = re.sub("'%s'" % oldName,
                                                   "'%s'" % newName,
                                                   job.jobParameters)
                        # look for output in trf
                        oldGenericName = re.sub('\.\d+$', '', oldName)
                        match = re.search(oldGenericName + '(\.\d+)*(%20|")',
                                          job.jobParameters)
                        if match != None:
                            job.jobParameters = job.jobParameters.replace(
                                match.group(0), newName + match.group(2))
                # all files are used by others
                if numUsedFiles == 0 and skipInputList != []:
                    continue
                # remove skipped files
                strSkipped = ''
                for tmpFile in skipInputList:
                    strSkipped += '%s,' % tmpFile.lfn
                    job.Files.remove(tmpFile)
                strSkipped = strSkipped[:-1]
                # modify jobpar
                if strSkipped != '':
                    optionToSkipFiles = '--skipInputByRetry'
                    if not optionToSkipFiles in job.jobParameters:
                        # just append
                        job.jobParameters += "%s=%s " % (optionToSkipFiles,
                                                         strSkipped)
                    else:
                        # extract already skipped files
                        tmpMatch = re.search("(%s=[^ ]+)", job.jobParameters)
                        if tmpMatch == None:
                            tmpLog.error("Failed to extract arg of %s for PandaID=%s" \
                                         % (optionToSkipFiles,job.PandaID))
                            return
                        # replace
                        job.jobParameters = re.sub(
                            tmpMatch.group(1),
                            "%s,%s" % (tmpMatch.group(1), strSkipped),
                            job.jobParameters)
                if self.verbose:
                    tmpLog.debug(job.jobParameters)
                # append
                retryJobs.append(job)
            # all input files were or are being used by other jobs
            if retryJobs == []:
                tmpLog.info(
                    'All input files were or are being used by other jobs for the same output. No jobs to be retried. If you need to ignore duplication check (e.g., using the same EVNT file for multiple simulation subjobs), set ignoreDuplication=True. i.e. retry(123,ignoreDuplication=True)'
                )
                return
            # check voms role
            if not retryJobs[0].workingGroup in ['NULL', None, '']:
                # VOMS role was used
                if not "--workingGroup" in job.metadata:
                    # extract voms roles from metadata
                    match = re.search("--voms( |=)[ \"]*([^ \"]+)",
                                      job.metadata)
                    if match != None:
                        vomsRoles = match.group(2)
                    else:
                        vomsRoles = "atlas:/atlas/%s/Role=production" % retryJobs[
                            0].workingGroup
# regenerate proxy with VOMS roles
                    try:
                        tmpLog.info("Checking proxy role to resubmit %s jobs" %
                                    retryJobs[0].workingGroup)
                        self.gridPassPhrase, self.vomsFQAN = PsubUtils.checkGridProxy(
                            self.gridPassPhrase, False, self.verbose,
                            vomsRoles)
                    except:
                        tmpLog.error("Failed to generate a proxy with %s" %
                                     vomsRoles)
                        return
            # check runtime env for new site submission
            if newSite:
                if retryJobs[0].processingType == 'pathena' or \
                        '--useAthenaPackages' in retryJobs[0].metadata:
                    from pandatools import AthenaUtils
                    stA, retA = AthenaUtils.getAthenaVer()
                    if not stA:
                        tmpLog.error(
                            "Failed to get Athena rel/cache version in current runtime env"
                        )
                        return
                    athenaVer = retA['athenaVer']
                    cacheVer = retA['cacheVer']
                    nightVer = retA['nightVer']
                    wrongSetup = False
                    if retryJobs[0].AtlasRelease != 'Atlas-%s' % athenaVer:
                        wrongSetup = True
                        errMsg = "Current Athena version Atlas-%s is inconsitent with the previous submission %s. " % (
                            athenaVer, retryJobs[0].AtlasRelease)
                    elif retryJobs[0].homepackage != 'AnalysisTransforms' + cacheVer + nightVer:
                        wrongSetup = True
                        errMsg = "Current cache version %s is inconsistent with the previous submission. " % cacheVer.replace(
                            '-', '').replace('_', '-')
                    if wrongSetup:
                        errMsg += 'You need to have the same runtime env as before since all job specs need to be re-created to send jobs to a new site. '
                        errMsg += 'Please set up Athena correctly and restart pbook'
                        tmpLog.error(errMsg)
                        return
            # test mode
            if noSubmit:
                continue
            # invoke pathena/prun to send job to new site
            if newSite:
                tmpLog.info(
                    "Constrcuting job spec again to be sent to another site ..."
                )
                comStat = PsubUtils.execWithModifiedParams(
                    retryJobs, newOpts, self.verbose)
                if comStat == 0:
                    # update database
                    time.sleep(2)
                    self.sync()
                else:
                    tmpLog.error("Failed to submit jobs to Panda server")
                return
            # register datasets
            tmpOutDsLocation = Client.PandaSites[
                retryJobs[-1].computingSite]['ddm']
            addedDataset = []
            shadowDSname = None
            for tmpFile in retryJobs[-1].Files:
                if tmpFile.type in ['output', 'log'] and tmpFile.dataset.endswith('/'):
                    # add shadow
                    if shadowDSname == None and tmpFile.type == 'log':
                        shadowDSname = "%s%s" % (tmpFile.destinationDBlock,
                                                 Client.suffixShadow)
                        Client.addDataset(shadowDSname, self.verbose)
                    # add datasets
                    if not tmpFile.destinationDBlock in addedDataset:
                        # create dataset
                        Client.addDataset(tmpFile.destinationDBlock,
                                          self.verbose,
                                          location=tmpOutDsLocation)
                        # add to container
                        Client.addDatasetsToContainer(
                            tmpFile.dataset, [tmpFile.destinationDBlock],
                            self.verbose)
                        # append
                        addedDataset.append(tmpFile.destinationDBlock)
            # submit
            tmpLog.info("Submitting job ...")
            status, out = Client.submitJobs(retryJobs, verbose=self.verbose)
            if out == None or status != 0:
                tmpLog.error(status)
                tmpLog.error(out)
                tmpLog.error("Failed to submit jobs to Panda server")
                return
            # update database
            pandaIDstatus = {}
            newJobID = None
            for items in out:
                # get newJobID
                if newJobID == None:
                    newJobID = items[1]
                # check PandaID
                PandaID = items[0]
                if PandaID == 'NULL':
                    tmpLog.error(
                        "Panda server returned wrong IDs. It may have a temporary problem"
                    )
                    return
                # set newJobsetID
                if newJobsetID in [None, -1]:
                    newJobsetID = items[2]['jobsetID']
                # dummy status
                pandaIDstatus[PandaID] = ('defined', 'NULL')
            # set retry ID
            localJob.retryID = newJobID
            if not newJobsetID in [None, -1, 'NULL']:
                localJob.retryJobsetID = newJobsetID
            try:
                PdbUtils.updateJobDB(localJob, self.verbose)
            except:
                tmpLog.error("Failed to set retryID for JobID=%s" % JobID)
                return
            # set new parameters
            newLocalJob = PdbUtils.convertPtoD(retryJobs, pandaIDstatus)
            newLocalJob.JobID = newJobID
            if not newJobsetID in [None, -1, 'NULL']:
                newLocalJob.groupID = newJobsetID
            newLocalJob.creationTime = datetime.datetime.utcnow()
            # insert to DB
            try:
                PdbUtils.insertJobDB(newLocalJob, self.verbose)
            except:
                tmpLog.error("Failed to insert JobID=%s to local repository" %
                             newJobID)
                return
            # write new jobdefID
            PsubUtils.writeJobDefID(newJobID)
            # done
            tmpMsg = 'Done. New JobID=%s' % newJobID
            if not newJobsetID in [None, -1, 'NULL']:
                tmpMsg += " JobsetID=%s" % newJobsetID
            tmpLog.info(tmpMsg)
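
For orientation, a hedged usage sketch for retry(). It assumes the enclosing class is pandatools' PBookCore; JobsetID 123 and the dataset name are placeholders, not values from this document:

from pandatools import PBookCore

pbook = PBookCore.PBookCore()

# retry only the failed/cancelled subjobs, reusing the original site
pbook.retry(123)

# dry run: walk through all checks but submit nothing
pbook.retry(123, noSubmit=True)

# resubmit to a new site; unless the output is a container, a fresh
# --outDS has to be supplied via newOpts
pbook.retry(123, newSite=True, newOpts={'--outDS': 'user.someone.mytask.v2/'})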
Example #11
    def sync(self):
        # get logger
        tmpLog = PLogger.getPandaLogger()
        tmpLog.info("Synchronizing local repository ...")
        # check proxy
        self.gridPassPhrase, self.vomsFQAN = PsubUtils.checkGridProxy(
            self.gridPassPhrase, False, self.verbose)
        # get JobIDs in local repository
        localJobIDs = PdbUtils.getListOfJobIDs()
        # get recent JobIDs from panda server
        syncTime = datetime.datetime.utcnow().strftime('%Y-%m-%d %H:%M:%S')
        # set sync time for the first attempt
        bookConf = BookConfig.getConfig()
        if self.restoreDB:
            # reset last_synctime to restore database
            bookConf.last_synctime = ''
            # disable
            self.restoreDB = False
            tmpLog.info(
                "It may take several minutes to restore local repository ...")
        if bookConf.last_synctime == '':
            bookConf.last_synctime = datetime.datetime.utcnow() - \
                datetime.timedelta(days=180)
            bookConf.last_synctime = bookConf.last_synctime.strftime(
                '%Y-%m-%d %H:%M:%S')
        status, remoteJobIDs = Client.getJobIDsInTimeRange(
            bookConf.last_synctime, verbose=self.verbose)
        if status != 0:
            tmpLog.error("Failed to get JobIDs from panda server")
            return
        tmpLog.info("Got %s jobs to be updated" % len(remoteJobIDs))
        # insert if missing
        for remoteJobID in remoteJobIDs:
            # check local status
            job = None
            if remoteJobID in localJobIDs:
                # get job info from local repository
                job = PdbUtils.readJobDB(remoteJobID, self.verbose)
                # skip if frozen
                if job.dbStatus == 'frozen':
                    continue
            tmpLog.info("Updating JobID=%s ..." % remoteJobID)
            # get PandaIDs
            status, pandaIDstatus = Client.getPandIDsWithJobID(
                remoteJobID, verbose=self.verbose)
            if status != 0:
                tmpLog.error("Failed to get PandaIDs for %s" % remoteJobID)
                return
            pandaIDs = pandaIDstatus.keys()
            pandaIDs.sort()
            # get full JobSpec
            pandaJobs = []
            pandaFileInfo = {}
            pandaJobForSiteID = None
            if job == None:
                tmpIDs = [pandaIDs[0], pandaIDs[-1]]
                status, pandaJobs = Client.getFullJobStatus(
                    tmpIDs, verbose=self.verbose)
                if status != 0:
                    tmpLog.error("Failed to get PandaJobs for %s" %
                                 remoteJobID)
                    return
                # get slimmed file info
                status, pandaFileInfo = Client.getSlimmedFileInfoPandaIDs(
                    pandaIDs, verbose=self.verbose)
                if status != 0:
                    tmpLog.error("Failed to get file info  for %s" %
                                 remoteJobID)
                    return
            else:
                # get one job to set computingSite which may have changed due to rebrokerage
                status, tmpPandaJobs = Client.getFullJobStatus(
                    [pandaIDs[0]], verbose=self.verbose)
                if status != 0:
                    tmpLog.error("Failed to get PandaJobs for %s" %
                                 remoteJobID)
                    return
                pandaJobForSiteID = tmpPandaJobs[0]
            # convert to local job spec
            localJob = PdbUtils.convertPtoD(pandaJobs, pandaIDstatus, job,
                                            pandaFileInfo, pandaJobForSiteID)
            # update database
            if not remoteJobID in localJobIDs:
                # insert to DB
                try:
                    PdbUtils.insertJobDB(localJob, self.verbose)
                except:
                    tmpLog.error(
                        "Failed to insert JobID=%s to local repository" %
                        remoteJobID)
                    return
                # set retryID
                if not localJob.provenanceID in [0, '0']:
                    try:
                        PdbUtils.setRetryID(localJob, self.verbose)
                    except:
                        tmpLog.error(
                            "Failed to set retryID for JobID=%s in local repository"
                            % remoteJobID)
                        return
            else:
                # update
                try:
                    PdbUtils.updateJobDB(localJob, self.verbose)
                except:
                    tmpLog.error(
                        "Failed to update local repository for JobID=%s" %
                        remoteJobID)
                    return
        # update sync time
        bookConf = BookConfig.getConfig()
        bookConf.last_synctime = syncTime
        BookConfig.updateConfig(bookConf)
        tmpLog.info("Synchronization Completed")