Code example #1
def main():
    """Submit one PanDA test job per pending job of task 'TestTaskBW'.

    Reads Task/Job rows via the Django ORM, builds a minimal JobSpec
    (a 'hello world' payload plus one log FileSpec) for each job and
    submits it through Client.submitJobs. At most ``max_send_amount``
    jobs are sent per task.
    """
    import uuid  # local import: replaces shelling out to `uuidgen`

    logger.info('Getting tasks with status send and running')
    #    tasks_list = Task.objects.all().filter(Q(status='send') | Q(status='running'))
    tasks_list = Task.objects.all().filter(name='TestTaskBW')
    logger.info('Got list of %s tasks' % len(tasks_list))

    for t in tasks_list:
        logger.info('Getting jobs in status defined or failed for task %s' % t)
        # Slicing straight to max_send_amount replaces the old
        # count-then-branch logic and the manual loop counter: the number
        # of jobs actually submitted is min(count, max_send_amount)
        # either way, and the ORM slice is evaluated lazily as LIMIT.
        jobs_list = Job.objects.all().filter(
            task=t).order_by('id')[:max_send_amount]
        logger.info('Got list of %s jobs' % len(jobs_list))

        for j in jobs_list:
            logger.info('Going to send job %s of %s task' %
                        (j.file, j.task.name))

            # uuid4() avoids forking an external `uuidgen` process and
            # works on any platform (the `commands` module is py2-only).
            umark = str(uuid.uuid4())
            datasetName = 'panda.destDB.%s' % umark
            destName = 'local'  # PanDA will not try to move output data, data will be placed by pilot (based on schedconfig)

            job = JobSpec()
            job.taskID = j.task.id
            job.jobDefinitionID = 0
            job.jobName = 'hello world'
            job.transformation = j.task.type  # payload (can be URL as well)
            job.destinationDBlock = datasetName
            job.destinationSE = destName
            job.currentPriority = 2000
            job.prodSourceLabel = 'test'
            job.computingSite = site
            job.attemptNr = 1
            job.maxAttempt = 5
            job.sourceSite = 'BW_COMPASS_MCORE'
            job.VO = 'local'

            # logs, and all files generated during execution will be placed in log (except output file)
            job.jobParameters = 'python /u/sciteam/petrosya/panda/hello.py'

            # Register the pilot log tarball as the job's only file.
            fileOLog = FileSpec()
            fileOLog.lfn = "log.job.log.tgz"
            fileOLog.destinationDBlock = job.destinationDBlock
            fileOLog.destinationSE = job.destinationSE
            fileOLog.dataset = job.destinationDBlock
            fileOLog.type = 'log'
            job.addFile(fileOLog)

            s, o = Client.submitJobs([job], srvID=aSrvID)
            logger.info(s)
            logger.info(o)
            # NOTE(review): the block below (updating Job rows with the
            # returned PandaID) is intentionally disabled for this test task.
            #             for x in o:
            #                 logger.info("PandaID=%s" % x[0])
            #                 today = datetime.datetime.today()
            #
            #                 if x[0] != 0 and x[0] != 'NULL':
            #                     j_update = Job.objects.get(id=j.id)
            #                     j_update.panda_id = x[0]
            #                     j_update.status = 'sent'
            #                     j_update.attempt = j_update.attempt + 1
            #                     j_update.date_updated = today
            #
            #                     try:
            #                         j_update.save()
            #                         logger.info('Job %s with PandaID %s updated at %s' % (j.id, x[0], today))
            #                     except IntegrityError as e:
            #                         logger.exception('Unique together catched, was not saved')
            #                     except DatabaseError as e:
            #                         logger.exception('Something went wrong while saving: %s' % e.message)
            #                 else:
            #                     logger.info('Job %s was not added to PanDA' % j.id)

    logger.info('done')
Code example #2
def main():
    """Submit COMPASS production jobs for task 'dvcs2016P09t2r13v1_mu+'.

    For every pending Job of the task, derives the per-chunk file names
    (mDST, histogram, RICH, event-dump, stdout/stderr), builds a JobSpec
    with one raw input file and the corresponding output/log FileSpecs,
    and submits it through Client.submitJobs. At most
    ``max_send_amount`` jobs are sent per task.
    """
    import uuid  # local import: replaces shelling out to `uuidgen`

    logger.info('Getting tasks with status send and running')
    #    tasks_list = Task.objects.all().filter(Q(status='send') | Q(status='running'))
    tasks_list = Task.objects.all().filter(name='dvcs2016P09t2r13v1_mu+')
    logger.info('Got list of %s tasks' % len(tasks_list))

    for t in tasks_list:
        logger.info('Getting jobs in status defined or failed for task %s' % t)
        jobs_list_count = Job.objects.all().filter(task=t).count()
        if jobs_list_count > 50:
            jobs_list = Job.objects.all().filter(
                task=t).order_by('id')[:max_send_amount]
        else:
            jobs_list = Job.objects.all().filter(
                task=t).order_by('id')[:jobs_list_count]
        logger.info('Got list of %s jobs' % len(jobs_list))

        i = 0
        for j in jobs_list:
            if i >= max_send_amount:
                break

            logger.info('Going to send job %s of %s task' %
                        (j.file, j.task.name))

            # uuid4() avoids forking an external `uuidgen` process and
            # works on any platform (the `commands` module is py2-only).
            umark = str(uuid.uuid4())
            datasetName = 'panda.destDB.%s' % umark
            destName = 'COMPASSPRODDISK'  # PanDA will not try to move output data, data will be placed by pilot (based on schedconfig)
            # Basename of the raw input file (everything after the last '/').
            TMPRAWFILE = j.file[j.file.rfind('/') + 1:]
            logger.info(TMPRAWFILE)
            TMPMDSTFILE = 'mDST-%(runNumber)s-%(runChunk)s-%(prodSlt)s-%(phastVer)s.root' % {
                'runNumber': j.run_number,
                'runChunk': j.chunk_number,
                'prodSlt': j.task.prodslt,
                'phastVer': j.task.phastver
            }
            logger.info(TMPMDSTFILE)
            TMPHISTFILE = '%(runNumber)s-%(runChunk)s-%(prodSlt)s.root' % {
                'runNumber': j.run_number,
                'runChunk': j.chunk_number,
                'prodSlt': j.task.prodslt
            }
            logger.info(TMPHISTFILE)
            TMPRICHFILE = 'gfile_%(runNumber)s-%(runChunk)s.gfile' % {
                'runNumber': j.run_number,
                'runChunk': j.chunk_number
            }
            logger.info(TMPRICHFILE)
            EVTDUMPFILE = 'evtdump%(prodSlt)s-%(runChunk)s-%(runNumber)s.raw' % {
                'prodSlt': j.task.prodslt,
                'runNumber': j.run_number,
                'runChunk': j.chunk_number
            }
            logger.info(EVTDUMPFILE)
            STDOUTFILE = '%(prodNameOnly)s.%(runNumber)s-%(runChunk)s-%(prodSlt)s.stdout' % {
                'prodNameOnly': j.task.soft,
                'runNumber': j.run_number,
                'runChunk': j.chunk_number,
                'prodSlt': j.task.prodslt
            }
            logger.info(STDOUTFILE)
            STDERRFILE = '%(prodNameOnly)s.%(runNumber)s-%(runChunk)s-%(prodSlt)s.stderr' % {
                'prodNameOnly': j.task.soft,
                'runNumber': j.run_number,
                'runChunk': j.chunk_number,
                'prodSlt': j.task.prodslt
            }
            logger.info(STDERRFILE)
            # The 6th path component of j.file encodes the data-taking
            # year — presumably a castor path layout; TODO confirm.
            try:
                file_year = j.file.split('/')[5]
                logger.info(file_year)
            except IndexError:
                # Catch only the expected failure (path too short); a bare
                # `except:` would also swallow SystemExit/KeyboardInterrupt.
                logger.exception('Error while splitting file to get year')
                sys.exit(1)

            ProdPathAndName = j.task.home + j.task.path + j.task.soft

            job = JobSpec()
            job.taskID = j.task.id
            job.jobDefinitionID = 0
            job.jobName = '%(prodName)s-%(fileYear)s--%(runNumber)s-%(runChunk)s-%(prodSlt)s-%(phastVer)s' % {
                'prodName': j.task.soft,
                'fileYear': file_year,
                'runNumber': j.run_number,
                'runChunk': j.chunk_number,
                'prodSlt': j.task.prodslt,
                'phastVer': j.task.phastver
            }
            job.transformation = j.task.type  # payload (can be URL as well)
            job.destinationDBlock = datasetName
            job.destinationSE = destName
            job.currentPriority = 2000
            job.prodSourceLabel = 'prod_test'
            job.computingSite = site
            job.attemptNr = j.attempt + 1
            job.maxAttempt = j.task.max_attempts
            # Retried jobs are chained to their previous PanDA attempt.
            if j.status == 'failed':
                job.parentID = j.panda_id
            head, tail = os.path.split(j.file)
            #            job.transferType = 'direct'
            job.sourceSite = 'CERN_COMPASS_PROD'

            # logs, and all files generated during execution will be placed in log (except output file)
            #job.jobParameters='source /afs/cern.ch/project/eos/installation/compass/etc/setup.sh;export EOS_MGM_URL=root://eoscompass.cern.ch;export PATH=/afs/cern.ch/project/eos/installation/compass/bin:$PATH;ppwd=$(pwd);echo $ppwd;export TMPMDSTFILE=%(TMPMDSTFILE)s;export TMPHISTFILE=%(TMPHISTFILE)s;export TMPRICHFILE=%(TMPRICHFILE)s;coralpath=%(ProdPathAndName)s/coral;echo $coralpath;cd -P $coralpath;export coralpathsetup=$coralpath"/setup.sh";echo $coralpathsetup;source $coralpathsetup;cd $ppwd;$CORAL/../phast/coral/coral.exe %(ProdPathAndName)s/template.opt;xrdcp -np $ppwd/%(TMPMDSTFILE)s xroot://eoscompass.cern.ch//eos/compass/%(prodName)s/mDST/%(TMPMDSTFILE)s;xrdcp -np $ppwd/%(TMPHISTFILE)s xroot://eoscompass.cern.ch//eos/compass/%(prodName)s/histos/%(TMPHISTFILE)s;metadataxml=$(ls metadata-*);echo $metadataxml;cp $metadataxml $metadataxml.PAYLOAD;' % {'TMPMDSTFILE': TMPMDSTFILE, 'TMPHISTFILE': TMPHISTFILE, 'TMPRICHFILE': TMPRICHFILE, 'input_file': input_file, 'ProdPathAndName': ProdPathAndName, 'prodName': prodName}
            job.jobParameters = 'export EOS_MGM_URL=root://eoscompass.cern.ch;ppwd=$(pwd);export COMPASS_SW_PREFIX=/eos/experiment/compass/;export COMPASS_SW_PATH=%(prodPath)s;export COMPASS_PROD_NAME=%(prodName)s;export TMPRAWFILE=%(TMPRAWFILE)s;export TMPMDSTFILE=%(TMPMDSTFILE)s;export TMPHISTFILE=%(TMPHISTFILE)s;export TMPRICHFILE=%(TMPRICHFILE)s;export prodSlt=%(prodSlt)s;export EVTDUMPFILE=%(EVTDUMPFILE)s;xrdcp -N -f root://castorpublic.cern.ch/%(input_file)s\?svcClass=compasscdr .;coralpath=%(ProdPathAndName)s/coral;cd -P $coralpath;export coralpathsetup=$coralpath"/setup.sh";source $coralpathsetup;cd $ppwd;$CORAL/../phast/coral/coral.exe %(ProdPathAndName)s/%(template)s;if [ ! -s testevtdump.raw ]; then echo "PanDA message: the file is empty">testevtdump.raw; fi;cp payload_stderr.txt payload_stderr.out;cp payload_stdout.txt payload_stdout.out;rm %(tail)s' % {
                'TMPRAWFILE': TMPRAWFILE,
                'TMPMDSTFILE': TMPMDSTFILE,
                'TMPHISTFILE': TMPHISTFILE,
                'TMPRICHFILE': TMPRICHFILE,
                'input_file': j.file,
                'ProdPathAndName': ProdPathAndName,
                'prodPath': j.task.path,
                'prodName': j.task.soft,
                'template': j.task.template,
                'tail': tail,
                'prodSlt': j.task.prodslt,
                'EVTDUMPFILE': EVTDUMPFILE,
                'STDOUTFILE': STDOUTFILE,
                'STDERRFILE': STDERRFILE
            }

            # Raw data file staged in from castor by the payload above.
            fileIRaw = FileSpec()
            fileIRaw.lfn = "%s" % (j.file)
            fileIRaw.GUID = '5874a461-61d3-4543-8f34-6fd7a4624e78'
            fileIRaw.fsize = 1073753368
            fileIRaw.checksum = '671608be'
            fileIRaw.destinationDBlock = job.destinationDBlock
            fileIRaw.destinationSE = job.destinationSE
            fileIRaw.dataset = job.destinationDBlock
            fileIRaw.type = 'input'
            job.addFile(fileIRaw)

            fileOstdout = FileSpec()
            fileOstdout.lfn = "payload_stdout.txt"
            fileOstdout.destinationDBlock = job.destinationDBlock
            fileOstdout.destinationSE = job.destinationSE
            fileOstdout.dataset = job.destinationDBlock
            fileOstdout.type = 'output'
            job.addFile(fileOstdout)

            fileOstderr = FileSpec()
            fileOstderr.lfn = "payload_stderr.txt"
            fileOstderr.destinationDBlock = job.destinationDBlock
            fileOstderr.destinationSE = job.destinationSE
            fileOstderr.dataset = job.destinationDBlock
            fileOstderr.type = 'output'
            job.addFile(fileOstderr)

            fileOLog = FileSpec()
            fileOLog.lfn = "%(prodName)s-%(runNumber)s-%(runChunk)s-%(prodSlt)s-%(phastVer)s.job.log.tgz" % {
                'prodName': j.task.soft,
                'runNumber': j.run_number,
                'runChunk': j.chunk_number,
                'prodSlt': j.task.prodslt,
                'phastVer': j.task.phastver
            }
            fileOLog.destinationDBlock = job.destinationDBlock
            fileOLog.destinationSE = job.destinationSE
            fileOLog.dataset = job.destinationDBlock
            fileOLog.type = 'log'
            job.addFile(fileOLog)

            fileOmDST = FileSpec()
            fileOmDST.lfn = "%s" % (TMPMDSTFILE)
            fileOmDST.destinationDBlock = job.destinationDBlock
            fileOmDST.destinationSE = job.destinationSE
            fileOmDST.dataset = job.destinationDBlock
            fileOmDST.type = 'output'
            job.addFile(fileOmDST)

            fileOTrafdic = FileSpec()
            fileOTrafdic.lfn = "%s" % (TMPHISTFILE)
            fileOTrafdic.destinationDBlock = job.destinationDBlock
            fileOTrafdic.destinationSE = job.destinationSE
            fileOTrafdic.dataset = job.destinationDBlock
            fileOTrafdic.type = 'output'
            job.addFile(fileOTrafdic)

            fileOtestevtdump = FileSpec()
            fileOtestevtdump.lfn = "testevtdump.raw"
            fileOtestevtdump.destinationDBlock = job.destinationDBlock
            fileOtestevtdump.destinationSE = job.destinationSE
            fileOtestevtdump.dataset = job.destinationDBlock
            fileOtestevtdump.type = 'output'
            job.addFile(fileOtestevtdump)

            s, o = Client.submitJobs([job], srvID=aSrvID)
            logger.info(s)
            logger.info(o)
            # NOTE(review): the block below (updating Job rows with the
            # returned PandaID) is intentionally disabled for this test run.
            #             for x in o:
            #                 logger.info("PandaID=%s" % x[0])
            #                 today = datetime.datetime.today()
            #
            #                 if x[0] != 0 and x[0] != 'NULL':
            #                     j_update = Job.objects.get(id=j.id)
            #                     j_update.panda_id = x[0]
            #                     j_update.status = 'sent'
            #                     j_update.attempt = j_update.attempt + 1
            #                     j_update.date_updated = today
            #
            #                     try:
            #                         j_update.save()
            #                         logger.info('Job %s with PandaID %s updated at %s' % (j.id, x[0], today))
            #                     except IntegrityError as e:
            #                         logger.exception('Unique together catched, was not saved')
            #                     except DatabaseError as e:
            #                         logger.exception('Something went wrong while saving: %s' % e.message)
            #                 else:
            #                     logger.info('Job %s was not added to PanDA' % j.id)
            i += 1

    logger.info('done')
Code example #3 — File: Closer.py, Project: EntityOfPlague/panda-server
 def run(self):
     """Finalize the destination datasets of self.job.

     For every destinationDBlock of the job: skip tid/HC datasets, look the
     dataset up (cache first, then taskBuffer), decide a final status
     ('closed', 'tobemerged', 'tobeclosed', 'doing') based on job source
     label / JEDI ownership / merge options, and update the DB once all
     files are accounted for.  Afterwards it optionally creates PandaDDM
     transfer jobs, finalizes pending user jobs, triggers JEDI merging and
     starts the Notifier.  Errors are logged, never raised to the caller.

     NOTE(review): Python 2 code (`has_key`, `commands` module) — kept as-is.
     """
     try:
         _logger.debug('%s Start %s' % (self.pandaID,self.job.jobStatus))
         flagComplete    = True   # stays True only if every dataset finalized cleanly
         ddmJobs         = []     # PandaDDM transfer jobs created below
         topUserDsList   = []     # top-level/parent datasets already handled (avoid repeats)
         usingMerger     = False        
         disableNotifier = False
         firstIndvDS     = True   # only the first individual DS gets 'tobemerged'
         finalStatusDS   = []     # datasets whose status update succeeded
         for destinationDBlock in self.destinationDBlocks:
             dsList = []
             _logger.debug('%s start %s' % (self.pandaID,destinationDBlock))
             # ignore tid datasets
             if re.search('_tid[\d_]+$',destinationDBlock):
                 _logger.debug('%s skip %s' % (self.pandaID,destinationDBlock))                
                 continue
             # ignore HC datasets
             if re.search('^hc_test\.',destinationDBlock) != None or re.search('^user\.gangarbt\.',destinationDBlock) != None:
                 if re.search('_sub\d+$',destinationDBlock) == None and re.search('\.lib$',destinationDBlock) == None:
                     _logger.debug('%s skip HC %s' % (self.pandaID,destinationDBlock))                
                     continue
             # query dataset (per-instance cache first, then DB)
             if self.datasetMap.has_key(destinationDBlock):
                 dataset = self.datasetMap[destinationDBlock]
             else:
                 dataset = self.taskBuffer.queryDatasetWithMap({'name':destinationDBlock})
             if dataset == None:
                 _logger.error('%s Not found : %s' % (self.pandaID,destinationDBlock))
                 flagComplete = False
                 continue
             # skip tobedeleted/tobeclosed 
             if dataset.status in ['cleanup','tobeclosed','completed']:
                 _logger.debug('%s skip %s due to %s' % (self.pandaID,destinationDBlock,dataset.status))
                 continue
             dsList.append(dataset)
             # sort
             dsList.sort()
             # count number of completed files; notFinish is the number of
             # files still in 'unknown' status (negative means DB error)
             notFinish = self.taskBuffer.countFilesWithMap({'destinationDBlock':destinationDBlock,
                                                            'status':'unknown'})
             if notFinish < 0:
                 _logger.error('%s Invalid DB return : %s' % (self.pandaID,notFinish))
                 flagComplete = False                
                 continue
             # check if completed
             _logger.debug('%s notFinish:%s' % (self.pandaID,notFinish))
             if self.job.destinationSE == 'local' and self.job.prodSourceLabel in ['user','panda']:
                 # close non-DQ2 destinationDBlock immediately
                 finalStatus = 'closed'
             elif self.job.lockedby == 'jedi' and self.isTopLevelDS(destinationDBlock):
                 # set it closed in order not to trigger DDM cleanup. It will be closed by JEDI
                 finalStatus = 'closed'
             elif self.job.prodSourceLabel in ['user'] and "--mergeOutput" in self.job.jobParameters \
                      and self.job.processingType != 'usermerge':
                 # merge output files
                 if firstIndvDS:
                     # set 'tobemerged' to only the first dataset to avoid triggering many Mergers for --individualOutDS
                     finalStatus = 'tobemerged'
                     firstIndvDS = False
                 else:
                     finalStatus = 'tobeclosed'
                 # set merging to top dataset
                 usingMerger = True
                 # disable Notifier
                 disableNotifier = True
             elif self.job.produceUnMerge():
                 finalStatus = 'doing'
             else:
                 # set status to 'tobeclosed' to trigger DQ2 closing
                 finalStatus = 'tobeclosed'
             if notFinish==0: 
                 # all files accounted for: finalize this dataset
                 _logger.debug('%s set %s to dataset : %s' % (self.pandaID,finalStatus,destinationDBlock))
                 # set status
                 dataset.status = finalStatus
                 # update dataset in DB; the criteria guard against racing
                 # threads (already-final or locked rows are not touched)
                 retT = self.taskBuffer.updateDatasets(dsList,withLock=True,withCriteria="status<>:crStatus AND status<>:lockStatus ",
                                                       criteriaMap={':crStatus':finalStatus,':lockStatus':'locked'})
                 if len(retT) > 0 and retT[0]==1:
                     finalStatusDS += dsList
                     # close user datasets
                     if self.job.prodSourceLabel in ['user'] and self.job.destinationDBlock.endswith('/') \
                            and (dataset.name.startswith('user') or dataset.name.startswith('group')):
                         # get top-level user dataset 
                         topUserDsName = re.sub('_sub\d+$','',dataset.name)
                         # update if it is the first attempt
                         if topUserDsName != dataset.name and not topUserDsName in topUserDsList and self.job.lockedby != 'jedi':
                             topUserDs = self.taskBuffer.queryDatasetWithMap({'name':topUserDsName})
                             if topUserDs != None:
                                 # check status
                                 if topUserDs.status in ['completed','cleanup','tobeclosed',
                                                         'tobemerged','merging']:
                                     _logger.debug('%s skip %s due to status=%s' % (self.pandaID,topUserDsName,topUserDs.status))
                                 else:
                                     # set status
                                     if self.job.processingType.startswith('gangarobot') or \
                                            self.job.processingType.startswith('hammercloud'):
                                         # not trigger freezing for HC datasets so that files can be appended
                                         topUserDs.status = 'completed'
                                     elif not usingMerger:
                                         topUserDs.status = finalStatus
                                     else:
                                         topUserDs.status = 'merging'
                                     # append to avoid repetition
                                     topUserDsList.append(topUserDsName)
                                     # update DB
                                     retTopT = self.taskBuffer.updateDatasets([topUserDs],withLock=True,withCriteria="status<>:crStatus",
                                                                              criteriaMap={':crStatus':topUserDs.status})
                                     if len(retTopT) > 0 and retTopT[0]==1:
                                         _logger.debug('%s set %s to top dataset : %s' % (self.pandaID,topUserDs.status,topUserDsName))
                                     else:
                                         _logger.debug('%s failed to update top dataset : %s' % (self.pandaID,topUserDsName))
                         # get parent dataset for merge job; the parent name is
                         # carried in the job parameters as --parentDS
                         if self.job.processingType == 'usermerge':
                             tmpMatch = re.search('--parentDS ([^ \'\"]+)',self.job.jobParameters)
                             if tmpMatch == None:
                                 _logger.error('%s failed to extract parentDS' % self.pandaID)
                             else:
                                 unmergedDsName = tmpMatch.group(1)
                                 # update if it is the first attempt
                                 if not unmergedDsName in topUserDsList:
                                     unmergedDs = self.taskBuffer.queryDatasetWithMap({'name':unmergedDsName})
                                     if unmergedDs == None:
                                         _logger.error('%s failed to get parentDS=%s from DB' % (self.pandaID,unmergedDsName))
                                     else:
                                         # check status
                                         if unmergedDs.status in ['completed','cleanup','tobeclosed']:
                                             _logger.debug('%s skip %s due to status=%s' % (self.pandaID,unmergedDsName,unmergedDs.status))
                                         else:
                                             # set status
                                             unmergedDs.status = finalStatus
                                             # append to avoid repetition
                                             topUserDsList.append(unmergedDsName)
                                             # update DB
                                             retTopT = self.taskBuffer.updateDatasets([unmergedDs],withLock=True,withCriteria="status<>:crStatus",
                                                                                      criteriaMap={':crStatus':unmergedDs.status})
                                             if len(retTopT) > 0 and retTopT[0]==1:
                                                 _logger.debug('%s set %s to parent dataset : %s' % (self.pandaID,unmergedDs.status,unmergedDsName))
                                             else:
                                                 _logger.debug('%s failed to update parent dataset : %s' % (self.pandaID,unmergedDsName))
                     # PandaDDM path: build a dq2_cr mover job (or fire the
                     # completion callback directly when src == dst)
                     if self.pandaDDM and self.job.prodSourceLabel=='managed':
                         # instantiate SiteMapper
                         if self.siteMapper == None:
                             self.siteMapper = SiteMapper(self.taskBuffer)
                         # get file list for PandaDDM
                         retList = self.taskBuffer.queryFilesWithMap({'destinationDBlock':destinationDBlock})
                         lfnsStr = ''
                         guidStr = ''
                         for tmpFile in retList:
                             if tmpFile.type in ['log','output']:
                                 lfnsStr += '%s,' % tmpFile.lfn
                                 guidStr += '%s,' % tmpFile.GUID
                         if lfnsStr != '':
                             # strip trailing commas
                             guidStr = guidStr[:-1]
                             lfnsStr = lfnsStr[:-1]
                             # create a DDM job
                             ddmjob = JobSpec()
                             ddmjob.jobDefinitionID   = int(time.time()) % 10000
                             ddmjob.jobName           = "%s" % commands.getoutput('uuidgen')
                             ddmjob.transformation    = 'http://pandaserver.cern.ch:25080/trf/mover/run_dq2_cr'
                             ddmjob.destinationDBlock = 'testpanda.%s' % ddmjob.jobName
                             ddmjob.computingSite     = "BNL_ATLAS_DDM"
                             ddmjob.destinationSE     = ddmjob.computingSite
                             ddmjob.currentPriority   = 200000
                             ddmjob.prodSourceLabel   = 'ddm'
                             ddmjob.transferType      = 'sub'
                             # append log file
                             fileOL = FileSpec()
                             fileOL.lfn = "%s.job.log.tgz" % ddmjob.jobName
                             fileOL.destinationDBlock = ddmjob.destinationDBlock
                             fileOL.destinationSE     = ddmjob.destinationSE
                             fileOL.dataset           = ddmjob.destinationDBlock
                             fileOL.type = 'log'
                             ddmjob.addFile(fileOL)
                             # make arguments
                             dstDQ2ID = 'BNLPANDA'
                             srcDQ2ID = self.siteMapper.getSite(self.job.computingSite).ddm
                             callBackURL = 'https://%s:%s/server/panda/datasetCompleted?vuid=%s&site=%s' % \
                                           (panda_config.pserverhost,panda_config.pserverport,
                                            dataset.vuid,dstDQ2ID)
                             _logger.debug(callBackURL)
                             # set src/dest
                             ddmjob.sourceSite      = srcDQ2ID
                             ddmjob.destinationSite = dstDQ2ID
                             # if src==dst, send callback without ddm job
                             if dstDQ2ID == srcDQ2ID:
                                 comout = commands.getoutput('curl -k %s' % callBackURL)
                                 _logger.debug(comout)
                             else:
                                 # run dq2_cr
                                 callBackURL = urllib.quote(callBackURL)
                                 # get destination dir
                                 destDir = brokerage.broker_util._getDefaultStorage(self.siteMapper.getSite(self.job.computingSite).dq2url)
                                 argStr = "-s %s -r %s --guids %s --lfns %s --callBack %s -d %s/%s %s" % \
                                          (srcDQ2ID,dstDQ2ID,guidStr,lfnsStr,callBackURL,destDir,
                                           destinationDBlock,destinationDBlock)
                                 # set job parameters
                                 ddmjob.jobParameters = argStr
                                 _logger.debug('%s pdq2_cr %s' % (self.pandaID,ddmjob.jobParameters))
                                 ddmJobs.append(ddmjob)
                     # start Activator
                     if re.search('_sub\d+$',dataset.name) == None:
                         if self.job.prodSourceLabel=='panda' and self.job.processingType in ['merge','unmerge']:
                             # don't trigger Activator for merge jobs
                             pass
                         else:
                             if self.job.jobStatus == 'finished':
                                 aThr = Activator(self.taskBuffer,dataset)
                                 aThr.start()
                                 aThr.join()
                 else:
                     # unset flag since another thread already updated 
                     #flagComplete = False
                     pass
             else:
                 # files still pending: update dataset in DB without finalizing
                 self.taskBuffer.updateDatasets(dsList,withLock=True,withCriteria="status<>:crStatus AND status<>:lockStatus ",
                                                criteriaMap={':crStatus':finalStatus,':lockStatus':'locked'})
                 # unset flag
                 flagComplete = False
             # end
             _logger.debug('%s end %s' % (self.pandaID,destinationDBlock))
         # start DDM jobs
         if ddmJobs != []:
             self.taskBuffer.storeJobs(ddmJobs,self.job.prodUserID,joinThr=True)
         # change pending jobs to failed
         finalizedFlag = True
         if flagComplete and self.job.prodSourceLabel=='user':
             _logger.debug('%s finalize %s %s' % (self.pandaID,self.job.prodUserName,self.job.jobDefinitionID))
             finalizedFlag = self.taskBuffer.finalizePendingJobs(self.job.prodUserName,self.job.jobDefinitionID,waitLock=True)
             _logger.debug('%s finalized with %s' % (self.pandaID,finalizedFlag))
         # update unmerged datasets in JEDI to trigger merging
         if flagComplete and self.job.produceUnMerge() and finalStatusDS != []:
             if finalizedFlag:
                 self.taskBuffer.updateUnmergedDatasets(self.job,finalStatusDS)
         # start notifier (user jobs on success, panda jobs on failure;
         # JEDI-owned jobs and transferring jobs never notify here)
         _logger.debug('%s source:%s complete:%s' % (self.pandaID,self.job.prodSourceLabel,flagComplete))
         if (self.job.jobStatus != 'transferring') and ((flagComplete and self.job.prodSourceLabel=='user') or \
            (self.job.jobStatus=='failed' and self.job.prodSourceLabel=='panda')) and \
            self.job.lockedby != 'jedi':
             # don't send email for merge jobs
             if (not disableNotifier) and not self.job.processingType in ['merge','unmerge']:
                 useNotifier = True
                 summaryInfo = {}
                 # check all jobDefIDs in jobsetID
                 if not self.job.jobsetID in [0,None,'NULL']:
                     useNotifier,summaryInfo = self.taskBuffer.checkDatasetStatusForNotifier(self.job.jobsetID,self.job.jobDefinitionID,
                                                                                             self.job.prodUserName)
                     _logger.debug('%s useNotifier:%s' % (self.pandaID,useNotifier))
                 if useNotifier:
                     _logger.debug('%s start Notifier' % self.pandaID)
                     nThr = Notifier.Notifier(self.taskBuffer,self.job,self.destinationDBlocks,summaryInfo)
                     nThr.run()
                     _logger.debug('%s end Notifier' % self.pandaID)                    
         _logger.debug('%s End' % self.pandaID)
     except:
         # NOTE(review): bare except deliberately kept — this runner must
         # never propagate; it logs the error type and value only.
         errType,errValue = sys.exc_info()[:2]
         _logger.error("%s %s" % (errType,errValue))