Example 1
    def __call__(self, pnode):
        if pnode.type != "CMSSW":
            return

        #Can't take datasetInfo from getOutputDatasetsWithPSet as that changes
        #the AppFamily to the processing configuration, hence file insertions
        #fail due to a missing algo. WARNING: relies on identical dataset order
        glblTags = [
            x['Conditions']
            for x in getOutputDatasetsWithPSet(pnode, sorted=True)
        ]
        for dataset, globalTag in zip(getOutputDatasets(pnode, sorted=True),
                                      glblTags):

            dataset['Conditions'] = globalTag

            primary = DBSWriterObjects.createPrimaryDataset(
                dataset, self.apiRef)

            mergeAlgo = DBSWriterObjects.createMergeAlgorithm(
                dataset, self.apiRef)
            DBSWriterObjects.createProcessedDataset(primary, mergeAlgo,
                                                    dataset, self.apiRef)

            inputDataset = dataset.get('ParentDataset', None)
            if inputDataset is None:
                continue
            processedDataset = dataset["ProcessedDataset"]
            self.apiRef.insertMergedDataset(inputDataset, processedDataset,
                                            mergeAlgo)

            # algorithm used when process jobs produce merged files directly
            # doesn't contain PSet content - taken from processing (same hash)
            mergeDirectAlgo = DBSWriterObjects.createAlgorithm(
                dataset, None, self.apiRef)
            self.apiRef.insertAlgoInPD(makeDSName2(dataset), mergeDirectAlgo)

            logging.debug("ProcessedDataset: %s" % processedDataset)
            logging.debug("inputDataset: %s" % inputDataset)
            logging.debug("mergeAlgo: %s" % mergeAlgo)
        return
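The zip() above silently depends on getOutputDatasets and getOutputDatasetsWithPSet returning the datasets in the same sorted order, which is what the WARNING in the comment refers to. Below is a minimal, self-contained sketch of that pairing; plain dicts and the _mock_* helpers are hypothetical stand-ins for the real node and dataset objects.

def _mock_output_datasets():
    # stand-in for getOutputDatasets(pnode, sorted=True)
    return [{'Name': '/Prim/Proc-A/RECO'}, {'Name': '/Prim/Proc-B/AOD'}]

def _mock_output_datasets_with_pset():
    # stand-in for getOutputDatasetsWithPSet(pnode, sorted=True); it carries
    # the Conditions (global tag) but mutates AppFamily, so it is only read here
    return [{'Name': '/Prim/Proc-A/RECO', 'Conditions': 'GT_A'},
            {'Name': '/Prim/Proc-B/AOD', 'Conditions': 'GT_B'}]

globalTags = [d['Conditions'] for d in _mock_output_datasets_with_pset()]
for dataset, globalTag in zip(_mock_output_datasets(), globalTags):
    dataset['Conditions'] = globalTag
    print("%s -> %s" % (dataset['Name'], dataset['Conditions']))

# If the two helpers sorted differently, the global tags would silently be
# attached to the wrong datasets, which is exactly the risk the WARNING names.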
Example 2
    def __call__(self, pnode):
        if pnode.type != "CMSSW":
            return

        #Can't take datasetInfo from getOutputDatasetsWithPSet as that changes
        #the AppFamily to the processing configuration, hence file insertions
        #fail due to a missing algo. WARNING: relies on identical dataset order 
        glblTags = [x['Conditions'] for x in getOutputDatasetsWithPSet(pnode,
                                                                sorted = True)]
        for dataset, globalTag in zip(getOutputDatasets(pnode, sorted = True),
                                      glblTags):
            
            dataset['Conditions'] = globalTag
            
            primary = DBSWriterObjects.createPrimaryDataset(
                dataset, self.apiRef)
            
            mergeAlgo = DBSWriterObjects.createMergeAlgorithm(dataset,
                                                              self.apiRef)
            DBSWriterObjects.createProcessedDataset(
                primary, mergeAlgo, dataset, self.apiRef)
            
            inputDataset = dataset.get('ParentDataset', None)
            if inputDataset is None:
                continue
            processedDataset = dataset["ProcessedDataset"]
            self.apiRef.insertMergedDataset(
                inputDataset, processedDataset, mergeAlgo)
            
            # algorithm used when process jobs produce merged files directly
            # doesn't contain PSet content - taken from processing (same hash)
            mergeDirectAlgo = DBSWriterObjects.createAlgorithm(
                dataset, None, self.apiRef)
            self.apiRef.insertAlgoInPD(makeDSName2(dataset), mergeDirectAlgo)
            
            logging.debug("ProcessedDataset: %s"%processedDataset)
            logging.debug("inputDataset: %s"%inputDataset)
            logging.debug("mergeAlgo: %s"%mergeAlgo)
        return
Example 3
class _CreateDatasetOperator:
    """
    _CreateDatasetOperator_

    Operator for creating datasets from a workflow node

    """
    def __init__(self, apiRef, workflow):
        self.apiRef = apiRef
        self.workflow = workflow

    def __call__(self, pnode):
        if pnode.type != "CMSSW":
            return
        datasets = getOutputDatasetsWithPSet(pnode, sorted=True)
        cfgMeta = None
        try:
            cfgInt = pnode.cfgInterface
            cfgMeta = cfgInt.configMetadata
            cfgMeta['Type'] = self.workflow.parameters["RequestCategory"]
        except Exception, ex:
            msg = "Unable to Extract cfg data from workflow"
            msg += str(ex)
            logging.error(msg)
            return

        for dataset in datasets:
            primary = DBSWriterObjects.createPrimaryDataset(
                dataset, self.apiRef)
            algo = DBSWriterObjects.createAlgorithm(dataset, cfgMeta,
                                                    self.apiRef)

            processed = DBSWriterObjects.createProcessedDataset(
                primary, algo, dataset, self.apiRef)

        return
Example 4
datasetStrmr = jobReportFile.newDataset()
datasetStrmr['PrimaryDataset'] = primaryDataset
datasetStrmr['PrimaryDatasetType'] = 'data'
datasetStrmr['ProcessedDataset'] = processedDataset
datasetStrmr['DataTier'] = dataTier

jobReportFile['TotalEvents'] = nEvents
jobReportFile['SEName'] = "srm.cern.ch"

##jobReport.write('FrameworkJobReport.xml')

localDbsUrl = "https://cmst0dbs.cern.ch:8443/cms_dbs_prod_tier0_writer/servlet/DBSServlet"

dbswriter = DBSWriter(localDbsUrl, level='ERROR')

primary = DBSWriterObjects.createPrimaryDataset(datasetStrmr, dbswriter.dbs)

datasetStrmr['ApplicationName'] = appName
datasetStrmr['ApplicationVersion'] = appVersion
datasetStrmr['ApplicationFamily'] = 'DAQ'
datasetStrmr['PSetHash'] = 'NA'
datasetStrmr['PSetContent'] = 'NA'

algo = DBSWriterObjects.createAlgorithm(datasetStrmr, None, dbswriter.dbs)

processed = DBSWriterObjects.createProcessedDataset(primary, algo, datasetStrmr, dbswriter.dbs)

try:
    blocks = dbswriter.insertFiles(jobReport, insertDetectorData = True)
except DBSWriterError, ex:
    print "%s"%ex
Example 5
    def manageFileBlock(self, fileblockName, maxFiles = 100, maxSize = None):
        """
        _manageFileBlock_

        Check whether the fileblock with the provided name
        is closeable based on its number of files or total size.

        If the block equals or exceeds either the maxFiles or maxSize
        parameter, close the block and return True; otherwise do nothing and
        return False.

        """
        #  //
        # // Check that the block exists, and is open before we close it
        #//
        blockInstance = self.dbs.listBlocks(block_name=fileblockName)
        if len(blockInstance) > 1:
            msg = "Multiple Blocks matching name: %s\n" % fileblockName
            msg += "Unable to manage file block..."
            raise DBSWriterError(msg)

        if len(blockInstance) == 0:
            msg = "Block name %s not found\n" % fileblockName
            msg += "Cant manage a non-existent fileblock"
            raise DBSWriterError(msg)
        blockInstance = blockInstance[0]
        openForWriting = blockInstance.get('OpenForWriting', '1')
        if openForWriting == "0":
            msg = "Block %s already closed" % fileblockName
            logging.warning(msg)
            return False

        
        
        #  //
        # // We have an open block, sum number of files and file sizes
        #//
        
        fileCount = float(blockInstance.get('NumberOfFiles', 0))
        totalSize = float(blockInstance.get('BlockSize', 0))
        
        msg = "Fileblock: %s\n ==> Size: %s Files: %s\n" % (
            fileblockName, totalSize, fileCount)
        logging.warning(msg)

        #  //
        # // Test close block conditions
        #//
        closeBlock = False
        if fileCount >= maxFiles:
            closeBlock = True
            msg = "Closing Block Based on files: %s" % fileblockName
            logging.debug(msg)
            
        if maxSize is not None:
            if totalSize >= maxSize:
                closeBlock = True
                msg = "Closing Block Based on size: %s" % fileblockName
                logging.debug(msg)
                

        if closeBlock:
            #  //
            # // Close the block
            #//
            self.dbs.closeBlock(
                DBSWriterObjects.createDBSFileBlock(fileblockName)
                )
        return closeBlock
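For reference, the close decision above can be isolated from the DBS calls. The sketch below reproduces the same threshold logic against a plain dict that mirrors the block fields read by manageFileBlock (NumberOfFiles, BlockSize, OpenForWriting); it is an illustrative stand-alone helper, not part of the original class.

def should_close_block(blockInfo, maxFiles=100, maxSize=None):
    # Return True if the block meets the file-count or size threshold.
    if blockInfo.get('OpenForWriting', '1') == "0":
        return False  # already closed, nothing to do
    fileCount = float(blockInfo.get('NumberOfFiles', 0))
    totalSize = float(blockInfo.get('BlockSize', 0))
    if fileCount >= maxFiles:
        return True
    if maxSize is not None and totalSize >= maxSize:
        return True
    return False

print(should_close_block({'NumberOfFiles': 120, 'BlockSize': 0}))                    # True (file count)
print(should_close_block({'NumberOfFiles': 10, 'BlockSize': 5e12}, maxSize=1e12))    # True (size)
print(should_close_block({'NumberOfFiles': 10, 'BlockSize': 0}))                     # False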
Example 6
    def insertFiles(self, fwkJobRep, insertDetectorData = False):
        """
        _insertFiles_

        Process the files in the FwkJobReport instance and insert
        them into the associated datasets

        A list of affected fileblock names is returned for both merged
        and unmerged fileblocks; only the merged blocks need to be managed.
        The list is not populated for processing jobs, since the processing
        job blocks do not need to be managed.

        """

        insertLists = {}
        orderedHashes = []
        affectedBlocks = set()

        if len(fwkJobRep.files) <= 0:
            msg = "Error in DBSWriter.insertFiles\n"
            msg += "No files found in FrameWorkJobReport for:\n"
            msg += "==> JobSpecId: %s" % fwkJobRep.jobSpecId
            msg += " Workflow: %s" % fwkJobRep.workflowSpecId
            raise DBSWriterError(msg)


        for outFile in fwkJobRep.sortFiles():
            #  //
            # // Convert each file into a DBS File object
            #//
            seName = None
            if outFile.has_key("SEName"):
                if outFile['SEName']:
                    seName = outFile['SEName']
                    logging.debug("SEname associated to file is: %s" % seName)
## remove the fallback to site se-name if no SE is associated to File
## because it's likely that there is some stage out problem if there
## is no SEName associated to the file.
#            if not seName:
#                if fwkJobRep.siteDetails.has_key("se-name"):
#                   seName = fwkJobRep.siteDetails['se-name']
#                   seName = str(seName)
#                   logging.debug("site SEname: %s"%seName) 
            if not seName:
                msg = "Error in DBSWriter.insertFiles\n"
                msg += "No SEname associated to files in FrameWorkJobReport for "
#                msg += "No SEname found in FrameWorkJobReport for "
                msg += "==> JobSpecId: %s"%fwkJobRep.jobSpecId
                msg += " Workflow: %s"%fwkJobRep.workflowSpecId
                raise DBSWriterError(msg)
            try:
                if ( insertDetectorData ):
                    dbsFiles = DBSWriterObjects.createDBSFiles(outFile,
                                                               fwkJobRep.jobType,
                                                               self.dbs)
                else:
                    dbsFiles = DBSWriterObjects.createDBSFiles(outFile,
                                                               fwkJobRep.jobType)
            except DbsException, ex:
                msg = "Error in DBSWriter.insertFiles:\n"
                msg += "Error creating DbsFile instances for file:\n"
                msg += "%s\n" % outFile['LFN']
                msg += "%s\n" % formatEx(ex)
                raise DBSWriterError(msg)

            if len(dbsFiles) <= 0:
                msg = "No DbsFile instances created. Not enough info in the FrameWorkJobReport for "
                msg += "==> JobSpecId: %s" % fwkJobRep.jobSpecId
                msg += " Workflow: %s" % fwkJobRep.workflowSpecId
                raise DBSWriterError(msg)

            for f in dbsFiles:
                datasetName = makeDBSDSName(f)
                hashName = "%s-%s" % (seName, datasetName)
                
                if not insertLists.has_key(hashName):
                    insertLists[hashName] = _InsertFileList(seName,
                                                            datasetName)
                insertLists[hashName].append(f)
                
                if not orderedHashes.count(hashName):
                    orderedHashes.append(hashName)
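The grouping step at the end of insertFiles keys each file list by "<SEName>-<datasetName>" and keeps orderedHashes in first-seen order, so every (storage element, dataset) pair is later flushed into its own fileblock exactly once. Below is a self-contained sketch of that bookkeeping; plain dicts and lists stand in for the DBS file objects and _InsertFileList, and the dataset path string replaces makeDBSDSName for illustration.

files = [
    {'LFN': '/store/a.root', 'SEName': 'se1.example.org', 'Dataset': '/P/X/RECO'},
    {'LFN': '/store/b.root', 'SEName': 'se1.example.org', 'Dataset': '/P/Y/AOD'},
    {'LFN': '/store/c.root', 'SEName': 'se1.example.org', 'Dataset': '/P/X/RECO'},
]

insertLists = {}
orderedHashes = []
for f in files:
    hashName = "%s-%s" % (f['SEName'], f['Dataset'])
    if hashName not in insertLists:
        insertLists[hashName] = []
    insertLists[hashName].append(f)
    if hashName not in orderedHashes:
        orderedHashes.append(hashName)

for h in orderedHashes:
    print("%s: %d file(s)" % (h, len(insertLists[h])))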
Example 7
    def insertFilesForDBSBuffer(self, files, procDataset, algos, jobType = "NotMerge", insertDetectorData = False):
        """
        _insertFiles_

        Insert the given list of files into DBS for the specified processed dataset.
        """
        # TODO: What's the purpose of insertDetectorData?

        if len(files) < 1:
            return
        affectedBlocks = set()
        insertFiles =  []
        addedRuns=[]
        seName = None
        
        #Get the algos in insertable form
        ialgos = [DBSWriterObjects.createAlgorithmForInsert(dict(algo)) for algo in algos]
       
        for outFile in files:
            #  //
            # // Convert each file into a DBS File object
            #//
            lumiList = []

            # Something similar should be the real deal when multiple runs/lumis could be returned from a WMBS file

            for runlumiinfo in outFile.getRuns():
                lrun=long(runlumiinfo.run)
                run = DbsRun(
                    RunNumber = lrun,
                    NumberOfEvents = 0,
                    NumberOfLumiSections = 0,
                    TotalLuminosity = 0,
                    StoreNumber = 0,
                    StartOfRun = 0,
                    EndOfRun = 0,
                    )
                # Only added if not added by another file in this loop; why waste a call to DBS
                if lrun not in addedRuns:
                    self.dbs.insertRun(run)
                    addedRuns.append(lrun)  # save it so we do not try to add it again to DBS
                    logging.debug("run %s added to DBS" % str(lrun))
                for alsn in runlumiinfo:
                    lumi = DbsLumiSection(
                        LumiSectionNumber = long(alsn),
                        StartEventNumber = 0,
                        EndEventNumber = 0,
                        LumiStartTime = 0,
                        LumiEndTime = 0,
                        RunNumber = lrun,
                    )
                    lumiList.append(lumi)

            logging.debug("lumi list created for the file")

            dbsfile = DbsFile(
                              Checksum = str(outFile['cksum']),
                              NumberOfEvents = outFile['events'],
                              LogicalFileName = outFile['lfn'],
                              FileSize = int(outFile['size']),
                              Status = "VALID",
                              ValidationStatus = 'VALID',
                              FileType = 'EDM',
                              Dataset = procDataset,
                              TierList = DBSWriterObjects.makeTierList(procDataset['Path'].split('/')[3]),
                              AlgoList = ialgos,
                              LumiList = lumiList,
                              ParentList = outFile.getParentLFNs(),
                              #BranchHash = outFile['BranchHash'],
                            )
            # This check comes from ProdAgent, not sure if it's required
            if len(outFile["locations"]) > 0:
                seName = list(outFile["locations"])[0]
                logging.debug("SEname associated to file is: %s" % seName)
            else:
                msg = "Error in DBSWriter.insertFiles\n"
                msg += "No SEname associated to file"
                #print "FAKING seName for now"
                #seName="cmssrm.fnal.gov"
                raise DBSWriterError(msg)
            insertFiles.append(dbsfile)
        #  //Processing Jobs: 
        # // Insert the lists of sorted files into the appropriate
        #//  fileblocks
       
        try:
            fileBlock = DBSWriterObjects.getDBSFileBlock(
                    self.dbs,
                    procDataset,
                    seName)
        except DbsException, ex:
            msg = "Error in DBSWriter.insertFiles\n"
            msg += "Cannot retrieve FileBlock for dataset:\n"
            msg += " %s\n" % procDataset['Path']
            #msg += "In Storage Element:\n %s\n" % insertFiles.seName
            msg += "%s\n" % formatEx(ex)
            raise DBSWriterError(msg)
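The run/lumi handling in insertFilesForDBSBuffer inserts each distinct run only once (tracked in addedRuns) and attaches every lumi section of every run to the file. The sketch below reproduces that bookkeeping without the DBS client, using a hypothetical Run stand-in and plain dicts instead of DbsRun / DbsLumiSection.

class Run(object):
    # hypothetical stand-in for the run/lumi object returned by outFile.getRuns()
    def __init__(self, run, lumis):
        self.run = run
        self._lumis = lumis
    def __iter__(self):
        return iter(self._lumis)

addedRuns = []
lumiList = []
for runlumiinfo in [Run(100, [1, 2]), Run(101, [7])]:
    lrun = int(runlumiinfo.run)
    if lrun not in addedRuns:  # avoid inserting the same run twice
        addedRuns.append(lrun)
    for alsn in runlumiinfo:
        lumiList.append({'LumiSectionNumber': int(alsn), 'RunNumber': lrun})

print(addedRuns)      # [100, 101]
print(len(lumiList))  # 3 lumi sections attached to the file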
Example 8
                if not orderedHashes.count(hashName):
                    orderedHashes.append(hashName)
            

        #  //Processing Jobs: 
        # // Insert the lists of sorted files into the appropriate
        #//  fileblocks

        for hash in orderedHashes:
            
            fileList = insertLists[hash]
            procDataset = fileList[0]['Dataset']

            try:
                fileBlock = DBSWriterObjects.getDBSFileBlock(
                    self.dbs,
                    procDataset,
                    fileList.seName)
            except DbsException, ex:
                msg = "Error in DBSWriter.insertFiles\n"
                msg += "Cannot retrieve FileBlock for dataset:\n"
                msg += " %s\n" % procDataset
                msg += "In Storage Element:\n %s\n" % fileList.seName
                msg += "%s\n" % formatEx(ex)
                raise DBSWriterError(msg)

            if fwkJobRep.jobType == "Merge":
                #  //
                # // Merge files
                #//
                for mergedFile in fileList:
                    mergedFile['Block'] = fileBlock
Example 9
    def manageFileBlock(self, fileblockName, maxFiles=100, maxSize=None):
        """
        _manageFileBlock_

        Check whether the fileblock with the provided name
        is closeable based on its number of files or total size.

        If the block equals or exceeds either the maxFiles or maxSize
        parameter, close the block and return True; otherwise do nothing and
        return False.

        """
        #  //
        # // Check that the block exists, and is open before we close it
        #//
        blockInstance = self.dbs.listBlocks(block_name=fileblockName)
        if len(blockInstance) > 1:
            msg = "Multiple Blocks matching name: %s\n" % fileblockName
            msg += "Unable to manage file block..."
            raise DBSWriterError(msg)

        if len(blockInstance) == 0:
            msg = "Block name %s not found\n" % fileblockName
            msg += "Cant manage a non-existent fileblock"
            raise DBSWriterError(msg)
        blockInstance = blockInstance[0]
        openForWriting = blockInstance.get('OpenForWriting', '1')
        if openForWriting == "0":
            msg = "Block %s already closed" % fileblockName
            logging.warning(msg)
            return False

        #  //
        # // We have an open block, sum number of files and file sizes
        #//

        fileCount = float(blockInstance.get('NumberOfFiles', 0))
        totalSize = float(blockInstance.get('BlockSize', 0))

        msg = "Fileblock: %s\n ==> Size: %s Files: %s\n" % (
            fileblockName, totalSize, fileCount)
        logging.warning(msg)

        #  //
        # // Test close block conditions
        #//
        closeBlock = False
        if fileCount >= maxFiles:
            closeBlock = True
            msg = "Closing Block Based on files: %s" % fileblockName
            logging.debug(msg)

        if maxSize is not None:
            if totalSize >= maxSize:
                closeBlock = True
                msg = "Closing Block Based on size: %s" % fileblockName
                logging.debug(msg)

        if closeBlock:
            #  //
            # // Close the block
            #//
            self.dbs.closeBlock(
                DBSWriterObjects.createDBSFileBlock(fileblockName))
        return closeBlock
Example 10
    def insertFiles(self, fwkJobRep, insertDetectorData=False):
        """
        _insertFiles_

        Process the files in the FwkJobReport instance and insert
        them into the associated datasets

        A list of affected fileblock names is returned for both merged
        and unmerged fileblocks; only the merged blocks need to be managed.
        The list is not populated for processing jobs, since the processing
        job blocks do not need to be managed.

        """

        insertLists = {}
        orderedHashes = []
        affectedBlocks = set()

        if len(fwkJobRep.files) <= 0:
            msg = "Error in DBSWriter.insertFiles\n"
            msg += "No files found in FrameWorkJobReport for:\n"
            msg += "==> JobSpecId: %s" % fwkJobRep.jobSpecId
            msg += " Workflow: %s" % fwkJobRep.workflowSpecId
            raise DBSWriterError(msg)

        for outFile in fwkJobRep.sortFiles():
            #  //
            # // Convert each file into a DBS File object
            #//
            seName = None
            if outFile.has_key("SEName"):
                if outFile['SEName']:
                    seName = outFile['SEName']
                    logging.debug("SEname associated to file is: %s" % seName)
## remove the fallback to site se-name if no SE is associated to File
## because it's likely that there is some stage out problem if there
## is no SEName associated to the file.
#            if not seName:
#                if fwkJobRep.siteDetails.has_key("se-name"):
#                   seName = fwkJobRep.siteDetails['se-name']
#                   seName = str(seName)
#                   logging.debug("site SEname: %s"%seName)
            if not seName:
                msg = "Error in DBSWriter.insertFiles\n"
                msg += "No SEname associated to files in FrameWorkJobReport for "
                #                msg += "No SEname found in FrameWorkJobReport for "
                msg += "==> JobSpecId: %s" % fwkJobRep.jobSpecId
                msg += " Workflow: %s" % fwkJobRep.workflowSpecId
                raise DBSWriterError(msg)
            try:
                if (insertDetectorData):
                    dbsFiles = DBSWriterObjects.createDBSFiles(
                        outFile, fwkJobRep.jobType, self.dbs)
                else:
                    dbsFiles = DBSWriterObjects.createDBSFiles(
                        outFile, fwkJobRep.jobType)
            except DbsException, ex:
                msg = "Error in DBSWriter.insertFiles:\n"
                msg += "Error creating DbsFile instances for file:\n"
                msg += "%s\n" % outFile['LFN']
                msg += "%s\n" % formatEx(ex)
                raise DBSWriterError(msg)

            if len(dbsFiles) <= 0:
                msg = "No DbsFile instances created. Not enough info in the FrameWorkJobReport for"
                msg += "==> JobSpecId: %s" % fwkJobRep.jobSpecId
                msg += " Workflow: %s" % fwkJobRep.workflowSpecId
                raise DBSWriterError(msg)

            for f in dbsFiles:
                datasetName = makeDBSDSName(f)
                hashName = "%s-%s" % (seName, datasetName)

                if not insertLists.has_key(hashName):
                    insertLists[hashName] = _InsertFileList(
                        seName, datasetName)
                insertLists[hashName].append(f)

                if not orderedHashes.count(hashName):
                    orderedHashes.append(hashName)
Example 11
    def insertFilesForDBSBuffer(self,
                                files,
                                procDataset,
                                algos,
                                jobType="NotMerge",
                                insertDetectorData=False):
        """
        _insertFiles_

        Insert the given list of files into DBS for the specified processed dataset.
        """
        # TODO: What's the purpose of insertDetectorData?

        if len(files) < 1:
            return
        affectedBlocks = set()
        insertFiles = []
        addedRuns = []
        seName = None

        #Get the algos in insertable form
        ialgos = [
            DBSWriterObjects.createAlgorithmForInsert(dict(algo))
            for algo in algos
        ]

        for outFile in files:
            #  //
            # // Convert each file into a DBS File object
            #//
            lumiList = []

            # Something similar should be the real deal when multiple runs/lumis could be returned from a WMBS file

            for runlumiinfo in outFile.getRuns():
                lrun = long(runlumiinfo.run)
                run = DbsRun(
                    RunNumber=lrun,
                    NumberOfEvents=0,
                    NumberOfLumiSections=0,
                    TotalLuminosity=0,
                    StoreNumber=0,
                    StartOfRun=0,
                    EndOfRun=0,
                )
                #Only added if not added by another file in this loop, why waste a call to DBS
                if lrun not in addedRuns:
                    self.dbs.insertRun(run)
                    addedRuns.append(
                        lrun)  #save it so we do not try to add it again to DBS
                    logging.debug("run %s added to DBS " % str(lrun))
                for alsn in runlumiinfo:
                    lumi = DbsLumiSection(
                        LumiSectionNumber=long(alsn),
                        StartEventNumber=0,
                        EndEventNumber=0,
                        LumiStartTime=0,
                        LumiEndTime=0,
                        RunNumber=lrun,
                    )
                    lumiList.append(lumi)

            logging.debug("lumi list created for the file")

            dbsfile = DbsFile(
                Checksum=str(outFile['cksum']),
                NumberOfEvents=outFile['events'],
                LogicalFileName=outFile['lfn'],
                FileSize=int(outFile['size']),
                Status="VALID",
                ValidationStatus='VALID',
                FileType='EDM',
                Dataset=procDataset,
                TierList=DBSWriterObjects.makeTierList(
                    procDataset['Path'].split('/')[3]),
                AlgoList=ialgos,
                LumiList=lumiList,
                ParentList=outFile.getParentLFNs(),
                #BranchHash = outFile['BranchHash'],
            )
            # This check comes from ProdAgent, not sure if it's required
            if len(outFile["locations"]) > 0:
                seName = list(outFile["locations"])[0]
                logging.debug("SEname associated to file is: %s" % seName)
            else:
                msg = "Error in DBSWriter.insertFiles\n"
                msg += "No SEname associated to file"
                #print "FAKING seName for now"
                #seName="cmssrm.fnal.gov"
                raise DBSWriterError(msg)
            insertFiles.append(dbsfile)
        #  //Processing Jobs:
        # // Insert the lists of sorted files into the appropriate
        #//  fileblocks

        try:
            fileBlock = DBSWriterObjects.getDBSFileBlock(
                self.dbs, procDataset, seName)
        except DbsException, ex:
            msg = "Error in DBSWriter.insertFiles\n"
            msg += "Cannot retrieve FileBlock for dataset:\n"
            msg += " %s\n" % procDataset['Path']
            #msg += "In Storage Element:\n %s\n" % insertFiles.seName
            msg += "%s\n" % formatEx(ex)
            raise DBSWriterError(msg)
Example 12
                insertLists[hashName].append(f)

                if not orderedHashes.count(hashName):
                    orderedHashes.append(hashName)

        #  //Processing Jobs:
        # // Insert the lists of sorted files into the appropriate
        #//  fileblocks

        for hash in orderedHashes:

            fileList = insertLists[hash]
            procDataset = fileList[0]['Dataset']

            try:
                fileBlock = DBSWriterObjects.getDBSFileBlock(
                    self.dbs, procDataset, fileList.seName)
            except DbsException, ex:
                msg = "Error in DBSWriter.insertFiles\n"
                msg += "Cannot retrieve FileBlock for dataset:\n"
                msg += " %s\n" % procDataset
                msg += "In Storage Element:\n %s\n" % fileList.seName
                msg += "%s\n" % formatEx(ex)
                raise DBSWriterError(msg)

            if fwkJobRep.jobType == "Merge":
                #  //
                # // Merge files
                #//
                for mergedFile in fileList:
                    mergedFile['Block'] = fileBlock
                    affectedBlocks.add(fileBlock['Name'])
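Putting the two halves together: insertFiles returns the names of the affected (merged) fileblocks, and each of those can then be handed to manageFileBlock (Examples 5 and 9) to close it once it reaches the file-count or size threshold. A hedged usage sketch, assuming writer is an existing DBSWriter instance and fwkJobRep a parsed FwkJobReport:

import logging

def close_full_blocks(writer, fwkJobRep, maxFiles=100, maxSize=None):
    # Insert the report's files, then try to close every affected fileblock.
    affectedBlocks = writer.insertFiles(fwkJobRep)
    for blockName in affectedBlocks:
        if writer.manageFileBlock(blockName, maxFiles=maxFiles, maxSize=maxSize):
            logging.info("Closed fileblock %s" % blockName)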