def archiveRequestAndOp(listOfLFNs):
    """Return a tuple of the request and operation."""
    req = Request()
    req.RequestName = "MyRequest"
    op = Operation()
    switches = {}
    archiveLFN = "/vo/tars/myTar.tar"
    op.Arguments = DEncode.encode({
        "SourceSE": switches.get("SourceSE", "SOURCE-SE"),
        "TarballSE": switches.get("TarballSE", "TARBALL-SE"),
        "RegisterDescendent": False,
        "ArchiveLFN": archiveLFN,
    })
    op.Type = "ArchiveFiles"
    for index, lfn in enumerate(listOfLFNs):
        oFile = File()
        oFile.LFN = lfn
        oFile.Size = index
        oFile.Checksum = "01130a%0d" % index
        oFile.ChecksumType = "adler32"
        op.addFile(oFile)
    req.addOperation(op)
    return req, op

def test02props(self):
    """ test properties """
    # # valid values
    operation = Operation()
    operation.OperationID = 1
    self.assertEqual(operation.OperationID, 1, "wrong OperationID")
    operation.OperationID = "1"
    self.assertEqual(operation.OperationID, 1, "wrong OperationID")
    operation.Arguments = "foobar"
    self.assertEqual(operation.Arguments, "foobar", "wrong Arguments")
    operation.SourceSE = "CERN-RAW"
    self.assertEqual(operation.SourceSE, "CERN-RAW", "wrong SourceSE")
    operation.TargetSE = "CERN-RAW"
    self.assertEqual(operation.TargetSE, "CERN-RAW", "wrong TargetSE")
    operation.Catalog = ""
    self.assertEqual(operation.Catalog, "", "wrong Catalog")
    operation.Catalog = "BookkeepingDB"
    self.assertEqual(operation.Catalog, "BookkeepingDB", "wrong Catalog")
    operation.Error = "error"
    self.assertEqual(operation.Error, "error", "wrong Error")
    # # wrong props
    try:
        operation.RequestID = "foo"
    except Exception as error:
        self.assertEqual(type(error), AttributeError, "wrong exc raised")
        self.assertEqual(str(error), "can't set attribute", "wrong exc reason")

def test02props( self ):
  """ test properties """
  # # valid values
  operation = Operation()
  operation.Arguments = "foobar"
  self.assertEqual( operation.Arguments, "foobar", "wrong Arguments" )
  operation.SourceSE = "CERN-RAW"
  self.assertEqual( operation.SourceSE, "CERN-RAW", "wrong SourceSE" )
  operation.TargetSE = "CERN-RAW"
  self.assertEqual( operation.TargetSE, "CERN-RAW", "wrong TargetSE" )
  operation.Catalog = ""
  self.assertEqual( operation.Catalog, "", "wrong Catalog" )
  operation.Catalog = "BookkeepingDB"
  self.assertEqual( operation.Catalog, "BookkeepingDB", "wrong Catalog" )
  operation.Error = "error"
  self.assertEqual( operation.Error, "error", "wrong Error" )
  # # wrong props
  try:
    operation.RequestID = "foo"
  except Exception as error:
    self.assertEqual( type( error ), AttributeError, "wrong exc raised" )
    self.assertEqual( str( error ), "can't set attribute", "wrong exc reason" )

def archiveRequestAndOp(listOfLFNs):
    """Return a tuple of the request and operation."""
    req = Request()
    req.RequestName = 'MyRequest'
    op = Operation()
    switches = {}
    archiveLFN = '/vo/tars/myTar.tar'
    op.Arguments = DEncode.encode({'SourceSE': switches.get('SourceSE', 'SOURCE-SE'),
                                   'TarballSE': switches.get('TarballSE', 'TARBALL-SE'),
                                   'RegisterDescendent': False,
                                   'ArchiveLFN': archiveLFN})
    op.Type = 'ArchiveFiles'
    for index, lfn in enumerate(listOfLFNs):
        oFile = File()
        oFile.LFN = lfn
        oFile.Size = index
        oFile.Checksum = '01130a%0d' % index
        oFile.ChecksumType = 'adler32'
        op.addFile(oFile)
    req.addOperation(op)
    return req, op

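# A minimal usage sketch (not part of the original sources): the helpers above
# pack the operation parameters with DEncode.encode, and the consumer on the
# other side is expected to unpack them. DEncode.decode returns a
# (value, consumedLength) tuple, so the round trip would look roughly like this:
def _decodeArchiveArguments(op):
    """Hypothetical helper: recover the argument dict from an ArchiveFiles operation."""
    argsDict, _length = DEncode.decode(op.Arguments)
    return argsDict

# e.g. after building the request:
#   req, op = archiveRequestAndOp(["/vo/file1", "/vo/file2"])
#   _decodeArchiveArguments(op)["ArchiveLFN"]  # -> "/vo/tars/myTar.tar"
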
def taskToRequest(self, taskID, task, transID):
    """Convert a task into an RMS with a single ForwardDISET Operation
    whose attribute is the number of files in the task
    """
    req = Request()
    op = Operation()
    op.Type = "ForwardDISET"
    op.Arguments = json.dumps(len(task["InputData"]) * self.factor)
    req.addOperation(op)
    return req

def generateForwardDISET( self ):
  """ Commit the accumulated records and generate request eventually """
  result = self.commit()
  commitOp = None
  if not result['OK']:
    # Generate Request
    commitOp = Operation()
    commitOp.Type = 'SetFileStatus'
    commitOp.Arguments = DEncode.encode( {'transformation':self.transformation,
                                          'statusDict':self.statusDict,
                                          'force':self.force} )
  return S_OK( commitOp )

def _sendToFailover( rpcStub ):
  """ Create a ForwardDISET operation for failover """
  request = Request()
  request.RequestName = "Accounting.DataStore.%s.%s" % ( time.time(), random.random() )
  forwardDISETOp = Operation()
  forwardDISETOp.Type = "ForwardDISET"
  forwardDISETOp.Arguments = DEncode.encode( rpcStub )
  request.addOperation( forwardDISETOp )
  return ReqClient().putRequest( request )

def generateForwardDISET(self):
    """ Commit the accumulated records and generate request eventually """
    result = self.commit()
    commitOp = None
    if not result['OK']:
        # Generate Request
        commitOp = Operation()
        commitOp.Type = 'SetFileStatus'
        commitOp.Arguments = DEncode.encode({'transformation': self.transformation,
                                             'statusDict': self.statusDict,
                                             'force': self.force})
    return S_OK(commitOp)

def generateForwardDISET( self ):
  """ Commit the accumulated records and generate request eventually """
  result = self.commit()
  forwardDISETOp = None
  if not result['OK']:
    # Generate Request
    if "FailedResults" in result:
      for res in result['FailedResults']:
        if 'rpcStub' in res:
          forwardDISETOp = Operation()
          forwardDISETOp.Type = "ForwardDISET"
          forwardDISETOp.Arguments = DEncode.encode( res['rpcStub'] )
  return S_OK( forwardDISETOp )

def generateForwardDISET(self):
    """Commit the accumulated records and generate request eventually"""
    result = self.commit()
    commitOp = None
    if not result["OK"]:
        # Generate Request
        commitOp = Operation()
        commitOp.Type = "SetFileStatus"
        commitOp.Arguments = DEncode.encode({
            "transformation": self.transformation,
            "statusDict": self.statusDict,
            "force": self.force
        })
    return S_OK(commitOp)

def generateForwardDISET( self ):
  """ Commit the accumulated records and generate request eventually """
  result = self.commit()
  forwardDISETOp = None
  if not result['OK']:
    # Generate Request
    if 'rpcStub' in result:
      forwardDISETOp = Operation()
      forwardDISETOp.Type = "ForwardDISET"
      forwardDISETOp.Arguments = DEncode.encode( result['rpcStub'] )
    else:
      return S_ERROR( 'Could not create ForwardDISET operation' )
  return S_OK( forwardDISETOp )

def _sendToFailover(rpcStub):
    """Create a ForwardDISET operation for failover"""
    try:
        request = Request()
        request.RequestName = "Accounting.DataStore.%s.%s" % (time.time(), random.random())
        forwardDISETOp = Operation()
        forwardDISETOp.Type = "ForwardDISET"
        forwardDISETOp.Arguments = DEncode.encode(rpcStub)
        request.addOperation(forwardDISETOp)
        return ReqClient().putRequest(request)
    # We catch all the exceptions, because it should never crash
    except Exception as e:  # pylint: disable=broad-except
        return S_ERROR(ERMSUKN, "Exception sending accounting failover request: %s" % repr(e))

def _sendToFailover( rpcStub ):
  """ Create a ForwardDISET operation for failover """
  try:
    request = Request()
    request.RequestName = "Accounting.DataStore.%s.%s" % ( time.time(), random.random() )
    forwardDISETOp = Operation()
    forwardDISETOp.Type = "ForwardDISET"
    forwardDISETOp.Arguments = DEncode.encode( rpcStub )
    request.addOperation( forwardDISETOp )
    return ReqClient().putRequest( request )
  # We catch all the exceptions, because it should never crash
  except Exception as e:  # pylint: disable=broad-except
    return S_ERROR( ERMSUKN, "Exception sending accounting failover request: %s" % repr( e ) )

def generateForwardDISET( self ):
  """ Generate and return failover requests for the operations in the internal cache """
  forwardDISETOp = None
  result = self.sendStoredStatusInfo()
  if not result['OK']:
    if 'rpcStub' in result:
      rpcStub = result['rpcStub']
      forwardDISETOp = Operation()
      forwardDISETOp.Type = "ForwardDISET"
      forwardDISETOp.Arguments = DEncode.encode( rpcStub )
    else:
      return S_ERROR( 'Could not create ForwardDISET operation' )
  return S_OK( forwardDISETOp )

def generateForwardDISET(self):
    """ Generate and return failover requests for the operations in the internal cache """
    forwardDISETOp = None
    result = self.sendStoredStatusInfo()
    if not result['OK']:
        if 'rpcStub' in result:
            rpcStub = result['rpcStub']
            forwardDISETOp = Operation()
            forwardDISETOp.Type = "ForwardDISET"
            forwardDISETOp.Arguments = DEncode.encode(rpcStub)
        else:
            return S_ERROR('Could not create ForwardDISET operation')
    return S_OK(forwardDISETOp)

def generateForwardDISET(self):
    """Generate and return failover requests for the operations in the internal cache"""
    forwardDISETOp = None
    result = self.sendStoredStatusInfo()
    if not result["OK"]:
        gLogger.error("Error while sending the job status", result["Message"])
        if "rpcStub" in result:
            rpcStub = result["rpcStub"]
            forwardDISETOp = Operation()
            forwardDISETOp.Type = "ForwardDISET"
            forwardDISETOp.Arguments = DEncode.encode(rpcStub)
        else:
            return S_ERROR("Could not create ForwardDISET operation")
    return S_OK(forwardDISETOp)

def generateForwardDISET(self):
    """ Generate and return failover requests for the operations in the internal cache """
    forwardDISETOp = None
    result = self.sendStoredStatusInfo()
    if not result["OK"]:
        if "rpcStub" in result:
            rpcStub = result["rpcStub"]
            forwardDISETOp = Operation()
            forwardDISETOp.Type = "ForwardDISET"
            forwardDISETOp.Arguments = DEncode.encode(rpcStub)
        else:
            return S_ERROR("Could not create job parameters sub-request")
    return S_OK(forwardDISETOp)

def test_valid_properties():
    operation = Operation()
    operation.Arguments = "foobar"
    assert operation.Arguments == b"foobar", "wrong Arguments"
    operation.SourceSE = "CERN-RAW"
    assert operation.SourceSE == "CERN-RAW", "wrong SourceSE"
    operation.TargetSE = "CERN-RAW"
    assert operation.TargetSE == "CERN-RAW", "wrong TargetSE"
    operation.Catalog = ""
    assert operation.Catalog == "", "wrong Catalog"
    operation.Catalog = "BookkeepingDB"
    assert operation.Catalog == "BookkeepingDB", "wrong Catalog"
    operation.Error = "error"
    assert operation.Error == "error", "wrong Error"
    toJSON = operation.toJSON()
    assert toJSON["OK"]

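# A pytest-style counterpart (a sketch, not from the original sources) for the
# "wrong props" checks exercised by the unittest versions above; it assumes the
# same Operation class and the error messages shown in test02props:
import pytest

def test_invalid_properties():
    operation = Operation()
    # Status only accepts a known set of values
    with pytest.raises(ValueError, match="unknown Status 'foo'"):
        operation.Status = "foo"
    # timestamps must parse as '%Y-%m-%d %H:%M:%S'
    with pytest.raises(ValueError):
        operation.SubmitTime = "foo"
    with pytest.raises(ValueError):
        operation.LastUpdate = "foo"
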
def createRequest(self, requestName, archiveLFN, lfnChunk):
    """Create the Request."""
    request = Request()
    request.RequestName = requestName
    self._checkReplicaSites(request, lfnChunk)

    archiveFiles = Operation()
    archiveFiles.Type = 'ArchiveFiles'
    archiveFiles.Arguments = DEncode.encode({'SourceSE': self.sourceSEs[0],
                                             'TarballSE': self.switches['TarballSE'],
                                             'RegisterDescendent': self.switches['RegisterDescendent'],
                                             'ArchiveLFN': archiveLFN})
    self.addLFNs(archiveFiles, lfnChunk)
    request.addOperation(archiveFiles)

    # Replicate the Tarball, ArchiveFiles will upload it
    if self.switches.get('ReplicateTarball'):
        replicateAndRegisterTarBall = Operation()
        replicateAndRegisterTarBall.Type = 'ReplicateAndRegister'
        replicateAndRegisterTarBall.TargetSE = self.targetSE
        opFile = File()
        opFile.LFN = archiveLFN
        replicateAndRegisterTarBall.addFile(opFile)
        request.addOperation(replicateAndRegisterTarBall)

    if self.switches.get('CheckMigration'):
        checkMigrationTarBall = Operation()
        checkMigrationTarBall.Type = 'CheckMigration'
        migrationTarget = self.targetSE if self.switches.get('ReplicateTarball') else self.switches['TarballSE']
        checkMigrationTarBall.TargetSE = migrationTarget
        opFile = File()
        opFile.LFN = archiveLFN
        checkMigrationTarBall.addFile(opFile)
        request.addOperation(checkMigrationTarBall)

    # Register Archive Replica for LFNs
    if self.switches.get('ArchiveSE'):
        registerArchived = Operation()
        registerArchived.Type = 'RegisterReplica'
        registerArchived.TargetSE = self.switches.get('ArchiveSE')
        self.addLFNs(registerArchived, lfnChunk, addPFN=True)
        request.addOperation(registerArchived)

    # Remove all Other Replicas for LFNs
    if self.switches.get('RemoveReplicas'):
        removeArchiveReplicas = Operation()
        removeArchiveReplicas.Type = 'RemoveReplica'
        removeArchiveReplicas.TargetSE = ','.join(self.replicaSEs)
        self.addLFNs(removeArchiveReplicas, lfnChunk)
        request.addOperation(removeArchiveReplicas)

    # Remove all Replicas for LFNs
    if self.switches.get('RemoveFiles'):
        removeArchiveFiles = Operation()
        removeArchiveFiles.Type = 'RemoveFile'
        self.addLFNs(removeArchiveFiles, lfnChunk)
        request.addOperation(removeArchiveFiles)

    # Remove Original tarball replica
    if self.switches.get('ReplicateTarball'):
        removeTarballOrg = Operation()
        removeTarballOrg.Type = 'RemoveReplica'
        removeTarballOrg.TargetSE = self.sourceSEs[0]
        opFile = File()
        opFile.LFN = archiveLFN
        removeTarballOrg.addFile(opFile)
        request.addOperation(removeTarballOrg)

    return request

def test02props(self):
    """ test properties """
    # # valid values
    operation = Operation()
    operation.Arguments = "foobar"
    self.assertEqual(operation.Arguments, "foobar", "wrong Arguments")
    operation.SourceSE = "CERN-RAW"
    self.assertEqual(operation.SourceSE, "CERN-RAW", "wrong SourceSE")
    operation.TargetSE = "CERN-RAW"
    self.assertEqual(operation.TargetSE, "CERN-RAW", "wrong TargetSE")
    operation.Catalog = ""
    self.assertEqual(operation.Catalog, "", "wrong Catalog")
    operation.Catalog = "BookkeepingDB"
    self.assertEqual(operation.Catalog, "BookkeepingDB", "wrong Catalog")
    operation.Error = "error"
    self.assertEqual(operation.Error, "error", "wrong Error")
    # # wrong props
    try:
        operation.RequestID = "foo"
    except Exception as error:
        self.assertEqual(type(error), AttributeError, "wrong exc raised")
        self.assertEqual(str(error), "can't set attribute", "wrong exc reason")
    try:
        operation.OperationID = "foo"
    except Exception as error:
        self.assertEqual(type(error), ValueError, "wrong exc raised")
    # # timestamps
    try:
        operation.SubmitTime = "foo"
    except Exception as error:
        self.assertEqual(type(error), ValueError, "wrong exc raised")
        self.assertEqual(str(error),
                         "time data 'foo' does not match format '%Y-%m-%d %H:%M:%S'",
                         "wrong exc reason")
    try:
        operation.LastUpdate = "foo"
    except Exception as error:
        self.assertEqual(type(error), ValueError, "wrong exc raised")
        self.assertEqual(str(error),
                         "time data 'foo' does not match format '%Y-%m-%d %H:%M:%S'",
                         "wrong exc reason")
    # # Status
    operation = Operation()
    try:
        operation.Status = "foo"
    except Exception as error:
        self.assertEqual(type(error), ValueError, "wrong exc raised")
        self.assertEqual(str(error), "unknown Status 'foo'", "wrong exc reason")
    operation.addFile(File({"Status": "Waiting", "LFN": "/a"}))

def execute(self, production_id=None, prod_job_id=None, wms_job_id=None,
            workflowStatus=None, stepStatus=None,
            wf_commons=None, step_commons=None,
            step_number=None, step_id=None,
            SEs=None, fileDescendants=None):
    """ Main execution function.

    1. Determine the final list of possible output files for the workflow
       and all the parameters needed to upload them.
    2. Verifying that the input files have no descendants (and exiting with error, otherwise)
    3. Sending the BK records for the steps of the job
    4. Transfer output files in their destination, register in the FC (with failover)
    5. Registering the output files in the Bookkeeping
    """
    try:
        super(UploadOutputData, self).execute(self.version, production_id, prod_job_id, wms_job_id,
                                              workflowStatus, stepStatus,
                                              wf_commons, step_commons, step_number, step_id)

        # This returns all Tier1-Failover unless a specific one is defined for the site
        self.failoverSEs = getDestinationSEList('Tier1-Failover', self.siteName, outputmode='Any')
        random.shuffle(self.failoverSEs)

        self._resolveInputVariables()

        if not self._checkWFAndStepStatus():
            return S_OK("Failures detected in previous steps: no output data upload attempted")

        # ## 1. Determine the final list of possible output files
        # ##    for the workflow and all the parameters needed to upload them.

        self.log.verbose("Getting the list of candidate files")
        fileDict = self.getCandidateFiles(self.outputList, self.prodOutputLFNs,
                                          self.outputDataFileMask, self.outputDataStep)

        fileMetadata = self.getFileMetadata(fileDict)
        if not fileMetadata:
            self.log.info("No output data files were determined to be uploaded for this workflow")
            return S_OK()

        # Get final, resolved SE list for files
        final = {}
        for fileName, metadata in fileMetadata.iteritems():
            if not SEs:
                resolvedSE = getDestinationSEList(metadata['workflowSE'], self.siteName, self.outputMode,
                                                  self.workflow_commons.get('runNumber'))
            else:
                resolvedSE = SEs
            final[fileName] = metadata
            final[fileName]['resolvedSE'] = resolvedSE

        self.log.info("The following files will be uploaded", ": %s" % (', '.join(final.keys())))
        for fileName, metadata in final.items():
            self.log.info('--------%s--------' % fileName)
            for name, val in metadata.iteritems():
                self.log.info('%s = %s' % (name, val))

        if not self._enableModule():
            # At this point can exit and see exactly what the module would have uploaded
            self.log.info("Module disabled",
                          "would have attempted to upload the files %s" % ', '.join(final.keys()))

        # ## 2. Prior to uploading any files must check (for productions with input data) that no descendant files
        # ##    already exist with replica flag in the BK.

        if self.inputDataList:
            if fileDescendants is not None:
                lfnsWithDescendants = fileDescendants
            else:
                if not self._enableModule():
                    self.log.info("Module disabled",
                                  "would have attempted to check the files %s" % ', '.join(self.inputDataList))
                    lfnsWithDescendants = []
                else:
                    lfnsWithDescendants = getFileDescendants(self.production_id, self.inputDataList,
                                                             dm=self.dataManager, bkClient=self.bkClient)
            if not lfnsWithDescendants:
                self.log.info("No descendants found, outputs can be uploaded")
            else:
                self.log.error("Found descendants!!! Outputs won't be uploaded")
                self.log.info("Files with descendants", ": %s" % ', '.join(lfnsWithDescendants))
                self.log.info("The files above will be set as 'Processed', other lfns in input will be later reset as Unused")
                self.fileReport.setFileStatus(int(self.production_id), lfnsWithDescendants, 'Processed')
                return S_ERROR("Input Data Already Processed")

        # ## 3. Sending the BK records for the steps of the job

        bkFileExtensions = ['bookkeeping*.xml']
        bkFiles = []
        for ext in bkFileExtensions:
            self.log.debug("Looking at BK record wildcard: %s" % ext)
            globList = glob.glob(ext)
            for check in globList:
                if os.path.isfile(check):
                    self.log.verbose("Found locally existing BK file record", ": %s" % check)
                    bkFiles.append(check)

        # Unfortunately we depend on the file names to order the BK records
        bkFilesListTuples = []
        for bk in bkFiles:
            bkFilesListTuples.append((bk, int(bk.split('_')[-1].split('.')[0])))
        bkFiles = [bk[0] for bk in sorted(bkFilesListTuples, key=itemgetter(1))]

        self.log.info("The following BK records will be sent", ": %s" % (', '.join(bkFiles)))
        if self._enableModule():
            for bkFile in bkFiles:
                with open(bkFile, 'r') as fd:
                    bkXML = fd.read()
                self.log.info("Sending BK record", ":\n%s" % (bkXML))
                result = self.bkClient.sendXMLBookkeepingReport(bkXML)
                self.log.verbose(result)
                if result['OK']:
                    self.log.info("Bookkeeping report sent", "for %s" % bkFile)
                else:
                    self.log.error("Could not send Bookkeeping XML file to server", ": %s" % result['Message'])
                    self.log.info("Preparing DISET request", "for %s" % bkFile)
                    bkDISETReq = Operation()
                    bkDISETReq.Type = 'ForwardDISET'
                    bkDISETReq.Arguments = DEncode.encode(result['rpcStub'])
                    self.request.addOperation(bkDISETReq)
                    self.workflow_commons['Request'] = self.request  # update each time, just in case
        else:
            self.log.info("Would have attempted to send bk records, but module is disabled")

        # ## 4. Transfer output files in their destination, register in the FC (with failover)

        # Disable the watchdog check in case the file uploading takes a long time
        self._disableWatchdogCPUCheck()

        # Instantiate the failover transfer client with the global request object
        if not self.failoverTransfer:
            self.failoverTransfer = FailoverTransfer(self.request)

        # Track which files are successfully uploaded (not to failover) via performBKRegistration
        performBKRegistration = []
        # Failover replicas are always added to the BK when they become available (actually, added to all the catalogs)
        failover = {}

        for fileName, metadata in final.items():
            targetSE = metadata['resolvedSE']
            self.log.info("Attempting to store file to SE",
                          "%s to the following SE(s):\n%s" % (fileName, ', '.join(targetSE)))
            fileMetaDict = {'Size': metadata['filedict']['Size'],
                            'LFN': metadata['filedict']['LFN'],
                            'GUID': metadata['filedict']['GUID'],
                            'Checksum': metadata['filedict']['Checksum'],
                            'ChecksumType': metadata['filedict']['ChecksumType']}

            if not self._enableModule():
                # At this point can exit and see exactly what the module would have uploaded
                self.log.info("Module disabled", "would have attempted to upload file %s" % fileName)
                continue

            result = self.failoverTransfer.transferAndRegisterFile(fileName=fileName,
                                                                   localPath=metadata['localpath'],
                                                                   lfn=metadata['filedict']['LFN'],
                                                                   destinationSEList=targetSE,
                                                                   fileMetaDict=fileMetaDict,
                                                                   masterCatalogOnly=True)
            if not result['OK']:
                self.log.error("Could not transfer and register",
                               " %s with metadata:\n %s" % (fileName, metadata))
                failover[fileName] = metadata
            else:
                self.log.info("File uploaded, will be registered in BK if all files uploaded for job",
                              "(%s)" % fileName)
                # if the files are uploaded in the SE, independently if the registration in the FC is done,
                # then we have to register all of them in the BKK
                performBKRegistration.append(metadata)

        cleanUp = False
        for fileName, metadata in failover.items():
            self.log.info("Setting default catalog for failover transfer registration to master catalog")
            random.shuffle(self.failoverSEs)
            targetSE = metadata['resolvedSE'][0]
            metadata['resolvedSE'] = self.failoverSEs

            fileMetaDict = {'Size': metadata['filedict']['Size'],
                            'LFN': metadata['filedict']['LFN'],
                            'GUID': metadata['filedict']['GUID'],
                            'Checksum': metadata['filedict']['Checksum'],
                            'ChecksumType': metadata['filedict']['ChecksumType']}

            if not self._enableModule():
                # At this point can exit and see exactly what the module would have uploaded
                self.log.info("Module disabled", "would have attempted to upload with failover file %s" % fileName)
                continue

            result = self.failoverTransfer.transferAndRegisterFileFailover(fileName=fileName,
                                                                           localPath=metadata['localpath'],
                                                                           lfn=metadata['filedict']['LFN'],
                                                                           targetSE=targetSE,
                                                                           failoverSEList=metadata['resolvedSE'],
                                                                           fileMetaDict=fileMetaDict,
                                                                           masterCatalogOnly=True)
            if not result['OK']:
                self.log.error("Could not transfer and register",
                               "%s in failover with metadata:\n %s" % (fileName, metadata))
                cleanUp = True
                break  # no point continuing if one completely fails

        # Now after all operations, retrieve potentially modified request object
        self.request = self.failoverTransfer.request

        # If some or all of the files failed to be saved even to failover
        if cleanUp and self._enableModule():
            self._cleanUp(final)
            self.workflow_commons['Request'] = self.request
            return S_ERROR('Failed to upload output data')

        # For files correctly uploaded must report LFNs to job parameters
        if final and self._enableModule():
            report = ', '.join(final.keys())
            self.setJobParameter('UploadedOutputData', report)

        # ## 5. Can now register the successfully uploaded files in the BK i.e. set the BK replica flags

        if not performBKRegistration:
            self.log.info("There are no files to perform the BK registration for, all are in failover")
        elif self._enableModule():  # performing BK registration
            # Getting what should be registered immediately, and what later
            lfnsToRegisterInBK = set([metadata['filedict']['LFN'] for metadata in performBKRegistration])
            lfnsToRegisterInBKNow = self._getLFNsForBKRegistration(lfnsToRegisterInBK)
            lfnsToRegisterInBKLater = list(lfnsToRegisterInBK - set(lfnsToRegisterInBKNow))

            # Registering what should be registered immediately, and handling failures
            result = FileCatalog(catalogs=['BookkeepingDB']).addFile(lfnsToRegisterInBKNow)
            self.log.verbose("BookkeepingDB.addFile: %s" % result)
            if not result['OK']:
                self.log.error(result)
                return S_ERROR("Could Not Perform BK Registration")
            if 'Failed' in result['Value'] and result['Value']['Failed']:
                for lfn, error in result['Value']['Failed'].iteritems():
                    lfnMetadata = {}
                    for lfnMD in performBKRegistration:
                        # the lfn is indeed both at lfnMD['lfn'] and at lfnMD['filedict']['LFN']
                        if lfnMD['lfn'] == lfn:
                            lfnMetadata = lfnMD['filedict']
                            break
                    self.setBKRegistrationRequest(lfn, error=error, metaData=lfnMetadata)

            # Adding a registration request for what should be registered later
            if lfnsToRegisterInBKLater:
                for lfnMD in performBKRegistration:
                    if lfnMD['lfn'] in lfnsToRegisterInBKLater:
                        lfnMetadata = lfnMD['filedict']
                        self.setBKRegistrationRequest(lfnMD['lfn'], metaData=lfnMetadata)

        self.workflow_commons['Request'] = self.request

        return S_OK("Output data uploaded")

    except Exception as e:  # pylint:disable=broad-except
        self.log.exception('Exception in UploadOutputData', lException=e)
        self.setApplicationStatus(repr(e))
        return S_ERROR(str(e))

    finally:
        super(UploadOutputData, self).finalize(self.version)

def createRequest(self, requestName, archiveLFN, lfnChunk):
    """Create the Request."""
    request = Request()
    request.RequestName = requestName
    self._checkReplicaSites(request, lfnChunk)

    archiveFiles = Operation()
    archiveFiles.Type = "ArchiveFiles"
    archiveFiles.Arguments = DEncode.encode({
        "SourceSE": self.sourceSEs[0],
        "TarballSE": self.switches["TarballSE"],
        "RegisterDescendent": self.switches["RegisterDescendent"],
        "ArchiveLFN": archiveLFN,
    })
    self.addLFNs(archiveFiles, lfnChunk)
    request.addOperation(archiveFiles)

    # Replicate the Tarball, ArchiveFiles will upload it
    if self.switches.get("ReplicateTarball"):
        replicateAndRegisterTarBall = Operation()
        replicateAndRegisterTarBall.Type = "ReplicateAndRegister"
        replicateAndRegisterTarBall.TargetSE = self.targetSE
        opFile = File()
        opFile.LFN = archiveLFN
        replicateAndRegisterTarBall.addFile(opFile)
        request.addOperation(replicateAndRegisterTarBall)

    if self.switches.get("CheckMigration"):
        checkMigrationTarBall = Operation()
        checkMigrationTarBall.Type = "CheckMigration"
        migrationTarget = self.targetSE if self.switches.get("ReplicateTarball") else self.switches["TarballSE"]
        checkMigrationTarBall.TargetSE = migrationTarget
        opFile = File()
        opFile.LFN = archiveLFN
        checkMigrationTarBall.addFile(opFile)
        request.addOperation(checkMigrationTarBall)

    # Register Archive Replica for LFNs
    if self.switches.get("ArchiveSE"):
        registerArchived = Operation()
        registerArchived.Type = "RegisterReplica"
        registerArchived.TargetSE = self.switches.get("ArchiveSE")
        self.addLFNs(registerArchived, lfnChunk, addPFN=True)
        request.addOperation(registerArchived)

    # Remove all Other Replicas for LFNs
    if self.switches.get("RemoveReplicas"):
        removeArchiveReplicas = Operation()
        removeArchiveReplicas.Type = "RemoveReplica"
        removeArchiveReplicas.TargetSE = ",".join(self.replicaSEs)
        self.addLFNs(removeArchiveReplicas, lfnChunk)
        request.addOperation(removeArchiveReplicas)

    # Remove all Replicas for LFNs
    if self.switches.get("RemoveFiles"):
        removeArchiveFiles = Operation()
        removeArchiveFiles.Type = "RemoveFile"
        self.addLFNs(removeArchiveFiles, lfnChunk)
        request.addOperation(removeArchiveFiles)

    # Remove Original tarball replica
    if self.switches.get("ReplicateTarball"):
        removeTarballOrg = Operation()
        removeTarballOrg.Type = "RemoveReplica"
        removeTarballOrg.TargetSE = self.sourceSEs[0]
        opFile = File()
        opFile.LFN = archiveLFN
        removeTarballOrg.addFile(opFile)
        request.addOperation(removeTarballOrg)

    return request

def execute(self, production_id=None, prod_job_id=None, wms_job_id=None,
            workflowStatus=None, stepStatus=None,
            wf_commons=None, step_commons=None,
            step_number=None, step_id=None):
    """ Main execution function. """
    try:
        super(FailoverRequest, self).execute(self.version, production_id, prod_job_id, wms_job_id,
                                             workflowStatus, stepStatus,
                                             wf_commons, step_commons, step_number, step_id)

        if not self._enableModule():
            return S_OK()

        self._resolveInputVariables()

        # preparing the request, just in case
        self.request.RequestName = 'job_%d_request.xml' % self.jobID
        self.request.JobID = self.jobID
        self.request.SourceComponent = "Job_%d" % self.jobID

        # report on the status of the input data, by default they are 'Processed', unless the job failed
        # failures happening before (e.g. in previous steps, or while inspecting the XML summary) are not touched.
        # The FileReport object is normally empty, unless there are some Problematic files,
        # or if there are files found to have descendants
        filesInFileReport = self.fileReport.getFiles()

        if not self._checkWFAndStepStatus(noPrint=True):
            # To overcome race condition issues, the file status for this case is reported by the failover request
            statusDict = {}
            for lfn in self.inputDataList:
                if lfn not in filesInFileReport:
                    self.log.info("Add operation to set status 'Unused' due to workflow failure for input file: %s" % (lfn))
                    statusDict[lfn] = 'Unused'
            if statusDict:  # Avoid setting an empty request
                setFileStatusOp = Operation()
                setFileStatusOp.Type = 'SetFileStatus'
                setFileStatusOp.Arguments = DEncode.encode({'transformation': int(self.production_id),
                                                            'statusDict': statusDict,
                                                            'force': False})
                self.request.addOperation(setFileStatusOp)
        else:
            for lfn in self.inputDataList:
                if lfn not in filesInFileReport:
                    self.log.verbose("No status populated for input data %s, setting to 'Processed'" % lfn)
                    self.fileReport.setFileStatus(int(self.production_id), lfn, 'Processed')

        result = self.fileReport.commit()
        # If there are still files to set, try a second time and generate a request if it fails again
        if self.fileReport.getFiles():
            self.log.error("Failed to report file status to TransformationDB")
            # This will try a second time a commit, before generating a SetFileStatus operation
            result = self.fileReport.generateForwardDISET()
            if not result['OK']:
                self.log.warn("Could not generate Operation for file report with result:\n%s" % (result['Value']))
            else:
                if result['Value'] is None:  # Means the FileReport managed to report, no need for a new operation
                    self.log.info("On second trial, files correctly reported to TransformationDB")
                else:
                    self.log.info("Populating request with file report info (SetFileStatus operation)")
                    result = self.request.addOperation(result['Value'])
                    if not result['OK']:
                        return result
        elif result['Value']:
            self.log.info("Status of files have been properly updated in the TransformationDB")

        # Must ensure that the local job report instance is used to report the final status
        # in case of failure and a subsequent failover operation
        if self.workflowStatus['OK'] and self.stepStatus['OK']:
            self.setApplicationStatus("Job Finished Successfully")

        self.generateFailoverFile()

        return S_OK()

    except Exception as e:  # pylint:disable=broad-except
        self.log.exception("Failure in FailoverRequest execute module", lException=e)
        self.setApplicationStatus(repr(e))
        return S_ERROR(str(e))

    finally:
        super(FailoverRequest, self).finalize(self.version)
