def _createLogUploadRequest(self, targetSE, logFileLFN, uploadedSE):
    """Set a request to upload job log files from the output sandbox.

    Changed to be similar to LHCb createLogUploadRequest using LHCb
    LogUpload Request and Removal Request.
    """
    self.log.info('Setting log upload request for %s at %s' % (targetSE, logFileLFN))
    request = self._getRequestContainer()

    # The same File object is shared by both operations.
    logFile = File()
    logFile.LFN = logFileLFN

    # First upload the log file to its final destination ...
    uploadOp = Operation()
    uploadOp.Type = "LogUpload"
    uploadOp.TargetSE = targetSE
    uploadOp.addFile(logFile)
    request.addOperation(uploadOp)

    # ... then remove the copy left on the intermediate SE.
    removalOp = Operation()
    removalOp.Type = 'RemoveFile'
    removalOp.TargetSE = uploadedSE
    removalOp.addFile(logFile)
    request.addOperation(removalOp)

    self.workflow_commons['Request'] = request
    return S_OK()
def buildRequest(self, owner, group, sourceSE, targetSE1, targetSE2):
    """Build a full life-cycle request: clean, put, replicate, then remove."""
    fileTuples = self.files(owner, group)

    def lfnOnlyOperation(opType, targetSE=None):
        # Operation whose files carry only the LFN.
        op = Operation()
        op.Type = opType
        if targetSE:
            op.TargetSE = targetSE
        for _fname, lfn, _size, _checksum, _guid in fileTuples:
            op.addFile(File({"LFN": lfn}))
        return op

    # Upload with full metadata (PFN, checksum, size, GUID).
    putAndRegister = Operation()
    putAndRegister.Type = "PutAndRegister"
    putAndRegister.TargetSE = sourceSE
    for fname, lfn, size, checksum, guid in fileTuples:
        putFile = File()
        putFile.LFN = lfn
        putFile.PFN = fname
        putFile.Checksum = checksum
        putFile.ChecksumType = "adler32"
        putFile.Size = size
        putFile.GUID = guid
        putAndRegister.addFile(putFile)

    # Replicate to both targets; replica files carry no PFN/GUID.
    replicateAndRegister = Operation()
    replicateAndRegister.Type = "ReplicateAndRegister"
    replicateAndRegister.TargetSE = "%s,%s" % (targetSE1, targetSE2)
    for _fname, lfn, size, checksum, _guid in fileTuples:
        repFile = File()
        repFile.LFN = lfn
        repFile.Size = size
        repFile.Checksum = checksum
        repFile.ChecksumType = "adler32"
        replicateAndRegister.addFile(repFile)

    req = Request()
    # Order matters: initial cleanup, upload, replication, replica drop, final removal.
    for op in (lfnOnlyOperation("RemoveFile"),
               putAndRegister,
               replicateAndRegister,
               lfnOnlyOperation("RemoveReplica", sourceSE),
               lfnOnlyOperation("RemoveFile")):
        req.addOperation(op)
    return req
def __setRegistrationRequest(self, lfn, targetSE, fileDict, catalog):
    """ Sets a registration request

    :param str lfn: LFN
    :param str targetSE: target SE
    :param list catalog: list of catalogs to use
    :param dict fileDict: file metadata
    """
    self.log.info('Setting registration request for %s at %s.' % (lfn, targetSE))
    for cat in catalog:
        regFile = File()
        regFile.LFN = lfn
        regFile.Checksum = fileDict.get("Checksum", "")
        regFile.ChecksumType = fileDict.get("ChecksumType", "")
        regFile.Size = fileDict.get("Size", 0)
        regFile.GUID = fileDict.get("GUID", "")

        # Resolve the PFN; bail out with the error structure on failure.
        pfn = StorageElement(targetSE).getPfnForLfn(lfn)
        if not pfn["OK"]:
            self.log.error("unable to get PFN for LFN: %s" % pfn["Message"])
            return pfn
        regFile.PFN = pfn["Value"]

        register = Operation()
        register.Type = "RegisterFile"
        register.Catalog = cat
        register.TargetSE = targetSE
        register.addFile(regFile)
        self.request.addOperation(register)
    return S_OK()
def __insertRegisterOperation(self, request, operation, toRegister):
    """ add RegisterReplica operation

    :param Request request: request instance
    :param Operation operation: 'ReplicateAndRegister' operation for this FTSJob
    :param list toRegister: [ FTSDB.FTSFile, ... ] - files that failed to register
    """
    log = self.log.getSubLogger("req_%s/%s/registerFiles" % (request.RequestID, request.RequestName))

    # Group the files needing registration by target SE.
    byTarget = {}
    for ftsFile in toRegister:
        byTarget.setdefault(ftsFile.TargetSE, []).append(ftsFile)
    log.info("will create %s 'RegisterReplica' operations" % len(byTarget))

    for target, ftsFileList in byTarget.iteritems():
        log.info("creating 'RegisterReplica' operation for targetSE %s with %s files..." % (target, len(ftsFileList)))
        registerOperation = Operation()
        registerOperation.Type = "RegisterReplica"
        registerOperation.Status = "Waiting"
        registerOperation.TargetSE = target
        targetSE = StorageElement(target)
        for ftsFile in ftsFileList:
            pfn = returnSingleResult(targetSE.getURL(ftsFile.LFN, protocol=self.registrationProtocols))
            if not pfn["OK"]:
                # skip files whose URL cannot be resolved
                continue
            opFile = File()
            opFile.LFN = ftsFile.LFN
            opFile.PFN = pfn["Value"]
            registerOperation.addFile(opFile)
        request.insertBefore(registerOperation, operation)
    return S_OK()
def __deleteSandboxFromExternalBackend(self, SEName, SEPFN):
    """Remove a sandbox held on an external SE, either via a delayed
    PhysicalRemoval request or immediately through the StorageElement."""
    if not self.getCSOption("DelayedExternalDeletion", True):
        gLogger.info("Deleting external Sandbox")
        try:
            return StorageElement(SEName).removeFile(SEPFN)
        except Exception as e:
            gLogger.exception("RM raised an exception while trying to delete a remote sandbox")
            return S_ERROR("RM raised an exception while trying to delete a remote sandbox")

    gLogger.info("Setting deletion request")
    try:
        request = Request()
        request.RequestName = "RemoteSBDeletion:%s|%s:%s" % (SEName, SEPFN, time.time())
        fileToRemove = File()
        fileToRemove.PFN = SEPFN
        physicalRemoval = Operation()
        physicalRemoval.Type = "PhysicalRemoval"
        physicalRemoval.TargetSE = SEName
        physicalRemoval.addFile(fileToRemove)
        request.addOperation(physicalRemoval)
        return ReqClient().putRequest(request)
    except Exception as e:
        gLogger.exception("Exception while setting deletion request")
        return S_ERROR("Cannot set deletion request: %s" % str(e))
def myRequest():
    """Create a request and put it to the db"""
    request = Request()
    request.RequestName = 'myAwesomeRemovalRequest.xml'
    request.JobID = 0
    request.SourceComponent = "myScript"

    removal = Operation()
    removal.Type = "RemoveFile"
    fileToRemove = File()
    fileToRemove.LFN = "/ilc/user/s/sailer/test.txt"
    removal.addFile(fileToRemove)
    request.addOperation(removal)

    # Refuse to submit anything that does not pass validation.
    isValid = RequestValidator().validate(request)
    if not isValid['OK']:
        raise RuntimeError("Failover request is not valid: %s" % isValid['Message'])
    print("It is a GOGOGO")
    result = ReqClient().putRequest(request)
    print(result)
def __setFileReplicationRequest(self, lfn, targetSE, fileMetaDict, sourceSE=''):
    """ Sets a ReplicateAndRegister request for one LFN. """
    self.log.info('Setting replication request for %s to %s' % (lfn, targetSE))
    transfer = Operation()
    transfer.Type = "ReplicateAndRegister"
    transfer.TargetSE = targetSE
    if sourceSE:
        transfer.SourceSE = sourceSE

    trFile = File()
    trFile.LFN = lfn
    # Only set metadata fields that are actually known.
    checksum = fileMetaDict.get("Checksum", None)
    checksumType = fileMetaDict.get("ChecksumType", None)
    if checksum and checksumType:
        trFile.Checksum = checksum
        trFile.ChecksumType = checksumType
    if fileMetaDict.get("Size", 0):
        trFile.Size = fileMetaDict["Size"]
    if fileMetaDict.get("GUID", ""):
        trFile.GUID = fileMetaDict["GUID"]

    transfer.addFile(trFile)
    self.request.addOperation(transfer)
    return S_OK()
def prepareTransformationTasks( self, transBody, taskDict, owner = '', ownerGroup = '', ownerDN = '' ):
    """ Prepare tasks, given a taskDict, that is created (with some manipulation) by the DB

    Builds one Request (single transfer Operation) per task that has input
    data, and stores it under taskDict[taskID]['TaskObject'].
    """
    # Fall back to the current proxy's credentials when owner/group are not given.
    if ( not owner ) or ( not ownerGroup ):
        res = getProxyInfo( False, False )
        if not res['OK']:
            return res
        proxyInfo = res['Value']
        owner = proxyInfo['username']
        ownerGroup = proxyInfo['group']
    if not ownerDN:
        res = getDNForUsername( owner )
        if not res['OK']:
            return res
        ownerDN = res['Value'][0]
    # transBody may encode "<requestType>;<operationType>".
    requestOperation = 'ReplicateAndRegister'
    if transBody:
        try:
            _requestType, requestOperation = transBody.split( ';' )
        except AttributeError:
            # transBody is not a string; keep the default operation type.
            pass
    for taskID in sorted( taskDict ):
        paramDict = taskDict[taskID]
        if paramDict['InputData']:
            transID = paramDict['TransformationID']
            oRequest = Request()
            transfer = Operation()
            transfer.Type = requestOperation
            transfer.TargetSE = paramDict['TargetSE']
            # Input data is either a list of LFNs or a ';'-joined string.
            if isinstance( paramDict['InputData'], list ):
                files = paramDict['InputData']
            elif isinstance( paramDict['InputData'], basestring ):
                files = paramDict['InputData'].split( ';' )
            for lfn in files:
                trFile = File()
                trFile.LFN = lfn
                transfer.addFile( trFile )
            oRequest.addOperation( transfer )
            oRequest.RequestName = _requestName( transID, taskID )
            oRequest.OwnerDN = ownerDN
            oRequest.OwnerGroup = ownerGroup
            # Validate before handing the request object back in the task dict.
            isValid = self.requestValidator.validate( oRequest )
            if not isValid['OK']:
                return isValid
            taskDict[taskID]['TaskObject'] = oRequest
    return S_OK( taskDict )
def _sendToFailover(rpcStub):
    """ Create a ForwardDISET operation for failover """
    failoverRequest = Request()
    # Unique-ish name built from the current time and a random number.
    failoverRequest.RequestName = "Accounting.DataStore.%s.%s" % (time.time(), random.random())
    forwardDISETOp = Operation()
    forwardDISETOp.Type = "ForwardDISET"
    forwardDISETOp.Arguments = DEncode.encode(rpcStub)
    failoverRequest.addOperation(forwardDISETOp)
    return ReqClient().putRequest(failoverRequest)
def generateForwardDISET(self):
    """ Commit the accumulated records and generate request eventually """
    result = self.commit()
    if result['OK']:
        # Nothing to fail over.
        return S_OK(None)
    # Commit failed: generate a SetFileStatus operation carrying the payload.
    commitOp = Operation()
    commitOp.Type = 'SetFileStatus'
    commitOp.Arguments = DEncode.encode({'transformation': self.transformation,
                                         'statusDict': self.statusDict,
                                         'force': self.force})
    return S_OK(commitOp)
def _multiOperationsBody(self, transJson, taskDict, ownerDN, ownerGroup):
    """ deal with a Request that has multiple operations

    :param transJson: list of lists of string and dictionaries, e.g.:

      .. code :: python

        body = [ ( "ReplicateAndRegister", { "SourceSE":"FOO-SRM", "TargetSE":"BAR-SRM" }),
                 ( "RemoveReplica", { "TargetSE":"FOO-SRM" } ),
               ]

    :param dict taskDict: dictionary of tasks, modified in this function
    :param str ownerDN: certificate DN used for the requests
    :param str onwerGroup: dirac group used for the requests

    :returns: None
    """
    failedTasks = []
    # NOTE(review): popping during iteration over .items() is safe on py2
    # (list copy); confirm before migrating this loop to py3.
    for taskID, task in taskDict.items():
        transID = task['TransformationID']
        if not task.get('InputData'):
            # A task without input data cannot produce any operation file.
            self._logError("Error creating request for task", "%s, No input data" % taskID, transID=transID)
            taskDict.pop(taskID)
            continue
        files = []
        oRequest = Request()
        # Input data is either a list of LFNs or a ';'-joined string.
        if isinstance(task['InputData'], list):
            files = task['InputData']
        elif isinstance(task['InputData'], basestring):
            files = task['InputData'].split(';')
        # create the operations from the json structure
        for operationTuple in transJson:
            op = Operation()
            op.Type = operationTuple[0]
            # Every key of the parameter dict becomes an Operation attribute
            # (e.g. TargetSE, SourceSE).
            for parameter, value in operationTuple[1].iteritems():
                setattr(op, parameter, value)
            for lfn in files:
                opFile = File()
                opFile.LFN = lfn
                op.addFile(opFile)
            oRequest.addOperation(op)
        result = self._assignRequestToTask(oRequest, taskDict, transID, taskID, ownerDN, ownerGroup)
        if not result['OK']:
            failedTasks.append(taskID)
    # Remove failed tasks
    for taskID in failedTasks:
        taskDict.pop(taskID)
def addRemovalRequests(self, lfnList):
    """Create removalRequests for lfns in lfnList and add it to the common request"""
    request = self._getRequestContainer()
    removal = Operation()
    removal.Type = "RemoveFile"
    for lfn in lfnList:
        fileToRemove = File()
        fileToRemove.LFN = lfn
        removal.addFile(fileToRemove)
    request.addOperation(removal)
    self.workflow_commons['Request'] = request
def generateForwardDISET(self):
    """ Commit the accumulated records and generate request eventually """
    result = self.commit()
    forwardDISETOp = None
    if not result['OK'] and 'FailedResults' in result:
        # Wrap each failed RPC stub; the last one wins, as before.
        for res in result['FailedResults']:
            if 'rpcStub' not in res:
                continue
            forwardDISETOp = Operation()
            forwardDISETOp.Type = "ForwardDISET"
            forwardDISETOp.Arguments = DEncode.encode(res['rpcStub'])
    return S_OK(forwardDISETOp)
def __setFileRemovalRequest(self, lfn, se='', pfn=''):
    """ Sets a removal request for a file including all replicas. """
    removal = Operation()
    removal.Type = "RemoveFile"
    if se:
        removal.TargetSE = se
    fileToRemove = File()
    fileToRemove.LFN = lfn
    if pfn:
        fileToRemove.PFN = pfn
    removal.addFile(fileToRemove)
    self.request.addOperation(removal)
    return S_OK()
def generateForwardDISET(self):
    """Commit the accumulated records and generate request eventually.

    :returns: S_OK( Operation or None ), or S_ERROR when the failed commit
              carries no 'rpcStub' to forward
    """
    result = self.commit()
    forwardDISETOp = None
    if not result['OK']:
        # Generate Request.  Membership test with 'in' replaces the
        # dict.has_key() call, which was removed in Python 3.
        if 'rpcStub' in result:
            forwardDISETOp = Operation()
            forwardDISETOp.Type = "ForwardDISET"
            forwardDISETOp.Arguments = DEncode.encode(result['rpcStub'])
        else:
            return S_ERROR('Could not create ForwardDISET operation')
    return S_OK(forwardDISETOp)
def test03sql(self):
    """sql insert or update

    toSQL() must raise AttributeError ("RequestID not set") when the
    operation has no parent request.
    """
    operation = Operation()
    operation.Type = "ReplicateAndRegister"
    request = Request()
    request.RequestName = "testRequest"
    request.RequestID = 1
    # # no parent request set
    try:
        operation.toSQL()
    except Exception as error:  # 'as' form replaces py2-only "except Exception, error"
        self.assertEqual(isinstance(error, AttributeError), True, "wrong exc raised")
        self.assertEqual(str(error), "RequestID not set", "wrong exc reason")
def _sendToFailover(rpcStub):
    """ Create a ForwardDISET operation for failover """
    try:
        failoverRequest = Request()
        failoverRequest.RequestName = "Accounting.DataStore.%s.%s" % (time.time(), random.random())
        forwardDISETOp = Operation()
        forwardDISETOp.Type = "ForwardDISET"
        forwardDISETOp.Arguments = DEncode.encode(rpcStub)
        failoverRequest.addOperation(forwardDISETOp)
        return ReqClient().putRequest(failoverRequest)
    # We catch all the exceptions, because it should never crash
    except Exception as e:  # pylint: disable=broad-except
        return S_ERROR(ERMSUKN, "Exception sending accounting failover request: %s" % repr(e))
def _singleOperationsBody(self, transBody, taskDict, ownerDN, ownerGroup):
    """ deal with a Request that has just one operation, as it was sofar

    :param transBody: string, can be an empty string
    :param dict taskDict: dictionary of tasks, modified in this function
    :param str ownerDN: certificate DN used for the requests
    :param str onwerGroup: dirac group used for the requests
    :returns: None
    """
    requestOperation = 'ReplicateAndRegister'
    if transBody:
        try:
            _requestType, requestOperation = transBody.split(';')
        except AttributeError:
            # transBody is not a string; keep the default
            pass
    failedTasks = []
    for taskID, task in taskDict.iteritems():
        transID = task['TransformationID']
        transferOp = Operation()
        transferOp.Type = requestOperation
        transferOp.TargetSE = task['TargetSE']
        # If there are input files
        inputData = task.get('InputData')
        if inputData:
            if isinstance(inputData, list):
                lfns = inputData
            elif isinstance(inputData, basestring):
                lfns = inputData.split(';')
            for lfn in lfns:
                opFile = File()
                opFile.LFN = lfn
                transferOp.addFile(opFile)
        oRequest = Request()
        oRequest.addOperation(transferOp)
        result = self._assignRequestToTask(oRequest, taskDict, transID, taskID, ownerDN, ownerGroup)
        if not result['OK']:
            failedTasks.append(taskID)
    # Remove failed tasks (only after the iteration is over)
    for taskID in failedTasks:
        taskDict.pop(taskID)
def __setRemovalRequest(self, lfn, ownerDN, ownerGroup):
    """ Set removal request with the given credentials """
    oRequest = Request()
    oRequest.OwnerDN = ownerDN
    oRequest.OwnerGroup = ownerGroup
    oRequest.RequestName = os.path.basename(lfn).strip() + '_removal_request.xml'
    oRequest.SourceComponent = 'JobCleaningAgent'

    removalOp = Operation()
    removalOp.Type = 'RemoveFile'
    fileToRemove = File()
    fileToRemove.LFN = lfn
    removalOp.addFile(fileToRemove)
    oRequest.addOperation(removalOp)
    return ReqClient().putRequest(oRequest)
def generateForwardDISET(self):
    """Generate and return failover requests for the operations in the internal cache"""
    forwardDISETOp = None
    result = self.sendStoredStatusInfo()
    if not result["OK"]:
        gLogger.error("Error while sending the job status", result["Message"])
        if "rpcStub" not in result:
            return S_ERROR("Could not create ForwardDISET operation")
        forwardDISETOp = Operation()
        forwardDISETOp.Type = "ForwardDISET"
        forwardDISETOp.Arguments = DEncode.encode(result["rpcStub"])
    return S_OK(forwardDISETOp)
def generateForwardDISET(self):
    """ Generate and return failover requests for the operations in the internal cache """
    forwardDISETOp = None
    result = self.sendStoredStatusInfo()
    if not result['OK']:
        if 'rpcStub' not in result:
            return S_ERROR('Could not create ForwardDISET operation')
        forwardDISETOp = Operation()
        forwardDISETOp.Type = "ForwardDISET"
        forwardDISETOp.Arguments = DEncode.encode(result['rpcStub'])
    return S_OK(forwardDISETOp)
def generateForwardDISET(self):
    """Generate and return failover requests for the operations in the internal cache.

    Fix: the original assigned ``forwardDISETOp`` but returned the misspelled
    ``fowardDISETOp``, so the created operation was silently dropped and
    ``S_OK(None)`` was always returned.
    """
    forwardDISETOp = None
    result = self.sendStoredStatusInfo()
    if not result["OK"]:
        if "rpcStub" in result:
            rpcStub = result["rpcStub"]
            forwardDISETOp = Operation()
            forwardDISETOp.Type = "ForwardDISET"
            forwardDISETOp.Arguments = DEncode.encode(rpcStub)
        else:
            return S_ERROR("Could not create job parameters sub-request")
    return S_OK(forwardDISETOp)
def __deleteSandboxFromExternalBackend(self, SEName, SEPFN):
    """Queue a delayed PhysicalRemoval request for an externally stored sandbox.

    :param str SEName: storage element holding the sandbox
    :param str SEPFN: physical file name of the sandbox on that SE
    """
    if self.getCSOption("DelayedExternalDeletion", True):
        gLogger.info("Setting deletion request")
        try:
            request = Request()
            request.RequestName = "RemoteSBDeletion:%s|%s:%s" % (
                SEName, SEPFN, time.time())
            physicalRemoval = Operation()
            physicalRemoval.Type = "PhysicalRemoval"
            physicalRemoval.TargetSE = SEName
            fileToRemove = File()
            fileToRemove.PFN = SEPFN
            physicalRemoval.addFile(fileToRemove)
            request.addOperation(physicalRemoval)
            return ReqClient().putRequest(request)
        except Exception as e:  # 'as' form replaces py2-only "except Exception, e"
            gLogger.exception("Exception while setting deletion request")
            return S_ERROR("Cannot set deletion request: %s" % str(e))
def _sendToFailover(rpcStub):
    """ Create a ForwardDISET operation for failover """
    try:
        req = Request()
        req.RequestName = "Accounting.DataStore.%s.%s" % (time.time(), random.random())
        op = Operation()
        op.Type = "ForwardDISET"
        op.Arguments = DEncode.encode(rpcStub)
        req.addOperation(op)
        return ReqClient().putRequest(req)
    # We catch all the exceptions, because it should never crash
    except Exception as e:  # pylint: disable=broad-except
        return S_ERROR(ERMSUKN, "Exception sending accounting failover request: %s" % repr(e))
def _setRegistrationRequest(self, lfn, targetSE, fileDict, catalog):
    """ Sets a registration request

    :param str lfn: LFN
    :param str targetSE: target SE
    :param list catalog: list (or string) of catalogs to use
    :param dict fileDict: file metadata
    """
    self.log.info('Setting registration request for %s at %s.' % (lfn, targetSE))
    # isinstance instead of "type(catalog) == type([])": idiomatic and also
    # accepts list subclasses; matches the sibling implementations.
    if not isinstance(catalog, list):
        catalog = [catalog]
    for cat in catalog:
        register = Operation()
        register.Type = "RegisterFile"
        register.Catalog = cat
        register.TargetSE = targetSE
        regFile = File()
        regFile.LFN = lfn
        regFile.Checksum = fileDict.get("Checksum", "")
        regFile.ChecksumType = fileDict.get("ChecksumType", self.defaultChecksumType)
        regFile.Size = fileDict.get("Size", 0)
        regFile.GUID = fileDict.get("GUID", "")
        se = StorageElement(targetSE)
        pfn = se.getPfnForLfn(lfn)
        if not pfn["OK"] or lfn not in pfn["Value"]['Successful']:
            self.log.error(
                "unable to get PFN for LFN: %s" %
                pfn.get('Message', pfn.get('Value', {}).get('Failed', {}).get(lfn)))
            return pfn
        regFile.PFN = pfn["Value"]['Successful'][lfn]
        register.addFile(regFile)
        self.request.addOperation(register)
    return S_OK()
def __setRemovalRequest(self, lfn, ownerDN, ownerGroup):
    """ Set removal request with the given credentials """
    removalRequest = Request()
    removalRequest.OwnerDN = ownerDN
    removalRequest.OwnerGroup = ownerGroup
    removalRequest.RequestName = os.path.basename(lfn).strip() + '_removal_request.xml'
    removalRequest.SourceComponent = 'JobCleaningAgent'

    removeFileOp = Operation()
    removeFileOp.Type = 'RemoveFile'
    doomedFile = File()
    doomedFile.LFN = lfn
    removeFileOp.addFile(doomedFile)
    removalRequest.addOperation(removeFileOp)
    return ReqClient().putRequest(removalRequest)
def generateForwardDISET(self):
    """ Generate and return failover requests for the operations in the internal cache

    Fix: the original assigned ``forwardDISETOp`` but returned the misspelled
    ``fowardDISETOp``, so the created operation was silently dropped and
    ``S_OK(None)`` was always returned.
    """
    forwardDISETOp = None
    result = self.sendStoredStatusInfo()
    if not result['OK']:
        if 'rpcStub' in result:
            rpcStub = result['rpcStub']
            forwardDISETOp = Operation()
            forwardDISETOp.Type = "ForwardDISET"
            forwardDISETOp.Arguments = DEncode.encode(rpcStub)
        else:
            return S_ERROR('Could not create ForwardDISET operation')
    return S_OK(forwardDISETOp)
def prepareTransformationTasks(self, transBody, taskDict, owner='', ownerGroup=''):
    """ Prepare tasks, given a taskDict, that is created (with some manipulation) by the DB

    :param str transBody: optional "<requestType>;<operationType>" string
    :param dict taskDict: tasks indexed by task ID; 'TaskObject' is filled in
    :param str owner: owner DN put on the requests
    :param str ownerGroup: owner group put on the requests
    """
    requestOperation = 'ReplicateAndRegister'
    if transBody:
        try:
            _requestType, requestOperation = transBody.split(';')
        except AttributeError:
            # transBody is not a string; keep the default operation type
            pass
    for taskID in sorted(taskDict):
        paramDict = taskDict[taskID]
        if paramDict['InputData']:
            transID = paramDict['TransformationID']
            oRequest = Request()
            transfer = Operation()
            transfer.Type = requestOperation
            transfer.TargetSE = paramDict['TargetSE']
            # isinstance replaces "type(x) == type(...)" checks; basestring
            # also covers unicode input, matching the sibling implementations.
            if isinstance(paramDict['InputData'], list):
                files = paramDict['InputData']
            elif isinstance(paramDict['InputData'], basestring):
                files = paramDict['InputData'].split(';')
            for lfn in files:
                trFile = File()
                trFile.LFN = lfn
                transfer.addFile(trFile)
            oRequest.addOperation(transfer)
            oRequest.RequestName = str(transID).zfill(8) + '_' + str(taskID).zfill(8)
            oRequest.OwnerDN = owner
            oRequest.OwnerGroup = ownerGroup
            isValid = gRequestValidator.validate(oRequest)
            if not isValid['OK']:
                return isValid
            taskDict[taskID]['TaskObject'] = oRequest
    return S_OK(taskDict)
def _singleOperationsBody(self, transBody, taskDict, ownerDN, ownerGroup):
    """ deal with a Request that has just one operation, as it was sofar

    :param transBody: string, can be an empty string
    :param dict taskDict: dictionary of tasks, modified in this function
    :param str ownerDN: certificate DN used for the requests
    :param str onwerGroup: dirac group used for the requests
    :returns: None
    """
    requestOperation = 'ReplicateAndRegister'
    if transBody:
        try:
            _requestType, requestOperation = transBody.split(';')
        except AttributeError:
            pass
    # Do not remove sorted, we might pop elements in the loop
    for taskID in sorted(taskDict):
        paramDict = taskDict[taskID]
        transID = paramDict['TransformationID']
        transferOp = Operation()
        transferOp.Type = requestOperation
        transferOp.TargetSE = paramDict['TargetSE']
        # Attach input files, if any.
        inputData = paramDict.get('InputData')
        if inputData:
            if isinstance(inputData, list):
                lfns = inputData
            elif isinstance(inputData, basestring):
                lfns = inputData.split(';')
            for lfn in lfns:
                opFile = File()
                opFile.LFN = lfn
                transferOp.addFile(opFile)
        taskRequest = Request()
        taskRequest.addOperation(transferOp)
        self._assignRequestToTask(taskRequest, taskDict, transID, taskID, ownerDN, ownerGroup)
def _setRegistrationRequest(self, lfn, targetSE, fileDict, catalog):
    """Sets a registration request

    :param str lfn: LFN
    :param str targetSE: target SE
    :param list catalog: list (or string) of catalogs to use
    :param dict fileDict: file metadata
    """
    self.log.info("Setting registration request", "for %s at %s." % (lfn, targetSE))
    catalogs = catalog if isinstance(catalog, list) else [catalog]
    for cat in catalogs:
        regFile = File()
        regFile.LFN = lfn
        regFile.Checksum = fileDict.get("Checksum", "")
        regFile.ChecksumType = fileDict.get("ChecksumType", self.defaultChecksumType)
        regFile.Size = fileDict.get("Size", 0)
        regFile.GUID = fileDict.get("GUID", "")

        res = returnSingleResult(StorageElement(targetSE).getURL(lfn, self.registrationProtocols))
        if not res["OK"]:
            self.log.error("Unable to get PFN for LFN", res["Message"])
            return res
        regFile.PFN = res["Value"]

        register = Operation()
        register.Type = "RegisterFile"
        register.Catalog = cat
        register.TargetSE = targetSE
        register.addFile(regFile)
        self.request.addOperation(register)
    return S_OK()
def __setReplicaRemovalRequest(self, lfn, se):
    """ Sets a removal request for a replica.

    :param str lfn: LFN
    :param se: SE name or comma-separated list of SE names
    """
    # isinstance instead of "type(se) == str" (idiomatic, also accepts
    # str subclasses; matches the sibling implementation).
    if isinstance(se, str):
        # normalise "A, B ,C" -> "A,B,C", dropping empty entries
        se = ",".join([se.strip() for se in se.split(",") if se.strip()])
    removeReplica = Operation()
    removeReplica.Type = "RemoveReplica"
    removeReplica.TargetSE = se
    replicaToRemove = File()
    replicaToRemove.LFN = lfn
    removeReplica.addFile(replicaToRemove)
    self.request.addOperation(removeReplica)
    return S_OK()
def getRegisterOperation(self, opFile, targetSE):
    """ add RegisterReplica operation for file

    :param File opFile: operation file
    :param str targetSE: target SE
    """
    # Copy the file metadata onto a fresh File object.
    registerFile = File()
    for attrName in ("LFN", "PFN", "GUID", "Checksum", "ChecksumType", "Size"):
        setattr(registerFile, attrName, getattr(opFile, attrName))

    registerOperation = Operation()
    registerOperation.Type = "RegisterFile"
    registerOperation.TargetSE = targetSE
    registerOperation.addFile(registerFile)
    return registerOperation
def _setReplicaRemovalRequest(self, lfn, se):
    """Sets a removal request for a replica.

    :param str lfn: LFN
    :param se: SE name or comma-separated list of SE names
    """
    if isinstance(se, str):
        # normalise "A, B ,C" -> "A,B,C", dropping empty entries
        se = ",".join(part.strip() for part in se.split(",") if part.strip())

    removalOp = Operation()
    removalOp.Type = "RemoveReplica"
    removalOp.TargetSE = se
    replicaFile = File()
    replicaFile.LFN = lfn
    removalOp.addFile(replicaFile)
    self.request.addOperation(removalOp)
    return S_OK()
def getRegisterOperation(self, opFile, targetSE):
    """ add RegisterReplica operation for file

    :param File opFile: operation file
    :param str targetSE: target SE
    """
    registerOperation = Operation()
    registerOperation.Type = "RegisterFile"
    registerOperation.TargetSE = targetSE

    # Clone the relevant metadata from the source file.
    registerFile = File()
    for fieldName in ["LFN", "PFN", "GUID", "Checksum", "ChecksumType", "Size"]:
        setattr(registerFile, fieldName, getattr(opFile, fieldName))
    registerOperation.addFile(registerFile)
    return registerOperation
def __deleteSandboxFromExternalBackend(self, SEName, SEPFN):
    """Delete a sandbox stored on an external storage backend.

    Depending on the 'DelayedExternalDeletion' CS option, either a
    PhysicalRemoval request is queued on behalf of the sandbox owner, or
    the file is removed immediately through the StorageElement.

    :param str SEName: name of the SE hosting the sandbox
    :param str SEPFN: PFN of the sandbox file on that SE
    """
    if self.getCSOption("DelayedExternalDeletion", True):
        gLogger.info("Setting deletion request")
        try:
            # We need the hostDN used in order to pass these credentials to the
            # SandboxStoreDB..
            hostCertLocation, _ = Locations.getHostCertificateAndKeyLocation()
            hostCert = X509Certificate.X509Certificate()
            hostCert.loadFromFile(hostCertLocation)
            hostDN = hostCert.getSubjectDN().get("Value")
            # use the host authentication to fetch the data
            result = self.sandboxDB.getSandboxOwner(SEName, SEPFN, hostDN, "hosts")
            if not result["OK"]:
                return result
            _owner, ownerDN, ownerGroup = result["Value"]
            request = Request()
            request.RequestName = "RemoteSBDeletion:%s|%s:%s" % (SEName, SEPFN, time.time())
            # the deletion request is owned by the sandbox owner, not the service host
            request.OwnerDN = ownerDN
            request.OwnerGroup = ownerGroup
            physicalRemoval = Operation()
            physicalRemoval.Type = "PhysicalRemoval"
            physicalRemoval.TargetSE = SEName
            fileToRemove = File()
            fileToRemove.PFN = SEPFN
            physicalRemoval.addFile(fileToRemove)
            request.addOperation(physicalRemoval)
            return ReqClient().putRequest(request)
        except Exception as e:
            gLogger.exception("Exception while setting deletion request")
            return S_ERROR("Cannot set deletion request: %s" % str(e))
    else:
        gLogger.info("Deleting external Sandbox")
        try:
            return StorageElement(SEName).removeFile(SEPFN)
        except Exception:
            gLogger.exception("RM raised an exception while trying to delete a remote sandbox")
            return S_ERROR("RM raised an exception while trying to delete a remote sandbox")
def archiveRequestAndOp(listOfLFNs):
    """Return a tuple of the request and operation."""
    switches = {}
    archiveLFN = '/vo/tars/myTar.tar'

    op = Operation()
    op.Type = 'ArchiveFiles'
    op.Arguments = DEncode.encode({'SourceSE': switches.get('SourceSE', 'SOURCE-SE'),
                                   'TarballSE': switches.get('TarballSE', 'TARBALL-SE'),
                                   'RegisterDescendent': False,
                                   'ArchiveLFN': archiveLFN})
    for index, lfn in enumerate(listOfLFNs):
        archFile = File()
        archFile.LFN = lfn
        archFile.Size = index
        archFile.Checksum = '01130a%0d' % index
        archFile.ChecksumType = 'adler32'
        op.addFile(archFile)

    req = Request()
    req.RequestName = 'MyRequest'
    req.addOperation(op)
    return req, op
def __setRemovalRequest(self, lfn, ownerDN, ownerGroup):
    """Set removal request with the given credentials"""
    removalRequest = Request()
    removalRequest.OwnerDN = ownerDN
    removalRequest.OwnerGroup = ownerGroup
    removalRequest.RequestName = os.path.basename(lfn).strip() + "_removal_request.xml"
    removalRequest.SourceComponent = "JobCleaningAgent"

    removeOp = Operation()
    removeOp.Type = "RemoveFile"
    doomedFile = File()
    doomedFile.LFN = lfn
    removeOp.addFile(doomedFile)
    removalRequest.addOperation(removeOp)

    # put the request with the owner certificate to make sure it's still a valid DN
    client = ReqClient(useCertificates=True, delegatedDN=ownerDN, delegatedGroup=ownerGroup)
    return client.putRequest(removalRequest)
def addRegisterReplica(self, opFile, targetSE):
    """ add RegisterReplica operation for file

    :param File opFile: operation file
    :param str targetSE: target SE
    """
    # Copy the file metadata onto a fresh File object.
    registerFile = File()
    for attrName in ("LFN", "PFN", "GUID", "Checksum", "ChecksumType", "Size"):
        setattr(registerFile, attrName, getattr(opFile, attrName))

    registerOperation = Operation()
    registerOperation.Type = "RegisterFile"
    registerOperation.TargetSE = targetSE
    registerOperation.addFile(registerFile)
    # schedule registration right after the current operation
    self.request.insertAfter(registerOperation, self.operation)
    return S_OK()
def addRegisterReplica(self, opFile, targetSE):
    """ add RegisterReplica operation for file

    :param File opFile: operation file
    :param str targetSE: target SE
    """
    registerOperation = Operation()
    registerOperation.Type = "RegisterFile"
    registerOperation.TargetSE = targetSE

    # Clone the relevant metadata from the source file.
    registerFile = File()
    for fieldName in ["LFN", "PFN", "GUID", "Checksum", "ChecksumType", "Size"]:
        setattr(registerFile, fieldName, getattr(opFile, fieldName))
    registerOperation.addFile(registerFile)

    # schedule registration right after the current operation
    self.request.insertAfter(registerOperation, self.operation)
    return S_OK()
def _setRegistrationRequest(self, lfn, targetSE, fileDict, catalog):
    """ Sets a registration request

    :param str lfn: LFN
    :param str targetSE: target SE
    :param list catalog: list (or string) of catalogs to use
    :param dict fileDict: file metadata
    """
    self.log.info('Setting registration request for %s at %s.' % (lfn, targetSE))
    if not isinstance(catalog, list):
        catalog = [catalog]
    for cat in catalog:
        regFile = File()
        regFile.LFN = lfn
        regFile.Checksum = fileDict.get("Checksum", "")
        regFile.ChecksumType = fileDict.get("ChecksumType", self.defaultChecksumType)
        regFile.Size = fileDict.get("Size", 0)
        regFile.GUID = fileDict.get("GUID", "")

        pfn = StorageElement(targetSE).getURL(lfn, self.registrationProtocols)
        if not pfn["OK"] or lfn not in pfn["Value"]['Successful']:
            self.log.error("Unable to get PFN for LFN",
                           "%s" % pfn.get('Message', pfn.get('Value', {}).get('Failed', {}).get(lfn)))
            return pfn
        regFile.PFN = pfn["Value"]['Successful'][lfn]

        register = Operation()
        register.Type = "RegisterFile"
        register.Catalog = cat
        register.TargetSE = targetSE
        register.addFile(regFile)
        self.request.addOperation(register)
    return S_OK()
def __register(self, request, operation, toRegister):
    """ add RegisterReplica operation

    :param Request request: request instance
    :param Operation operation: 'ReplicateAndRegister' operation for this FTSJob
    :param list toRegister: [ FTSDB.FTSFile, ... ] - files that failed to register
    """
    log = self.log.getSubLogger("%s/registerFiles" % request.RequestName)

    # Group files that need registration by their target SE.
    byTarget = {}
    for ftsFile in toRegister:
        byTarget.setdefault(ftsFile.TargetSE, []).append(ftsFile)
    log.info("will create %s 'RegisterReplica' operations" % len(byTarget))

    for target, ftsFileList in byTarget.items():
        log.info("creating 'RegisterReplica' operation for targetSE %s with %s files..."
                 % (target, len(ftsFileList)))
        registerOperation = Operation()
        registerOperation.Type = "RegisterReplica"
        registerOperation.Status = "Waiting"
        registerOperation.TargetSE = target
        targetSE = self.getSE(target)
        for ftsFile in ftsFileList:
            pfn = targetSE.getPfnForProtocol(ftsFile.TargetSURL, "SRM2", withPort=False)
            if not pfn["OK"]:
                # skip files whose PFN cannot be built
                continue
            opFile = File()
            opFile.LFN = ftsFile.LFN
            opFile.PFN = pfn["Value"]
            registerOperation.addFile(opFile)
        request.insertBefore(registerOperation, operation)
    return S_OK()
def getRegisterOperation( self, opFile, targetSE, type = 'RegisterFile', catalog = None ):
  """ Build a registration operation for a single file.

  :param File opFile: operation file
  :param str targetSE: target SE
  :param str type: operation type, 'RegisterFile' by default
  :param catalog: optional catalog name
  :returns: Operation instance ready to be added to a Request
  """
  registerOperation = Operation()
  registerOperation.Type = type
  registerOperation.TargetSE = targetSE
  if catalog:
    registerOperation.Catalog = catalog

  # look up the PFN at the target SE; None when the lookup failed for this LFN
  urlResult = StorageElement( targetSE ).getURL( opFile.LFN, protocol = self.registrationProtocols )
  resolvedPFN = urlResult.get( 'Value', {} ).get( 'Successful', {} ).get( opFile.LFN )

  registerFile = File()
  registerFile.LFN = opFile.LFN
  registerFile.PFN = resolvedPFN
  registerFile.GUID = opFile.GUID
  registerFile.Checksum = opFile.Checksum
  registerFile.ChecksumType = opFile.ChecksumType
  registerFile.Size = opFile.Size

  registerOperation.addFile( registerFile )
  return registerOperation
def _cleanUp(self, final):
    """ Clean up uploaded data for the LFNs in the list """
    lfnList = [metadata['lfn'] for metadata in final.values()]

    self.log.verbose("Cleaning up the request, for LFNs: %s" % ', '.join(lfnList))
    # drop any data-management operation that touches one of these LFNs
    dataOpTypes = ['PutAndRegister', 'ReplicateAndRegister',
                   'RegisterFile', 'RegisterReplica', 'RemoveReplica']
    newRequest = Request()
    for op in self.request:
        touchesLFN = op.Type in dataOpTypes and any(opFile.LFN in lfnList for opFile in op)
        if not touchesLFN:
            newRequest.addOperation(op)
    self.request = newRequest

    self.log.verbose(
        "And adding RemoveFile operation for LFNs: %s, just in case" % ', '.join(lfnList))
    removeFiles = Operation()
    removeFiles.Type = 'RemoveFile'
    for lfn in lfnList:
        removedFile = File()
        removedFile.LFN = lfn
        removeFiles.addFile(removedFile)
    self.request.addOperation(removeFiles)
def _setFileReplicationRequest(self, lfn, targetSE, fileMetaDict, sourceSE=''):
    """ Queue a 'ReplicateAndRegister' operation for one LFN. """
    self.log.info('Setting ReplicateAndRegister request for %s to %s' % (lfn, targetSE))

    transfer = Operation()
    transfer.Type = "ReplicateAndRegister"
    transfer.TargetSE = targetSE
    if sourceSE:
        transfer.SourceSE = sourceSE

    trFile = File()
    trFile.LFN = lfn
    # only attach checksum information when both pieces are known
    checksum = fileMetaDict.get("Checksum", None)
    checksumType = fileMetaDict.get("ChecksumType", self.defaultChecksumType)
    if checksum and checksumType:
        trFile.Checksum = checksum
        trFile.ChecksumType = checksumType
    fileSize = fileMetaDict.get("Size", 0)
    if fileSize:
        trFile.Size = fileSize
    fileGUID = fileMetaDict.get("GUID", "")
    if fileGUID:
        trFile.GUID = fileGUID

    transfer.addFile(trFile)
    self.request.addOperation(transfer)
    return S_OK()
# Fragment: interior of the per-chunk loop of a bulk ReplicateAndRegister
# script; the enclosing loop/function is outside this chunk's view.
if not lfnChunk:
  gLogger.error( "LFN list is empty!!!" )
  error = -1
  continue
# refuse chunks larger than the RMS per-operation file limit
if len( lfnChunk ) > Operation.MAX_FILES:
  gLogger.error( "too many LFNs, max number of files per operation is %s" % Operation.MAX_FILES )
  error = -1
  continue
count += 1
request = Request()
# one request per chunk; suffix with the chunk counter when several are created
request.RequestName = requestName if not multiRequests else '%s_%d' % ( requestName, count )
replicateAndRegister = Operation()
replicateAndRegister.Type = "ReplicateAndRegister"
replicateAndRegister.TargetSE = ",".join( targetSEs )
if catalog is not None:
  replicateAndRegister.Catalog = catalog
for lfn in lfnChunk:
  metaDict = metaDatas["Successful"][lfn]
  opFile = File()
  opFile.LFN = lfn
  opFile.Size = metaDict["Size"]
  if "Checksum" in metaDict:
    # # should check checksum type, now assuming Adler32 (metaDict["ChecksumType"] = 'AD'
    opFile.Checksum = metaDict["Checksum"]
    opFile.ChecksumType = "ADLER32"
  replicateAndRegister.addFile( opFile )
def prepareTransformationTasks(self, transBody, taskDict, owner='', ownerGroup='', ownerDN=''):
    """ Prepare tasks, given a taskDict, that is created (with some manipulation) by the DB

    :param transBody: transformation body; when a string of the form 'type;operation'
                      the operation part overrides the default 'ReplicateAndRegister'
    :param dict taskDict: dictionary of tasks, modified in place ('TaskObject' added)
    :param str owner: owner user name; resolved from the current proxy when empty
    :param str ownerGroup: owner group; resolved from the current proxy when empty
    :param str ownerDN: owner certificate DN; resolved from the user name when empty
    """
    if (not owner) or (not ownerGroup):
        res = getProxyInfo(False, False)
        if not res['OK']:
            return res
        proxyInfo = res['Value']
        owner = proxyInfo['username']
        ownerGroup = proxyInfo['group']
    if not ownerDN:
        res = getDNForUsername(owner)
        if not res['OK']:
            return res
        ownerDN = res['Value'][0]
    requestOperation = 'ReplicateAndRegister'
    if transBody:
        try:
            _requestType, requestOperation = transBody.split(';')
        except AttributeError:
            # transBody is not a string: keep the default operation
            pass
    for taskID in sorted(taskDict):
        paramDict = taskDict[taskID]
        if paramDict['InputData']:
            transID = paramDict['TransformationID']
            oRequest = Request()
            transfer = Operation()
            transfer.Type = requestOperation
            transfer.TargetSE = paramDict['TargetSE']
            # Fix: initialise 'files' so an unexpected InputData type cannot
            # raise a NameError below; it just produces an empty operation,
            # which the validator then rejects explicitly.
            files = []
            if isinstance(paramDict['InputData'], list):
                files = paramDict['InputData']
            elif isinstance(paramDict['InputData'], basestring):
                files = paramDict['InputData'].split(';')
            for lfn in files:
                trFile = File()
                trFile.LFN = lfn
                transfer.addFile(trFile)
            oRequest.addOperation(transfer)
            oRequest.RequestName = _requestName(transID, taskID)
            oRequest.OwnerDN = ownerDN
            oRequest.OwnerGroup = ownerGroup
            isValid = self.requestValidator.validate(oRequest)
            if not isValid['OK']:
                return isValid
            taskDict[taskID]['TaskObject'] = oRequest
    return S_OK(taskDict)
def main():
    # Registering arguments will automatically add their description to the help menu
    Script.registerArgument(" sourceSE: source SE")
    Script.registerArgument(" LFN: LFN or file containing a List of LFNs")
    Script.registerArgument(["targetSE: target SEs"])
    Script.parseCommandLine()

    import DIRAC
    from DIRAC import gLogger

    # parseCommandLine show help when mandatory arguments are not specified or incorrect argument
    args = Script.getPositionalArgs()

    sourceSE = args[0]
    lfnList = getLFNList(args[1])
    # flatten possibly comma-separated target SE arguments and de-duplicate
    targetSEs = list(set([se for targetSE in args[2:] for se in targetSE.split(",")]))

    gLogger.info(
        "Will create request with 'MoveReplica' "
        "operation using %s lfns and %s target SEs" % (len(lfnList), len(targetSEs))
    )

    from DIRAC.RequestManagementSystem.Client.ReqClient import ReqClient
    from DIRAC.RequestManagementSystem.Client.Request import Request
    from DIRAC.RequestManagementSystem.Client.Operation import Operation
    from DIRAC.RequestManagementSystem.Client.File import File
    from DIRAC.Resources.Catalog.FileCatalog import FileCatalog
    from DIRAC.Core.Utilities.List import breakListIntoChunks

    # one request per chunk of at most 100 LFNs
    lfnChunks = breakListIntoChunks(lfnList, 100)
    multiRequests = len(lfnChunks) > 1

    error = 0
    count = 0
    reqClient = ReqClient()
    fc = FileCatalog()
    for lfnChunk in lfnChunks:
        metaDatas = fc.getFileMetadata(lfnChunk)
        if not metaDatas["OK"]:
            gLogger.error("unable to read metadata for lfns: %s" % metaDatas["Message"])
            error = -1
            continue
        metaDatas = metaDatas["Value"]
        for failedLFN, reason in metaDatas["Failed"].items():
            gLogger.error("skipping %s: %s" % (failedLFN, reason))
        # keep only the LFNs whose metadata could be read
        lfnChunk = set(metaDatas["Successful"])
        if not lfnChunk:
            gLogger.error("LFN list is empty!!!")
            error = -1
            continue
        if len(lfnChunk) > Operation.MAX_FILES:
            gLogger.error("too many LFNs, max number of files per operation is %s" % Operation.MAX_FILES)
            error = -1
            continue
        count += 1
        request = Request()
        # time-based pseudo-random request name
        request.RequestName = "%s_%s" % (
            md5(repr(time.time()).encode()).hexdigest()[:16],
            md5(repr(time.time()).encode()).hexdigest()[:16],
        )
        moveReplica = Operation()
        moveReplica.Type = "MoveReplica"
        moveReplica.SourceSE = sourceSE
        moveReplica.TargetSE = ",".join(targetSEs)
        for lfn in lfnChunk:
            metaDict = metaDatas["Successful"][lfn]
            opFile = File()
            opFile.LFN = lfn
            opFile.Size = metaDict["Size"]
            if "Checksum" in metaDict:
                # # should check checksum type, now assuming Adler32 (metaDict["ChecksumType"] = 'AD'
                opFile.Checksum = metaDict["Checksum"]
                opFile.ChecksumType = "ADLER32"
            moveReplica.addFile(opFile)
        request.addOperation(moveReplica)
        result = reqClient.putRequest(request)
        if not result["OK"]:
            gLogger.error("Failed to submit Request: %s" % (result["Message"]))
            error = -1
            continue
        if not multiRequests:
            gLogger.always("Request %d submitted successfully" % result["Value"])
    if multiRequests:
        gLogger.always("%d requests have been submitted" % (count))
    DIRAC.exit(error)
def test_05FTS(self):
    """ FTS state machine

    Drives a request through FTS scheduling states and checks that
    file-level status changes propagate up to the operation and request.
    """
    req = Request()
    req.RequestName = "FTSTest"

    ftsTransfer = Operation()
    ftsTransfer.Type = "ReplicateAndRegister"
    ftsTransfer.TargetSE = "CERN-USER"

    ftsFile = File()
    ftsFile.LFN = "/a/b/c"
    ftsFile.Checksum = "123456"
    ftsFile.ChecksumType = "Adler32"

    ftsTransfer.addFile(ftsFile)
    req.addOperation(ftsTransfer)

    self.assertEqual(req.Status, "Waiting", "1. wrong request status: %s" % req.Status)
    self.assertEqual(ftsTransfer.Status, "Waiting", "1. wrong ftsStatus status: %s" % ftsTransfer.Status)

    # # scheduled
    ftsFile.Status = "Scheduled"

    self.assertEqual(
        ftsTransfer.Status, "Scheduled", "2. wrong status for ftsTransfer: %s" % ftsTransfer.Status)
    self.assertEqual(req.Status, "Scheduled", "2. wrong status for request: %s" % req.Status)

    # # add new operation before FTS
    insertBefore = Operation()
    insertBefore.Type = "RegisterReplica"
    insertBefore.TargetSE = "CERN-USER"
    insertFile = File()
    insertFile.LFN = "/a/b/c"
    insertFile.PFN = "http://foo/bar"
    insertBefore.addFile(insertFile)
    req.insertBefore(insertBefore, ftsTransfer)

    self.assertEqual(
        insertBefore.Status, "Waiting", "3. wrong status for insertBefore: %s" % insertBefore.Status)
    self.assertEqual(
        ftsTransfer.Status, "Scheduled", "3. wrong status for ftsStatus: %s" % ftsTransfer.Status)
    self.assertEqual(req.Status, "Waiting", "3. wrong status for request: %s" % req.Status)

    # # prev done
    insertFile.Status = "Done"

    self.assertEqual(
        insertBefore.Status, "Done", "4. wrong status for insertBefore: %s" % insertBefore.Status)
    self.assertEqual(
        ftsTransfer.Status, "Scheduled", "4. wrong status for ftsStatus: %s" % ftsTransfer.Status)
    self.assertEqual(req.Status, "Scheduled", "4. wrong status for request: %s" % req.Status)

    # # reschedule
    ftsFile.Status = "Waiting"

    self.assertEqual(
        insertBefore.Status, "Done", "5. wrong status for insertBefore: %s" % insertBefore.Status)
    self.assertEqual(
        ftsTransfer.Status, "Waiting", "5. wrong status for ftsStatus: %s" % ftsTransfer.Status)
    self.assertEqual(req.Status, "Waiting", "5. wrong status for request: %s" % req.Status)

    # # fts done
    ftsFile.Status = "Done"

    self.assertEqual(
        insertBefore.Status, "Done", "5. wrong status for insertBefore: %s" % insertBefore.Status)
    self.assertEqual(
        ftsTransfer.Status, "Done", "5. wrong status for ftsStatus: %s" % ftsTransfer.Status)
    self.assertEqual(req.Status, "Done", "5. wrong status for request: %s" % req.Status)
# Fragment: Python 2 script body building ReplicateAndRegister requests
# in chunks of 100 LFNs; surrounding code is outside this chunk's view.
from DIRAC.RequestManagementSystem.Client.Request import Request
from DIRAC.RequestManagementSystem.Client.Operation import Operation
from DIRAC.RequestManagementSystem.Client.File import File
from DIRAC.RequestManagementSystem.private.RequestValidator import RequestValidator
from DIRAC.Resources.Catalog.FileCatalog import FileCatalog

reqClient = ReqClient()
fc = FileCatalog()
for lfnList in breakListIntoChunks( lfns, 100 ):
  oRequest = Request()
  # time-based pseudo-random request name
  oRequest.RequestName = "%s_%s" % ( md5( repr( time.time() ) ).hexdigest()[:16],
                                     md5( repr( time.time() ) ).hexdigest()[:16] )
  replicateAndRegister = Operation()
  replicateAndRegister.Type = 'ReplicateAndRegister'
  replicateAndRegister.TargetSE = targetSE
  res = fc.getFileMetadata( lfnList )
  if not res['OK']:
    print "Can't get file metadata: %s" % res['Message']
    DIRAC.exit( 1 )
  if res['Value']['Failed']:
    print "Could not get the file metadata of the following, so skipping them:"
    for fFile in res['Value']['Failed']:
      print fFile
  lfnMetadata = res['Value']['Successful']
  for lfn in lfnMetadata:
    rarFile = File()
    # NOTE(review): fragment truncated here — per-file attributes and the
    # request submission continue outside this chunk's view.
def prepareTransformationTasks(self, transBody, taskDict, owner='', ownerGroup='', ownerDN=''):
    """ Prepare tasks, given a taskDict, that is created (with some manipulation) by the DB

    :param transBody: transformation body; when a string of the form 'type;operation'
                      the operation part overrides the default 'ReplicateAndRegister'
    :param dict taskDict: dictionary of tasks, modified in place (invalid tasks are
                          popped, valid ones get a 'TaskObject' entry)
    :param str owner: owner user name; resolved from the current proxy when empty
    :param str ownerGroup: owner group; resolved from the current proxy when empty
    :param str ownerDN: owner certificate DN; resolved from the user name when empty
    """
    if not taskDict:
        return S_OK({})

    if (not owner) or (not ownerGroup):
        res = getProxyInfo(False, False)
        if not res['OK']:
            return res
        proxyInfo = res['Value']
        owner = proxyInfo['username']
        ownerGroup = proxyInfo['group']
    if not ownerDN:
        res = getDNForUsername(owner)
        if not res['OK']:
            return res
        ownerDN = res['Value'][0]
    requestOperation = 'ReplicateAndRegister'
    if transBody:
        try:
            _requestType, requestOperation = transBody.split(';')
        except AttributeError:
            # transBody is not a string: keep the default operation
            pass

    # Do not remove sorted, we might pop elements in the loop
    for taskID in sorted(taskDict):
        paramDict = taskDict[taskID]

        transID = paramDict['TransformationID']

        oRequest = Request()
        transfer = Operation()
        transfer.Type = requestOperation
        transfer.TargetSE = paramDict['TargetSE']

        # If there are input files
        if paramDict['InputData']:
            # Fix: initialise 'files' so an unexpected InputData type cannot
            # raise a NameError below; it just produces an empty operation,
            # which validation then rejects for this task only.
            files = []
            if isinstance(paramDict['InputData'], list):
                files = paramDict['InputData']
            elif isinstance(paramDict['InputData'], basestring):
                files = paramDict['InputData'].split(';')
            for lfn in files:
                trFile = File()
                trFile.LFN = lfn
                transfer.addFile(trFile)

        oRequest.addOperation(transfer)
        oRequest.RequestName = _requestName(transID, taskID)
        oRequest.OwnerDN = ownerDN
        oRequest.OwnerGroup = ownerGroup

        isValid = self.requestValidator.validate(oRequest)
        if not isValid['OK']:
            self.log.error("Error creating request for task", "%s %s" % (taskID, isValid))
            # This works because we loop over a copy of the keys !
            taskDict.pop(taskID)
            continue

        taskDict[taskID]['TaskObject'] = oRequest

    return S_OK(taskDict)
def main():
    # Registering arguments will automatically add their description to the help menu
    Script.registerArgument(" SE: StorageElement|All")
    Script.registerArgument(["LFN: LFN or file containing a List of LFNs"])
    Script.parseCommandLine(ignoreErrors=False)

    # parseCommandLine show help when mandatory arguments are not specified or incorrect argument
    args = Script.getPositionalArgs()

    targetSE = args.pop(0)

    lfns = []
    for inputFileName in args:
        # each positional argument is either a file with LFNs (one per line) or a single LFN
        if os.path.exists(inputFileName):
            with open(inputFileName, "r") as inputFile:
                string = inputFile.read()
            lfns.extend([lfn.strip() for lfn in string.splitlines()])
        else:
            lfns.append(inputFileName)

    from DIRAC.Resources.Storage.StorageElement import StorageElement
    import DIRAC

    # Check is provided SE is OK
    if targetSE != "All":
        se = StorageElement(targetSE)
        if not se.valid:
            print(se.errorReason)
            print()
            Script.showHelp()

    from DIRAC.RequestManagementSystem.Client.Request import Request
    from DIRAC.RequestManagementSystem.Client.Operation import Operation
    from DIRAC.RequestManagementSystem.Client.File import File
    from DIRAC.RequestManagementSystem.Client.ReqClient import ReqClient
    from DIRAC.RequestManagementSystem.private.RequestValidator import RequestValidator
    from DIRAC.Resources.Catalog.FileCatalog import FileCatalog

    reqClient = ReqClient()
    fc = FileCatalog()
    # 'All' removes the file everywhere; otherwise only the replica at targetSE
    requestOperation = "RemoveReplica"
    if targetSE == "All":
        requestOperation = "RemoveFile"

    # one request per chunk of at most 100 LFNs
    for lfnList in breakListIntoChunks(lfns, 100):
        oRequest = Request()
        # time-based pseudo-random request name
        requestName = "%s_%s" % (
            md5(repr(time.time()).encode()).hexdigest()[:16],
            md5(repr(time.time()).encode()).hexdigest()[:16],
        )
        oRequest.RequestName = requestName

        oOperation = Operation()
        oOperation.Type = requestOperation
        oOperation.TargetSE = targetSE

        res = fc.getFileMetadata(lfnList)
        if not res["OK"]:
            print("Can't get file metadata: %s" % res["Message"])
            DIRAC.exit(1)
        if res["Value"]["Failed"]:
            print(
                "Could not get the file metadata of the following, so skipping them:"
            )
            for fFile in res["Value"]["Failed"]:
                print(fFile)

        lfnMetadata = res["Value"]["Successful"]

        for lfn in lfnMetadata:
            rarFile = File()
            rarFile.LFN = lfn
            rarFile.Size = lfnMetadata[lfn]["Size"]
            rarFile.Checksum = lfnMetadata[lfn]["Checksum"]
            rarFile.GUID = lfnMetadata[lfn]["GUID"]
            rarFile.ChecksumType = "ADLER32"
            oOperation.addFile(rarFile)

        oRequest.addOperation(oOperation)

        isValid = RequestValidator().validate(oRequest)
        if not isValid["OK"]:
            print("Request is not valid: ", isValid["Message"])
            DIRAC.exit(1)

        result = reqClient.putRequest(oRequest)
        if result["OK"]:
            print("Request %d Submitted" % result["Value"])
        else:
            print("Failed to submit Request: ", result["Message"])
# Fragment: Python 2 script body building removal requests in chunks of 100
# LFNs; surrounding code is outside this chunk's view.
reqClient = ReqClient()
fc = FileCatalog()
# 'All' removes the file everywhere; otherwise only the replica at targetSE
requestOperation = 'RemoveReplica'
if targetSE == 'All':
  requestOperation = 'RemoveFile'
for lfnList in breakListIntoChunks( lfns, 100 ):
  oRequest = Request()
  # time-based pseudo-random request name
  requestName = "%s_%s" % ( md5( repr( time.time() ) ).hexdigest()[:16],
                            md5( repr( time.time() ) ).hexdigest()[:16] )
  oRequest.RequestName = requestName
  oOperation = Operation()
  oOperation.Type = requestOperation
  oOperation.TargetSE = targetSE
  res = fc.getFileMetadata( lfnList )
  if not res['OK']:
    print "Can't get file metadata: %s" % res['Message']
    DIRAC.exit( 1 )
  if res['Value']['Failed']:
    print "Could not get the file metadata of the following, so skipping them:"
    for fFile in res['Value']['Failed']:
      print fFile
  lfnMetadata = res['Value']['Successful']
  for lfn in lfnMetadata:
    rarFile = File()
    # NOTE(review): fragment truncated here — per-file attributes and the
    # request submission continue outside this chunk's view.
def test_04Operations(self):
    """ operations arithmetic and state machine

    Checks operation ordering, insertBefore, the Waiting/Queued/Done
    transitions, and getWaiting() selection.
    """
    req = Request()
    self.assertEqual(len(req), 0)

    # # a transfer operation with one waiting file
    transfer = Operation()
    transfer.Type = "ReplicateAndRegister"
    transfer.addFile(File({"LFN": "/a/b/c", "Status": "Waiting"}))

    # no waiting operation before anything is added
    getWaiting = req.getWaiting()
    self.assertEqual(getWaiting["OK"], True)
    self.assertEqual(getWaiting["Value"], None)

    req.addOperation(transfer)
    self.assertEqual(len(req), 1)
    self.assertEqual(transfer.Order, req.Order)
    self.assertEqual(transfer.Status, "Waiting")

    getWaiting = req.getWaiting()
    self.assertEqual(getWaiting["OK"], True)
    self.assertEqual(getWaiting["Value"], transfer)

    # # insert a removal operation before the transfer
    removal = Operation({"Type": "RemoveFile"})
    removal.addFile(File({"LFN": "/a/b/c", "Status": "Waiting"}))

    req.insertBefore(removal, transfer)

    getWaiting = req.getWaiting()
    self.assertEqual(getWaiting["OK"], True)
    self.assertEqual(getWaiting["Value"], removal)

    self.assertEqual(len(req), 2)
    self.assertEqual([op.Status for op in req], ["Waiting", "Queued"])
    self.assertEqual(req.subStatusList(), ["Waiting", "Queued"])

    self.assertEqual(removal.Order, 0)
    self.assertEqual(removal.Order, req.Order)
    self.assertEqual(transfer.Order, 1)
    self.assertEqual(removal.Status, "Waiting")
    self.assertEqual(transfer.Status, "Queued")

    # # complete the removal: its files done, then the operation itself
    for subFile in removal:
        subFile.Status = "Done"
    removal.Status = "Done"

    self.assertEqual(removal.Status, "Done")
    self.assertEqual(transfer.Status, "Waiting")
    self.assertEqual(transfer.Order, req.Order)

    # # len, looping
    self.assertEqual(len(req), 2)
    self.assertEqual([op.Status for op in req], ["Done", "Waiting"])
    self.assertEqual(req.subStatusList(), ["Done", "Waiting"])

    digest = req.toJSON()
    self.assertEqual(digest["OK"], True)

    getWaiting = req.getWaiting()
    self.assertEqual(getWaiting["OK"], True)
    self.assertEqual(getWaiting["Value"], transfer)
def createRequest(self, requestName, archiveLFN, lfnChunk):
    """Create the Request."""
    request = Request()
    request.RequestName = requestName

    self._checkReplicaSites(request, lfnChunk)

    # Archive the LFNs into a tarball at archiveLFN
    archiveOp = Operation()
    archiveOp.Type = "ArchiveFiles"
    archiveOp.Arguments = DEncode.encode({
        "SourceSE": self.sourceSEs[0],
        "TarballSE": self.switches["TarballSE"],
        "RegisterDescendent": self.switches["RegisterDescendent"],
        "ArchiveLFN": archiveLFN,
    })
    self.addLFNs(archiveOp, lfnChunk)
    request.addOperation(archiveOp)

    # Replicate the Tarball, ArchiveFiles will upload it
    if self.switches.get("ReplicateTarball"):
        tarballReplication = Operation()
        tarballReplication.Type = "ReplicateAndRegister"
        tarballReplication.TargetSE = self.targetSE
        tarballToReplicate = File()
        tarballToReplicate.LFN = archiveLFN
        tarballReplication.addFile(tarballToReplicate)
        request.addOperation(tarballReplication)

    if self.switches.get("CheckMigration"):
        tarballMigration = Operation()
        tarballMigration.Type = "CheckMigration"
        # when the tarball was replicated, check migration at its destination
        if self.switches.get("ReplicateTarball"):
            migrationTarget = self.targetSE
        else:
            migrationTarget = self.switches["TarballSE"]
        tarballMigration.TargetSE = migrationTarget
        tarballToCheck = File()
        tarballToCheck.LFN = archiveLFN
        tarballMigration.addFile(tarballToCheck)
        request.addOperation(tarballMigration)

    # Register Archive Replica for LFNs
    if self.switches.get("ArchiveSE"):
        registerArchived = Operation()
        registerArchived.Type = "RegisterReplica"
        registerArchived.TargetSE = self.switches.get("ArchiveSE")
        self.addLFNs(registerArchived, lfnChunk, addPFN=True)
        request.addOperation(registerArchived)

    # Remove all Other Replicas for LFNs
    if self.switches.get("RemoveReplicas"):
        replicaRemoval = Operation()
        replicaRemoval.Type = "RemoveReplica"
        replicaRemoval.TargetSE = ",".join(self.replicaSEs)
        self.addLFNs(replicaRemoval, lfnChunk)
        request.addOperation(replicaRemoval)

    # Remove all Replicas for LFNs
    if self.switches.get("RemoveFiles"):
        fileRemoval = Operation()
        fileRemoval.Type = "RemoveFile"
        self.addLFNs(fileRemoval, lfnChunk)
        request.addOperation(fileRemoval)

    # Remove Original tarball replica
    if self.switches.get("ReplicateTarball"):
        tarballRemoval = Operation()
        tarballRemoval.Type = "RemoveReplica"
        tarballRemoval.TargetSE = self.sourceSEs[0]
        originalTarball = File()
        originalTarball.LFN = archiveLFN
        tarballRemoval.addFile(originalTarball)
        request.addOperation(tarballRemoval)

    return request
  # Fragment: the 'try:' opening this block is outside this chunk's view.
  operation.toSQL()
# Python 2 exception syntax; toSQL without a parent RequestID must raise
except Exception, error:
  self.assertEqual(isinstance(error, AttributeError), True, "wrong exc raised")
  self.assertEqual(str(error), "RequestID not set", "wrong exc reason")

# # parent set, no OperationID, INSERT
request.addOperation(operation)
toSQL = operation.toSQL()
self.assertEqual(toSQL["OK"], True, "toSQL error")
self.assertEqual(toSQL["Value"].startswith("INSERT"), True,
                 "OperationID not set, but SQL start with UPDATE")

op2 = Operation()
op2.Type = "RemoveReplica"
request.insertBefore(op2, operation)

# # OperationID set = UPDATE
operation.OperationID = 1
toSQL = operation.toSQL()
self.assertEqual(toSQL["OK"], True, "toSQL error")
self.assertEqual(toSQL["Value"].startswith("UPDATE"), True,
                 "OperationID set, but SQL starts with INSERT")

def test04StateMachine(self):
  """ state machine """
  op = Operation()
  # a fresh operation starts Queued
  self.assertEqual(op.Status, "Queued", "1. wrong status %s" % op.Status)
  # NOTE(review): this test continues outside this chunk's view.
def createRequest(self, requestName, archiveLFN, lfnChunk):
    """Create the Request.

    Builds the chain of operations archiving lfnChunk into a tarball at
    archiveLFN; replication, migration check and cleanup steps are driven
    by self.switches.
    """
    request = Request()
    request.RequestName = requestName

    self._checkReplicaSites(request, lfnChunk)

    # archive the LFNs into a tarball at archiveLFN
    archiveFiles = Operation()
    archiveFiles.Type = 'ArchiveFiles'
    archiveFiles.Arguments = DEncode.encode({'SourceSE': self.sourceSEs[0],
                                             'TarballSE': self.switches['TarballSE'],
                                             'RegisterDescendent': self.switches['RegisterDescendent'],
                                             'ArchiveLFN': archiveLFN})
    self.addLFNs(archiveFiles, lfnChunk)
    request.addOperation(archiveFiles)

    # Replicate the Tarball, ArchiveFiles will upload it
    if self.switches.get('ReplicateTarball'):
        replicateAndRegisterTarBall = Operation()
        replicateAndRegisterTarBall.Type = 'ReplicateAndRegister'
        replicateAndRegisterTarBall.TargetSE = self.targetSE
        opFile = File()
        opFile.LFN = archiveLFN
        replicateAndRegisterTarBall.addFile(opFile)
        request.addOperation(replicateAndRegisterTarBall)

    # check the tarball migration at its final location
    if self.switches.get('CheckMigration'):
        checkMigrationTarBall = Operation()
        checkMigrationTarBall.Type = 'CheckMigration'
        migrationTarget = self.targetSE if self.switches.get('ReplicateTarball') else self.switches['TarballSE']
        checkMigrationTarBall.TargetSE = migrationTarget
        opFile = File()
        opFile.LFN = archiveLFN
        checkMigrationTarBall.addFile(opFile)
        request.addOperation(checkMigrationTarBall)

    # Register Archive Replica for LFNs
    if self.switches.get('ArchiveSE'):
        registerArchived = Operation()
        registerArchived.Type = 'RegisterReplica'
        registerArchived.TargetSE = self.switches.get('ArchiveSE')
        self.addLFNs(registerArchived, lfnChunk, addPFN=True)
        request.addOperation(registerArchived)

    # Remove all Other Replicas for LFNs
    if self.switches.get('RemoveReplicas'):
        removeArchiveReplicas = Operation()
        removeArchiveReplicas.Type = 'RemoveReplica'
        removeArchiveReplicas.TargetSE = ','.join(self.replicaSEs)
        self.addLFNs(removeArchiveReplicas, lfnChunk)
        request.addOperation(removeArchiveReplicas)

    # Remove all Replicas for LFNs
    if self.switches.get('RemoveFiles'):
        removeArchiveFiles = Operation()
        removeArchiveFiles.Type = 'RemoveFile'
        self.addLFNs(removeArchiveFiles, lfnChunk)
        request.addOperation(removeArchiveFiles)

    # Remove Original tarball replica
    if self.switches.get('ReplicateTarball'):
        removeTarballOrg = Operation()
        removeTarballOrg.Type = 'RemoveReplica'
        removeTarballOrg.TargetSE = self.sourceSEs[0]
        opFile = File()
        opFile.LFN = archiveLFN
        removeTarballOrg.addFile(opFile)
        request.addOperation(removeTarballOrg)

    return request
# Fragment: Python 2 script body (ReplicaManager era) building
# ReplicateAndRegister requests in chunks of 100 LFNs.
from DIRAC.RequestManagementSystem.Client.Operation import Operation
from DIRAC.RequestManagementSystem.Client.File import File
from DIRAC.RequestManagementSystem.private.RequestValidator import gRequestValidator
from DIRAC.DataManagementSystem.Client.ReplicaManager import ReplicaManager

reqClient = ReqClient()
rm = ReplicaManager()
for lfnList in breakListIntoChunks(lfns, 100):
  oRequest = Request()
  # time-based pseudo-random request name
  oRequest.RequestName = "%s_%s" % (md5(repr(time.time())).hexdigest()[:16],
                                    md5(repr(time.time())).hexdigest()[:16])
  replicateAndRegister = Operation()
  replicateAndRegister.Type = 'ReplicateAndRegister'
  replicateAndRegister.TargetSE = targetSE
  res = rm.getCatalogFileMetadata(lfnList)
  if not res['OK']:
    print "Can't get file metadata: %s" % res['Message']
    DIRAC.exit(1)
  if res['Value']['Failed']:
    print "Could not get the file metadata of the following, so skipping them:"
    for fFile in res['Value']['Failed']:
      print fFile
  lfnMetadata = res['Value']['Successful']
  for lfn in lfnMetadata:
    rarFile = File()
    # NOTE(review): fragment truncated here — per-file attributes and the
    # request submission continue outside this chunk's view.
  # Fragment: inside the per-chunk loop; the condition guarding this
  # 'continue' is outside this chunk's view.
  continue
# refuse chunks larger than the RMS per-operation file limit
if len(lfnChunk) > Operation.MAX_FILES:
    gLogger.error(
        "too many LFNs, max number of files per operation is %s" %
        Operation.MAX_FILES)
    error = -1
    continue
count += 1
request = Request()
# one request per chunk; suffix with the chunk counter when several are made
request.RequestName = requestName if not multiRequests else '%s_%d' % (
    requestName, count)
replicateAndRegister = Operation()
replicateAndRegister.Type = "ReplicateAndRegister"
replicateAndRegister.TargetSE = ",".join(targetSEs)
if catalog is not None:
    replicateAndRegister.Catalog = catalog
for lfn in lfnChunk:
    metaDict = metaDatas["Successful"][lfn]
    opFile = File()
    opFile.LFN = lfn
    opFile.Size = metaDict["Size"]
    if "Checksum" in metaDict:
        # # should check checksum type, now assuming Adler32 (metaDict["ChecksumType"] = 'AD'
        opFile.Checksum = metaDict["Checksum"]
        opFile.ChecksumType = "ADLER32"
    replicateAndRegister.addFile(opFile)
def _multiOperationsBody(self, transJson, taskDict, ownerDN, ownerGroup):
    """Deal with a Request that has multiple operations

    :param transJson: list of lists of string and dictionaries, e.g.:

      .. code :: python

        body = [ ( "ReplicateAndRegister", { "SourceSE":"FOO-SRM", "TargetSE":"TASK:TargetSE" }),
                 ( "RemoveReplica", { "TargetSE":"FOO-SRM" } ),
               ]

      If a value of an operation parameter in the body starts with ``TASK:``,
      we take it from the taskDict.
      For example ``TASK:TargetSE`` is replaced with ``task['TargetSE']``

    :param dict taskDict: dictionary of tasks, modified in this function
    :param str ownerDN: certificate DN used for the requests
    :param str ownerGroup: dirac group used for the requests

    :returns: None
    """
    # iterate over a copy so failing tasks can be popped from taskDict
    for taskID, task in list(taskDict.items()):
        try:
            transID = task["TransformationID"]
            if not task.get("InputData"):
                raise StopTaskIteration("No input data")
            files = []

            oRequest = Request()
            if isinstance(task["InputData"], list):
                files = task["InputData"]
            elif isinstance(task["InputData"], six.string_types):
                files = task["InputData"].split(";")

            # create the operations from the json structure
            for operationTuple in transJson:
                op = Operation()
                op.Type = operationTuple[0]
                for parameter, value in operationTuple[1].items():
                    # Here we massage a bit the body to replace some parameters
                    # with what we have in the task.
                    try:
                        taskKey = value.split("TASK:")[1]
                        value = task[taskKey]
                    # Either the attribute is not a string (AttributeError)
                    # or it does not start with 'TASK:' (IndexError)
                    except (AttributeError, IndexError):
                        pass
                    # That happens when the requested substitution is not
                    # a key in the task, and that's a problem
                    except KeyError:
                        raise StopTaskIteration(
                            "Parameter %s does not exist in taskDict" % taskKey)

                    setattr(op, parameter, value)

                # every operation carries the full set of task input files
                for lfn in files:
                    opFile = File()
                    opFile.LFN = lfn
                    op.addFile(opFile)

                oRequest.addOperation(op)

            result = self._assignRequestToTask(oRequest, taskDict, transID, taskID, ownerDN, ownerGroup)
            if not result["OK"]:
                raise StopTaskIteration(
                    "Could not assign request to task: %s" % result["Message"])
        except StopTaskIteration as e:
            # the failed task is dropped; the others are still processed
            self._logError("Error creating request for task", "%s, %s" % (taskID, e), transID=transID)
            taskDict.pop(taskID)
def createRequest(reqType): r = Request() # Simple failover op1 = Operation() f = File() f.LFN = '/This/is/an/LFN' op1.addFile(f) op1.Type = 'ReplicateAndRegister' op1.SourceSE = 'CERN-FAILOVER' op1.TargetSE = 'CERN-BUFFER' r.addOperation(op1) op2 = Operation() op2.addFile(f) op2.Type = 'RemoveReplica' op2.TargetSE = 'CERN-FAILOVER' r.addOperation(op2) if reqType == 0: return r # two files for Failover f1 = File() f1.LFN = '/This/is/a/second/LFN' op3 = Operation() op3.addFile(f1) op3.Type = 'ReplicateAndRegister' op3.SourceSE = 'CERN-FAILOVER' op3.TargetSE = 'CERN-BUFFER' r.addOperation(op3) op3 = Operation() op3.addFile(f1) op3.Type = 'RemoveReplica' op3.TargetSE = 'CERN-FAILOVER' r.addOperation(op3) if reqType == 1: return r op = Operation() op.Type = 'ForwardDiset' if reqType == 2: r.addOperation(op) return r r.insertBefore(op, r[0]) if reqType == 3: return r op4 = Operation() op4.Type = 'ForwardDiset' r.addOperation(op4) if reqType == 4: return r # 2 different FAILOVER SEs: removal not optimized r[1].SourceSE = 'RAL-FAILOVER' r[2].SourceSE = 'RAL-FAILOVER' if reqType == 5: return r # 2 different destinations, same FAILOVER: replication not optimized r[3].SourceSE = 'RAL-FAILOVER' r[4].SourceSE = 'RAL-FAILOVER' r[3].TargetSE = 'RAL-BUFFER' if reqType == 6: return r print 'This should not happen, reqType =', reqType