def __setRemovalRequest(self, lfn, ownerDN, ownerGroup):
  """ Build and submit a 'removeFile' removal request for a single LFN,
  owned by the supplied credentials.

  :param str lfn: LFN to remove
  :param str ownerDN: DN set as request owner
  :param str ownerGroup: group set as request owner
  :return: S_OK/S_ERROR from RequestClient().setRequest (or from an
           intermediate failing step)
  """
  oRequest = RequestContainer()
  oRequest.setRequestAttributes({'OwnerDN': ownerDN, 'OwnerGroup': ownerGroup})
  # request name is derived from the file name
  requestName = os.path.basename(lfn).strip() + '_removal_request.xml'
  oRequest.setRequestName(requestName)
  oRequest.setSourceComponent('JobCleaningAgent')

  removalDict = {'Attributes': {'Operation': 'removeFile',
                                'TargetSE': '',
                                'ExecutionOrder': 0}}
  addResult = oRequest.addSubRequest(removalDict, 'removal')
  if not addResult['OK']:
    return addResult
  oRequest.setSubRequestFiles(addResult['Value'], 'removal',
                              [{'LFN': lfn, 'PFN': '', 'Status': 'Waiting'}])

  client = RequestClient()
  xmlResult = oRequest.toXML()
  if not xmlResult['OK']:
    return xmlResult
  return client.setRequest(requestName, xmlResult['Value'])
def initialize(self):
  """ Agent initialization: create the RequestClient, register the
  monitoring activities and resolve the local/central RequestManagement
  service URLs (both are mandatory).

  :return: S_OK, or S_ERROR when a mandatory URL cannot be resolved
  """
  self.RequestDBClient = RequestClient()
  gMonitor.registerActivity("Iteration", "Agent Loops", "ZuziaAgent",
                            "Loops/min", gMonitor.OP_SUM)
  # NOTE(review): this activity was registered under the misspelled
  # component "ZuziaRAgent"; fixed to match the other three registrations.
  gMonitor.registerActivity("Attempted", "Request Processed", "ZuziaAgent",
                            "Requests/min", gMonitor.OP_SUM)
  gMonitor.registerActivity("Successful", "Request Forward Successful", "ZuziaAgent",
                            "Requests/min", gMonitor.OP_SUM)
  gMonitor.registerActivity("Failed", "Request Forward Failed", "ZuziaAgent",
                            "Requests/min", gMonitor.OP_SUM)

  # local URL: service configuration first, then the agent option fallback
  self.local = PathFinder.getServiceURL("RequestManagement/localURL")
  if not self.local:
    self.local = AgentModule.am_getOption(self, 'localURL', '')
  if not self.local:
    errStr = 'The RequestManagement/localURL option must be defined.'
    gLogger.fatal(errStr)
    return S_ERROR(errStr)
  # central URL: no fallback, must come from the service configuration
  self.central = PathFinder.getServiceURL("RequestManagement/centralURL")
  if not self.central:
    errStr = 'The RequestManagement/centralURL option must be defined.'
    gLogger.fatal(errStr)
    return S_ERROR(errStr)
  return S_OK()
def __init__( self, *args, **kwargs ):
  ''' c'tor

  Instantiates every client the agent uses and declares the CS-option
  placeholders (presumably filled in ``initialize`` -- not visible here).
  '''
  AgentModule.__init__( self, *args, **kwargs )
  # # replica manager
  self.replicaManager = ReplicaManager()
  # # transformation client
  self.transClient = TransformationClient()
  # # wms client
  self.wmsClient = WMSClient()
  # # request client
  self.requestClient = RequestClient()
  # # file catalog client
  self.metadataClient = FileCatalogClient()
  # # placeholders for CS options
  # # transformations types
  self.transformationTypes = None
  # # directory locations
  self.directoryLocations = None
  # # transformation metadata
  self.transfidmeta = None
  # # archive period in days
  self.archiveAfter = None
  # # active SEs
  self.activeStorages = None
  # # transformation log SEs
  self.logSE = None
  # # enable/disable execution
  self.enableFlag = None
def requestClient(cls):
  """ RequestClient getter -- lazily created once and cached on the class.

  :param cls: class reference
  """
  if cls.__requestClient:
    return cls.__requestClient
  cls.__requestClient = RequestClient()
  return cls.__requestClient
def initialize(self):
  """ Agent initialization: request client, optional direct MySQL backend,
  monitoring activities and the mandatory local service URL.

  :return: S_OK, or S_ERROR when the local URL cannot be resolved
  """
  self.RequestDBClient = RequestClient()
  self.RequestDB = False
  if self.am_getOption('Backend', '') == 'mysql':
    from DIRAC.RequestManagementSystem.DB.RequestDBMySQL import RequestDBMySQL
    db = RequestDBMySQL()
    # only use the direct DB backend when the connection succeeded
    if db._connected:
      self.RequestDB = db

  for name, description, unit in (("Iteration", "Agent Loops", "Loops/min"),
                                  ("Attempted", "Request Processed", "Requests/min"),
                                  ("Successful", "Request Forward Successful", "Requests/min"),
                                  ("Failed", "Request Forward Failed", "Requests/min")):
    gMonitor.registerActivity(name, description, "DISETForwardingAgent",
                              unit, gMonitor.OP_SUM)

  # local URL: service configuration first, then the agent option fallback
  self.local = PathFinder.getServiceURL("RequestManagement/localURL")
  if not self.local:
    self.local = AgentModule.am_getOption(self, 'localURL', '')
  if not self.local:
    errStr = 'The RequestManagement/localURL option must be defined.'
    gLogger.fatal(errStr)
    return S_ERROR(errStr)
  return S_OK()
def __getRequestFileStatusOLDSystem(self, requestName, lfns):
  """ Query file statuses from the legacy RMS; S_ERROR on RuntimeError. """
  # FIXME: this should disappear
  try:
    client = RequestClient()
    return client.getRequestFileStatus(requestName, lfns)
  except RuntimeError:
    return S_ERROR('RuntimeError')
def _sendToFailover( rpcStub ):
  """ Wrap the accounting rpcStub in a DISET failover request and submit it.

  :param rpcStub: DISET rpc stub to be replayed later
  :return: result of RequestClient.setRequest
  """
  client = RequestClient()
  container = RequestContainer()
  container.setDISETRequest( rpcStub )
  xmlStub = container.toXML()['Value']
  # time.time()/random.random() make the request name effectively unique
  requestName = "Accounting.DataStore.%s.%s" % ( time.time(), random.random() )
  return client.setRequest( requestName, xmlStub )
def requestClient(cls):
  """ RequestClient getter -- imported lazily and cached on the class.

  :param cls: class reference
  """
  if cls.__requestClient:
    return cls.__requestClient
  from DIRAC.RequestManagementSystem.Client.RequestClient import RequestClient
  cls.__requestClient = RequestClient()
  return cls.__requestClient
def initialize(self):
  """ Read the agent options and create the request client.

  :return: S_OK always
  """
  # days to wait before removal; default 7
  self.graceRemovalPeriod = self.am_getOption('GraceRemovalPeriod',7)
  self.checkAssigned = self.am_getOption('CheckAssigned',True)
  self.ftsCleaning = self.am_getOption('FTSCleaning',True)
  self.requestClient = RequestClient()
  return S_OK()
def requestClient( self ):
  """ RequestClient getter -- lazily created once and cached on the instance.

  :param self: self reference
  """
  if self.__requestClient:
    return self.__requestClient
  self.__requestClient = RequestClient()
  return self.__requestClient
def initialize(self):
  """ Create the request and replica-manager clients and force the
  DataManager shifter proxy.

  :return: S_OK always
  """
  self.RequestDBClient = RequestClient()
  self.ReplicaManager = ReplicaManager()
  # This sets the Default Proxy to used as that defined under
  # /Operations/Shifter/DataManager
  # the shifterProxy option in the Configuration can be used to change this default.
  self.am_setOption('shifterProxy', 'DataManager')
  return S_OK()
def __init__(self, transClient=None, logger=None, requestClient=None):
  """ c'tor -- every client falls back to a default instance when not supplied.

  :param transClient: transformation client (passed to the base class)
  :param logger: logger; defaults to a 'RequestTasks' sub-logger
  :param requestClient: request client; defaults to a fresh RequestClient
  """
  logger = logger if logger else gLogger.getSubLogger('RequestTasks')
  super(RequestTasks, self).__init__(transClient, logger)
  self.requestClient = requestClient if requestClient else RequestClient()
def __getRequestFileStatusOLDSystem(self, requestName, lfns):
  """ File statuses from the legacy RMS; empty dict on any failure. """
  # FIXME: this should disappear
  try:
    res = RequestClient().getRequestFileStatus(requestName, lfns)
  except RuntimeError:
    return {}
  return res['Value'] if res['OK'] else {}
def __getRequestIDOLDSystem(self, requestName):
  """ Request ID from the legacy RMS; 0 on any failure. """
  # FIXME: this should disappear
  try:
    res = RequestClient().getRequestInfo(requestName)
  except RuntimeError:
    return 0
  return res['Value'][0] if res['OK'] else 0
def __init__(self, transClient=None, logger=None, requestClient=None):
  """ c'tor -- clients fall back to defaults when not supplied; the
  RequestClient import is deferred until actually needed.

  :param transClient: transformation client (passed to the base class)
  :param logger: logger; defaults to a 'RequestTasks' sub-logger
  :param requestClient: request client; defaults to a fresh RequestClient
  """
  logger = logger if logger else gLogger.getSubLogger('RequestTasks')
  super(RequestTasks, self).__init__(transClient, logger)
  if requestClient:
    self.requestClient = requestClient
  else:
    from DIRAC.RequestManagementSystem.Client.RequestClient import RequestClient
    self.requestClient = RequestClient()
def initialize(self):
  """ Create the clients, size the thread pool from the agent options and
  force the DataManager shifter proxy.

  :return: S_OK always
  """
  self.RequestDBClient = RequestClient()
  self.ReplicaManager = ReplicaManager()
  self.DataLog = DataLoggingClient()
  # thread pool sizing from the CS; defaults keep the agent single-threaded
  self.maxNumberOfThreads = self.am_getOption('NumberOfThreads', 1)
  self.threadPoolDepth = self.am_getOption('ThreadPoolDepth', 1)
  self.threadPool = ThreadPool(1, self.maxNumberOfThreads)
  # This sets the Default Proxy to used as that defined under
  # /Operations/Shifter/DataManager
  # the shifterProxy option in the Configuration can be used to change this default.
  self.am_setOption('shifterProxy', 'DataManager')
  return S_OK()
def initialize(self):
  """Sets defaults

  Creates the clients, forces the DataManager shifter proxy and reads all
  CS options (transformation types, directory locations, metadata tag,
  archive delay, active SEs, log SE), logging each resolved value.
  """
  self.replicaManager = ReplicaManager()
  self.transClient = TransformationClient()
  self.wmsClient = WMSClient()
  self.requestClient = RequestClient()
  self.metadataClient = FileCatalogClient()
  self.storageUsageClient = StorageUsageClient()
  # This sets the Default Proxy to used as that defined under
  # /Operations/Shifter/DataManager
  # the shifterProxy option in the Configuration can be used to change this default.
  self.am_setOption('shifterProxy', 'DataManager')
  # transformation types this agent will treat
  self.transformationTypes = sortList(
      self.am_getOption('TransformationTypes', [
          'MCSimulation', 'DataReconstruction', 'DataStripping',
          'MCStripping', 'Merge', 'Replication'
      ]))
  gLogger.info("Will consider the following transformation types: %s" %
               str(self.transformationTypes))
  # sources queried when looking up transformation directories
  self.directoryLocations = sortList(
      self.am_getOption('DirectoryLocations',
                        ['TransformationDB', 'StorageUsage', 'MetadataCatalog']))
  gLogger.info("Will search for directories in the following locations: %s" %
               str(self.directoryLocations))
  # metadata tag name holding the transformation ID
  self.transfidmeta = self.am_getOption('TransfIDMeta', "TransformationID")
  gLogger.info("Will use %s as metadata tag name for TransformationID" %
               self.transfidmeta)
  self.archiveAfter = self.am_getOption('ArchiveAfter', 7)  # days
  gLogger.info("Will archive Completed transformations after %d days" %
               self.archiveAfter)
  self.activeStorages = sortList(self.am_getOption('ActiveSEs', []))
  gLogger.info("Will check the following storage elements: %s" %
               str(self.activeStorages))
  self.logSE = self.am_getOption('TransformationLogSE', 'LogSE')
  gLogger.info("Will remove logs found on storage element: %s" % self.logSE)
  return S_OK()
def __deleteSandboxFromExternalBackend(self, SEName, SEPFN): if self.getCSOption("DelayedExternalDeletion", True): gLogger.info("Setting deletion request") try: request = RequestContainer() result = request.addSubRequest( { 'Attributes': { 'Operation': 'removePhysicalFile', 'TargetSE': SEName, 'ExecutionOrder': 1 } }, 'removal') index = result['Value'] fileDict = {'PFN': SEPFN, 'Status': 'Waiting'} request.setSubRequestFiles(index, 'removal', [fileDict]) return RequestClient().setRequest( "RemoteSBDeletion:%s|%s:%s" % (SEName, SEPFN, time.time()), request.toXML()['Value']) except Exception, e: gLogger.exception("Exception while setting deletion request") return S_ERROR("Cannot set deletion request: %s" % str(e))
def setUp( self ):
  """ test setup

  Builds one Request holding one ReplicateAndRegister operation on one
  file, keeps its XML serialization and creates the client under test.

  :param self: self reference
  """
  self.request = Request()
  self.request.RequestName = "RequestManagerHandlerTests"
  self.request.OwnerDN = "/DC=ch/DC=cern/OU=Organic Units/OU=Users/CN=cibak/CN=605919/CN=Krzysztof Ciba"
  self.request.OwnerGroup = "dirac_user"
  # # operation replicating one file to CERN-USER
  self.operation = Operation()
  self.operation.Type = "ReplicateAndRegister"
  self.operation.TargetSE = "CERN-USER"
  # # the single file attached to the operation
  self.file = File()
  self.file.LFN = "/lhcb/user/c/cibak/testFile"
  self.file.Checksum = "123456"
  self.file.ChecksumType = "ADLER32"
  self.request.addOperation( self.operation )
  self.operation.addFile( self.file )
  # # xml representation of a whole request
  self.xmlStr = self.request.toXML( True )["Value"]
  # # request client
  self.requestClient = RequestClient()
def __init__(self, agentName, baseAgentName=False, properties=None):
  """ c'tor

  :param self: self reference
  :param str agentName: name of agent
  :param bool baseAgentName: whatever
  :param dict properties: whatever else; a fresh empty dict when not given
  """
  # NOTE(review): the default used to be the mutable literal ``dict()``,
  # evaluated once and shared by every call; replaced by the None sentinel.
  if properties is None:
    properties = {}
  AgentModule.__init__(self, agentName, baseAgentName, properties)
  ## replica manager
  self.replicaManager = ReplicaManager()
  ## transformation client
  self.transClient = TransformationClient()
  ## wms client
  self.wmsClient = WMSClient()
  ## request client
  self.requestClient = RequestClient()
  ## file catalog client
  self.metadataClient = FileCatalogClient()
  ## storage usage agent
  self.storageUsageClient = StorageUsageClient()
  ## placeholders for CS options
  ## transformations types
  self.transformationTypes = None
  ## directory locations
  self.directoryLocations = None
  ## transformation metadata
  self.transfidmeta = None
  ## archive period in days
  self.archiveAfter = None
  ## active SEs
  self.activeStorages = None
  ## transformation log SEs
  self.logSE = None
  ## enable/disable execution
  self.enableFlag = None
def requestClient( cls ):
  """ request client getter -- created once, then served from the class cache """
  if cls.__requestClient:
    return cls.__requestClient
  cls.__requestClient = RequestClient()
  return cls.__requestClient
def __init__(self):
  """ c'tor: initialise the base class and create the request client """
  TaskBase.__init__(self)
  self.requestClient = RequestClient()
def __removeWMSTasks( self, transJobIDs ):
  """ wipe out jobs and their requests from the system

  Kills then deletes the jobs in the WMS (500 at a time), then deletes
  their failover requests in both the old RMS (via RequestClient) and the
  new one (via self.reqClient).

  TODO: should check request status, maybe FTS files as well ???

  :param self: self reference
  :param list transJobIDs: job IDs
  """
  # Prevent 0 job IDs
  jobIDs = [ int( j ) for j in transJobIDs if int( j ) ]
  allRemove = True
  for jobList in breakListIntoChunks( jobIDs, 500 ):
    # first kill ...
    res = self.wmsClient.killJob( jobList )
    if res['OK']:
      self.log.info( "Successfully killed %d jobs from WMS" % len( jobList ) )
    elif ( "InvalidJobIDs" in res ) and ( "NonauthorizedJobIDs" not in res ) and ( "FailedJobIDs" not in res ):
      self.log.info( "Found %s jobs which did not exist in the WMS" % len( res['InvalidJobIDs'] ) )
    elif "NonauthorizedJobIDs" in res:
      self.log.error( "Failed to kill %s jobs because not authorized" % len( res['NonauthorizedJobIDs'] ) )
      allRemove = False
    elif "FailedJobIDs" in res:
      self.log.error( "Failed to kill %s jobs" % len( res['FailedJobIDs'] ) )
      allRemove = False
    # ... then delete the same chunk
    res = self.wmsClient.deleteJob( jobList )
    if res['OK']:
      self.log.info( "Successfully removed %d jobs from WMS" % len( jobList ) )
    elif ( "InvalidJobIDs" in res ) and ( "NonauthorizedJobIDs" not in res ) and ( "FailedJobIDs" not in res ):
      self.log.info( "Found %s jobs which did not exist in the WMS" % len( res['InvalidJobIDs'] ) )
    elif "NonauthorizedJobIDs" in res:
      self.log.error( "Failed to remove %s jobs because not authorized" % len( res['NonauthorizedJobIDs'] ) )
      allRemove = False
    elif "FailedJobIDs" in res:
      self.log.error( "Failed to remove %s jobs" % len( res['FailedJobIDs'] ) )
      allRemove = False
  if not allRemove:
    return S_ERROR( "Failed to remove all remnants from WMS" )
  self.log.info( "Successfully removed all tasks from the WMS" )
  if not jobIDs:
    self.log.info( "JobIDs not present, unable to remove asociated requests." )
    return S_OK()
  failed = 0
  # FIXME: double request client: old/new -> only the new will survive sooner or later
  # this is the old
  try:
    res = RequestClient().getRequestForJobs( jobIDs )
    if not res['OK']:
      self.log.error( "Failed to get requestID for jobs.", res['Message'] )
      return res
    failoverRequests = res['Value']
    self.log.info( "Found %d jobs with associated failover requests (in the old RMS)" % len( failoverRequests ) )
    if not failoverRequests:
      return S_OK()
    for jobID, requestName in failoverRequests.items():
      # Put this check just in case, tasks must have associated jobs
      if jobID == 0 or jobID == '0':
        continue
      res = RequestClient().deleteRequest( requestName )
      if not res['OK']:
        self.log.error( "Failed to remove request from RequestDB", res['Message'] )
        failed += 1
      else:
        self.log.verbose( "Removed request %s associated to job %d." % ( requestName, jobID ) )
  except RuntimeError:
    # old RMS not available -- carry on with the new one only
    failoverRequests = {}
    pass
  # FIXME: and this is the new
  res = self.reqClient.getRequestNamesForJobs( jobIDs )
  if not res['OK']:
    self.log.error( "Failed to get requestID for jobs.", res['Message'] )
    return res
  failoverRequests.update( res['Value']['Successful'] )
  if not failoverRequests:
    return S_OK()
  for jobID, requestName in res['Value']['Successful'].items():
    # Put this check just in case, tasks must have associated jobs
    if jobID == 0 or jobID == '0':
      continue
    res = self.reqClient.deleteRequest( requestName )
    if not res['OK']:
      self.log.error( "Failed to remove request from RequestDB", res['Message'] )
      failed += 1
    else:
      self.log.verbose( "Removed request %s associated to job %d." % ( requestName, jobID ) )
  if failed:
    self.log.info( "Successfully removed %s requests" % ( len( failoverRequests ) - failed ) )
    self.log.info( "Failed to remove %s requests" % failed )
    return S_ERROR( "Failed to remove all the request from RequestDB" )
  self.log.info( "Successfully removed all the associated failover requests" )
  return S_OK()
def initialize(self):
  """ Called by the framework upon startup, before any cycle (execute method bellow)

  Creates the clients, registers all removal-related monitoring
  activities, builds the daemonized thread pool and forces the
  DataManager shifter proxy.
  """
  self.requestDBClient = RequestClient()
  # the RequestAgentMixIn needs the capitalized version, until is is fixed keep this.
  self.RequestDBClient = self.requestDBClient
  self.replicaManager = ReplicaManager()
  gMonitor.registerActivity("Iteration", "Agent Loops", "RemovalAgent",
                            "Loops/min", gMonitor.OP_SUM)
  gMonitor.registerActivity("Execute", "Request Processed", "RemovalAgent",
                            "Requests/min", gMonitor.OP_SUM)
  gMonitor.registerActivity("Done", "Request Completed", "RemovalAgent",
                            "Requests/min", gMonitor.OP_SUM)
  gMonitor.registerActivity("PhysicalRemovalAtt", "Physical removals attempted",
                            "RemovalAgent", "Removal/min", gMonitor.OP_SUM)
  gMonitor.registerActivity("PhysicalRemovalDone", "Successful physical removals",
                            "RemovalAgent", "Removal/min", gMonitor.OP_SUM)
  gMonitor.registerActivity("PhysicalRemovalFail", "Failed physical removals",
                            "RemovalAgent", "Removal/min", gMonitor.OP_SUM)
  # accumulated bytes, not a rate
  gMonitor.registerActivity("PhysicalRemovalSize", "Physically removed size",
                            "RemovalAgent", "Bytes", gMonitor.OP_ACUM)
  gMonitor.registerActivity("ReplicaRemovalAtt", "Replica removal attempted",
                            "RemovalAgent", "Removal/min", gMonitor.OP_SUM)
  gMonitor.registerActivity("ReplicaRemovalDone", "Successful replica removals",
                            "RemovalAgent", "Removal/min", gMonitor.OP_SUM)
  gMonitor.registerActivity("ReplicaRemovalFail", "Failed replica removals",
                            "RemovalAgent", "Removal/min", gMonitor.OP_SUM)
  gMonitor.registerActivity("RemoveFileAtt", "File removal attempted",
                            "RemovalAgent", "Removal/min", gMonitor.OP_SUM)
  gMonitor.registerActivity("RemoveFileDone", "File removal done",
                            "RemovalAgent", "Removal/min", gMonitor.OP_SUM)
  gMonitor.registerActivity("RemoveFileFail", "File removal failed",
                            "RemovalAgent", "Removal/min", gMonitor.OP_SUM)
  # thread pool sizing: CS options override the class-level defaults
  self.maxNumberOfThreads = self.am_getOption('NumberOfThreads',
                                              self.maxNumberOfThreads)
  self.maxRequestsInQueue = self.am_getOption('RequestsInQueue',
                                              self.maxRequestsInQueue)
  self.threadPool = ThreadPool(1, self.maxNumberOfThreads,
                               self.maxRequestsInQueue)
  # Set the ThreadPool in daemon mode to process new ThreadedJobs as they are inserted
  self.threadPool.daemonize()
  self.maxRequests = self.am_getOption('MaxRequestsPerCycle', 1200.)
  # This sets the Default Proxy to used as that defined under
  # /Operations/Shifter/DataManager
  # the shifterProxy option in the Configuration can be used to change this default.
  self.am_setOption('shifterProxy', 'DataManager')
  return S_OK()
# NOTE(review): script fragment -- it starts and ends mid-script; `lfns`,
# `inputFileName`, `targetSE`, `Script` and `breakListIntoChunks` are
# presumably defined earlier in the file (not visible here), and the
# chunk loop continues past this fragment.
lfns.append(inputFileName)
from DIRAC.Resources.Storage.StorageElement import StorageElement
import DIRAC
# Check is provided SE is OK
if targetSE != 'All':
  se = StorageElement(targetSE)
  if not se.valid:
    print se.errorReason
    print Script.showHelp()
from DIRAC.RequestManagementSystem.Client.RequestContainer import RequestContainer
from DIRAC.RequestManagementSystem.Client.RequestClient import RequestClient
requestClient = RequestClient()
requestType = 'removal'
# 'All' means remove the file everywhere, otherwise only the replica on targetSE
requestOperation = 'replicaRemoval'
if targetSE == 'All':
  requestOperation = 'removeFile'
# one request per chunk of 100 LFNs
for lfnList in breakListIntoChunks(lfns, 100):
  oRequest = RequestContainer()
  subRequestIndex = oRequest.initiateSubRequest(requestType)['Value']
  attributeDict = {'Operation': requestOperation, 'TargetSE': targetSE}
  oRequest.setSubRequestAttributes(subRequestIndex, requestType, attributeDict)
  files = []
  for lfn in lfnList:
    files.append({'LFN': lfn})
def initialize(self):
  """ Create the clients, register all transfer-related monitoring
  activities, build the thread pool and force the DataManager shifter
  proxy.
  """
  self.RequestDBClient = RequestClient()
  self.ReplicaManager = ReplicaManager()
  self.DataLog = DataLoggingClient()
  gMonitor.registerActivity("Iteration", "Agent Loops", "TransferAgent",
                            "Loops/min", gMonitor.OP_SUM)
  gMonitor.registerActivity("Execute", "Request Processed", "TransferAgent",
                            "Requests/min", gMonitor.OP_SUM)
  gMonitor.registerActivity("Done", "Request Completed", "TransferAgent",
                            "Requests/min", gMonitor.OP_SUM)
  # per-operation attempt counters
  gMonitor.registerActivity("Replicate and register", "Replicate and register operations",
                            "TransferAgent", "Attempts/min", gMonitor.OP_SUM)
  gMonitor.registerActivity("Replicate", "Replicate operations",
                            "TransferAgent", "Attempts/min", gMonitor.OP_SUM)
  gMonitor.registerActivity("Put and register", "Put and register operations",
                            "TransferAgent", "Attempts/min", gMonitor.OP_SUM)
  gMonitor.registerActivity("Put", "Put operations",
                            "TransferAgent", "Attempts/min", gMonitor.OP_SUM)
  # per-operation outcome counters
  gMonitor.registerActivity("Replication successful", "Successful replications",
                            "TransferAgent", "Successful/min", gMonitor.OP_SUM)
  gMonitor.registerActivity("Put successful", "Successful puts",
                            "TransferAgent", "Successful/min", gMonitor.OP_SUM)
  gMonitor.registerActivity("Replication failed", "Failed replications",
                            "TransferAgent", "Failed/min", gMonitor.OP_SUM)
  gMonitor.registerActivity("Put failed", "Failed puts",
                            "TransferAgent", "Failed/min", gMonitor.OP_SUM)
  gMonitor.registerActivity("Replica registration successful", "Successful replica registrations",
                            "TransferAgent", "Successful/min", gMonitor.OP_SUM)
  gMonitor.registerActivity("File registration successful", "Successful file registrations",
                            "TransferAgent", "Successful/min", gMonitor.OP_SUM)
  gMonitor.registerActivity("Replica registration failed", "Failed replica registrations",
                            "TransferAgent", "Failed/min", gMonitor.OP_SUM)
  gMonitor.registerActivity("File registration failed", "Failed file registrations",
                            "TransferAgent", "Failed/min", gMonitor.OP_SUM)
  # thread pool sizing from the CS; defaults keep the agent single-threaded
  self.maxNumberOfThreads = self.am_getOption('NumberOfThreads', 1)
  self.threadPoolDepth = self.am_getOption('ThreadPoolDepth', 1)
  self.threadPool = ThreadPool(1, self.maxNumberOfThreads)
  # This sets the Default Proxy to used as that defined under
  # /Operations/Shifter/DataManager
  # the shifterProxy option in the Configuration can be used to change this default.
  self.am_setOption('shifterProxy', 'DataManager')
  return S_OK()