def initializeMonitoringHandler( serviceInfo ):
  """Initialize the Monitoring service handler.

  Resolves the data location from the CS, makes sure the directory exists
  and is writable, starts the service interface / DB engine and registers
  the gMonitor activities for this service.

  :param serviceInfo: service information dictionary (unused here)
  :return: S_OK() on success, S_ERROR() if the data location is not
           writable or the DB engine cannot be started
  """
  # Check that the path is writable
  monitoringSection = PathFinder.getServiceSection( "Framework/Monitoring" )
  # Get data location
  dataPath = gConfig.getValue( "%s/DataLocation" % monitoringSection, "data/monitoring" )
  dataPath = dataPath.strip()
  if "/" != dataPath[0]:
    # relative paths are anchored at the instance path
    dataPath = os.path.realpath( "%s/%s" % ( gConfig.getValue( '/LocalSite/InstancePath', rootPath ), dataPath ) )
  gLogger.info( "Data will be written into %s" % dataPath )
  try:
    os.makedirs( dataPath )
  except OSError:
    # directory most likely exists already; a real permission problem
    # is detected by the write probe below
    pass
  try:
    testFile = "%s/mon.jarl.test" % dataPath
    # open() instead of the Python2-only file() builtin (forward compatible)
    fd = open( testFile, "w" )
    fd.close()
    os.unlink( testFile )
  except IOError:
    gLogger.fatal( "Can't write to %s" % dataPath )
    return S_ERROR( "Data location is not writable" )
  # Define globals
  gServiceInterface.initialize( dataPath )
  if not gServiceInterface.initializeDB():
    return S_ERROR( "Can't start db engine" )
  gMonitor.registerActivity( "cachedplots", "Cached plot images", "Monitoring plots", "plots", gMonitor.OP_SUM )
  gMonitor.registerActivity( "drawnplots", "Drawn plot images", "Monitoring plots", "plots", gMonitor.OP_SUM )
  return S_OK()
def initializePlottingHandler( serviceInfo ):
  """Initialize the Plotting service handler.

  Resolves the plot data location from the CS, makes sure the directory
  exists and is writable, points the plot cache at it and registers the
  gMonitor activity for this service.

  :param serviceInfo: service information dictionary (unused here)
  :return: S_OK() on success, S_ERROR() if the data location is not writable
  """
  # Get data location
  plottingSection = PathFinder.getServiceSection( "Framework/Plotting" )
  dataPath = gConfig.getValue( "%s/DataLocation" % plottingSection, "data/graphs" )
  dataPath = dataPath.strip()
  if "/" != dataPath[0]:
    # relative paths are anchored at the instance path
    dataPath = os.path.realpath( "%s/%s" % ( gConfig.getValue( '/LocalSite/InstancePath', rootPath ), dataPath ) )
  gLogger.info( "Data will be written into %s" % dataPath )
  try:
    os.makedirs( dataPath )
  except OSError:
    # directory most likely exists already; a real permission problem
    # is detected by the write probe below
    pass
  try:
    testFile = "%s/plot__.test" % dataPath
    # open() instead of the Python2-only file() builtin (forward compatible)
    fd = open( testFile, "w" )
    fd.close()
    os.unlink( testFile )
  except IOError:
    gLogger.fatal( "Can't write to %s" % dataPath )
    return S_ERROR( "Data location is not writable" )
  gPlotCache.setPlotsLocation( dataPath )
  gMonitor.registerActivity( "plotsDrawn", "Drawn plot images", "Plotting requests", "plots", gMonitor.OP_SUM )
  return S_OK()
def initialize( self ):
  """ Agent initialization.

      The extensions MUST provide in the initialize method the following data members:
      - TransformationClient objects (self.transClient),
      - set the shifterProxy if different from the default one set here ('ProductionManager')
      - list of transformation types to be looked (self.transType)
  """
  gMonitor.registerActivity( "SubmittedTasks", "Automatically submitted tasks",
                             "Transformation Monitoring", "Tasks", gMonitor.OP_ACUM )
  self.pluginLocation = self.am_getOption( 'PluginLocation',
                                           'DIRAC.TransformationSystem.Client.TaskManagerPlugin' )
  # Default clients
  self.transClient = TransformationClient()
  # Bulk submission flag
  self.bulkSubmissionFlag = self.am_getOption( 'BulkSubmission', False )
  # Thread pool setup: one worker job per thread, all running self._execute
  nThreads = self.am_getOption( 'maxNumberOfThreads', 15 )
  pool = ThreadPool( nThreads, nThreads )
  self.log.verbose( "Multithreaded with %d threads" % nThreads )
  for threadId in xrange( nThreads ):
    pool.generateJobAndQueueIt( self._execute, [threadId] )
  return S_OK()
def loadHandler( pluginPath ):
  """ Load the requested operation-handler class and return the class object.

  This function can raise ImportError when the plugin cannot be found or
  TypeError when the loaded class object isn't inherited from the
  OperationHandlerBase class.

  :param str pluginPath: dotted path to plugin, specified as in import statement,
      i.e. "DIRAC.CheesShopSystem.private.Cheddar" or alternatively in 'normal'
      path format "DIRAC/CheesShopSystem/private/Cheddar"
  :return: the loaded class object (NOT an instance -- instantiation is
      left to the caller)

  This function tries to load the class object from the given path. It is
  assumed that:

  - :pluginPath: is pointing to a module "importable" by the python
    interpreter, i.e. its package's top level directory is in the
    $PYTHONPATH env variable,
  - the module contains a class definition named after the module itself,
  - the class itself is inherited from
    DIRAC.RequestManagementSystem.private.BaseOperation.BaseOperation

  If the above conditions aren't met, the function throws exceptions:

  - ImportError when the class cannot be imported
  - TypeError when the class isn't inherited from OperationHandlerBase
  """
  if "/" in pluginPath:
    # convert 'normal' path format to dotted-import notation
    pluginPath = ".".join( [ chunk for chunk in pluginPath.split( "/" ) if chunk ] )
  pluginName = pluginPath.split( "." )[-1]
  if pluginName not in globals():
    mod = __import__( pluginPath, globals(), fromlist = [ pluginName ] )
    pluginClassObj = getattr( mod, pluginName )
  else:
    # already imported once -- reuse the cached class object
    pluginClassObj = globals()[pluginName]
  if not issubclass( pluginClassObj, OperationHandlerBase ):
    raise TypeError( "operation handler '%s' isn't inherited from OperationHandlerBase class" % pluginName )
  # register per-handler gMonitor counters
  for key, status in ( ( "Att", "Attempted" ), ( "OK", "Successful" ), ( "Fail", "Failed" ) ):
    gMonitor.registerActivity( "%s%s" % ( pluginName, key ), "%s operations %s" % ( pluginName, status ),
                               "RequestExecutingAgent", "Operations/min", gMonitor.OP_SUM )
  # # return the class object itself, not an instance
  return pluginClassObj
def initializeMatcherHandler( serviceInfo ):
  """ Matcher Service initialization

  Creates the global DB helper objects, registers the matching activities
  with gMonitor and schedules the periodic task-queue share recalculation
  and task-queue count reporting.

  :param serviceInfo: service information dictionary (unused here)
  :return: S_OK()
  """
  global gJobDB
  global gJobLoggingDB
  global gTaskQueueDB
  global gPilotAgentsDB

  gJobDB = JobDB()
  gJobLoggingDB = JobLoggingDB()
  gTaskQueueDB = TaskQueueDB()
  gPilotAgentsDB = PilotAgentsDB()

  gMonitor.registerActivity( 'matchTime', "Job matching time", 'Matching', "secs", gMonitor.OP_MEAN, 300 )
  gMonitor.registerActivity( 'matchesDone', "Job Match Request", 'Matching', "matches", gMonitor.OP_RATE, 300 )
  gMonitor.registerActivity( 'matchesOK', "Matched jobs", 'Matching', "matches", gMonitor.OP_RATE, 300 )
  # NB: unit string fixed from the misspelled "tqsk queues"
  gMonitor.registerActivity( 'numTQs', "Number of Task Queues", 'Matching', "task queues", gMonitor.OP_MEAN, 300 )

  gTaskQueueDB.recalculateTQSharesForAll()
  # keep shares and TQ counts fresh in the background
  gThreadScheduler.addPeriodicTask( 120, gTaskQueueDB.recalculateTQSharesForAll )
  gThreadScheduler.addPeriodicTask( 60, sendNumTaskQueues )
  sendNumTaskQueues()
  return S_OK()
def initialize( self ):
  """ agent initialization """
  # read the CS options (falling back to the class defaults) and log each one
  for attrName, optionName, msgFmt in (
      ( "DEL_GRACE_DAYS", "DeleteGraceDays", "Delete grace period = %s days" ),
      ( "DEL_LIMIT", "DeleteLimitPerCycle", "Delete FTSJob limit = %s" ),
      ( "KICK_ASSIGNED_HOURS", "KickAssignedHours", "Kick assigned period = %s hours" ),
      ( "KICK_LIMIT", "KickLimitPerCycle", "Kick FTSJobs limit = %s" ) ):
    setattr( self, attrName, self.am_getOption( optionName, getattr( self, attrName ) ) )
    self.log.info( msgFmt % getattr( self, attrName ) )
  # gMonitor counters for kicked and deleted FTS jobs
  for counter, description in ( ( "KickedFTSJobs", "Assigned FTSJobs kicked" ),
                                ( "DeletedFTSJobs", "Deleted FTSJobs" ) ):
    gMonitor.registerActivity( counter, description, "CleanFTSDBAgent", "FTSJobs/min", gMonitor.OP_SUM )
  return S_OK()
def initialize( self ):
  ''' Make the necessary initializations
  '''
  gMonitor.registerActivity( "Iteration", "Agent Loops", AGENT_NAME, "Loops/min", gMonitor.OP_SUM )
  # transformation types: explicit CS option wins, otherwise the union of
  # the DataProcessing and DataManipulation operation defaults
  configuredTypes = self.am_getOption( 'TransformationTypes', [] )
  if configuredTypes:
    self.transformationTypes = sorted( configuredTypes )
  else:
    dataProc = Operations().getValue( 'Transformations/DataProcessing', ['MCSimulation', 'Merge'] )
    dataManip = Operations().getValue( 'Transformations/DataManipulation', ['Replication', 'Removal'] )
    self.transformationTypes = sorted( dataProc + dataManip )
  # drop the extendable types: the Extendables do not use this Agent
  # (they have no Input data query)
  for extendable in Operations().getValue( 'Transformations/ExtendableTransfTypes', [] ):
    if extendable in self.transformationTypes:
      self.transformationTypes.remove( extendable )
  return S_OK()
def initialize( self ):
  """ initialization """
  # deletion options
  for attrName, optionName, msgFmt in (
      ( "DEL_GRACE_DAYS", "DeleteGraceDays", "Delete grace period = %s days" ),
      ( "DEL_LIMIT", "DeleteLimit", "Delete limit = %s request/cycle" ) ):
    setattr( self, attrName, self.am_getOption( optionName, getattr( self, attrName ) ) )
    self.log.info( msgFmt % getattr( self, attrName ) )
  self.DEL_FAILED = self.am_getOption( "DeleteFailed", self.DEL_FAILED )
  self.log.info( "Delete failed requests: %s" % { True: "yes", False: "no"}[self.DEL_FAILED] )
  # kicking options
  for attrName, optionName, msgFmt in (
      ( "KICK_GRACE_HOURS", "KickGraceHours", "Kick assigned requests period = %s hours" ),
      ( "KICK_LIMIT", "KickLimit", "Kick limit = %s request/cycle" ) ):
    setattr( self, attrName, self.am_getOption( optionName, getattr( self, attrName ) ) )
    self.log.info( msgFmt % getattr( self, attrName ) )
  # # gMonitor stuff
  for counter, description in ( ( "DeletedRequests", "Deleted finished requests" ),
                                ( "KickedRequests", "Assigned requests kicked" ) ):
    gMonitor.registerActivity( counter, description, "CleanReqDBAgent", "Requests/min", gMonitor.OP_SUM )
  return S_OK()
def __init__( self, operation = None, csPath = None ):
  """Constructor.

  :param self: self reference
  :param Operation operation: Operation instance
  :param str csPath: CS path for this handler
  """
  # # base classes ctor
  super( PutAndRegister, self ).__init__( operation, csPath )
  # # gMonitor counters for put and register operations
  for counter, description in ( ( "PutAtt", "File put attempts" ),
                                ( "PutFail", "Failed file puts" ),
                                ( "PutOK", "Successful file puts" ),
                                ( "RegisterOK", "Successful file registrations" ),
                                ( "RegisterFail", "Failed file registrations" ) ):
    gMonitor.registerActivity( counter, description, "RequestExecutingAgent", "Files/min", gMonitor.OP_SUM )
  self.dm = DataManager()
def initializeHandler( cls, serviceInfo ):
  """Initialize the accounting reports handler.

  Connects the (read-only) multi accounting DB, resolves and validates the
  graphs data location, and registers the gMonitor activities for this
  service.

  :param serviceInfo: service information dictionary
  :return: S_OK() on success, S_ERROR() if the data location is not writable
  """
  multiPath = PathFinder.getDatabaseSection( "Accounting/MultiDB" )
  cls.__acDB = MultiAccountingDB( multiPath, readOnly = True )
  # Get data location
  reportSection = serviceInfo[ 'serviceSectionPath' ]
  dataPath = gConfig.getValue( "%s/DataLocation" % reportSection, "data/accountingGraphs" )
  dataPath = dataPath.strip()
  if "/" != dataPath[0]:
    # relative paths are anchored at the instance path
    dataPath = os.path.realpath( "%s/%s" % ( gConfig.getValue( '/LocalSite/InstancePath', rootPath ), dataPath ) )
  gLogger.info( "Data will be written into %s" % dataPath )
  mkDir( dataPath )
  try:
    testFile = "%s/acc.jarl.test" % dataPath
    # open() instead of the Python2-only file() builtin (forward compatible)
    fd = open( testFile, "w" )
    fd.close()
    os.unlink( testFile )
  except IOError:
    gLogger.fatal( "Can't write to %s" % dataPath )
    return S_ERROR( "Data location is not writable" )
  gDataCache.setGraphsLocation( dataPath )
  gMonitor.registerActivity( "plotsDrawn", "Drawn plot images", "Accounting reports", "plots", gMonitor.OP_SUM )
  gMonitor.registerActivity( "reportsRequested", "Generated reports", "Accounting reports", "reports", gMonitor.OP_SUM )
  return S_OK()
def __init__( self, operation = None, csPath = None ):
  """Constructor.

  :param self: self reference
  :param ~DIRAC.RequestManagementSystem.Client.Operation.Operation operation: Operation instance
  :param str csPath: cs config path
  """
  DMSRequestOperationsBase.__init__( self, operation, csPath )
  # # per-file removal counters
  for counter, description in ( ( "PhysicalRemovalAtt", "Physical file removals attempted" ),
                                ( "PhysicalRemovalOK", "Successful file physical removals" ),
                                ( "PhysicalRemovalFail", "Failed file physical removals" ) ):
    gMonitor.registerActivity( counter, description, "RequestExecutingAgent", "Files/min", gMonitor.OP_SUM )
  # # accumulated removed size, in bytes
  gMonitor.registerActivity( "PhysicalRemovalSize", "Physically removed size",
                             "RequestExecutingAgent", "Bytes", gMonitor.OP_ACUM )
def __init__( self, operation = None, csPath = None ):
  """Constructor.

  :param operation: Operation instance
  :param str csPath: CS path for this handler
  """
  OperationHandlerBase.__init__( self, operation, csPath )
  # # RegisterFile specific monitor info
  for counter, description in ( ( "RegisterAtt", "Attempted file registrations" ),
                                ( "RegisterOK", "Successful file registrations" ),
                                ( "RegisterFail", "Failed file registrations" ) ):
    gMonitor.registerActivity( counter, description, "RequestExecutingAgent", "Files/min", gMonitor.OP_SUM )
def __init__( self, operation = None, csPath = None ):
  """Constructor.

  :param operation: Operation instance
  :param str csPath: CS path for this handler
  """
  DMSRequestOperationsBase.__init__( self, operation, csPath )
  # # RegisterReplica specific monitor info
  for counter, description in ( ( "RegisterReplicaAtt", "Attempted replicas registrations" ),
                                ( "RegisterReplicaOK", "Successful replicas registrations" ),
                                ( "RegisterReplicaFail", "Failed replicas registrations" ) ):
    gMonitor.registerActivity( counter, description, "RequestExecutingAgent", "Replicas/min", gMonitor.OP_SUM )
def __init__( self, operation = None, csPath = None ):
  """Constructor.

  :param operation: Operation instance
  :param str csPath: CS path for this handler
  """
  # # base class ctor
  DMSRequestOperationsBase.__init__( self, operation, csPath )
  # # gMonitor counters for file retransfers
  for counter, description in ( ( "FileReTransferAtt", "File retransfers attempted" ),
                                ( "FileReTransferOK", "File retransfers successful" ),
                                ( "FileReTransferFail", "File retransfers failed" ) ):
    gMonitor.registerActivity( counter, description, "RequestExecutingAgent", "Files/min", gMonitor.OP_SUM )
def initialize( self ):
  """ service initialization

  :param self: self reference
  """
  gLogger.notice( "CacheDirectory: %s" % self.cacheDir() )
  # per-request activity counters
  for counter, description in ( ( "reqSwept", "Request successfully swept" ),
                                ( "reqFailed", "Request forward failed" ),
                                ( "reqReceived", "Request received" ) ):
    gMonitor.registerActivity( counter, description, "ReqProxy", "Requests/min", gMonitor.OP_SUM )
  return S_OK()
def __init__(self, requestJSON, handlersDict, csPath, agentName, standalone=False, requestClient=None):
    """Constructor.

    :param self: self reference
    :param str requestJSON: request serialized to JSON
    :param dict handlersDict: operation handlers
    """
    self.request = Request(requestJSON)
    self.csPath = csPath              # CS path
    self.agentName = agentName        # agent name
    self.standalone = standalone      # standalone flag
    self.handlersDict = handlersDict  # operation handlers
    self.handlers = {}                # loaded handler class objects
    # dedicated sub-logger, tagged with pid and request name
    self.log = gLogger.getSubLogger("pid_%s/%s" % (os.getpid(), self.request.RequestName))
    # shifter proxies setup (failure is logged, not fatal)
    self.__managersDict = {}
    proxiesRes = self.__setupManagerProxies()
    if not proxiesRes["OK"]:
        self.log.error(proxiesRes["Message"])
    # gMonitor setup and own activities
    gMonitor.setComponentType(gMonitor.COMPONENT_AGENT)
    gMonitor.setComponentName(self.agentName)
    gMonitor.initialize()
    for counter, description in (("RequestAtt", "Requests processed"),
                                 ("RequestFail", "Requests failed"),
                                 ("RequestOK", "Requests done")):
        gMonitor.registerActivity(counter, description,
                                  "RequestExecutingAgent", "Requests/min", gMonitor.OP_SUM)
    # request client: injected one, or a fresh ReqClient
    self.requestClient = ReqClient() if requestClient is None else requestClient
def __init__( self, operation = None, csPath = None ):
  """Constructor.

  :param self: self reference
  :param Operation operation: Operation instance
  :param str csPath: CS path for this handler
  """
  OperationHandlerBase.__init__( self, operation, csPath )
  # # RegisterFile specific monitor info
  for counter, description in ( ( "RegisterAtt", "Attempted file registrations" ),
                                ( "RegisterOK", "Successful file registrations" ),
                                ( "RegisterFail", "Failed file registrations" ) ):
    gMonitor.registerActivity( counter, description, "RequestExecutingAgent", "Files/min", gMonitor.OP_SUM )
def __init__( self, operation = None, csPath = None ):
  """Constructor.

  :param self: self reference
  :param Operation operation: operation to execute
  :param str csPath: CS path for this handler
  """
  # # base class ctor
  DMSRequestOperationsBase.__init__( self, operation, csPath )
  # # gMonitor counters for replica removals
  for counter, description in ( ( "RemoveReplicaAtt", "Replica removals attempted" ),
                                ( "RemoveReplicaOK", "Successful replica removals" ),
                                ( "RemoveReplicaFail", "Failed replica removals" ) ):
    gMonitor.registerActivity( counter, description, "RequestExecutingAgent", "Files/min", gMonitor.OP_SUM )
def __init__( self, operation = None, csPath = None ):
  """Constructor.

  :param self: self reference
  :param Operation operation: Operation instance
  :param string csPath: CS path for this handler
  """
  # # base class ctor
  OperationHandlerBase.__init__( self, operation, csPath )
  # # gMonitor counters for log uploads
  for counter, description in ( ( "LogUploadAtt", "Log upload attempted" ),
                                ( "LogUploadOK", "Replications successful" ),
                                ( "LogUploadFail", "Replications failed" ) ):
    gMonitor.registerActivity( counter, description, "RequestExecutingAgent", "Files/min", gMonitor.OP_SUM )
  # work directory: LOGUPLOAD_CACHE wins, then AGENT_WORKDIRECTORY, then /tmp/LogUpload
  fallbackDir = os.environ.get( 'AGENT_WORKDIRECTORY', '/tmp/LogUpload' )
  self.workDirectory = os.environ.get( 'LOGUPLOAD_CACHE', fallbackDir )
def __init__( self, operation = None, csPath = None ):
  """Constructor.

  :param self: self reference
  :param Operation operation: Operation to execute
  :param str csPath: CS path for this handler
  """
  # # call base class ctor
  DMSRequestOperationsBase.__init__( self, operation, csPath )
  # # gMonitor counters for file removals
  for counter, description in ( ( "RemoveFileAtt", "File removals attempted" ),
                                ( "RemoveFileOK", "Successful file removals" ),
                                ( "RemoveFileFail", "Failed file removals" ) ):
    gMonitor.registerActivity( counter, description, "RequestExecutingAgent", "Files/min", gMonitor.OP_SUM )
  # # re pattern matching storage errors for files that are already gone
  self.reNotExisting = re.compile( r"(no|not) such file.*", re.IGNORECASE )
def __init__(self, operation=None, csPath=None):
    """Constructor.

    :param self: self reference
    :param Operation operation: Operation instance
    :param str csPath: CS path for this handler
    """
    super(ReplicateAndRegister, self).__init__(operation, csPath)
    # # per-file replication / registration counters, followed by FTS scheduling ones
    for counter, description in (("ReplicateAndRegisterAtt", "Replicate and register attempted"),
                                 ("ReplicateOK", "Replications successful"),
                                 ("ReplicateFail", "Replications failed"),
                                 ("RegisterOK", "Registrations successful"),
                                 ("RegisterFail", "Registrations failed"),
                                 ("FTSScheduleAtt", "Files schedule attempted"),
                                 ("FTSScheduleOK", "File schedule successful"),
                                 ("FTSScheduleFail", "File schedule failed")):
        gMonitor.registerActivity(counter, description,
                                  "RequestExecutingAgent", "Files/min", gMonitor.OP_SUM)
    # # SE cache
    # Clients
    self.fc = FileCatalog()
def initializeFileCatalogHandler(serviceInfo):
    """ handler initialisation """
    global gFileCatalogDB

    dbLocation = getServiceOption(serviceInfo, 'Database', 'DataManagement/FileCatalogDB')
    gFileCatalogDB = FileCatalogDB(dbLocation)

    databaseConfig = {}

    # Managers (plugins) used for DB interaction; each default may be
    # overridden by a service option of the same name
    gLogger.info("Initializing with FileCatalog with following managers:")
    defaultManagers = {'UserGroupManager': 'UserAndGroupManagerDB',
                       'SEManager': 'SEManagerDB',
                       'SecurityManager': 'NoSecurityManager',
                       'DirectoryManager': 'DirectoryLevelTree',
                       'FileManager': 'FileManager',
                       'DirectoryMetadata': 'DirectoryMetadata',
                       'FileMetadata': 'FileMetadata',
                       'DatasetManager': 'DatasetManager'}
    for configKey, defaultValue in sorted(defaultManagers.items()):
        configValue = getServiceOption(serviceInfo, configKey, defaultValue)
        gLogger.info("%-20s : %-20s" % (str(configKey), str(configValue)))
        databaseConfig[configKey] = configValue

    # General configuration of the database, same override mechanism
    gLogger.info(
        "Initializing the FileCatalog with the following configuration:")
    defaultConfig = {'UniqueGUID': False,
                     'GlobalReadAccess': True,
                     'LFNPFNConvention': 'Strong',
                     'ResolvePFN': True,
                     'DefaultUmask': 0o775,
                     'ValidFileStatus': ['AprioriGood', 'Trash', 'Removing', 'Probing'],
                     'ValidReplicaStatus': ['AprioriGood', 'Trash', 'Removing', 'Probing'],
                     'VisibleFileStatus': ['AprioriGood'],
                     'VisibleReplicaStatus': ['AprioriGood']}
    for configKey, defaultValue in sorted(defaultConfig.items()):
        configValue = getServiceOption(serviceInfo, configKey, defaultValue)
        gLogger.info("%-20s : %-20s" % (str(configKey), str(configValue)))
        databaseConfig[configKey] = configValue
    res = gFileCatalogDB.setConfig(databaseConfig)

    # gMonitor activities: (name, description, unit)
    for activity, description, unit in (
            ("AddFile", "Amount of addFile calls", "calls/min"),
            ("AddFileSuccessful", "Files successfully added", "files/min"),
            ("AddFileFailed", "Files failed to add", "files/min"),
            ("RemoveFile", "Amount of removeFile calls", "calls/min"),
            ("RemoveFileSuccessful", "Files successfully removed", "files/min"),
            ("RemoveFileFailed", "Files failed to remove", "files/min"),
            ("AddReplica", "Amount of addReplica calls", "calls/min"),
            ("AddReplicaSuccessful", "Replicas successfully added", "replicas/min"),
            ("AddReplicaFailed", "Replicas failed to add", "replicas/min"),
            ("RemoveReplica", "Amount of removeReplica calls", "calls/min"),
            ("RemoveReplicaSuccessful", "Replicas successfully removed", "replicas/min"),
            ("RemoveReplicaFailed", "Replicas failed to remove", "replicas/min"),
            ("ListDirectory", "Amount of listDirectory calls", "calls/min")):
        gMonitor.registerActivity(activity, description,
                                  "FileCatalogHandler", unit, gMonitor.OP_SUM)

    return res
def initialize( self ):
  """ agent's initialization """
  # # data manager
  self.dataManager = DataManager()
  log = self.log.getSubLogger( "initialize" )

  # CS options (class defaults as fallbacks), each one logged
  self.FTSPLACEMENT_REFRESH = self.am_getOption( "FTSPlacementValidityPeriod", self.FTSPLACEMENT_REFRESH )
  log.info( "FTSPlacement validity period = %s s" % self.FTSPLACEMENT_REFRESH )
  self.SUBMIT_COMMAND = self.am_getOption( "SubmitCommand", self.SUBMIT_COMMAND )
  log.info( "FTS submit command = %s" % self.SUBMIT_COMMAND )
  self.MONITOR_COMMAND = self.am_getOption( "MonitorCommand", self.MONITOR_COMMAND )
  log.info( "FTS commands: submit = %s monitor %s" % ( self.SUBMIT_COMMAND, self.MONITOR_COMMAND ) )
  self.PIN_TIME = self.am_getOption( "PinTime", self.PIN_TIME )
  stageFlag = {True: "yes", False: "no"}[bool( self.PIN_TIME )]
  log.info( "Stage files before submission = ", stageFlag )
  self.MAX_ACTIVE_JOBS = self.am_getOption( "MaxActiveJobsPerRoute", self.MAX_ACTIVE_JOBS )
  log.info( "Max active FTSJobs/route = ", str( self.MAX_ACTIVE_JOBS ) )
  self.MAX_FILES_PER_JOB = self.am_getOption( "MaxFilesPerJob", self.MAX_FILES_PER_JOB )
  log.info( "Max FTSFiles/FTSJob = ", str( self.MAX_FILES_PER_JOB ) )
  self.MAX_ATTEMPT = self.am_getOption( "MaxTransferAttempts", self.MAX_ATTEMPT )
  log.info( "Max transfer attempts = ", str( self.MAX_ATTEMPT ) )

  # # thread pool bounds, normalised so that MIN <= MAX
  self.MIN_THREADS = self.am_getOption( "MinThreads", self.MIN_THREADS )
  self.MAX_THREADS = self.am_getOption( "MaxThreads", self.MAX_THREADS )
  bounds = sorted( ( abs( self.MIN_THREADS ), abs( self.MAX_THREADS ) ) )
  self.MIN_THREADS, self.MAX_THREADS = bounds[0], bounds[1]
  log.info( "ThreadPool min threads = ", str( self.MIN_THREADS ) )
  log.info( "ThreadPool max threads = ", str( self.MAX_THREADS ) )

  self.MAX_REQUESTS = self.am_getOption( "MaxRequests", self.MAX_REQUESTS )
  log.info( "Max Requests fetched = ", str( self.MAX_REQUESTS ) )
  self.MONITORING_INTERVAL = self.am_getOption( "MonitoringInterval", self.MONITORING_INTERVAL )
  log.info( "Minimum monitoring interval = ", str( self.MONITORING_INTERVAL ) )

  self.__ftsVersion = Operations().getValue( 'DataManagement/FTSVersion', 'FTS2' )
  log.info( "FTSVersion : %s" % self.__ftsVersion )
  log.info( "initialize: creation of FTSPlacement..." )
  createPlacement = self.resetFTSPlacement()
  if not createPlacement["OK"]:
    log.error( "initialize:", createPlacement["Message"] )
    return createPlacement

  # This sets the Default Proxy to used as that defined under
  # /Operations/Shifter/DataManager
  # the shifterProxy option in the Configuration can be used to change this default.
  self.am_setOption( 'shifterProxy', 'DataManager' )
  log.info( "will use DataManager proxy" )

  self.registrationProtocols = getRegistrationProtocols()

  # # gMonitor activities: ( name, description, unit ), all simple sums
  for activity, description, unit in (
      ( "RequestsAtt", "Attempted requests executions", "Requests/min" ),
      ( "RequestsOK", "Successful requests executions", "Requests/min" ),
      ( "RequestsFail", "Failed requests executions", "Requests/min" ),
      ( "FTSJobsSubAtt", "FTSJobs creation attempts", "Created FTSJobs/min" ),
      ( "FTSJobsSubOK", "FTSJobs submitted successfully", "Successful FTSJobs submissions/min" ),
      ( "FTSJobsSubFail", "FTSJobs submissions failed", "Failed FTSJobs submissions/min" ),
      ( "FTSJobsMonAtt", "FTSJobs monitored", "FTSJobs/min" ),
      ( "FTSJobsMonOK", "FTSJobs monitored successfully", "FTSJobs/min" ),
      ( "FTSJobsMonFail", "FTSJobs attempts failed", "FTSJobs/min" ),
      ( "FTSMonitorFail", "Failed FTS monitor executions", "Execution/mins" ) ):
    gMonitor.registerActivity( activity, description, "FTSAgent", unit, gMonitor.OP_SUM )

  # one accumulated counter per FTSJob state
  pollingTime = self.am_getOption( "PollingTime", 60 )
  for status in list( FTSJob.INITSTATES + FTSJob.TRANSSTATES + FTSJob.FAILEDSTATES + FTSJob.FINALSTATES ):
    gMonitor.registerActivity( "FTSJobs%s" % status, "FTSJobs %s" % status,
                               "FTSAgent", "FTSJobs/cycle", gMonitor.OP_ACUM, pollingTime )

  # averaged metrics
  for activity, description, unit in (
      ( "FtSJobsPerRequest", "Average FTSJobs per request", "FTSJobs/Request" ),
      ( "FTSFilesPerJob", "FTSFiles per FTSJob", "Number of FTSFiles per FTSJob" ),
      ( "FTSSizePerJob", "Average FTSFiles size per FTSJob", "Average submitted size per FTSJob" ) ):
    gMonitor.registerActivity( activity, description, "FTSAgent", unit, gMonitor.OP_MEAN )

  return S_OK()
def __init__(self, operation=None, csPath=None):
    """Constructor.

    :param self: self reference
    :param Operation operation: Operation instance
    :param str csPath: CS path for this handler
    """
    super(ReplicateAndRegister, self).__init__(operation, csPath)
    # # per-file replication / registration counters, followed by FTS scheduling ones
    for counter, description in (("ReplicateAndRegisterAtt", "Replicate and register attempted"),
                                 ("ReplicateOK", "Replications successful"),
                                 ("ReplicateFail", "Replications failed"),
                                 ("RegisterOK", "Registrations successful"),
                                 ("RegisterFail", "Registrations failed"),
                                 ("FTSScheduleAtt", "Files schedule attempted"),
                                 ("FTSScheduleOK", "File schedule successful"),
                                 ("FTSScheduleFail", "File schedule failed")):
        gMonitor.registerActivity(counter, description,
                                  "RequestExecutingAgent", "Files/min", gMonitor.OP_SUM)
    # # SE cache
    # Clients
    self.fc = FileCatalog()
def __init__( self, operation = None, csPath = None ):
  """Constructor.

  :param self: self reference
  :param ~Operation.Operation operation: Operation instance
  :param str csPath: CS path for this handler
  """
  super( MoveReplica, self ).__init__( operation, csPath )
  # # replication/registration counters followed by replica removal ones
  for counter, description in ( ( "ReplicateAndRegisterAtt", "Replicate and register attempted" ),
                                ( "ReplicateOK", "Replications successful" ),
                                ( "ReplicateFail", "Replications failed" ),
                                ( "RegisterOK", "Registrations successful" ),
                                ( "RegisterFail", "Registrations failed" ),
                                ( "RemoveReplicaAtt", "Replica removals attempted" ),
                                ( "RemoveReplicaOK", "Successful replica removals" ),
                                ( "RemoveReplicaFail", "Failed replica removals" ) ):
    gMonitor.registerActivity( counter, description, "RequestExecutingAgent", "Files/min", gMonitor.OP_SUM )
  # Init ConsistencyInspector: used to check replicas
  self.ci = ConsistencyInspector()
def __call__(self):
    """Execute the operation: replicate waiting files with the DataManager,
    then remove the source replicas of successfully transferred files.

    Checks (in order): replica consistency, source SE read access, target SE
    write access, source SE remove access; each banned/failed check returns
    early after recording monitoring marks.

    :return: S_OK / S_ERROR
    """
    # The flag 'rmsMonitoring' is set by the RequestTask and is False by default.
    # Here we use 'createRMSRecord' to create the ES record which is defined inside OperationHandlerBase.
    if self.rmsMonitoring:
        self.rmsMonitoringReporter = MonitoringReporter(
            monitoringType="RMSMonitoring")
    else:
        # # own gMonitor stuff for files (legacy monitoring path)
        gMonitor.registerActivity(
            "ReplicateAndRegisterAtt",
            "Replicate and register attempted",
            "RequestExecutingAgent",
            "Files/min",
            gMonitor.OP_SUM,
        )
        gMonitor.registerActivity("ReplicateOK", "Replications successful",
                                  "RequestExecutingAgent", "Files/min", gMonitor.OP_SUM)
        gMonitor.registerActivity("ReplicateFail", "Replications failed",
                                  "RequestExecutingAgent", "Files/min", gMonitor.OP_SUM)
        gMonitor.registerActivity("RegisterOK", "Registrations successful",
                                  "RequestExecutingAgent", "Files/min", gMonitor.OP_SUM)
        gMonitor.registerActivity("RegisterFail", "Registrations failed",
                                  "RequestExecutingAgent", "Files/min", gMonitor.OP_SUM)
        gMonitor.registerActivity("RemoveReplicaAtt", "Replica removals attempted",
                                  "RequestExecutingAgent", "Files/min", gMonitor.OP_SUM)
        gMonitor.registerActivity("RemoveReplicaOK", "Successful replica removals",
                                  "RequestExecutingAgent", "Files/min", gMonitor.OP_SUM)
        gMonitor.registerActivity("RemoveReplicaFail", "Failed replica removals",
                                  "RequestExecutingAgent", "Files/min", gMonitor.OP_SUM)

    # # check replicas first
    # NOTE(review): a failed replica check is only logged; execution
    # continues regardless -- confirm this best-effort behaviour is intended.
    res = self.__checkReplicas()
    if not res["OK"]:
        self.log.error("Failed to check replicas", res["Message"])

    sourceSE = self.operation.SourceSE if self.operation.SourceSE else None
    if sourceSE:
        # # check source se for read
        bannedSource = self.checkSEsRSS(sourceSE, "ReadAccess")
        if not bannedSource["OK"]:
            # RSS query itself failed: mark the whole operation as
            # attempted + failed, then propagate the error.
            if self.rmsMonitoring:
                for status in ["Attempted", "Failed"]:
                    self.rmsMonitoringReporter.addRecord(
                        self.createRMSRecord(status, len(self.operation)))
                self.rmsMonitoringReporter.commit()
            else:
                gMonitor.addMark("ReplicateAndRegisterAtt", len(self.operation))
                gMonitor.addMark("ReplicateFail", len(self.operation))
            return bannedSource

        if bannedSource["Value"]:
            # Source banned for reading: not an error for the request itself.
            self.operation.Error = "SourceSE %s is banned for reading" % sourceSE
            self.log.info(self.operation.Error)
            return S_OK(self.operation.Error)

    # # check targetSEs for write
    bannedTargets = self.checkSEsRSS()
    if not bannedTargets["OK"]:
        if self.rmsMonitoring:
            for status in ["Attempted", "Failed"]:
                self.rmsMonitoringReporter.addRecord(
                    self.createRMSRecord(status, len(self.operation)))
            self.rmsMonitoringReporter.commit()
        else:
            gMonitor.addMark("ReplicateAndRegisterAtt", len(self.operation))
            gMonitor.addMark("ReplicateFail", len(self.operation))
        return bannedTargets

    if bannedTargets["Value"]:
        self.operation.Error = "%s targets are banned for writing" % ",".join(
            bannedTargets["Value"])
        return S_OK(self.operation.Error)

    # Can continue now
    self.log.verbose("No targets banned for writing")

    # # check sourceSEs for removal
    # # for removal the targetSEs are the sourceSEs of the replication
    targetSEs = self.operation.sourceSEList
    bannedTargets = self.checkSEsRSS(targetSEs, access="RemoveAccess")
    if not bannedTargets["OK"]:
        if self.rmsMonitoring:
            for status in ["Attempted", "Failed"]:
                self.rmsMonitoringReporter.addRecord(
                    self.createRMSRecord(status, len(self.operation)))
            self.rmsMonitoringReporter.commit()
        else:
            gMonitor.addMark("RemoveReplicaAtt")
            gMonitor.addMark("RemoveReplicaFail")
        return bannedTargets

    if bannedTargets["Value"]:
        return S_OK("%s targets are banned for removal" % ",".join(bannedTargets["Value"]))

    # Can continue now
    self.log.verbose("No targets banned for removal")

    # Do the transfer
    # # get waiting files. If none just return
    waitingFiles = self.getWaitingFilesList()
    if not waitingFiles:
        return S_OK()

    # # loop over files
    self.log.info("Transferring files using Data manager...")
    for opFile in waitingFiles:
        res = self.dmTransfer(opFile)
        if not res["OK"]:
            continue
        else:
            # Do the replica removal
            # NOTE(review): this block runs inside the per-file loop, and
            # toRemoveDict is rebuilt from ALL waitingFiles each time -- so
            # every successful transfer triggers a removal attempt for the
            # whole batch.  Confirm the repetition is intended (presumably
            # dmRemoval is idempotent for already-removed replicas).
            self.log.info("Removing files using Data manager...")
            toRemoveDict = dict([(opFile.LFN, opFile) for opFile in waitingFiles])
            self.log.info("todo: %s replicas to delete from %s sites" %
                          (len(toRemoveDict), len(targetSEs)))
            self.dmRemoval(toRemoveDict, targetSEs)

    return S_OK()
def initializeFileCatalogHandler( serviceInfo ):
  """ Handler initialisation: create the global FileCatalogDB, configure its
      manager plugins and general options from the service configuration,
      and register the gMonitor activities.

      :param serviceInfo: service information passed by the DISET machinery
      :return: S_OK/S_ERROR as returned by FileCatalogDB.setConfig
  """
  global gFileCatalogDB

  dbLocation = getServiceOption( serviceInfo, 'Database', 'DataManagement/FileCatalogDB' )
  gFileCatalogDB = FileCatalogDB( dbLocation )

  databaseConfig = {}
  # Obtain the plugins to be used for DB interaction
  gLogger.info( "Initializing with FileCatalog with following managers:" )
  defaultManagers = { 'UserGroupManager'  : 'UserAndGroupManagerDB',
                      'SEManager'         : 'SEManagerDB',
                      'SecurityManager'   : 'NoSecurityManager',
                      'DirectoryManager'  : 'DirectoryLevelTree',
                      'FileManager'       : 'FileManager',
                      'DirectoryMetadata' : 'DirectoryMetadata',
                      'FileMetadata'      : 'FileMetadata',
                      'DatasetManager'    : 'DatasetManager' }
  for configKey in sorted( defaultManagers ):
    configValue = getServiceOption( serviceInfo, configKey, defaultManagers[configKey] )
    gLogger.info( "%-20s : %-20s" % ( str( configKey ), str( configValue ) ) )
    databaseConfig[configKey] = configValue

  # Obtain some general configuration of the database
  gLogger.info( "Initializing the FileCatalog with the following configuration:" )
  # DefaultUmask uses the 0o prefix (same value as legacy 0775) so the literal
  # is valid on both Python 2.6+ and Python 3.
  defaultConfig = { 'UniqueGUID'          : False,
                    'GlobalReadAccess'    : True,
                    'LFNPFNConvention'    : 'Strong',
                    'ResolvePFN'          : True,
                    'DefaultUmask'        : 0o775,
                    'ValidFileStatus'     : ['AprioriGood', 'Trash', 'Removing', 'Probing'],
                    'ValidReplicaStatus'  : ['AprioriGood', 'Trash', 'Removing', 'Probing'],
                    'VisibleFileStatus'   : ['AprioriGood'],
                    'VisibleReplicaStatus': ['AprioriGood'] }
  for configKey in sorted( defaultConfig ):
    configValue = getServiceOption( serviceInfo, configKey, defaultConfig[configKey] )
    gLogger.info( "%-20s : %-20s" % ( str( configKey ), str( configValue ) ) )
    databaseConfig[configKey] = configValue

  res = gFileCatalogDB.setConfig( databaseConfig )

  # All monitoring activities share the component and operation; only the
  # name, description and unit differ, so register them from a table instead
  # of thirteen copy-pasted calls.
  monitoredActivities = [ ( "AddFile", "Amount of addFile calls", "calls/min" ),
                          ( "AddFileSuccessful", "Files successfully added", "files/min" ),
                          ( "AddFileFailed", "Files failed to add", "files/min" ),
                          ( "RemoveFile", "Amount of removeFile calls", "calls/min" ),
                          ( "RemoveFileSuccessful", "Files successfully removed", "files/min" ),
                          ( "RemoveFileFailed", "Files failed to remove", "files/min" ),
                          ( "AddReplica", "Amount of addReplica calls", "calls/min" ),
                          ( "AddReplicaSuccessful", "Replicas successfully added", "replicas/min" ),
                          ( "AddReplicaFailed", "Replicas failed to add", "replicas/min" ),
                          ( "RemoveReplica", "Amount of removeReplica calls", "calls/min" ),
                          ( "RemoveReplicaSuccessful", "Replicas successfully removed", "replicas/min" ),
                          ( "RemoveReplicaFailed", "Replicas failed to remove", "replicas/min" ),
                          ( "ListDirectory", "Amount of listDirectory calls", "calls/min" ) ]
  for activityName, activityDescription, activityUnit in monitoredActivities:
    gMonitor.registerActivity( activityName, activityDescription,
                               "FileCatalogHandler", activityUnit, gMonitor.OP_SUM )
  return res
def __init__(self, *args, **kwargs):
    """Constructor: read the ProcessPool tuning options, discover the
    configured operation handlers (with their per-operation / per-file
    timeouts) and register the common monitoring activities.
    """
    # Base agent construction first
    AgentModule.__init__(self, *args, **kwargs)

    # --- ProcessPool related options ---
    self.__requestsPerCycle = self.am_getOption("RequestsPerCycle", self.__requestsPerCycle)
    self.log.info("Requests/cycle = %d" % self.__requestsPerCycle)
    self.__minProcess = self.am_getOption("MinProcess", self.__minProcess)
    self.log.info("ProcessPool min process = %d" % self.__minProcess)
    # NOTE(review): unlike the other options, MaxProcess falls back to a
    # hard-coded 4 rather than a class-level default -- confirm intended.
    self.__maxProcess = self.am_getOption("MaxProcess", 4)
    self.log.info("ProcessPool max process = %d" % self.__maxProcess)
    self.__queueSize = self.am_getOption("ProcessPoolQueueSize", self.__queueSize)
    self.log.info("ProcessPool queue size = %d" % self.__queueSize)
    self.__poolTimeout = int(self.am_getOption("ProcessPoolTimeout", self.__poolTimeout))
    self.log.info("ProcessPool timeout = %d seconds" % self.__poolTimeout)
    self.__poolSleep = int(self.am_getOption("ProcessPoolSleep", self.__poolSleep))
    self.log.info("ProcessPool sleep time = %d seconds" % self.__poolSleep)
    self.__bulkRequest = self.am_getOption("BulkRequest", 0)
    self.log.info("Bulk request size = %d" % self.__bulkRequest)

    # --- agent identity and CS location ---
    self.agentName = self.am_getModuleParam("fullName")
    self.__configPath = PathFinder.getAgentSection(self.agentName)

    # --- operation handlers from the CS ---
    opHandlersPath = "%s/%s" % (self.__configPath, "OperationHandlers")
    handlerSections = gConfig.getSections(opHandlersPath)
    if not handlerSections["OK"]:
        self.log.error(handlerSections["Message"])
        raise AgentConfigError("OperationHandlers section not found in CS under %s" % self.__configPath)

    self.timeOuts = dict()
    self.handlersDict = dict()
    for handlerName in handlerSections["Value"]:
        locationPath = "%s/%s/Location" % (opHandlersPath, handlerName)
        handlerLocation = gConfig.getValue(locationPath, "")
        if not handlerLocation:
            # A handler without a Location is skipped, not fatal.
            self.log.error("%s not set for %s operation handler" % (locationPath, handlerName))
            continue
        # Start from the agent-wide defaults, then apply per-handler overrides.
        handlerTimeouts = {"PerFile": self.__fileTimeout,
                           "PerOperation": self.__operationTimeout}
        perOperation = gConfig.getValue("%s/%s/TimeOut" % (opHandlersPath, handlerName), 0)
        if perOperation:
            handlerTimeouts["PerOperation"] = perOperation
        perFile = gConfig.getValue("%s/%s/TimeOutPerFile" % (opHandlersPath, handlerName), 0)
        if perFile:
            handlerTimeouts["PerFile"] = perFile
        self.timeOuts[handlerName] = handlerTimeouts
        self.handlersDict[handlerName] = handlerLocation

    self.log.info("Operation handlers:")
    for index, (handlerName, handlerLocation) in enumerate(self.handlersDict.items()):
        self.log.info("[%s] %s: %s (timeout: %d s + %d s per file)" %
                      (index, handlerName, handlerLocation,
                       self.timeOuts[handlerName]['PerOperation'],
                       self.timeOuts[handlerName]['PerFile']))

    # --- common monitor activity ---
    gMonitor.registerActivity("Iteration", "Agent Loops",
                              "RequestExecutingAgent", "Loops/min", gMonitor.OP_SUM)
    gMonitor.registerActivity("Processed", "Request Processed",
                              "RequestExecutingAgent", "Requests/min", gMonitor.OP_SUM)
    gMonitor.registerActivity("Done", "Request Completed",
                              "RequestExecutingAgent", "Requests/min", gMonitor.OP_SUM)

    # cache of requests currently being executed
    self.__requestCache = dict()

    # ?? Probably should be removed
    self.FTSMode = self.am_getOption("FTSMode", False)
def __call__(self):
    """ reTransfer operation execution: ask the (single) target StorageElement
    to retransfer each waiting file via retransferOnlineFile, marking files
    "Done" on success and "Failed" (with an Error) otherwise.

    :return: S_OK / S_ERROR
    """
    # The flag 'rmsMonitoring' is set by the RequestTask and is False by default.
    # Here we use 'createRMSRecord' to create the ES record which is defined inside OperationHandlerBase.
    if self.rmsMonitoring:
        self.rmsMonitoringReporter = MonitoringReporter(monitoringType="RMSMonitoring")
    else:
        # # gMonitor stuff (legacy monitoring path)
        gMonitor.registerActivity("FileReTransferAtt", "File retransfers attempted",
                                  "RequestExecutingAgent", "Files/min", gMonitor.OP_SUM)
        gMonitor.registerActivity("FileReTransferOK", "File retransfers successful",
                                  "RequestExecutingAgent", "Files/min", gMonitor.OP_SUM)
        gMonitor.registerActivity("FileReTransferFail", "File retransfers failed",
                                  "RequestExecutingAgent", "Files/min", gMonitor.OP_SUM)

    # # list of targetSEs
    targetSEs = self.operation.targetSEList
    # # check targetSEs for removal
    # NOTE(review): targetSEs[0] is accessed before the len(targetSEs) != 1
    # guard below, so an empty target list would raise IndexError here --
    # presumably the Operation always carries at least one target; confirm.
    targetSE = targetSEs[0]
    bannedTargets = self.checkSEsRSS(targetSE)
    if not bannedTargets['OK']:
        # RSS query failed: record attempted + failed, propagate the error.
        if self.rmsMonitoring:
            for status in ["Attempted", "Failed"]:
                self.rmsMonitoringReporter.addRecord(
                    self.createRMSRecord(status, len(self.operation))
                )
            self.rmsMonitoringReporter.commit()
        else:
            gMonitor.addMark("FileReTransferAtt")
            gMonitor.addMark("FileReTransferFail")
        return bannedTargets

    if bannedTargets['Value']:
        # Banned target is not an error for the request itself.
        return S_OK("%s targets are banned for writing" % ",".join(bannedTargets['Value']))

    # # get waiting files
    waitingFiles = self.getWaitingFilesList()
    # # prepare waiting files, keyed by PFN
    toRetransfer = dict([(opFile.PFN, opFile) for opFile in waitingFiles])

    if self.rmsMonitoring:
        self.rmsMonitoringReporter.addRecord(
            self.createRMSRecord("Attempted", len(toRetransfer))
        )
    else:
        gMonitor.addMark("FileReTransferAtt", len(toRetransfer))

    if len(targetSEs) != 1:
        # Retransfer only makes sense against exactly one SE: fail all files.
        error = "only one TargetSE allowed, got %d" % len(targetSEs)
        for opFile in toRetransfer.values():
            opFile.Error = error
            opFile.Status = "Failed"
        self.operation.Error = error
        if self.rmsMonitoring:
            self.rmsMonitoringReporter.addRecord(
                self.createRMSRecord("Failed", len(toRetransfer))
            )
            self.rmsMonitoringReporter.commit()
        else:
            gMonitor.addMark("FileReTransferFail", len(toRetransfer))
        return S_ERROR(error)

    se = StorageElement(targetSE)
    for opFile in toRetransfer.values():
        reTransfer = se.retransferOnlineFile(opFile.LFN)
        if not reTransfer["OK"]:
            # Whole call failed: keep the file Waiting, record the error.
            opFile.Error = reTransfer["Message"]
            self.log.error("Retransfer failed", opFile.Error)
            if self.rmsMonitoring:
                self.rmsMonitoringReporter.addRecord(
                    self.createRMSRecord("Failed", 1)
                )
            else:
                gMonitor.addMark("FileReTransferFail", 1)
            continue
        reTransfer = reTransfer["Value"]
        if opFile.LFN in reTransfer["Failed"]:
            # Call succeeded but this particular LFN was reported failed.
            opFile.Error = reTransfer["Failed"][opFile.LFN]
            self.log.error("Retransfer failed", opFile.Error)
            if self.rmsMonitoring:
                self.rmsMonitoringReporter.addRecord(
                    self.createRMSRecord("Failed", 1)
                )
            else:
                gMonitor.addMark("FileReTransferFail", 1)
            continue
        opFile.Status = "Done"
        self.log.info("%s retransfer done" % opFile.LFN)
        if self.rmsMonitoring:
            self.rmsMonitoringReporter.addRecord(
                self.createRMSRecord("Successful", 1)
            )
        else:
            gMonitor.addMark("FileReTransferOK", 1)

    if self.rmsMonitoring:
        self.rmsMonitoringReporter.commit()

    return S_OK()