def test( self ):
  """ Check DataLoggingClient construction, its configured server path and ping.

  :param self: self reference
  """
  client = DataLoggingClient()
  self.assertEqual( isinstance( client, DataLoggingClient ), True )
  self.assertEqual( client.getServer(), "DataManagement/DataLogging" )
  pingReply = client.ping()
  self.assertEqual( pingReply["OK"], True )
def dataLoggingClient(cls):
    """Return the shared DataLoggingClient, creating and caching it on first use.

    :param cls: class reference
    """
    cached = cls.__dataLoggingClient
    if not cached:
        cached = DataLoggingClient()
        cls.__dataLoggingClient = cached
    return cached
def initialize(self):
    """Create the request/replica clients, the thread pool and set the shifter proxy.

    :return: S_OK()
    """
    self.RequestDBClient = RequestClient()
    self.ReplicaManager = ReplicaManager()
    self.DataLog = DataLoggingClient()
    threadCount = self.am_getOption('NumberOfThreads', 1)
    self.maxNumberOfThreads = threadCount
    self.threadPoolDepth = self.am_getOption('ThreadPoolDepth', 1)
    self.threadPool = ThreadPool(1, threadCount)
    # Run with the default proxy defined under /Operations/Shifter/DataManager;
    # the shifterProxy configuration option can override this default.
    self.am_setOption('shifterProxy', 'DataManager')
    return S_OK()
def dataLoggingClient( cls ):
  """ Return the cached DataLoggingClient, instantiating it on first call.

  :param cls: class reference
  """
  if cls.__dataLoggingClient:
    return cls.__dataLoggingClient
  # deferred import, presumably to avoid a circular module dependency -- TODO confirm
  from DIRAC.DataManagementSystem.Client.DataLoggingClient import DataLoggingClient
  cls.__dataLoggingClient = DataLoggingClient()
  return cls.__dataLoggingClient
class ClientECase(DataLoggingArgumentsTestCase):
    # ClientE raises an exception from a decorated method; the decorator is
    # expected to let the call be logged (with error details) all the same.

    def setUp(self):
        self.dlc = DataLoggingClient()

    def test_no_exception(self):
        ClientE().doSomething()
        res = self.dlc.getSequenceByID('7')
        self.assertTrue(res['OK'], res.get('Message', 'OK'))
        sequence = res['Value'][0]
        self.assertEqual(len(sequence.methodCalls), 1)
        self.assertEqual(sequence.hostName.name, socket.gethostname())
        proxyInfo = getProxyInfo()
        if proxyInfo['OK']:
            proxyInfo = proxyInfo['Value']
            userName = proxyInfo.get('username')
            group = proxyInfo.get('group')
            if userName:
                self.assertEqual(sequence.userName.name, userName)
            if group:
                self.assertEqual(sequence.group.name, group)
        self.assertEqual(sequence.caller.name, '__main__.ClientE.doSomething')
        call = sequence.methodCalls[0]
        call.actions.sort(key=lambda action: action.file.name)
        self.assertEqual(call.name.name, 'TestDataManager.derror')
        expectedMessage = 'No such file or directory ( 2 : the interesting technical message)'
        # every one of the four actions failed with errno 2 and the same message
        for position in range(4):
            action = call.actions[position]
            self.assertEqual(action.file.name, '/data/file%d' % (position + 1))
            self.assertEqual(action.status, 'Failed')
            self.assertEqual(action.errorCode, 2)
            self.assertEqual(action.errorMessage, expectedMessage)
def initialize(self):
    """Build the DB clients and helpers this agent uses and set the shifter proxy.

    :return: S_OK()
    """
    self.section = PathFinder.getAgentSection(AGENT_NAME)
    self.RequestDB = RequestDBMySQL()
    self.TransferDB = TransferDB()
    self.DataLog = DataLoggingClient()
    self.factory = StorageFactory()
    self.rm = ReplicaManager()
    # This sets the Default Proxy to used as that defined under
    # /Operations/Shifter/DataManager
    # the shifterProxy option in the Configuration can be used to change this default.
    self.am_setOption('shifterProxy', 'DataManager')
    return S_OK()
def initialize( self ):
  """ Instantiate the clients and the processing thread pool, and select the
  DataManager shifter proxy for this agent.

  :return: S_OK()
  """
  self.RequestDBClient = RequestClient()
  self.ReplicaManager = ReplicaManager()
  self.DataLog = DataLoggingClient()
  self.maxNumberOfThreads = self.am_getOption( 'NumberOfThreads', 1 )
  self.threadPoolDepth = self.am_getOption( 'ThreadPoolDepth', 1 )
  self.threadPool = ThreadPool( 1, self.maxNumberOfThreads )
  # Default proxy is the one defined under /Operations/Shifter/DataManager;
  # the shifterProxy configuration option overrides it.
  self.am_setOption( 'shifterProxy', 'DataManager' )
  return S_OK()
def initialize( self ):
  """ Set up clients, the shifter proxy and the gMonitor activities for every
  monitored storage element.

  :return: S_OK()
  """
  self.ReplicaManager = ReplicaManager()
  self.DataLog = DataLoggingClient()
  self.DataIntegrityClient = DataIntegrityClient()
  if self.am_getOption( 'DirectDB', False ):
    from DIRAC.StorageManagementSystem.DB.MigrationMonitoringDB import MigrationMonitoringDB
    self.MigrationMonitoringDB = MigrationMonitoringDB()
  else:
    from DIRAC.StorageManagementSystem.Client.MigrationMonitoringClient import MigrationMonitoringClient
    self.MigrationMonitoringDB = MigrationMonitoringClient()
  # Default proxy is the one defined under /Operations/Shifter/DataManager;
  # the shifterProxy configuration option overrides it.
  self.am_setOption( 'shifterProxy', 'DataManager' )
  self.userName = '******'
  self.storageElements = self.am_getOption( 'StorageElements', ['CERN-RAW'] )
  self.lastMonitors = {}
  gMonitor.registerActivity( "Iteration", "Agent Loops/min", "MigrationMonitoringAgent", "Loops", gMonitor.OP_SUM )
  if self.storageElements:
    gLogger.info( "Agent will be initialised to monitor the following SEs:" )
    # ( name pattern, description, unit, aggregation ) for each per-SE activity
    perSEActivities = [ ( "Iteration%s", "Agent Loops/min", "Loops", gMonitor.OP_SUM ),
                        ( "MigratingFiles%s", "Files waiting for migration", "Files", gMonitor.OP_MEAN ),
                        ( "MigratedFiles%s", "Newly migrated files", "Files", gMonitor.OP_SUM ),
                        ( "TotalMigratedFiles%s", "Total migrated files", "Files", gMonitor.OP_ACUM ),
                        ( "TotalMigratedSize%s", "Total migrated file size", "GB", gMonitor.OP_ACUM ),
                        ( "ChecksumMatches%s", "Successfully migrated files", "Files", gMonitor.OP_SUM ),
                        ( "TotalChecksumMatches%s", "Total successfully migrated files", "Files", gMonitor.OP_ACUM ),
                        ( "ChecksumMismatches%s", "Erroneously migrated files", "Files", gMonitor.OP_SUM ),
                        ( "TotalChecksumMismatches%s", "Total erroneously migrated files", "Files", gMonitor.OP_ACUM ),
                        ( "MigrationTime%s", "Average migration time", "Seconds", gMonitor.OP_MEAN ) ]
    for se in self.storageElements:
      gLogger.info( se )
      self.lastMonitors[se] = datetime.datetime.utcfromtimestamp( 0.0 )
      for pattern, description, unit, operation in perSEActivities:
        gMonitor.registerActivity( pattern % se, description, "MigrationMonitoringAgent", unit, operation )
  return S_OK()
def initialize( self ):
  """ Create the clients, register all gMonitor activities for the transfer
  agent, build the thread pool and select the DataManager shifter proxy.

  :return: S_OK()
  """
  self.RequestDBClient = RequestClient()
  self.ReplicaManager = ReplicaManager()
  self.DataLog = DataLoggingClient()
  # ( activity name, description, unit ) -- every activity aggregates with OP_SUM
  activities = [ ( "Iteration", "Agent Loops", "Loops/min" ),
                 ( "Execute", "Request Processed", "Requests/min" ),
                 ( "Done", "Request Completed", "Requests/min" ),
                 ( "Replicate and register", "Replicate and register operations", "Attempts/min" ),
                 ( "Replicate", "Replicate operations", "Attempts/min" ),
                 ( "Put and register", "Put and register operations", "Attempts/min" ),
                 ( "Put", "Put operations", "Attempts/min" ),
                 ( "Replication successful", "Successful replications", "Successful/min" ),
                 ( "Put successful", "Successful puts", "Successful/min" ),
                 ( "Replication failed", "Failed replications", "Failed/min" ),
                 ( "Put failed", "Failed puts", "Failed/min" ),
                 ( "Replica registration successful", "Successful replica registrations", "Successful/min" ),
                 ( "File registration successful", "Successful file registrations", "Successful/min" ),
                 ( "Replica registration failed", "Failed replica registrations", "Failed/min" ),
                 ( "File registration failed", "Failed file registrations", "Failed/min" ) ]
  for name, description, unit in activities:
    gMonitor.registerActivity( name, description, "TransferAgent", unit, gMonitor.OP_SUM )
  self.maxNumberOfThreads = self.am_getOption( 'NumberOfThreads', 1 )
  self.threadPoolDepth = self.am_getOption( 'ThreadPoolDepth', 1 )
  self.threadPool = ThreadPool( 1, self.maxNumberOfThreads )
  # Default proxy is the one defined under /Operations/Shifter/DataManager;
  # the shifterProxy configuration option overrides it.
  self.am_setOption( 'shifterProxy', 'DataManager' )
  return S_OK()
sources.append( srcSE + str( random.randint( 0, randomMax ) ) ) targets = [] for x in range( 4 ): targets.append( targetSE + str( random.randint( 0, randomMax ) ) ) for call in calls : for x in range( 2 ): call.addAction( DLAction( DLFile( files[x * 2] ) , 'Successful' , DLStorageElement( sources[x * 2] ), DLStorageElement( targets[x * 2] ), blob, None, None ) ) call.addAction( DLAction( DLFile( files[x * 2 + 1 ] ) , 'Failed', DLStorageElement( sources[x * 2 + 1 ] ), DLStorageElement( targets[x * 2 + 1] ), blob, 'errorMessage', random.randint( 1, 1999 ) ) ) return sequence done = False start = time.time() client = DataLoggingClient( url = servAddress ) while not done : seq = makeSequence() res = client.insertSequence( seq ) if not res['OK']: print 'error %s' % res['Message'] if ( time.time() - start > maxDuration ): done = True
def setUp(self):
    """Create a fresh DataLoggingClient before each test."""
    self.dlc = DataLoggingClient()
class ClientECase(DataLoggingArgumentsTestCase):
    # ClientE raises an exception from a decorated method; the decorator is
    # expected to log the failed actions with their error details.

    def setUp(self):
        self.dlc = DataLoggingClient()

    def _checkProxyIdentity(self, sequence):
        """Compare the sequence's user/group with the current proxy, when available."""
        proxyInfo = getProxyInfo()
        if proxyInfo['OK']:
            info = proxyInfo['Value']
            userName = info.get('username')
            group = info.get('group')
            if userName:
                self.assertEqual(sequence.userName.name, userName)
            if group:
                self.assertEqual(sequence.group.name, group)

    def test_no_exception(self):
        client = ClientE()
        client.doSomething()
        res = self.dlc.getSequenceByID('7')
        self.assertTrue(res['OK'], res.get('Message', 'OK'))
        sequence = res['Value'][0]
        self.assertEqual(len(sequence.methodCalls), 1)
        hostName = socket.gethostname()
        self.assertEqual(sequence.hostName.name, hostName)
        self._checkProxyIdentity(sequence)
        self.assertEqual(sequence.caller.name, '__main__.ClientE.doSomething')
        methodCall = sequence.methodCalls[0]
        methodCall.actions.sort(key=lambda a: a.file.name)
        self.assertEqual(methodCall.name.name, 'TestDataManager.derror')
        message = 'No such file or directory ( 2 : the interesting technical message)'
        expected = [('/data/file1', 'Failed', 2, message),
                    ('/data/file2', 'Failed', 2, message),
                    ('/data/file3', 'Failed', 2, message),
                    ('/data/file4', 'Failed', 2, message)]
        for idx, (fileName, status, errorCode, errorMessage) in enumerate(expected):
            action = methodCall.actions[idx]
            self.assertEqual(action.file.name, fileName)
            self.assertEqual(action.status, status)
            self.assertEqual(action.errorCode, errorCode)
            self.assertEqual(action.errorMessage, errorMessage)
class ClientDCase(DataLoggingArgumentsTestCase):
    # ClientD raises an exception from a decorated method; the decorator must
    # propagate it to the caller while the whole sequence is still logged.

    def setUp(self):
        self.dlc = DataLoggingClient()

    def _checkCall(self, methodCall, name, files, statuses, errorMessage=None):
        """Sort a call's actions by file name and compare them to expectations."""
        methodCall.actions.sort(key=lambda action: action.file.name)
        self.assertEqual(methodCall.name.name, name)
        for index, (fileName, status) in enumerate(zip(files, statuses)):
            self.assertEqual(methodCall.actions[index].file.name, fileName)
            self.assertEqual(methodCall.actions[index].status, status)
            if errorMessage is not None:
                self.assertEqual(methodCall.actions[index].errorMessage, errorMessage)

    def test_no_exception(self):
        client = ClientD()
        # the decorated call itself must raise
        with self.assertRaises(Exception):
            client.doSomething()
        res = self.dlc.getSequenceByID('6')
        self.assertTrue(res['OK'], res.get('Message', 'OK'))
        sequence = res['Value'][0]
        self.assertEqual(len(sequence.methodCalls), 4)
        self.assertEqual(sequence.hostName.name, socket.gethostname())
        proxyInfo = getProxyInfo()
        if proxyInfo['OK']:
            proxyInfo = proxyInfo['Value']
            userName = proxyInfo.get('username')
            group = proxyInfo.get('group')
            if userName:
                self.assertEqual(sequence.userName.name, userName)
            if group:
                self.assertEqual(sequence.group.name, group)
        self.assertEqual(sequence.caller.name, '__main__.ClientD.doSomething')
        allFiles = ['/data/file1', '/data/file2', '/data/file3', '/data/file4']
        mixed = ['Successful', 'Failed', 'Successful', 'Failed']
        self._checkCall(sequence.methodCalls[0], 'TestDataManager.putAndRegister',
                        allFiles, ['Failed'] * 4, 'addFile exception')
        self._checkCall(sequence.methodCalls[1], 'TestFileCatalog.addFile',
                        ['/data/file1', '/data/file3'], ['Failed'] * 2, 'addFile exception')
        self._checkCall(sequence.methodCalls[2], 'TestStorageElement.putFile', allFiles, mixed)
        self._checkCall(sequence.methodCalls[3], 'TestStorageElement.getFileSize', allFiles, mixed)
class ClientBCase(DataLoggingArgumentsTestCase):
    # ClientB produces two logged sequences; check both were inserted correctly.

    def setUp(self):
        self.dlc = DataLoggingClient()

    def _assertActions(self, methodCall, expectedName, expectedPairs):
        """Check a call's name plus its (file, status) pairs, sorted by file name."""
        methodCall.actions.sort(key=lambda action: action.file.name)
        self.assertEqual(methodCall.name.name, expectedName)
        for index, (fileName, status) in enumerate(expectedPairs):
            self.assertEqual(methodCall.actions[index].file.name, fileName)
            self.assertEqual(methodCall.actions[index].status, status)

    def test_insertion_equal(self):
        # we call some methods, they are going to be logged
        client = ClientB()
        client.doSomething()
        res = self.dlc.getSequenceByID('3')
        self.assertTrue(res['OK'], res.get('Message', 'OK'))
        sequenceOne = res['Value'][0]
        res = self.dlc.getSequenceByID('4')
        self.assertTrue(res['OK'], res.get('Message', 'OK'))
        sequenceTwo = res['Value'][0]
        # we compare results
        self.assertEqual(len(sequenceOne.methodCalls), 4)
        self.assertEqual(len(sequenceTwo.methodCalls), 1)
        hostName = socket.gethostname()
        self.assertEqual(sequenceOne.hostName.name, hostName)
        self.assertEqual(sequenceTwo.hostName.name, hostName)
        proxyInfo = getProxyInfo()
        if proxyInfo['OK']:
            proxyInfo = proxyInfo['Value']
            userName = proxyInfo.get('username')
            group = proxyInfo.get('group')
            if userName:
                self.assertEqual(sequenceOne.userName.name, userName)
                self.assertEqual(sequenceTwo.userName.name, userName)
            if group:
                self.assertEqual(sequenceOne.group.name, group)
                self.assertEqual(sequenceTwo.group.name, group)
        self.assertEqual(sequenceOne.caller.name, '__main__.ClientB.doSomething')
        self._assertActions(sequenceOne.methodCalls[0], 'TestDataManager.putAndRegister',
                            [('/data/file1', 'Failed'), ('/data/file2', 'Failed'),
                             ('/data/file3', 'Successful'), ('/data/file4', 'Failed')])
        self._assertActions(sequenceOne.methodCalls[1], 'TestFileCatalog.addFile',
                            [('/data/file1', 'Failed'), ('/data/file3', 'Successful')])
        self._assertActions(sequenceOne.methodCalls[2], 'TestStorageElement.putFile',
                            [('/data/file1', 'Successful'), ('/data/file2', 'Failed'),
                             ('/data/file3', 'Successful'), ('/data/file4', 'Failed')])
        self._assertActions(sequenceOne.methodCalls[3], 'TestStorageElement.getFileSize',
                            [('/data/file1', 'Successful'), ('/data/file2', 'Failed'),
                             ('/data/file3', 'Successful'), ('/data/file4', 'Failed')])
        self.assertEqual(sequenceTwo.caller.name, '__main__.ClientB.doSomething')
        self._assertActions(sequenceTwo.methodCalls[0], 'TestFileCatalog.getFileSize',
                            [('/data/file3', 'Successful')])
class RegistrationAgent(AgentModule, RequestAgentMixIn):
    """Agent that takes 'register' requests from the RequestDB and performs
    the corresponding catalogue registrations through the ReplicaManager."""

    def initialize(self):
        """Create the clients and the thread pool, and set the shifter proxy.

        :return: S_OK()
        """
        self.RequestDBClient = RequestClient()
        self.ReplicaManager = ReplicaManager()
        self.DataLog = DataLoggingClient()
        self.maxNumberOfThreads = self.am_getOption('NumberOfThreads', 1)
        self.threadPoolDepth = self.am_getOption('ThreadPoolDepth', 1)
        self.threadPool = ThreadPool(1, self.maxNumberOfThreads)
        # This sets the Default Proxy to used as that defined under
        # /Operations/Shifter/DataManager
        # the shifterProxy option in the Configuration can be used to change this default.
        self.am_setOption('shifterProxy', 'DataManager')
        return S_OK()

    def execute(self):
        """Queue `threadPoolDepth` request-processing jobs and run one inline.

        :return: result of the inline executeRequest() call
        """
        for _ in range(self.threadPoolDepth):
            requestExecutor = ThreadedJob(self.executeRequest)
            self.threadPool.queueJob(requestExecutor)
        self.threadPool.processResults()
        return self.executeRequest()

    def executeRequest(self):
        """Fetch one 'register' request and execute its registerFile sub-requests.

        Always returns S_OK so the agent loop keeps running; per-file failures
        are logged and recorded via the DataLogging service.

        :return: S_OK()
        """
        ################################################
        # Get a request from request DB
        res = self.RequestDBClient.getRequest('register')
        if not res['OK']:
            gLogger.info("RegistrationAgent.execute: Failed to get request from database.")
            return S_OK()
        elif not res['Value']:
            gLogger.info("RegistrationAgent.execute: No requests to be executed found.")
            return S_OK()
        requestString = res['Value']['RequestString']
        requestName = res['Value']['RequestName']
        sourceServer = res['Value']['Server']
        try:
            jobID = int(res['Value']['JobID'])
        except (KeyError, ValueError, TypeError):
            # missing or malformed JobID: treat the request as not job-related
            jobID = 0
        gLogger.info("RegistrationAgent.execute: Obtained request %s" % requestName)

        result = self.RequestDBClient.getCurrentExecutionOrder(
            requestName, sourceServer)
        if result['OK']:
            currentOrder = result['Value']
        else:
            return S_OK('Can not get the request execution order')

        oRequest = RequestContainer(request=requestString)

        ################################################
        # Find the number of sub-requests from the request
        res = oRequest.getNumSubRequests('register')
        if not res['OK']:
            # BUGFIX: message said "transfer subrequests" — copy-paste from TransferAgent
            errStr = "RegistrationAgent.execute: Failed to obtain number of register subrequests."
            gLogger.error(errStr, res['Message'])
            return S_OK()
        gLogger.info("RegistrationAgent.execute: Found %s sub requests." % res['Value'])

        ################################################
        # For all the sub-requests in the request
        modified = False
        for ind in range(res['Value']):
            gLogger.info("RegistrationAgent.execute: Processing sub-request %s." % ind)
            subRequestAttributes = oRequest.getSubRequestAttributes(
                ind, 'register')['Value']
            subExecutionOrder = int(subRequestAttributes['ExecutionOrder'])
            subStatus = subRequestAttributes['Status']
            if subStatus == 'Waiting' and subExecutionOrder <= currentOrder:
                subRequestFiles = oRequest.getSubRequestFiles(
                    ind, 'register')['Value']
                operation = subRequestAttributes['Operation']
                ################################################
                # If the sub-request is a register file operation
                if operation == 'registerFile':
                    gLogger.info(
                        "RegistrationAgent.execute: Attempting to execute %s sub-request." % operation)
                    diracSE = str(subRequestAttributes['TargetSE'])
                    if diracSE == 'SE':
                        # We do not care about SE, put any there
                        diracSE = "CERN-FAILOVER"
                    catalog = subRequestAttributes['Catalogue']
                    if catalog == "None":
                        catalog = ''
                    subrequest_done = True
                    for subRequestFile in subRequestFiles:
                        if subRequestFile['Status'] == 'Waiting':
                            lfn = subRequestFile.get('LFN', '')
                            if lfn:
                                lfn = str(lfn)
                            physicalFile = subRequestFile.get('PFN', '')
                            if physicalFile:
                                physicalFile = str(physicalFile)
                            fileSize = subRequestFile.get('Size', 0)
                            if fileSize:
                                fileSize = int(fileSize)
                            fileGuid = subRequestFile.get('GUID', '')
                            if fileGuid:
                                fileGuid = str(fileGuid)
                            checksum = subRequestFile.get('Addler', '')
                            if checksum:
                                checksum = str(checksum)
                            if catalog == 'BookkeepingDB':
                                diracSE = 'CERN-HIST'
                            fileTuple = (lfn, physicalFile, fileSize, diracSE,
                                         fileGuid, checksum)
                            res = self.ReplicaManager.registerFile(
                                fileTuple, catalog)
                            # BUGFIX: removed leftover debug "print res"
                            if not res['OK']:
                                self.DataLog.addFileRecord(
                                    lfn, 'RegisterFail', diracSE, '', 'RegistrationAgent')
                                errStr = "RegistrationAgent.execute: Completely failed to register file."
                                gLogger.error(errStr, res['Message'])
                                subrequest_done = False
                            elif lfn in res['Value']['Failed']:
                                self.DataLog.addFileRecord(
                                    lfn, 'RegisterFail', diracSE, '', 'RegistrationAgent')
                                errStr = "RegistrationAgent.execute: Completely failed to register file."
                                gLogger.error(errStr, res['Value']['Failed'][lfn])
                                subrequest_done = False
                            else:
                                # BUGFIX: the success path recorded source 'TransferAgent'
                                # and updated the 'transfer' sub-request type — both
                                # copy-paste errors; this agent handles 'register'
                                # sub-requests, so the file status was never set to Done.
                                self.DataLog.addFileRecord(
                                    lfn, 'Register', diracSE, '', 'RegistrationAgent')
                                oRequest.setSubRequestFileAttributeValue(
                                    ind, 'register', lfn, 'Status', 'Done')
                                modified = True
                        else:
                            gLogger.info(
                                "RegistrationAgent.execute: File already completed.")
                    if subrequest_done:
                        oRequest.setSubRequestStatus(ind, 'register', 'Done')
                ################################################
                # If the sub-request is none of the above types
                else:
                    gLogger.error(
                        "RegistrationAgent.execute: Operation not supported.", operation)
                ################################################
                # Determine whether there are any active files
                if oRequest.isSubRequestEmpty(ind, 'register')['Value']:
                    oRequest.setSubRequestStatus(ind, 'register', 'Done')
            ################################################
            # If the sub-request is already in terminal state
            else:
                gLogger.info(
                    "RegistrationAgent.execute: Sub-request %s is status '%s' and not to be executed." %
                    (ind, subRequestAttributes['Status']))
        ################################################
        # Generate the new request string after operation
        requestString = oRequest.toXML()['Value']
        res = self.RequestDBClient.updateRequest(requestName, requestString,
                                                 sourceServer)
        if modified and jobID:
            result = self.finalizeRequest(requestName, jobID, sourceServer)
        return S_OK()
class ClientBCase( DataLoggingArgumentsTestCase ):
  """ ClientB produces two logged sequences; verify both were inserted. """

  def setUp( self ):
    self.dlc = DataLoggingClient()

  def test_insertion_equal( self ):
    # we call some methods, they are going to be logged
    ClientB().doSomething()
    sequences = []
    for sequenceID in ( '3', '4' ):
      res = self.dlc.getSequenceByID( sequenceID )
      self.assertTrue( res['OK'], res.get( 'Message', 'OK' ) )
      sequences.append( res['Value'][0] )
    sequenceOne, sequenceTwo = sequences
    # we compare results
    self.assertEqual( len( sequenceOne.methodCalls ), 4 )
    self.assertEqual( len( sequenceTwo.methodCalls ), 1 )
    hostName = socket.gethostname()
    proxyInfo = getProxyInfo()
    for sequence in ( sequenceOne, sequenceTwo ):
      self.assertEqual( sequence.hostName.name, hostName )
      if proxyInfo['OK']:
        userName = proxyInfo['Value'].get( 'username' )
        group = proxyInfo['Value'].get( 'group' )
        if userName:
          self.assertEqual( sequence.userName.name, userName )
        if group:
          self.assertEqual( sequence.group.name, group )
    self.assertEqual( sequenceOne.caller.name, '__main__.ClientB.doSomething' )
    fileNames = [ '/data/file%s' % number for number in range( 1, 5 ) ]
    alternating = [ 'Successful', 'Failed', 'Successful', 'Failed' ]
    expectations = [ ( sequenceOne.methodCalls[0], 'TestDataManager.putAndRegister',
                       fileNames, [ 'Failed', 'Failed', 'Successful', 'Failed' ] ),
                     ( sequenceOne.methodCalls[1], 'TestFileCatalog.addFile',
                       [ '/data/file1', '/data/file3' ], [ 'Failed', 'Successful' ] ),
                     ( sequenceOne.methodCalls[2], 'TestStorageElement.putFile',
                       fileNames, alternating ),
                     ( sequenceOne.methodCalls[3], 'TestStorageElement.getFileSize',
                       fileNames, alternating ) ]
    for methodCall, callName, names, statuses in expectations:
      methodCall.actions.sort( key = lambda action: action.file.name )
      self.assertEqual( methodCall.name.name, callName )
      for position, ( fileName, status ) in enumerate( zip( names, statuses ) ):
        self.assertEqual( methodCall.actions[position].file.name, fileName )
        self.assertEqual( methodCall.actions[position].status, status )
    sequenceTwo.methodCalls[0].actions.sort( key = lambda action: action.file.name )
    self.assertEqual( sequenceTwo.caller.name, '__main__.ClientB.doSomething' )
    self.assertEqual( sequenceTwo.methodCalls[0].name.name, 'TestFileCatalog.getFileSize' )
    self.assertEqual( sequenceTwo.methodCalls[0].actions[0].file.name, '/data/file3' )
    self.assertEqual( sequenceTwo.methodCalls[0].actions[0].status, 'Successful' )
class TransferAgent(AgentModule, RequestAgentMixIn):
    """Agent processing 'transfer' requests from the RequestDB.

    Each cycle pulls one request per thread, walks its transfer sub-requests
    and executes the requested operation (putAndRegister, put,
    replicateAndRegister, replicate, get) through the ReplicaManager,
    updating per-file status/errors in the request and emitting gMonitor
    marks and DataLogging records as it goes.
    """

    def initialize(self):
        """Create clients, register monitoring activities and set up the thread pool.

        :return: S_OK()
        """
        self.RequestDBClient = RequestClient()
        self.ReplicaManager = ReplicaManager()
        self.DataLog = DataLoggingClient()

        gMonitor.registerActivity("Iteration", "Agent Loops", "TransferAgent", "Loops/min", gMonitor.OP_SUM)
        gMonitor.registerActivity("Execute", "Request Processed", "TransferAgent", "Requests/min", gMonitor.OP_SUM)
        gMonitor.registerActivity("Done", "Request Completed", "TransferAgent", "Requests/min", gMonitor.OP_SUM)
        gMonitor.registerActivity("Replicate and register", "Replicate and register operations", "TransferAgent",
                                  "Attempts/min", gMonitor.OP_SUM)
        gMonitor.registerActivity("Replicate", "Replicate operations", "TransferAgent", "Attempts/min", gMonitor.OP_SUM)
        gMonitor.registerActivity("Put and register", "Put and register operations", "TransferAgent",
                                  "Attempts/min", gMonitor.OP_SUM)
        gMonitor.registerActivity("Put", "Put operations", "TransferAgent", "Attempts/min", gMonitor.OP_SUM)
        gMonitor.registerActivity("Replication successful", "Successful replications", "TransferAgent",
                                  "Successful/min", gMonitor.OP_SUM)
        gMonitor.registerActivity("Put successful", "Successful puts", "TransferAgent", "Successful/min", gMonitor.OP_SUM)
        gMonitor.registerActivity("Replication failed", "Failed replications", "TransferAgent", "Failed/min", gMonitor.OP_SUM)
        gMonitor.registerActivity("Put failed", "Failed puts", "TransferAgent", "Failed/min", gMonitor.OP_SUM)
        gMonitor.registerActivity("Replica registration successful", "Successful replica registrations", "TransferAgent",
                                  "Successful/min", gMonitor.OP_SUM)
        gMonitor.registerActivity("File registration successful", "Successful file registrations", "TransferAgent",
                                  "Successful/min", gMonitor.OP_SUM)
        gMonitor.registerActivity("Replica registration failed", "Failed replica registrations", "TransferAgent",
                                  "Failed/min", gMonitor.OP_SUM)
        gMonitor.registerActivity("File registration failed", "Failed file registrations", "TransferAgent",
                                  "Failed/min", gMonitor.OP_SUM)

        self.maxNumberOfThreads = self.am_getOption('NumberOfThreads', 1)
        self.threadPoolDepth = self.am_getOption('ThreadPoolDepth', 1)
        self.threadPool = ThreadPool(1, self.maxNumberOfThreads)

        # This sets the Default Proxy to used as that defined under
        # /Operations/Shifter/DataManager
        # the shifterProxy option in the Configuration can be used to change this default.
        self.am_setOption('shifterProxy', 'DataManager')
        return S_OK()

    def execute(self):
        """Queue one executeRequest job per thread-pool slot, then run one inline.

        :return: result of the inline executeRequest call
        """
        for _ in range(self.threadPoolDepth):
            requestExecutor = ThreadedJob(self.executeRequest)
            self.threadPool.queueJob(requestExecutor)
        self.threadPool.processResults()
        return self.executeRequest()

    def executeRequest(self):
        """Fetch a single 'transfer' request and execute all its eligible sub-requests.

        Always returns S_OK so the agent loop keeps running; per-file problems
        are recorded in the request itself and in the DataLogging service.
        """
        ################################################
        # Get a request from request DB
        gMonitor.addMark("Iteration", 1)
        res = self.RequestDBClient.getRequest('transfer')
        if not res['OK']:
            gLogger.info("TransferAgent.execute: Failed to get request from database.")
            return S_OK()
        elif not res['Value']:
            gLogger.info("TransferAgent.execute: No requests to be executed found.")
            return S_OK()
        requestString = res['Value']['RequestString']
        requestName = res['Value']['RequestName']
        sourceServer = res['Value']['Server']
        # JobID may be missing or non-numeric; fall back to 0 (no job to finalize).
        try:
            jobID = int(res['Value']['JobID'])
        except (KeyError, TypeError, ValueError):
            jobID = 0
        gLogger.info("TransferAgent.execute: Obtained request %s" % requestName)

        result = self.RequestDBClient.getCurrentExecutionOrder(requestName, sourceServer)
        if result['OK']:
            currentOrder = result['Value']
        else:
            return S_OK('Can not get the request execution order')

        oRequest = RequestContainer(request=requestString)

        ################################################
        # Find the number of sub-requests from the request
        res = oRequest.getNumSubRequests('transfer')
        if not res['OK']:
            errStr = "TransferAgent.execute: Failed to obtain number of transfer subrequests."
            gLogger.error(errStr, res['Message'])
            return S_OK()
        gLogger.info("TransferAgent.execute: Found %s sub requests." % res['Value'])

        ################################################
        # For all the sub-requests in the request
        modified = False
        for ind in range(res['Value']):
            gMonitor.addMark("Execute", 1)
            gLogger.info("TransferAgent.execute: Processing sub-request %s." % ind)
            subRequestAttributes = oRequest.getSubRequestAttributes(ind, 'transfer')['Value']
            if subRequestAttributes['ExecutionOrder']:
                subExecutionOrder = int(subRequestAttributes['ExecutionOrder'])
            else:
                subExecutionOrder = 0
            subStatus = subRequestAttributes['Status']
            # Only Waiting sub-requests whose execution order has been reached are run.
            if subStatus == 'Waiting' and subExecutionOrder <= currentOrder:
                subRequestFiles = oRequest.getSubRequestFiles(ind, 'transfer')['Value']
                operation = subRequestAttributes['Operation']
                subRequestError = ''

                ################################################
                # If the sub-request is a put and register operation
                if operation == 'putAndRegister' or operation == 'putAndRegisterAndRemove':
                    gLogger.info("TransferAgent.execute: Attempting to execute %s sub-request." % operation)
                    diracSE = str(subRequestAttributes['TargetSE'])
                    catalog = ''
                    if 'Catalogue' in subRequestAttributes:
                        catalog = subRequestAttributes['Catalogue']
                    for subRequestFile in subRequestFiles:
                        if subRequestFile['Status'] == 'Waiting':
                            gMonitor.addMark("Put and register", 1)
                            lfn = str(subRequestFile['LFN'])
                            # renamed from 'file' to avoid shadowing the builtin
                            pfn = subRequestFile['PFN']
                            guid = subRequestFile['GUID']
                            addler = subRequestFile['Addler']
                            res = self.ReplicaManager.putAndRegister(lfn, pfn, diracSE, guid=guid,
                                                                     checksum=addler, catalog=catalog)
                            if res['OK']:
                                if lfn in res['Value']['Successful']:
                                    if 'put' not in res['Value']['Successful'][lfn]:
                                        # The put itself failed.
                                        gMonitor.addMark("Put failed", 1)
                                        self.DataLog.addFileRecord(lfn, 'PutFail', diracSE, '', 'TransferAgent')
                                        gLogger.info("TransferAgent.execute: Failed to put %s to %s." % (lfn, diracSE))
                                        subRequestError = "Put operation failed for %s to %s" % (lfn, diracSE)
                                        oRequest.setSubRequestFileAttributeValue(ind, 'transfer', lfn, 'Error', 'Put failed')
                                    elif 'register' not in res['Value']['Successful'][lfn]:
                                        # Put succeeded but registration failed: mark Done and
                                        # schedule a dedicated registration sub-request.
                                        gMonitor.addMark("Put successful", 1)
                                        gMonitor.addMark("File registration failed", 1)
                                        self.DataLog.addFileRecord(lfn, 'Put', diracSE, '', 'TransferAgent')
                                        self.DataLog.addFileRecord(lfn, 'RegisterFail', diracSE, '', 'TransferAgent')
                                        gLogger.info("TransferAgent.execute: Successfully put %s to %s in %s seconds."
                                                     % (lfn, diracSE, res['Value']['Successful'][lfn]['put']))
                                        gLogger.info("TransferAgent.execute: Failed to register %s to %s." % (lfn, diracSE))
                                        oRequest.setSubRequestFileAttributeValue(ind, 'transfer', lfn, 'Status', 'Done')
                                        oRequest.setSubRequestFileAttributeValue(ind, 'transfer', lfn, 'Error',
                                                                                 'Registration failed')
                                        subRequestError = "Registration failed for %s to %s" % (lfn, diracSE)
                                        fileDict = res['Value']['Failed'][lfn]['register']
                                        registerRequestDict = {
                                            'Attributes': {
                                                'TargetSE': fileDict['TargetSE'],
                                                'Operation': 'registerFile'
                                            },
                                            'Files': [{
                                                'LFN': fileDict['LFN'],
                                                'PFN': fileDict['PFN'],
                                                'Size': fileDict['Size'],
                                                'Addler': fileDict['Addler'],
                                                'GUID': fileDict['GUID']
                                            }]
                                        }
                                        gLogger.info("TransferAgent.execute: Setting registration request for failed file.")
                                        oRequest.addSubRequest(registerRequestDict, 'register')
                                        modified = True
                                    else:
                                        # Both put and register succeeded.
                                        gMonitor.addMark("Put successful", 1)
                                        gMonitor.addMark("File registration successful", 1)
                                        self.DataLog.addFileRecord(lfn, 'Put', diracSE, '', 'TransferAgent')
                                        self.DataLog.addFileRecord(lfn, 'Register', diracSE, '', 'TransferAgent')
                                        gLogger.info("TransferAgent.execute: Successfully put %s to %s in %s seconds."
                                                     % (lfn, diracSE, res['Value']['Successful'][lfn]['put']))
                                        gLogger.info("TransferAgent.execute: Successfully registered %s to %s in %s seconds."
                                                     % (lfn, diracSE, res['Value']['Successful'][lfn]['register']))
                                        oRequest.setSubRequestFileAttributeValue(ind, 'transfer', lfn, 'Status', 'Done')
                                        modified = True
                                else:
                                    gMonitor.addMark("Put failed", 1)
                                    self.DataLog.addFileRecord(lfn, 'PutFail', diracSE, '', 'TransferAgent')
                                    errStr = "TransferAgent.execute: Failed to put and register file."
                                    gLogger.error(errStr, "%s %s %s" % (lfn, diracSE, res['Value']['Failed'][lfn]))
                                    oRequest.setSubRequestFileAttributeValue(ind, 'transfer', lfn, 'Error',
                                                                             'Complete file failure')
                                    subRequestError = "Failed to put and register file"
                            else:
                                gMonitor.addMark("Put failed", 1)
                                self.DataLog.addFileRecord(lfn, 'PutFail', diracSE, '', 'TransferAgent')
                                errStr = "TransferAgent.execute: Completely failed to put and register file."
                                gLogger.error(errStr, res['Message'])
                                oRequest.setSubRequestFileAttributeValue(ind, 'transfer', lfn, 'Error', 'RM call failure')
                                subRequestError = operation + " RM call file"
                        else:
                            gLogger.info("TransferAgent.execute: File already completed.")

                ################################################
                # If the sub-request is a put operation
                elif operation == 'put':
                    gLogger.info("TransferAgent.execute: Attempting to execute %s sub-request." % operation)
                    diracSE = subRequestAttributes['TargetSE']
                    for subRequestFile in subRequestFiles:
                        if subRequestFile['Status'] == 'Waiting':
                            gMonitor.addMark("Put", 1)
                            lfn = subRequestFile['LFN']
                            pfn = subRequestFile['PFN']
                            res = self.ReplicaManager.put(lfn, pfn, diracSE)
                            if res['OK']:
                                if lfn in res['Value']['Successful']:
                                    gMonitor.addMark("Put successful", 1)
                                    self.DataLog.addFileRecord(lfn, 'Put', diracSE, '', 'TransferAgent')
                                    gLogger.info("TransferAgent.execute: Successfully put %s to %s in %s seconds."
                                                 % (lfn, diracSE, res['Value']['Successful'][lfn]))
                                    oRequest.setSubRequestFileAttributeValue(ind, 'transfer', lfn, 'Status', 'Done')
                                    modified = True
                                else:
                                    gMonitor.addMark("Put failed", 1)
                                    self.DataLog.addFileRecord(lfn, 'PutFail', diracSE, '', 'TransferAgent')
                                    errStr = "TransferAgent.execute: Failed to put file."
                                    gLogger.error(errStr, "%s %s %s" % (lfn, diracSE, res['Value']['Failed'][lfn]))
                                    subRequestError = "Put operation failed for %s to %s" % (lfn, diracSE)
                                    oRequest.setSubRequestFileAttributeValue(ind, 'transfer', lfn, 'Error', 'Put failed')
                            else:
                                gMonitor.addMark("Put failed", 1)
                                self.DataLog.addFileRecord(lfn, 'PutFail', diracSE, '', 'TransferAgent')
                                errStr = "TransferAgent.execute: Completely failed to put file."
                                gLogger.error(errStr, res['Message'])
                                subRequestError = "Put RM call failed for %s to %s" % (lfn, diracSE)
                                oRequest.setSubRequestFileAttributeValue(ind, 'transfer', lfn, 'Error',
                                                                         'Put RM call failed')
                        else:
                            gLogger.info("TransferAgent.execute: File already completed.")

                ################################################
                # If the sub-request is a replicate and register operation
                elif operation == 'replicateAndRegister' or operation == 'replicateAndRegisterAndRemove':
                    gLogger.info("TransferAgent.execute: Attempting to execute %s sub-request." % operation)
                    targetSE = subRequestAttributes['TargetSE']
                    sourceSE = subRequestAttributes['SourceSE']
                    if sourceSE == "None":
                        sourceSE = ''
                    for subRequestFile in subRequestFiles:
                        if subRequestFile['Status'] == 'Waiting':
                            gMonitor.addMark("Replicate and register", 1)
                            lfn = subRequestFile['LFN']
                            res = self.ReplicaManager.replicateAndRegister(lfn, targetSE, sourceSE=sourceSE)
                            if res['OK']:
                                if lfn in res['Value']['Successful']:
                                    if 'replicate' not in res['Value']['Successful'][lfn]:
                                        gLogger.info("TransferAgent.execute: Failed to replicate %s to %s." % (lfn, targetSE))
                                        gMonitor.addMark("Replication failed", 1)
                                        oRequest.setSubRequestFileAttributeValue(ind, 'transfer', lfn, "Error",
                                                                                 "Replication failed")
                                        subRequestError = "Replication failed for %s to %s" % (lfn, targetSE)
                                    elif 'register' not in res['Value']['Successful'][lfn]:
                                        # Replica exists but catalog registration failed: mark Done
                                        # and schedule a registerReplica sub-request.
                                        gMonitor.addMark("Replication successful", 1)
                                        gMonitor.addMark("Replica registration failed", 1)
                                        gLogger.info("TransferAgent.execute: Successfully replicated %s to %s in %s seconds."
                                                     % (lfn, targetSE, res['Value']['Successful'][lfn]['replicate']))
                                        gLogger.info("TransferAgent.execute: Failed to register %s to %s." % (lfn, targetSE))
                                        oRequest.setSubRequestFileAttributeValue(ind, 'transfer', lfn, 'Status', 'Done')
                                        oRequest.setSubRequestFileAttributeValue(ind, 'transfer', lfn, 'Error',
                                                                                 'Registration failed')
                                        subRequestError = "Registration failed for %s to %s" % (lfn, targetSE)
                                        fileDict = res['Value']['Failed'][lfn]['register']
                                        registerRequestDict = {
                                            'Attributes': {
                                                'TargetSE': fileDict['TargetSE'],
                                                'Operation': 'registerReplica'
                                            },
                                            'Files': [{
                                                'LFN': fileDict['LFN'],
                                                'PFN': fileDict['PFN']
                                            }]
                                        }
                                        gLogger.info("TransferAgent.execute: Setting registration request for failed replica.")
                                        oRequest.addSubRequest(registerRequestDict, 'register')
                                        modified = True
                                    else:
                                        gMonitor.addMark("Replication successful", 1)
                                        gMonitor.addMark("Replica registration successful", 1)
                                        gLogger.info("TransferAgent.execute: Successfully replicated %s to %s in %s seconds."
                                                     % (lfn, targetSE, res['Value']['Successful'][lfn]['replicate']))
                                        gLogger.info("TransferAgent.execute: Successfully registered %s to %s in %s seconds."
                                                     % (lfn, targetSE, res['Value']['Successful'][lfn]['register']))
                                        oRequest.setSubRequestFileAttributeValue(ind, 'transfer', lfn, 'Status', 'Done')
                                        modified = True
                                else:
                                    gMonitor.addMark("Replication failed", 1)
                                    errStr = "TransferAgent.execute: Failed to replicate and register file."
                                    gLogger.error(errStr, "%s %s %s" % (lfn, targetSE, res['Value']['Failed'][lfn]))
                            else:
                                gMonitor.addMark("Replication failed", 1)
                                errStr = "TransferAgent.execute: Completely failed to replicate and register file."
                                gLogger.error(errStr, res['Message'])
                                oRequest.setSubRequestFileAttributeValue(ind, 'transfer', lfn, 'Error', 'RM call failure')
                                subRequestError = operation + " RM call failed"
                        else:
                            gLogger.info("TransferAgent.execute: File already completed.")

                ################################################
                # If the sub-request is a replicate operation
                elif operation == 'replicate':
                    gLogger.info("TransferAgent.execute: Attempting to execute %s sub-request." % operation)
                    targetSE = subRequestAttributes['TargetSE']
                    sourceSE = subRequestAttributes['SourceSE']
                    for subRequestFile in subRequestFiles:
                        if subRequestFile['Status'] == 'Waiting':
                            gMonitor.addMark("Replicate", 1)
                            lfn = subRequestFile['LFN']
                            res = self.ReplicaManager.replicate(lfn, targetSE, sourceSE=sourceSE)
                            if res['OK']:
                                if lfn in res['Value']['Successful']:
                                    gMonitor.addMark("Replication successful", 1)
                                    # BUGFIX: previously interpolated diracSE, a variable only set by
                                    # the put branches (wrong SE in the message, or NameError when no
                                    # put sub-request preceded this one). Use targetSE.
                                    gLogger.info("TransferAgent.execute: Successfully replicated %s to %s in %s seconds."
                                                 % (lfn, targetSE, res['Value']['Successful'][lfn]))
                                    oRequest.setSubRequestFileAttributeValue(ind, 'transfer', lfn, 'Status', 'Done')
                                    modified = True
                                else:
                                    gMonitor.addMark("Replication failed", 1)
                                    errStr = "TransferAgent.execute: Failed to replicate file."
                                    gLogger.error(errStr, "%s %s %s" % (lfn, targetSE, res['Value']['Failed'][lfn]))
                                    subRequestError = "Replicate operation failed for %s to %s" % (lfn, targetSE)
                                    oRequest.setSubRequestFileAttributeValue(ind, 'transfer', lfn, 'Error', 'Put failed')
                            else:
                                gMonitor.addMark("Replication failed", 1)
                                errStr = "TransferAgent.execute: Completely failed to replicate file."
                                gLogger.error(errStr, res['Message'])
                                subRequestError = "Replicate RM call failed for %s to %s" % (lfn, targetSE)
                                oRequest.setSubRequestFileAttributeValue(ind, 'transfer', lfn, 'Error',
                                                                         'Replicate RM call failed')
                        else:
                            gLogger.info("TransferAgent.execute: File already completed.")

                ################################################
                # If the sub-request is a get operation
                elif operation == 'get':
                    gLogger.info("TransferAgent.execute: Attempting to execute %s sub-request." % operation)
                    sourceSE = subRequestAttributes['TargetSE']
                    for subRequestFile in subRequestFiles:
                        if subRequestFile['Status'] == 'Waiting':
                            lfn = str(subRequestFile['LFN'])
                            pfn = str(subRequestFile['PFN'])
                            got = False
                            if sourceSE and pfn:
                                res = self.ReplicaManager.getStorageFile(pfn, sourceSE)
                                if pfn in res['Value']['Successful']:
                                    got = True
                            else:
                                res = self.ReplicaManager.getFile(lfn)
                                if lfn in res['Value']['Successful']:
                                    # BUGFIX: this branch previously set got = False on success,
                                    # so every successful getFile was reported as a failure and
                                    # the file was never marked Done.
                                    got = True
                            if got:
                                gLogger.info("TransferAgent.execute: Successfully got %s." % lfn)
                                oRequest.setSubRequestFileAttributeValue(ind, 'transfer', lfn, 'Status', 'Done')
                                modified = True
                            else:
                                errStr = "TransferAgent.execute: Failed to get file."
                                gLogger.error(errStr, lfn)
                        else:
                            gLogger.info("TransferAgent.execute: File already completed.")

                ################################################
                # If the sub-request is none of the above types
                else:
                    gLogger.error("TransferAgent.execute: Operation not supported.", operation)

                if subRequestError:
                    oRequest.setSubRequestAttributeValue(ind, 'transfer', 'Error', subRequestError)

                ################################################
                # Determine whether there are any active files
                if oRequest.isSubRequestEmpty(ind, 'transfer')['Value']:
                    oRequest.setSubRequestStatus(ind, 'transfer', 'Done')
                    gMonitor.addMark("Done", 1)

            ################################################
            # If the sub-request is already in terminal state
            else:
                gLogger.info("TransferAgent.execute: Sub-request %s is status '%s' and not to be executed."
                             % (ind, subRequestAttributes['Status']))

        ################################################
        # Generate the new request string after operation
        requestString = oRequest.toXML()['Value']
        res = self.RequestDBClient.updateRequest(requestName, requestString, sourceServer)

        if modified and jobID:
            result = self.finalizeRequest(requestName, jobID, sourceServer)

        return S_OK()
class MigrationMonitoringAgent( AgentModule ):
  """Agent monitoring the tape migration of files registered in the
  migration monitoring DB.

  Each cycle promotes 'New' files (filling in catalog size/checksum) and, per
  monitored SE, checks 'Migrating' files against the storage: migrated files
  have their checksum validated and accounting records sent; terminally failed
  files are reported to the integrity DB.
  """

  def initialize( self ):
    """Create clients, select the DB backend and register monitoring activities.

    :return: S_OK()
    """
    self.ReplicaManager = ReplicaManager()
    self.DataLog = DataLoggingClient()
    self.DataIntegrityClient = DataIntegrityClient()
    if self.am_getOption( 'DirectDB', False ):
      from DIRAC.StorageManagementSystem.DB.MigrationMonitoringDB import MigrationMonitoringDB
      self.MigrationMonitoringDB = MigrationMonitoringDB()
    else:
      from DIRAC.StorageManagementSystem.Client.MigrationMonitoringClient import MigrationMonitoringClient
      self.MigrationMonitoringDB = MigrationMonitoringClient()

    # This sets the Default Proxy to used as that defined under
    # /Operations/Shifter/DataManager
    # the shifterProxy option in the Configuration can be used to change this default.
    self.am_setOption( 'shifterProxy', 'DataManager' )
    self.userName = '******'
    self.storageElements = self.am_getOption( 'StorageElements', ['CERN-RAW'] )
    self.lastMonitors = {}

    gMonitor.registerActivity( "Iteration", "Agent Loops/min", "MigrationMonitoringAgent", "Loops", gMonitor.OP_SUM )
    if self.storageElements:
      gLogger.info( "Agent will be initialised to monitor the following SEs:" )
      for se in self.storageElements:
        gLogger.info( se )
        self.lastMonitors[se] = datetime.datetime.utcfromtimestamp( 0.0 )
        gMonitor.registerActivity( "Iteration%s" % se, "Agent Loops/min", "MigrationMonitoringAgent",
                                   "Loops", gMonitor.OP_SUM )
        gMonitor.registerActivity( "MigratingFiles%s" % se, "Files waiting for migration", "MigrationMonitoringAgent",
                                   "Files", gMonitor.OP_MEAN )
        gMonitor.registerActivity( "MigratedFiles%s" % se, "Newly migrated files", "MigrationMonitoringAgent",
                                   "Files", gMonitor.OP_SUM )
        gMonitor.registerActivity( "TotalMigratedFiles%s" % se, "Total migrated files", "MigrationMonitoringAgent",
                                   "Files", gMonitor.OP_ACUM )
        gMonitor.registerActivity( "TotalMigratedSize%s" % se, "Total migrated file size", "MigrationMonitoringAgent",
                                   "GB", gMonitor.OP_ACUM )
        gMonitor.registerActivity( "ChecksumMatches%s" % se, "Successfully migrated files", "MigrationMonitoringAgent",
                                   "Files", gMonitor.OP_SUM )
        gMonitor.registerActivity( "TotalChecksumMatches%s" % se, "Total successfully migrated files",
                                   "MigrationMonitoringAgent", "Files", gMonitor.OP_ACUM )
        gMonitor.registerActivity( "ChecksumMismatches%s" % se, "Erroneously migrated files", "MigrationMonitoringAgent",
                                   "Files", gMonitor.OP_SUM )
        gMonitor.registerActivity( "TotalChecksumMismatches%s" % se, "Total erroneously migrated files",
                                   "MigrationMonitoringAgent", "Files", gMonitor.OP_ACUM )
        gMonitor.registerActivity( "MigrationTime%s" % se, "Average migration time", "MigrationMonitoringAgent",
                                   "Seconds", gMonitor.OP_MEAN )
    return S_OK()

  def execute( self ):
    """One monitoring cycle, unless disabled via the EnableFlag CS option.

    :return: S_OK()
    """
    self.enableFlag = self.am_getOption( 'EnableFlag', 'True' )
    if self.enableFlag != 'True':
      self.log.info( 'MigrationMonitoringAgent is disabled by configuration option %s/EnableFlag' % ( self.section ) )
      return S_OK( 'Disabled via CS flag' )
    gMonitor.addMark( "Iteration", 1 )
    self.NewToMigrating()
    for se in self.storageElements:
      gMonitor.addMark( "Iteration%s" % se, 1 )
      self.MigratingToMigrated( se )
    return S_OK()

  #########################################################################################################
  #
  # Includes the file size and checksum information for replicas which do not have it
  #

  def NewToMigrating( self ):
    """ Obtain the new files from the migration monitoring db and (where necessary)
        add the size and checksum information
    """
    # First get the new files from the database
    gLogger.info( "NewToMigrating: Attempting to obtain 'New' files." )
    res = self.__getFiles( '', 'New' )
    if not res['OK']:
      gLogger.error( "NewToMigrating: Failed to get 'New' files.", res['Message'] )
      return res
    newFiles = res['Value']['Files']
    if not newFiles:
      gLogger.info( "NewToMigrating: Found no 'New' files." )
      return S_OK()
    # Get the metadata from the catalog for which do not have size or checksum
    res = self.__getCatalogFileMetadata( newFiles )
    if not res['OK']:
      gLogger.error( "NewToMigrating: Failed to get metadata for files", res['Message'] )
      return res
    metadata = res['Value']
    # Add the metadata to the migration monitoring DB.
    res = self.__updateNewMigrating( metadata )
    return S_OK()

  def __updateNewMigrating( self, fileMetadata ):
    """Push the freshly obtained size/checksum metadata into the monitoring DB.

    NOTE(review): the actual DB update is still a placeholder in the original code.
    """
    gLogger.info( "__updateNewMigrating: Updating metadata for %s files" % len( fileMetadata ) )
    gLogger.info( "PUT THE CODE HERE TO UPDATE THE METDATA" )
    #self.__setMigratingReplicaStatus(fileMetadata.keys(),'Migrating')
    return S_OK()

  #########################################################################################################
  #
  # Monitors the migration of files
  #

  def MigratingToMigrated( self, se ):
    """ Obtain the active files from the migration monitoring db and check their status
    """
    # First get the migrating files from the database
    gLogger.info( "[%s] MigratingToMigrated: Attempting to obtain 'Migrating' files." % se )
    res = self.__getFiles( se, 'Migrating' )
    if not res['OK']:
      gLogger.error( "[%s] MigratingToMigrated: Failed to get 'Migrating' files." % se, res['Message'] )
      return res
    pfnIDs = res['Value']['PFNIDs']
    if not pfnIDs:
      gLogger.info( "[%s] MigratingToMigrated: Found no 'Migrating' files." % se )
      return S_OK()
    migratingFiles = res['Value']['Files']
    gLogger.info( "[%s] MigratingToMigrated: Found %d 'Migrating' files." % ( se, len( pfnIDs ) ) )
    gMonitor.addMark( "MigratingFiles%s" % se, len( pfnIDs ) )
    gLogger.info( "[%s] MigratingToMigrated: Obtaining physical file metadata for 'Migrating' files." % se )
    startTime = datetime.datetime.utcnow()
    res = self.__getMigratedFiles( se, pfnIDs.keys() )
    if not res['OK']:
      gLogger.error( "[%s] MigratingToMigrated: Failed to get 'Migrating' file metadata." % se, res['Message'] )
      return res
    # Assumed that the files are found migrated midway through obtaining the metadata
    assumedEndTime = datetime.datetime.utcnow() - ( ( datetime.datetime.utcnow() - startTime ) / 2 )
    previousMonitorTime = self.lastMonitors[se]
    self.lastMonitors[se] = datetime.datetime.utcnow()
    terminal = res['Value']['Terminal']
    migrated = res['Value']['Migrated']

    # Update the problematic files in the integrity DB and update the MigrationMonitoringDB
    gLogger.info( "[%s] MigratingToMigrated: Found %d terminally failed files." % ( se, len( terminal ) ) )
    if terminal:
      replicaTuples = []
      terminalFileIDs = []
      for pfn, prognosis in terminal.items():
        fileID = pfnIDs[pfn]
        terminalFileIDs.append( fileID )
        lfn = migratingFiles[fileID]['LFN']
        # BUGFIX: use a file-local variable instead of rebinding the 'se'
        # parameter, which corrupted every later "[%s]" log prefix.
        fileSE = migratingFiles[fileID]['SE']
        replicaTuples.append( ( lfn, pfn, fileSE, prognosis ) )
      self.__reportProblematicReplicas( replicaTuples )
      res = self.MigrationMonitoringDB.setMigratingReplicaStatus( terminalFileIDs, 'Failed' )
      if not res['OK']:
        gLogger.error( "[%s] MigratingToMigrated: Failed to update terminal files." % se, res['Message'] )

    # Update the migrated files and send accounting
    gLogger.info( "[%s] MigratingToMigrated: Found %d migrated files." % ( se, len( migrated ) ) )
    if migrated:
      migratedFileIDs = {}
      for pfn, checksum in migrated.items():
        migratedFileIDs[pfnIDs[pfn]] = checksum
      #res = self.MigrationMonitoringDB.setMigratingReplicaStatus(migratedFileIDs.keys(),'Migrated')
      #if not res['OK']:
      #  gLogger.error("[%s] MigratingToMigrated: Failed to update migrated files." % se, res['Message'])
      # Check the checksums of the migrated files
      res = self.__validateChecksums( se, migratedFileIDs, migratingFiles )
      if not res['OK']:
        gLogger.error( "[%s] MigratingToMigrated: Failed to perform checksum matching." % se, res['Message'] )
        matchingFiles = []
        mismatchingFiles = []
      else:
        matchingFiles = res['Value']['MatchingFiles']
        mismatchingFiles = res['Value']['MismatchFiles']
      # Create and send the accounting messages
      res = self.__updateMigrationAccounting( se, migratingFiles, matchingFiles, mismatchingFiles,
                                              assumedEndTime, previousMonitorTime )
      if not res['OK']:
        gLogger.error( "[%s] MigratingToMigrated: Failed to send accounting for migrated files." % se, res['Message'] )
    return S_OK()

  def __getMigratedFiles( self, se, pfns ):
    """Query storage metadata for the given PFNs and classify them.

    :return: S_OK({'Terminal': {pfn: prognosis}, 'Migrated': {pfn: checksum}})
    """
    migrated = {}
    terminal = {}
    res = self.ReplicaManager.getStorageFileMetadata( pfns, se )
    if not res['OK']:
      return res
    for pfn, error in res['Value']['Failed'].items():
      if re.search( "File does not exist", error ):
        gLogger.error( "[%s] __getStorageMetadata: PFN does not exist at StorageElement." % se,
                       "%s %s" % ( pfn, error ) )
        terminal[pfn] = 'PFNMissing'
      else:
        gLogger.warn( "[%s] __getMigratedFiles: Failed to obtain physical file metadata." % se,
                      "%s %s" % ( pfn, error ) )
    storageMetadata = res['Value']['Successful']
    for pfn, metadata in storageMetadata.items():
      if metadata['Migrated']:
        checksum = ''
        if 'Checksum' in metadata:
          checksum = metadata['Checksum']
        migrated[pfn] = checksum
      elif metadata['Lost']:
        gLogger.error( "[%s] __getMigratedFiles: PFN has been Lost by the StorageElement." % se, "%s" % ( pfn ) )
        terminal[pfn] = 'PFNLost'
      elif metadata['Unavailable']:
        gLogger.error( "[%s] __getMigratedFiles: PFN declared Unavailable by StorageElement." % se, "%s" % ( pfn ) )
        terminal[pfn] = 'PFNUnavailable'
    resDict = {'Terminal':terminal, 'Migrated':migrated}
    return S_OK( resDict )

  def __validateChecksums( self, se, migratedFileIDs, migratingFiles ):
    """ Obtain the checksums in the catalog if not present and check against the checksum from the storage
    """
    lfnFileID = {}
    checksumToObtain = []
    for fileID in migratedFileIDs.keys():
      if not migratingFiles[fileID]['Checksum']:
        lfn = migratingFiles[fileID]['LFN']
        checksumToObtain.append( lfn )
        lfnFileID[lfn] = fileID
    if checksumToObtain:
      res = self.ReplicaManager.getCatalogFileMetadata( checksumToObtain )
      if not res['OK']:
        gLogger.error( "[%s] __validateChecksums: Failed to obtain file checksums" % se )
        return res
      for lfn, error in res['Value']['Failed'].items():
        gLogger.error( "[%s] __validateChecksums: Failed to get file checksum" % se, "%s %s" % ( lfn, error ) )
      for lfn, metadata in res['Value']['Successful'].items():
        migratingFiles[lfnFileID[lfn]]['Checksum'] = metadata['CheckSumValue']

    mismatchFiles = []
    matchFiles = []
    checksumMismatches = []
    fileRecords = []
    for fileID, seChecksum in migratedFileIDs.items():
      lfn = migratingFiles[fileID]['LFN']
      catalogChecksum = migratingFiles[fileID]['Checksum']
      if not seChecksum:
        gLogger.error( "[%s] __validateChecksums: Storage checksum not available" % se,
                       migratingFiles[fileID]['PFN'] )
      elif not compareAdler( seChecksum, catalogChecksum ):
        # BUGFIX: keep the 'se' parameter intact; use file-local names for the
        # replica's own PFN/SE so later records and log prefixes stay correct.
        filePfn = migratingFiles[fileID]['PFN']
        fileSE = migratingFiles[fileID]['SE']
        gLogger.error( "[%s] __validateChecksums: Storage and catalog checksum mismatch" % se,
                       "%s '%s' '%s'" % ( filePfn, seChecksum, catalogChecksum ) )
        mismatchFiles.append( fileID )
        checksumMismatches.append( ( lfn, filePfn, fileSE, 'CatalogPFNChecksumMismatch' ) )
        # BUGFIX: the labels were swapped — this branch previously recorded
        # 'Checksum match' for mismatching files and vice versa.
        fileRecords.append( ( lfn, 'Checksum mismatch', '%s@%s' % ( seChecksum, fileSE ), '',
                              'MigrationMonitoringAgent' ) )
      else:
        fileRecords.append( ( lfn, 'Checksum match', '%s@%s' % ( seChecksum, se ), '',
                              'MigrationMonitoringAgent' ) )
        matchFiles.append( fileID )

    # Add the data logging records
    self.DataLog.addFileRecords( fileRecords )
    if checksumMismatches:
      # Update the (mis)matching checksums (in the integrityDB and) in the migration monitoring db
      self.__reportProblematicReplicas( checksumMismatches )
      res = self.MigrationMonitoringDB.setMigratingReplicaStatus( mismatchFiles, 'ChecksumFail' )
      if not res['OK']:
        gLogger.error( "[%s] __validateChecksums: Failed to update checksum mismatching files." % se, res['Message'] )
    if matchFiles:
      res = self.MigrationMonitoringDB.setMigratingReplicaStatus( matchFiles, 'ChecksumMatch' )
      if not res['OK']:
        # BUGFIX: this message wrongly said "mismatching" for the matching-files update.
        gLogger.error( "[%s] __validateChecksums: Failed to update checksum matching files." % se, res['Message'] )
    resDict = {'MatchingFiles':matchFiles, 'MismatchFiles':mismatchFiles}
    return S_OK( resDict )

  def __updateMigrationAccounting( self, se, migratingFiles, matchingFiles, mismatchingFiles,
                                   assumedEndTime, previousMonitorTime ):
    """ Create accounting messages for the overall throughput observed and the total migration time for the files
    """
    allMigrated = matchingFiles + mismatchingFiles
    gMonitor.addMark( "MigratedFiles%s" % se, len( allMigrated ) )
    gMonitor.addMark( "TotalMigratedFiles%s" % se, len( allMigrated ) )
    lfnFileID = {}
    sizesToObtain = []
    for fileID in allMigrated:
      if not migratingFiles[fileID]['Size']:
        lfn = migratingFiles[fileID]['LFN']
        sizesToObtain.append( lfn )
        lfnFileID[lfn] = fileID
    if sizesToObtain:
      res = self.ReplicaManager.getCatalogFileSize( sizesToObtain )
      if not res['OK']:
        gLogger.error( "[%s] __updateMigrationAccounting: Failed to obtain file sizes" % se )
        return res
      for lfn, error in res['Value']['Failed'].items():
        gLogger.error( "[%s] __updateAccounting: Failed to get file size" % se, "%s %s" % ( lfn, error ) )
        migratingFiles[lfnFileID[lfn]]['Size'] = 0
      for lfn, size in res['Value']['Successful'].items():
        migratingFiles[lfnFileID[lfn]]['Size'] = size
    totalSize = 0
    for fileID in allMigrated:
      size = migratingFiles[fileID]['Size']
      totalSize += size
      submitTime = migratingFiles[fileID]['SubmitTime']
      timeDiff = submitTime - assumedEndTime
      migrationTime = ( timeDiff.days * 86400 ) + ( timeDiff.seconds ) + ( timeDiff.microseconds / 1000000.0 )
      gMonitor.addMark( "MigrationTime%s" % se, migrationTime )
      gDataStoreClient.addRegister( self.__initialiseAccountingObject( 'MigrationTime', se, submitTime,
                                                                      assumedEndTime, size ) )
      gDataStoreClient.addRegister( self.__initialiseAccountingObject( 'MigrationThroughput', se,
                                                                      previousMonitorTime, assumedEndTime, size ) )
      oDataOperation = self.__initialiseAccountingObject( 'MigrationSuccess', se, submitTime, assumedEndTime, size )
      if fileID in mismatchingFiles:
        oDataOperation.setValueByKey( 'TransferOK', 0 )
        oDataOperation.setValueByKey( 'FinalStatus', 'Failed' )
      gDataStoreClient.addRegister( oDataOperation )
    gMonitor.addMark( "TotalMigratedSize%s" % se, totalSize )
    gMonitor.addMark( "ChecksumMismatches%s" % se, len( mismatchingFiles ) )
    gMonitor.addMark( "TotalChecksumMismatches%s" % se, len( mismatchingFiles ) )
    gMonitor.addMark( "ChecksumMatches%s" % se, len( matchingFiles ) )
    gMonitor.addMark( "TotalChecksumMatches%s" % se, len( matchingFiles ) )
    if allMigrated:
      gLogger.info( '[%s] __updateMigrationAccounting: Attempting to send accounting message...' % se )
      return gDataStoreClient.commit()
    return S_OK()

  #########################################################################################################
  #
  # Utility methods used by all methods
  #

  def __getFiles( self, se, status ):
    """Get files with the given status and SE from the monitoring DB.

    :return: S_OK({'PFNIDs': {pfn: fileID}, 'Files': {fileID: metadataDict}})
    """
    res = self.MigrationMonitoringDB.getMigratingReplicas( se, status )
    if not res['OK']:
      return res
    files = res['Value']
    pfnIDs = {}
    for fileID, metadataDict in files.items():
      pfn = metadataDict['PFN']
      pfnIDs[pfn] = fileID
    return S_OK( {'PFNIDs':pfnIDs, 'Files':files} )

  def __getCatalogFileMetadata( self, files ):
    """Fill in Size/Checksum from the catalog for files missing either.

    Files missing from the catalog are reported problematic and set 'Failed'.

    :return: S_OK({fileID: {'Size': ..., 'Checksum': ...}})
    """
    lfnFileID = {}
    metadataToObtain = []
    for fileID, metadata in files.items():
      if not ( metadata['Size'] and metadata['Checksum'] ):
        lfn = metadata['LFN']
        metadataToObtain.append( lfn )
        lfnFileID[lfn] = fileID
    if not metadataToObtain:
      # BUGFIX: previously returned S_OK() with Value=None, which made the
      # caller crash on len(None) in __updateNewMigrating.
      return S_OK( {} )
    res = self.ReplicaManager.getCatalogFileMetadata( metadataToObtain )
    if not res['OK']:
      gLogger.error( "__getCatalogFileMetadata: Failed to obtain file metadata", res['Message'] )
      return res
    successful = res['Value']['Successful']
    failed = res['Value']['Failed']
    terminalIDs = []
    problematicFiles = []
    for lfn, error in failed.items():
      gLogger.error( "__getCatalogFileMetadata: Failed to get file metadata", "%s %s" % ( lfn, error ) )
      if re.search( "No such file or directory", error ):
        fileID = lfnFileID[lfn]
        lfn = files[fileID]['LFN']
        problematicFiles.append( lfn )
        terminalIDs.append( fileID )
    if terminalIDs:
      self.__reportProblematicFiles( problematicFiles, 'LFNCatalogMissing' )
      self.__setMigratingReplicaStatus( terminalIDs, 'Failed' )
    fileMetadata = {}
    for lfn, metadata in successful.items():
      size = metadata['Size']
      checksum = metadata['CheckSumValue']
      fileMetadata[lfnFileID[lfn]] = {'Size':size, 'Checksum':checksum}
    return S_OK( fileMetadata )

  def __setMigratingReplicaStatus( self, fileIDs, status ):
    """Best-effort bulk status update in the monitoring DB (failures only logged)."""
    gLogger.info( "__setMigratingReplicaStatus: Attempting to update %s files to '%s'" % ( len( fileIDs ), status ) )
    res = self.MigrationMonitoringDB.setMigratingReplicaStatus( fileIDs, status )
    if not res['OK']:
      gLogger.info( "__setMigratingReplicaStatus: Failed to update status of files", res['Message'] )
    else:
      gLogger.info( "__setMigratingReplicaStatus: Successfully updated status of files" )

  def __reportProblematicFiles( self, lfns, reason ):
    """Report problematic LFNs to the data integrity DB."""
    gLogger.info( '__reportProblematicFiles: The following %s files were found with %s' % ( len( lfns ), reason ) )
    for lfn in sortList( lfns ):
      gLogger.info( lfn )
    res = self.DataIntegrityClient.setFileProblematic( lfns, reason, sourceComponent = 'MigrationMonitoringAgent' )
    if not res['OK']:
      gLogger.info( '__reportProblematicFiles: Failed to update integrity DB with files', res['Message'] )
    else:
      gLogger.info( '__reportProblematicFiles: Successfully updated integrity DB with files' )

  def __reportProblematicReplicas( self, replicaTuples ):
    """Report problematic (lfn, pfn, se, reason) replicas to the data integrity DB."""
    gLogger.info( '__reportProblematicReplicas: The following %s files being reported to integrity DB:'
                  % ( len( replicaTuples ) ) )
    for lfn, pfn, se, reason in sortList( replicaTuples ):
      if lfn:
        gLogger.info( lfn )
      else:
        gLogger.info( pfn )
    res = self.DataIntegrityClient.setReplicaProblematic( replicaTuples,
                                                          sourceComponent = 'MigrationMonitoringAgent' )
    if not res['OK']:
      gLogger.info( '__reportProblematicReplicas: Failed to update integrity DB with replicas', res['Message'] )
    else:
      gLogger.info( '__reportProblematicReplicas: Successfully updated integrity DB with replicas' )

  def __initialiseAccountingObject( self, operation, se, startTime, endTime, size ):
    """Build a DataOperation accounting record for one migration event.

    :param str operation: accounting OperationType (e.g. 'MigrationTime')
    :param str se: destination storage element
    :param datetime startTime: operation start
    :param datetime endTime: operation end
    :param int size: transferred size in bytes
    :return: populated DataOperation object
    """
    accountingDict = {}
    accountingDict['OperationType'] = operation
    accountingDict['User'] = self.userName
    accountingDict['Protocol'] = 'SRM'
    accountingDict['RegistrationTime'] = 0.0
    accountingDict['RegistrationOK'] = 0
    accountingDict['RegistrationTotal'] = 0
    accountingDict['TransferTotal'] = 1
    accountingDict['TransferOK'] = 1
    accountingDict['TransferSize'] = size
    timeDiff = endTime - startTime
    transferTime = ( timeDiff.days * 86400 ) + ( timeDiff.seconds ) + ( timeDiff.microseconds / 1000000.0 )
    accountingDict['TransferTime'] = transferTime
    accountingDict['FinalStatus'] = 'Successful'
    accountingDict['Source'] = siteName()
    accountingDict['Destination'] = se
    oDataOperation = DataOperation()
    oDataOperation.setEndTime( endTime )
    oDataOperation.setStartTime( startTime )
    oDataOperation.setValuesFromDict( accountingDict )
    return oDataOperation
def initialize(self):
    """Create the clients the agent needs, choose the monitoring DB backend
    and register every gMonitor activity for the configured storage elements.

    :return: S_OK()
    """
    self.ReplicaManager = ReplicaManager()
    self.DataLog = DataLoggingClient()
    self.DataIntegrityClient = DataIntegrityClient()
    if self.am_getOption('DirectDB', False):
        # direct database access requested via configuration
        from DIRAC.StorageManagementSystem.DB.MigrationMonitoringDB import MigrationMonitoringDB
        self.MigrationMonitoringDB = MigrationMonitoringDB()
    else:
        # default: go through the service client
        from DIRAC.StorageManagementSystem.Client.MigrationMonitoringClient import MigrationMonitoringClient
        self.MigrationMonitoringDB = MigrationMonitoringClient()
    # This sets the Default Proxy to used as that defined under
    # /Operations/Shifter/DataManager
    # the shifterProxy option in the Configuration can be used to change this default.
    self.am_setOption('shifterProxy', 'DataManager')
    self.userName = '******'
    self.storageElements = self.am_getOption('StorageElements', ['CERN-RAW'])
    self.lastMonitors = {}
    gMonitor.registerActivity("Iteration", "Agent Loops/min", "MigrationMonitoringAgent", "Loops", gMonitor.OP_SUM)
    if self.storageElements:
        gLogger.info("Agent will be initialised to monitor the following SEs:")
        # (name template, description, unit, aggregation) registered once per SE
        perSEActivities = [
            ("Iteration%s", "Agent Loops/min", "Loops", gMonitor.OP_SUM),
            ("MigratingFiles%s", "Files waiting for migration", "Files", gMonitor.OP_MEAN),
            ("MigratedFiles%s", "Newly migrated files", "Files", gMonitor.OP_SUM),
            ("TotalMigratedFiles%s", "Total migrated files", "Files", gMonitor.OP_ACUM),
            ("TotalMigratedSize%s", "Total migrated file size", "GB", gMonitor.OP_ACUM),
            ("ChecksumMatches%s", "Successfully migrated files", "Files", gMonitor.OP_SUM),
            ("TotalChecksumMatches%s", "Total successfully migrated files", "Files", gMonitor.OP_ACUM),
            ("ChecksumMismatches%s", "Erroneously migrated files", "Files", gMonitor.OP_SUM),
            ("TotalChecksumMismatches%s", "Total erroneously migrated files", "Files", gMonitor.OP_ACUM),
            ("MigrationTime%s", "Average migration time", "Seconds", gMonitor.OP_MEAN),
        ]
        for se in self.storageElements:
            gLogger.info(se)
            self.lastMonitors[se] = datetime.datetime.utcfromtimestamp(0.0)
            for template, description, unit, operation in perSEActivities:
                gMonitor.registerActivity(template % se, description, "MigrationMonitoringAgent", unit, operation)
    return S_OK()
class MigrationMonitoringAgent(AgentModule):
    """Agent that follows replicas recorded in the MigrationMonitoringDB.

    Each cycle it (a) fetches 'New' replicas and fills in missing catalog
    size/checksum metadata, and (b) polls each configured storage element
    for 'Migrating' replicas, marking them migrated/failed, validating
    checksums against the catalog and sending accounting records.
    """

    def initialize(self):
        """Create clients, choose the monitoring DB backend and register
        all gMonitor activities for the configured storage elements.

        :return: S_OK()
        """
        self.ReplicaManager = ReplicaManager()
        self.DataLog = DataLoggingClient()
        self.DataIntegrityClient = DataIntegrityClient()
        if self.am_getOption('DirectDB', False):
            # direct database access requested via configuration
            from DIRAC.StorageManagementSystem.DB.MigrationMonitoringDB import MigrationMonitoringDB
            self.MigrationMonitoringDB = MigrationMonitoringDB()
        else:
            # default: go through the service client
            from DIRAC.StorageManagementSystem.Client.MigrationMonitoringClient import MigrationMonitoringClient
            self.MigrationMonitoringDB = MigrationMonitoringClient()
        # This sets the Default Proxy to used as that defined under
        # /Operations/Shifter/DataManager
        # the shifterProxy option in the Configuration can be used to change this default.
        self.am_setOption('shifterProxy', 'DataManager')
        self.userName = '******'
        self.storageElements = self.am_getOption('StorageElements', ['CERN-RAW'])
        # per-SE timestamp of the previous monitoring pass (epoch at start-up)
        self.lastMonitors = {}
        gMonitor.registerActivity("Iteration", "Agent Loops/min", "MigrationMonitoringAgent", "Loops", gMonitor.OP_SUM)
        if self.storageElements:
            gLogger.info(
                "Agent will be initialised to monitor the following SEs:")
            for se in self.storageElements:
                gLogger.info(se)
                self.lastMonitors[se] = datetime.datetime.utcfromtimestamp(0.0)
                # one full set of monitoring activities per storage element
                gMonitor.registerActivity("Iteration%s" % se, "Agent Loops/min",
                                          "MigrationMonitoringAgent", "Loops", gMonitor.OP_SUM)
                gMonitor.registerActivity("MigratingFiles%s" % se,
                                          "Files waiting for migration",
                                          "MigrationMonitoringAgent", "Files",
                                          gMonitor.OP_MEAN)
                gMonitor.registerActivity("MigratedFiles%s" % se,
                                          "Newly migrated files",
                                          "MigrationMonitoringAgent", "Files",
                                          gMonitor.OP_SUM)
                gMonitor.registerActivity("TotalMigratedFiles%s" % se,
                                          "Total migrated files",
                                          "MigrationMonitoringAgent", "Files",
                                          gMonitor.OP_ACUM)
                gMonitor.registerActivity("TotalMigratedSize%s" % se,
                                          "Total migrated file size",
                                          "MigrationMonitoringAgent", "GB",
                                          gMonitor.OP_ACUM)
                gMonitor.registerActivity("ChecksumMatches%s" % se,
                                          "Successfully migrated files",
                                          "MigrationMonitoringAgent", "Files",
                                          gMonitor.OP_SUM)
                gMonitor.registerActivity("TotalChecksumMatches%s" % se,
                                          "Total successfully migrated files",
                                          "MigrationMonitoringAgent", "Files",
                                          gMonitor.OP_ACUM)
                gMonitor.registerActivity("ChecksumMismatches%s" % se,
                                          "Erroneously migrated files",
                                          "MigrationMonitoringAgent", "Files",
                                          gMonitor.OP_SUM)
                gMonitor.registerActivity("TotalChecksumMismatches%s" % se,
                                          "Total erroneously migrated files",
                                          "MigrationMonitoringAgent", "Files",
                                          gMonitor.OP_ACUM)
                gMonitor.registerActivity("MigrationTime%s" % se,
                                          "Average migration time",
                                          "MigrationMonitoringAgent",
                                          "Seconds", gMonitor.OP_MEAN)
        return S_OK()

    def execute(self):
        """One agent cycle: process 'New' files, then each SE's 'Migrating' files.

        :return: S_OK()
        """
        # EnableFlag is compared as the *string* 'True', not a boolean
        self.enableFlag = self.am_getOption('EnableFlag', 'True')
        if not self.enableFlag == 'True':
            self.log.info(
                'MigrationMonitoringAgent is disabled by configuration option %s/EnableFlag'
                % (self.section))
            return S_OK('Disabled via CS flag')
        gMonitor.addMark("Iteration", 1)
        self.NewToMigrating()
        for se in self.storageElements:
            gMonitor.addMark("Iteration%s" % se, 1)
            self.MigratingToMigrated(se)
        return S_OK()

    #########################################################################################################
    #
    # Includes the file size and checksum information for replicas which do not have it
    #
    def NewToMigrating(self):
        """ Obtain the new files from the migration monitoring db and (where necessary) add the size and checksum information """
        # First get the new files from the database
        gLogger.info("NewToMigrating: Attempting to obtain 'New' files.")
        res = self.__getFiles('', 'New')
        if not res['OK']:
            gLogger.error("NewToMigrating: Failed to get 'New' files.",
                          res['Message'])
            return res
        newFiles = res['Value']['Files']
        if not newFiles:
            gLogger.info("NewToMigrating: Found no 'New' files.")
            return S_OK()
        # Get the metadata from the catalog for which do not have size or checksum
        res = self.__getCatalogFileMetadata(newFiles)
        if not res['OK']:
            gLogger.error("NewToMigrating: Failed to get metadata for files",
                          res['Message'])
            return res
        metadata = res['Value']
        # Add the metadata to the migration monitoring DB.
        res = self.__updateNewMigrating(metadata)
        return S_OK()

    def __updateNewMigrating(self, fileMetadata):
        # NOTE(review): placeholder implementation — only logs; the status
        # update below has been left commented out.
        gLogger.info("__updateNewMigrating: Updating metadata for %s files" %
                     len(fileMetadata))
        gLogger.info("PUT THE CODE HERE TO UPDATE THE METDATA")
        #self.__setMigratingReplicaStatus(fileMetadata.keys(),'Migrating')
        return S_OK()

    #########################################################################################################
    #
    # Monitors the migration of files
    #
    def MigratingToMigrated(self, se):
        """ Obtain the active files from the migration monitoring db and check their status """
        # First get the migrating files from the database
        gLogger.info(
            "[%s] MigratingToMigrated: Attempting to obtain 'Migrating' files."
            % se)
        res = self.__getFiles(se, 'Migrating')
        if not res['OK']:
            gLogger.error(
                "[%s] MigratingToMigrated: Failed to get 'Migrating' files." %
                se, res['Message'])
            return res
        pfnIDs = res['Value']['PFNIDs']
        if not pfnIDs:
            gLogger.info(
                "[%s] MigratingToMigrated: Found no 'Migrating' files." % se)
            return S_OK()
        migratingFiles = res['Value']['Files']
        gLogger.info("[%s] MigratingToMigrated: Found %d 'Migrating' files." %
                     (se, len(pfnIDs)))
        gMonitor.addMark("MigratingFiles%s" % se, len(pfnIDs))
        gLogger.info(
            "[%s] MigratingToMigrated: Obtaining physical file metadata for 'Migrating' files."
            % se)
        startTime = datetime.datetime.utcnow()
        res = self.__getMigratedFiles(se, pfnIDs.keys())
        if not res['OK']:
            gLogger.error(
                "[%s] MigratingToMigrated: Failed to get 'Migrating' file metadata."
                % se, res['Message'])
            return res
        # Assumed that the files are found migrated midway through obtaining the metadata
        assumedEndTime = datetime.datetime.utcnow() - (
            (datetime.datetime.utcnow() - startTime) / 2)
        previousMonitorTime = self.lastMonitors[se]
        self.lastMonitors[se] = datetime.datetime.utcnow()
        terminal = res['Value']['Terminal']
        migrated = res['Value']['Migrated']
        # Update the problematic files in the integrity DB and update the MigrationMonitoringDB
        gLogger.info(
            "[%s] MigratingToMigrated: Found %d terminally failed files." %
            (se, len(terminal)))
        if terminal:
            replicaTuples = []
            terminalFileIDs = []
            for pfn, prognosis in terminal.items():
                fileID = pfnIDs[pfn]
                terminalFileIDs.append(fileID)
                lfn = migratingFiles[fileID]['LFN']
                # NOTE(review): overwrites the 'se' parameter with the
                # replica's SE; later log messages use the overwritten value.
                se = migratingFiles[fileID]['SE']
                replicaTuples.append((lfn, pfn, se, prognosis))
            self.__reportProblematicReplicas(replicaTuples)
            res = self.MigrationMonitoringDB.setMigratingReplicaStatus(
                terminalFileIDs, 'Failed')
            if not res['OK']:
                gLogger.error(
                    "[%s] MigratingToMigrated: Failed to update terminal files."
                    % se, res['Message'])
        # Update the migrated files and send accounting
        gLogger.info("[%s] MigratingToMigrated: Found %d migrated files." %
                     (se, len(migrated)))
        if migrated:
            migratedFileIDs = {}
            for pfn, checksum in migrated.items():
                migratedFileIDs[pfnIDs[pfn]] = checksum
            #res = self.MigrationMonitoringDB.setMigratingReplicaStatus(migratedFileIDs.keys(),'Migrated')
            #if not res['OK']:
            #  gLogger.error("[%s] MigratingToMigrated: Failed to update migrated files." % se, res['Message'])
            # Check the checksums of the migrated files
            res = self.__validateChecksums(se, migratedFileIDs, migratingFiles)
            if not res['OK']:
                gLogger.error(
                    "[%s] MigratingToMigrated: Failed to perform checksum matching."
                    % se, res['Message'])
                matchingFiles = []
                mismatchingFiles = []
            else:
                matchingFiles = res['Value']['MatchingFiles']
                mismatchingFiles = res['Value']['MismatchFiles']
            # Create and send the accounting messages
            res = self.__updateMigrationAccounting(se, migratingFiles,
                                                   matchingFiles,
                                                   mismatchingFiles,
                                                   assumedEndTime,
                                                   previousMonitorTime)
            if not res['OK']:
                gLogger.error(
                    "[%s] MigratingToMigrated: Failed to send accounting for migrated files."
                    % se, res['Message'])
        return S_OK()

    def __getMigratedFiles(self, se, pfns):
        """Query the storage for the PFNs and sort them into migrated
        (pfn -> checksum, '' when the storage gave none) and terminal
        (pfn -> prognosis) dictionaries.
        """
        # Get the active files from the database
        migrated = {}
        terminal = {}
        res = self.ReplicaManager.getStorageFileMetadata(pfns, se)
        if not res['OK']:
            return res
        for pfn, error in res['Value']['Failed'].items():
            if re.search("File does not exist", error):
                # NOTE: log prefix '__getStorageMetadata' is a historic name
                gLogger.error(
                    "[%s] __getStorageMetadata: PFN does not exist at StorageElement."
                    % se, "%s %s" % (pfn, error))
                terminal[pfn] = 'PFNMissing'
            else:
                # transient failure: neither migrated nor terminal this pass
                gLogger.warn(
                    "[%s] __getMigratedFiles: Failed to obtain physical file metadata."
                    % se, "%s %s" % (pfn, error))
        storageMetadata = res['Value']['Successful']
        for pfn, metadata in storageMetadata.items():
            if metadata['Migrated']:
                checksum = ''
                if metadata.has_key('Checksum'):
                    checksum = metadata['Checksum']
                migrated[pfn] = checksum
            elif metadata['Lost']:
                gLogger.error(
                    "[%s] __getMigratedFiles: PFN has been Lost by the StorageElement."
                    % se, "%s" % (pfn))
                terminal[pfn] = 'PFNLost'
            elif metadata['Unavailable']:
                gLogger.error(
                    "[%s] __getMigratedFiles: PFN declared Unavailable by StorageElement."
                    % se, "%s" % (pfn))
                terminal[pfn] = 'PFNUnavailable'
        resDict = {'Terminal': terminal, 'Migrated': migrated}
        return S_OK(resDict)

    def __validateChecksums(self, se, migratedFileIDs, migratingFiles):
        """ Obtain the checksums in the catalog if not present and check against the checksum from the storage """
        lfnFileID = {}
        checksumToObtain = []
        for fileID in migratedFileIDs.keys():
            if not migratingFiles[fileID]['Checksum']:
                lfn = migratingFiles[fileID]['LFN']
                checksumToObtain.append(lfn)
                lfnFileID[lfn] = fileID
        if checksumToObtain:
            res = self.ReplicaManager.getCatalogFileMetadata(checksumToObtain)
            if not res['OK']:
                gLogger.error(
                    "[%s] __validateChecksums: Failed to obtain file checksums"
                    % se)
                return res
            for lfn, error in res['Value']['Failed'].items():
                gLogger.error(
                    "[%s] __validateChecksums: Failed to get file checksum" %
                    se, "%s %s" % (lfn, error))
            for lfn, metadata in res['Value']['Successful'].items():
                migratingFiles[
                    lfnFileID[lfn]]['Checksum'] = metadata['CheckSumValue']
        mismatchFiles = []
        matchFiles = []
        checksumMismatches = []
        fileRecords = []
        for fileID, seChecksum in migratedFileIDs.items():
            lfn = migratingFiles[fileID]['LFN']
            catalogChecksum = migratingFiles[fileID]['Checksum']
            if not seChecksum:
                # no storage checksum: logged only, file counted neither way
                gLogger.error(
                    "[%s] __validateChecksums: Storage checksum not available"
                    % se, migratingFiles[fileID]['PFN'])
            elif not compareAdler(seChecksum, catalogChecksum):
                gLogger.error(
                    "[%s] __validateChecksums: Storage and catalog checksum mismatch"
                    % se, "%s '%s' '%s'" % (migratingFiles[fileID]['PFN'],
                                            seChecksum, catalogChecksum))
                mismatchFiles.append(fileID)
                pfn = migratingFiles[fileID]['PFN']
                # NOTE(review): overwrites the 'se' parameter with this
                # replica's SE; later log messages use the overwritten value.
                se = migratingFiles[fileID]['SE']
                checksumMismatches.append(
                    (lfn, pfn, se, 'CatalogPFNChecksumMismatch'))
                # NOTE(review): this branch records 'Checksum match' although
                # it handles a MISMATCH (and the else branch records
                # 'Checksum mismatch' for a match) — the labels look swapped;
                # confirm before relying on these DataLog records.
                fileRecords.append(
                    (lfn, 'Checksum match', '%s@%s' % (seChecksum, se), '',
                     'MigrationMonitoringAgent'))
            else:
                fileRecords.append(
                    (lfn, 'Checksum mismatch', '%s@%s' % (seChecksum, se), '',
                     'MigrationMonitoringAgent'))
                matchFiles.append(fileID)
        # Add the data logging records
        self.DataLog.addFileRecords(fileRecords)
        if checksumMismatches:
            # Update the (mis)matching checksums (in the integrityDB and) in the migration monitoring db
            self.__reportProblematicReplicas(checksumMismatches)
            res = self.MigrationMonitoringDB.setMigratingReplicaStatus(
                mismatchFiles, 'ChecksumFail')
            if not res['OK']:
                gLogger.error(
                    "[%s] __validateChecksums: Failed to update checksum mismatching files."
                    % se, res['Message'])
        if matchFiles:
            res = self.MigrationMonitoringDB.setMigratingReplicaStatus(
                matchFiles, 'ChecksumMatch')
            if not res['OK']:
                gLogger.error(
                    "[%s] __validateChecksums: Failed to update checksum mismatching files."
                    % se, res['Message'])
        resDict = {'MatchingFiles': matchFiles, 'MismatchFiles': mismatchFiles}
        return S_OK(resDict)

    def __updateMigrationAccounting(self, se, migratingFiles, matchingFiles,
                                    mismatchingFiles, assumedEndTime,
                                    previousMonitorTime):
        """ Create accounting messages for the overall throughput observed and the total migration time for the files """
        allMigrated = matchingFiles + mismatchingFiles
        gMonitor.addMark("MigratedFiles%s" % se, len(allMigrated))
        gMonitor.addMark("TotalMigratedFiles%s" % se, len(allMigrated))
        lfnFileID = {}
        sizesToObtain = []
        # collect LFNs whose size is missing from the monitoring record
        for fileID in allMigrated:
            if not migratingFiles[fileID]['Size']:
                lfn = migratingFiles[fileID]['LFN']
                sizesToObtain.append(lfn)
                lfnFileID[lfn] = fileID
        if sizesToObtain:
            res = self.ReplicaManager.getCatalogFileSize(sizesToObtain)
            if not res['OK']:
                gLogger.error(
                    "[%s] __updateMigrationAccounting: Failed to obtain file sizes"
                    % se)
                return res
            for lfn, error in res['Value']['Failed'].items():
                gLogger.error(
                    "[%s] __updateAccounting: Failed to get file size" % se,
                    "%s %s" % (lfn, error))
                migratingFiles[lfnFileID[lfn]]['Size'] = 0
            for lfn, size in res['Value']['Successful'].items():
                migratingFiles[lfnFileID[lfn]]['Size'] = size
        totalSize = 0
        for fileID in allMigrated:
            size = migratingFiles[fileID]['Size']
            totalSize += size
            submitTime = migratingFiles[fileID]['SubmitTime']
            # NOTE(review): submitTime - assumedEndTime is negative whenever
            # submission precedes the assumed end time, so migrationTime may
            # be reported negative — confirm the intended operand order.
            timeDiff = submitTime - assumedEndTime
            migrationTime = (timeDiff.days * 86400) + (timeDiff.seconds) + (
                timeDiff.microseconds / 1000000.0)
            gMonitor.addMark("MigrationTime%s" % se, migrationTime)
            gDataStoreClient.addRegister(
                self.__initialiseAccountingObject('MigrationTime', se,
                                                  submitTime, assumedEndTime,
                                                  size))
            gDataStoreClient.addRegister(
                self.__initialiseAccountingObject('MigrationThroughput', se,
                                                  previousMonitorTime,
                                                  assumedEndTime, size))
            oDataOperation = self.__initialiseAccountingObject(
                'MigrationSuccess', se, submitTime, assumedEndTime, size)
            if fileID in mismatchingFiles:
                oDataOperation.setValueByKey('TransferOK', 0)
                oDataOperation.setValueByKey('FinalStatus', 'Failed')
            gDataStoreClient.addRegister(oDataOperation)
        gMonitor.addMark("TotalMigratedSize%s" % se, totalSize)
        gMonitor.addMark("ChecksumMismatches%s" % se, len(mismatchingFiles))
        gMonitor.addMark("TotalChecksumMismatches%s" % se,
                         len(mismatchingFiles))
        gMonitor.addMark("ChecksumMatches%s" % se, len(matchingFiles))
        gMonitor.addMark("TotalChecksumMatches%s" % se, len(matchingFiles))
        if allMigrated:
            gLogger.info(
                '[%s] __updateMigrationAccounting: Attempting to send accounting message...'
                % se)
            return gDataStoreClient.commit()
        return S_OK()

    #########################################################################################################
    #
    # Utility methods used by all methods
    #
    def __getFiles(self, se, status):
        # Get files with the given status and se from the database
        res = self.MigrationMonitoringDB.getMigratingReplicas(se, status)
        if not res['OK']:
            return res
        files = res['Value']
        pfnIDs = {}
        if len(files.keys()) > 0:
            for fileID, metadataDict in files.items():
                pfn = metadataDict['PFN']
                pfnIDs[pfn] = fileID
        return S_OK({'PFNIDs': pfnIDs, 'Files': files})

    def __getCatalogFileMetadata(self, files):
        """Fetch Size/Checksum from the catalog for records missing either;
        files missing from the catalog are reported problematic and failed.
        Returns fileID -> {'Size', 'Checksum'} (S_OK() with no value when
        nothing needed fetching).
        """
        lfnFileID = {}
        metadataToObtain = []
        for fileID, metadata in files.items():
            if not (metadata['Size'] and metadata['Checksum']):
                lfn = metadata['LFN']
                metadataToObtain.append(lfn)
                lfnFileID[lfn] = fileID
        if not metadataToObtain:
            return S_OK()
        res = self.ReplicaManager.getCatalogFileMetadata(metadataToObtain)
        if not res['OK']:
            gLogger.error(
                "__getCatalogFileMetadata: Failed to obtain file metadata",
                res['Message'])
            return res
        successful = res['Value']['Successful']
        failed = res['Value']['Failed']
        terminalIDs = []
        problematicFiles = []
        for lfn, error in failed.items():
            gLogger.error(
                "__getCatalogFileMetadata: Failed to get file metadata",
                "%s %s" % (lfn, error))
            if re.search("No such file or directory", error):
                fileID = lfnFileID[lfn]
                lfn = files[fileID]['LFN']
                pfn = files[fileID]['PFN']
                se = files[fileID]['SE']
                problematicFiles.append(lfn)
                terminalIDs.append(fileID)
        if terminalIDs:
            self.__reportProblematicFiles(problematicFiles,
                                          'LFNCatalogMissing')
            self.__setMigratingReplicaStatus(terminalIDs, 'Failed')
        fileMetadata = {}
        for lfn, metadata in successful.items():
            size = metadata['Size']
            checksum = metadata['CheckSumValue']
            fileMetadata[lfnFileID[lfn]] = {'Size': size, 'Checksum': checksum}
        return S_OK(fileMetadata)

    def __setMigratingReplicaStatus(self, fileIDs, status):
        """Best-effort status update in the monitoring DB (errors only logged)."""
        gLogger.info(
            "__setMigratingReplicaStatus: Attempting to update %s files to '%s'"
            % (len(fileIDs), status))
        res = self.MigrationMonitoringDB.setMigratingReplicaStatus(
            fileIDs, status)
        if not res['OK']:
            gLogger.info(
                "__setMigratingReplicaStatus: Failed to update status of files",
                res['Message'])
        else:
            gLogger.info(
                "__setMigratingReplicaStatus: Successfully updated status of files"
            )

    def __reportProblematicFiles(self, lfns, reason):
        """Best-effort report of problematic LFNs to the integrity DB."""
        gLogger.info(
            '__reportProblematicFiles: The following %s files were found with %s'
            % (len(lfns), reason))
        for lfn in sortList(lfns):
            gLogger.info(lfn)
        res = self.DataIntegrityClient.setFileProblematic(
            lfns, reason, sourceComponent='MigrationMonitoringAgent')
        if not res['OK']:
            gLogger.info(
                '__reportProblematicFiles: Failed to update integrity DB with files',
                res['Message'])
        else:
            gLogger.info(
                '__reportProblematicFiles: Successfully updated integrity DB with files'
            )

    def __reportProblematicReplicas(self, replicaTuples):
        """Best-effort report of (lfn, pfn, se, reason) tuples to the integrity DB."""
        gLogger.info(
            '__reportProblematicReplicas: The following %s files being reported to integrity DB:'
            % (len(replicaTuples)))
        for lfn, pfn, se, reason in sortList(replicaTuples):
            if lfn:
                gLogger.info(lfn)
            else:
                gLogger.info(pfn)
        res = self.DataIntegrityClient.setReplicaProblematic(
            replicaTuples, sourceComponent='MigrationMonitoringAgent')
        if not res['OK']:
            gLogger.info(
                '__reportProblematicReplicas: Failed to update integrity DB with replicas',
                res['Message'])
        else:
            gLogger.info(
                '__reportProblematicReplicas: Successfully updated integrity DB with replicas'
            )

    def __initialiseAccountingObject(self, operation, se, startTime, endTime,
                                     size):
        """Build a DataOperation accounting record for one transfer of
        'size' bytes between startTime and endTime.
        """
        accountingDict = {}
        accountingDict['OperationType'] = operation
        accountingDict['User'] = self.userName
        accountingDict['Protocol'] = 'SRM'
        accountingDict['RegistrationTime'] = 0.0
        accountingDict['RegistrationOK'] = 0
        accountingDict['RegistrationTotal'] = 0
        accountingDict['TransferTotal'] = 1
        accountingDict['TransferOK'] = 1
        accountingDict['TransferSize'] = size
        timeDiff = endTime - startTime
        transferTime = (timeDiff.days * 86400) + (timeDiff.seconds) + (
            timeDiff.microseconds / 1000000.0)
        accountingDict['TransferTime'] = transferTime
        accountingDict['FinalStatus'] = 'Successful'
        accountingDict['Source'] = siteName()
        accountingDict['Destination'] = se
        oDataOperation = DataOperation()
        oDataOperation.setEndTime(endTime)
        oDataOperation.setStartTime(startTime)
        oDataOperation.setValuesFromDict(accountingDict)
        return oDataOperation
def setUp( self ):
  """Give every test a freshly constructed DataLoggingClient."""
  freshClient = DataLoggingClient()
  self.dlc = freshClient
class ClientDCase ( DataLoggingArgumentsTestCase ):
  # ClientD raises from a decorated method: the decorator must re-raise the
  # exception while still recording the complete call sequence.

  def setUp( self ):
    """Give every test a freshly constructed DataLoggingClient."""
    self.dlc = DataLoggingClient()

  def test_no_exception( self ):
    """The raised exception propagates and sequence '6' is fully recorded."""
    allFiles = [ '/data/file1', '/data/file2', '/data/file3', '/data/file4' ]
    client = ClientD()
    # the exception raised inside the decorated method must reach the caller
    with self.assertRaises( Exception ):
      client.doSomething()

    res = self.dlc.getSequenceByID( '6' )
    self.assertTrue( res['OK'], res.get( 'Message', 'OK' ) )
    sequence = res['Value'][0]
    self.assertEqual( len( sequence.methodCalls ), 4 )
    self.assertEqual( sequence.hostName.name, socket.gethostname() )

    proxyInfo = getProxyInfo()
    if proxyInfo['OK']:
      proxyInfo = proxyInfo['Value']
      userName = proxyInfo.get( 'username' )
      group = proxyInfo.get( 'group' )
      if userName :
        self.assertEqual( sequence.userName.name, userName )
      if group :
        self.assertEqual( sequence.group.name, group )

    self.assertEqual( sequence.caller.name, '__main__.ClientD.doSomething' )

    # call 0: putAndRegister failed on all four files with the addFile error
    call = sequence.methodCalls[0]
    call.actions.sort( key = lambda x: x.file.name )
    self.assertEqual( call.name.name, 'TestDataManager.putAndRegister' )
    for idx, fileName in enumerate( allFiles ):
      self.assertEqual( call.actions[idx].file.name, fileName )
      self.assertEqual( call.actions[idx].status, 'Failed' )
      self.assertEqual( call.actions[idx].errorMessage, 'addFile exception' )

    # call 1: addFile recorded only file1 and file3, both failed
    call = sequence.methodCalls[1]
    call.actions.sort( key = lambda x: x.file.name )
    self.assertEqual( call.name.name, 'TestFileCatalog.addFile' )
    for idx, fileName in enumerate( [ '/data/file1', '/data/file3' ] ):
      self.assertEqual( call.actions[idx].file.name, fileName )
      self.assertEqual( call.actions[idx].status, 'Failed' )
      self.assertEqual( call.actions[idx].errorMessage, 'addFile exception' )

    # call 2: putFile succeeded on file1/file3 and failed on file2/file4
    call = sequence.methodCalls[2]
    call.actions.sort( key = lambda x: x.file.name )
    self.assertEqual( call.name.name, 'TestStorageElement.putFile' )
    for idx, fileName in enumerate( allFiles ):
      self.assertEqual( call.actions[idx].file.name, fileName )
      self.assertEqual( call.actions[idx].status, 'Successful' if idx % 2 == 0 else 'Failed' )

    # call 3: getFileSize shows the same success/failure pattern
    call = sequence.methodCalls[3]
    call.actions.sort( key = lambda x: x.file.name )
    self.assertEqual( call.name.name, 'TestStorageElement.getFileSize' )
    for idx, fileName in enumerate( allFiles ):
      self.assertEqual( call.actions[idx].file.name, fileName )
      self.assertEqual( call.actions[idx].status, 'Successful' if idx % 2 == 0 else 'Failed' )
targets = [] for x in range(4): targets.append(targetSE + str(random.randint(0, randomMax))) for call in calls: for x in range(2): call.addAction( DLAction(DLFile(files[x * 2]), 'Successful', DLStorageElement(sources[x * 2]), DLStorageElement(targets[x * 2]), blob, None, None)) call.addAction( DLAction(DLFile(files[x * 2 + 1]), 'Failed', DLStorageElement(sources[x * 2 + 1]), DLStorageElement(targets[x * 2 + 1]), blob, 'errorMessage', random.randint(1, 1999))) return sequence done = False start = time.time() client = DataLoggingClient(url=servAddress) while not done: seq = makeSequence() res = client.insertSequence(seq) if not res['OK']: print 'error %s' % res['Message'] if (time.time() - start > maxDuration): done = True
def dataLoggingClient(cls):
    """DataLoggingClient getter: build the shared client on first use,
    then hand back the cached instance.

    :param cls: class reference caching the client
    """
    if cls.__dataLoggingClient:
        return cls.__dataLoggingClient
    # import deferred until first use; NOTE(review): presumably to avoid a
    # circular import at module load time — confirm
    from DIRAC.DataManagementSystem.Client.DataLoggingClient import DataLoggingClient
    cls.__dataLoggingClient = DataLoggingClient()
    return cls.__dataLoggingClient
def initialize(self):
    """Instantiate the agent's clients, register all monitoring activities
    and build the thread pool.

    :return: S_OK()
    """
    self.RequestDBClient = RequestClient()
    self.ReplicaManager = ReplicaManager()
    self.DataLog = DataLoggingClient()

    # every activity is a rate counter (OP_SUM); only name/description/unit differ
    activities = [
        ("Iteration", "Agent Loops", "Loops/min"),
        ("Execute", "Request Processed", "Requests/min"),
        ("Done", "Request Completed", "Requests/min"),
        ("Replicate and register", "Replicate and register operations", "Attempts/min"),
        ("Replicate", "Replicate operations", "Attempts/min"),
        ("Put and register", "Put and register operations", "Attempts/min"),
        ("Put", "Put operations", "Attempts/min"),
        ("Replication successful", "Successful replications", "Successful/min"),
        ("Put successful", "Successful puts", "Successful/min"),
        ("Replication failed", "Failed replications", "Failed/min"),
        ("Put failed", "Failed puts", "Failed/min"),
        ("Replica registration successful", "Successful replica registrations", "Successful/min"),
        ("File registration successful", "Successful file registrations", "Successful/min"),
        ("Replica registration failed", "Failed replica registrations", "Failed/min"),
        ("File registration failed", "Failed file registrations", "Failed/min"),
    ]
    for activityName, description, unit in activities:
        gMonitor.registerActivity(activityName, description, "TransferAgent", unit, gMonitor.OP_SUM)

    self.maxNumberOfThreads = self.am_getOption('NumberOfThreads', 1)
    self.threadPoolDepth = self.am_getOption('ThreadPoolDepth', 1)
    self.threadPool = ThreadPool(1, self.maxNumberOfThreads)
    # Default proxy is the one defined under /Operations/Shifter/DataManager;
    # the shifterProxy option in the Configuration can override this default.
    self.am_setOption('shifterProxy', 'DataManager')
    return S_OK()
class RegistrationAgent( AgentModule, RequestAgentMixIn ):
  """Agent that executes 'register' requests from the request DB:
  each cycle it pulls one request, runs its 'registerFile' sub-requests
  through the ReplicaManager and writes the updated request back.
  """

  def initialize( self ):
    """Create the clients and the thread pool used by the agent.

    :return: S_OK()
    """
    self.RequestDBClient = RequestClient()
    self.ReplicaManager = ReplicaManager()
    self.DataLog = DataLoggingClient()
    self.maxNumberOfThreads = self.am_getOption( 'NumberOfThreads', 1 )
    self.threadPoolDepth = self.am_getOption( 'ThreadPoolDepth', 1 )
    self.threadPool = ThreadPool( 1, self.maxNumberOfThreads )
    # This sets the Default Proxy to used as that defined under
    # /Operations/Shifter/DataManager
    # the shifterProxy option in the Configuration can be used to change this default.
    self.am_setOption( 'shifterProxy', 'DataManager' )
    return S_OK()

  def execute( self ):
    """Queue threadPoolDepth request executions on the pool, then run one
    more synchronously and return its result.
    """
    for i in range( self.threadPoolDepth ):
      requestExecutor = ThreadedJob( self.executeRequest )
      self.threadPool.queueJob( requestExecutor )
    self.threadPool.processResults()
    return self.executeRequest()

  def executeRequest( self ):
    """Fetch one 'register' request, execute its waiting 'registerFile'
    sub-requests and store the updated request back in the DB.

    Always returns S_OK(); failures are logged and recorded in the request.
    """
    ################################################
    # Get a request from request DB
    res = self.RequestDBClient.getRequest( 'register' )
    if not res['OK']:
      gLogger.info( "RegistrationAgent.execute: Failed to get request from database." )
      return S_OK()
    elif not res['Value']:
      gLogger.info( "RegistrationAgent.execute: No requests to be executed found." )
      return S_OK()
    requestString = res['Value']['RequestString']
    requestName = res['Value']['RequestName']
    sourceServer = res['Value']['Server']
    # NOTE(review): bare except silently maps any missing/invalid JobID to 0
    try:
      jobID = int( res['Value']['JobID'] )
    except:
      jobID = 0
    gLogger.info( "RegistrationAgent.execute: Obtained request %s" % requestName )
    result = self.RequestDBClient.getCurrentExecutionOrder( requestName, sourceServer )
    if result['OK']:
      currentOrder = result['Value']
    else:
      return S_OK( 'Can not get the request execution order' )
    oRequest = RequestContainer( request = requestString )
    ################################################
    # Find the number of sub-requests from the request
    res = oRequest.getNumSubRequests( 'register' )
    if not res['OK']:
      # NOTE(review): message says 'transfer' subrequests in the register agent
      errStr = "RegistrationAgent.execute: Failed to obtain number of transfer subrequests."
      gLogger.error( errStr, res['Message'] )
      return S_OK()
    gLogger.info( "RegistrationAgent.execute: Found %s sub requests." % res['Value'] )
    ################################################
    # For all the sub-requests in the request
    modified = False
    for ind in range( res['Value'] ):
      gLogger.info( "RegistrationAgent.execute: Processing sub-request %s." % ind )
      subRequestAttributes = oRequest.getSubRequestAttributes( ind, 'register' )['Value']
      subExecutionOrder = int( subRequestAttributes['ExecutionOrder'] )
      subStatus = subRequestAttributes['Status']
      if subStatus == 'Waiting' and subExecutionOrder <= currentOrder:
        subRequestFiles = oRequest.getSubRequestFiles( ind, 'register' )['Value']
        operation = subRequestAttributes['Operation']
        ################################################
        # If the sub-request is a register file operation
        if operation == 'registerFile':
          gLogger.info( "RegistrationAgent.execute: Attempting to execute %s sub-request." % operation )
          diracSE = str( subRequestAttributes['TargetSE'] )
          if diracSE == 'SE':
            # We do not care about SE, put any there
            diracSE = "CERN-FAILOVER"
          catalog = subRequestAttributes['Catalogue']
          if catalog == "None":
            catalog = ''
          subrequest_done = True
          for subRequestFile in subRequestFiles:
            if subRequestFile['Status'] == 'Waiting':
              # normalise each optional attribute, keeping falsy values as-is
              lfn = subRequestFile.get( 'LFN', '' )
              if lfn: lfn = str( lfn )
              physicalFile = subRequestFile.get( 'PFN', '' )
              if physicalFile: physicalFile = str( physicalFile )
              fileSize = subRequestFile.get( 'Size', 0 )
              if fileSize: fileSize = int( fileSize )
              fileGuid = subRequestFile.get( 'GUID', '' )
              if fileGuid: fileGuid = str( fileGuid )
              checksum = subRequestFile.get( 'Addler', '' )
              if checksum: checksum = str( checksum )
              if catalog == 'BookkeepingDB':
                # bookkeeping registrations are forced to CERN-HIST
                diracSE = 'CERN-HIST'
              fileTuple = ( lfn, physicalFile, fileSize, diracSE, fileGuid, checksum )
              res = self.ReplicaManager.registerFile( fileTuple, catalog )
              # NOTE(review): debug print left in production code
              print res
              if not res['OK']:
                self.DataLog.addFileRecord( lfn, 'RegisterFail', diracSE, '', 'RegistrationAgent' )
                errStr = "RegistrationAgent.execute: Completely failed to register file."
                gLogger.error( errStr, res['Message'] )
                subrequest_done = False
              elif lfn in res['Value']['Failed'].keys():
                self.DataLog.addFileRecord( lfn, 'RegisterFail', diracSE, '', 'RegistrationAgent' )
                errStr = "RegistrationAgent.execute: Completely failed to register file."
                gLogger.error( errStr, res['Value']['Failed'][lfn] )
                subrequest_done = False
              else:
                # NOTE(review): success is logged with source 'TransferAgent'
                # and the file status is set on the 'transfer' sub-request type
                # although this agent processes 'register' requests — both look
                # like copy/paste slips; confirm before changing.
                self.DataLog.addFileRecord( lfn, 'Register', diracSE, '', 'TransferAgent' )
                oRequest.setSubRequestFileAttributeValue( ind, 'transfer', lfn, 'Status', 'Done' )
                modified = True
            else:
              gLogger.info( "RegistrationAgent.execute: File already completed." )
          if subrequest_done:
            oRequest.setSubRequestStatus( ind, 'register', 'Done' )
        ################################################
        # If the sub-request is none of the above types
        else:
          gLogger.error( "RegistrationAgent.execute: Operation not supported.", operation )
        ################################################
        # Determine whether there are any active files
        if oRequest.isSubRequestEmpty( ind, 'register' )['Value']:
          oRequest.setSubRequestStatus( ind, 'register', 'Done' )
      ################################################
      # If the sub-request is already in terminal state
      else:
        gLogger.info( "RegistrationAgent.execute: Sub-request %s is status '%s' and not to be executed." % ( ind, subRequestAttributes['Status'] ) )
    ################################################
    # Generate the new request string after operation
    requestString = oRequest.toXML()['Value']
    res = self.RequestDBClient.updateRequest( requestName, requestString, sourceServer )
    if modified and jobID:
      result = self.finalizeRequest( requestName, jobID, sourceServer )
    return S_OK()
files.append( dictLong['files'] + str( random.randint( 0, randomMax ) ) + '.data' ) sources = [] for x in range( 4 ): sources.append( dictLong['srcSE'] + str( random.randint( 0, randomMax ) ) ) targets = [] for x in range( 4 ): targets.append( dictLong['targetSE'] + str( random.randint( 0, randomMax ) ) ) for call in calls : for x in range( 2 ): call.addAction( DLAction( DLFile( files[x * 2] ) , 'Successful' , DLStorageElement( sources[x * 2] ), DLStorageElement( targets[x * 2] ), dictLong['blob'], 'errorMessage', random.randint( 0, 1050 ) ) ) call.addAction( DLAction( DLFile( files[x * 2 + 1 ] ) , 'Failed', DLStorageElement( sources[x * 2 + 1 ] ), DLStorageElement( targets[x * 2 + 1] ), dictLong['blob'], 'errorMessage', random.randint( 0, 1050 ) ) ) return sequence client = DataLoggingClient( url = servAddress ) for i in range( 3000000 ) : seq = makeSequence() res = client.insertSequence( seq, directInsert = True ) if not res['OK']: print 'error %s' % res['Message']
class TransferAgent( AgentModule, RequestAgentMixIn ):
  """ Agent processing 'transfer' requests from the RequestDB.

      For each waiting sub-request it executes the requested operation
      (put, putAndRegister[AndRemove], replicate, replicateAndRegister[AndRemove], get)
      through the ReplicaManager, marks per-file and per-sub-request status in the
      request container, reports marks to gMonitor and records file events in the
      DataLogging service.
  """

  def initialize( self ):
    """ Agent initialisation.

        Creates the RequestDB / ReplicaManager / DataLogging clients, registers
        all the monitoring activities and sets up the thread pool.

        :param self: self reference
        :return: S_OK
    """
    self.RequestDBClient = RequestClient()
    self.ReplicaManager = ReplicaManager()
    self.DataLog = DataLoggingClient()

    # One monitoring activity per operation type and per outcome.
    gMonitor.registerActivity( "Iteration", "Agent Loops", "TransferAgent", "Loops/min", gMonitor.OP_SUM )
    gMonitor.registerActivity( "Execute", "Request Processed", "TransferAgent", "Requests/min", gMonitor.OP_SUM )
    gMonitor.registerActivity( "Done", "Request Completed", "TransferAgent", "Requests/min", gMonitor.OP_SUM )
    gMonitor.registerActivity( "Replicate and register", "Replicate and register operations", "TransferAgent", "Attempts/min", gMonitor.OP_SUM )
    gMonitor.registerActivity( "Replicate", "Replicate operations", "TransferAgent", "Attempts/min", gMonitor.OP_SUM )
    gMonitor.registerActivity( "Put and register", "Put and register operations", "TransferAgent", "Attempts/min", gMonitor.OP_SUM )
    gMonitor.registerActivity( "Put", "Put operations", "TransferAgent", "Attempts/min", gMonitor.OP_SUM )
    gMonitor.registerActivity( "Replication successful", "Successful replications", "TransferAgent", "Successful/min", gMonitor.OP_SUM )
    gMonitor.registerActivity( "Put successful", "Successful puts", "TransferAgent", "Successful/min", gMonitor.OP_SUM )
    gMonitor.registerActivity( "Replication failed", "Failed replications", "TransferAgent", "Failed/min", gMonitor.OP_SUM )
    gMonitor.registerActivity( "Put failed", "Failed puts", "TransferAgent", "Failed/min", gMonitor.OP_SUM )
    gMonitor.registerActivity( "Replica registration successful", "Successful replica registrations", "TransferAgent", "Successful/min", gMonitor.OP_SUM )
    gMonitor.registerActivity( "File registration successful", "Successful file registrations", "TransferAgent", "Successful/min", gMonitor.OP_SUM )
    gMonitor.registerActivity( "Replica registration failed", "Failed replica registrations", "TransferAgent", "Failed/min", gMonitor.OP_SUM )
    gMonitor.registerActivity( "File registration failed", "Failed file registrations", "TransferAgent", "Failed/min", gMonitor.OP_SUM )

    self.maxNumberOfThreads = self.am_getOption( 'NumberOfThreads', 1 )
    self.threadPoolDepth = self.am_getOption( 'ThreadPoolDepth', 1 )
    self.threadPool = ThreadPool( 1, self.maxNumberOfThreads )

    # This sets the Default Proxy to used as that defined under
    # /Operations/Shifter/DataManager
    # the shifterProxy option in the Configuration can be used to change this default.
    self.am_setOption( 'shifterProxy', 'DataManager' )

    return S_OK()

  def execute( self ):
    """ Queue up to threadPoolDepth request-processing jobs on the thread pool,
        then process one request in the calling thread as well.

        :param self: self reference
    """
    for _i in range( self.threadPoolDepth ):
      requestExecutor = ThreadedJob( self.executeRequest )
      self.threadPool.queueJob( requestExecutor )
    self.threadPool.processResults()
    return self.executeRequest()

  def executeRequest( self ):
    """ Fetch one 'transfer' request from the RequestDB and execute all of its
        eligible sub-requests.

        Always returns S_OK: a failure to process is logged and the (possibly
        updated) request is written back, so the request can be retried later.

        :param self: self reference
        :return: S_OK
    """
    ################################################
    # Get a request from request DB
    gMonitor.addMark( "Iteration", 1 )
    res = self.RequestDBClient.getRequest( 'transfer' )
    if not res['OK']:
      gLogger.info( "TransferAgent.execute: Failed to get request from database." )
      return S_OK()
    elif not res['Value']:
      gLogger.info( "TransferAgent.execute: No requests to be executed found." )
      return S_OK()
    requestString = res['Value']['RequestString']
    requestName = res['Value']['RequestName']
    sourceServer = res['Value']['Server']
    try:
      jobID = int( res['Value']['JobID'] )
    except ( ValueError, TypeError, KeyError ):
      # narrow except: a missing or non-numeric JobID means "not tied to a job"
      jobID = 0
    gLogger.info( "TransferAgent.execute: Obtained request %s" % requestName )

    result = self.RequestDBClient.getCurrentExecutionOrder( requestName, sourceServer )
    if result['OK']:
      currentOrder = result['Value']
    else:
      return S_OK( 'Can not get the request execution order' )

    oRequest = RequestContainer( request = requestString )

    ################################################
    # Find the number of sub-requests from the request
    res = oRequest.getNumSubRequests( 'transfer' )
    if not res['OK']:
      errStr = "TransferAgent.execute: Failed to obtain number of transfer subrequests."
      gLogger.error( errStr, res['Message'] )
      return S_OK()
    gLogger.info( "TransferAgent.execute: Found %s sub requests." % res['Value'] )

    ################################################
    # For all the sub-requests in the request
    modified = False
    for ind in range( res['Value'] ):
      gMonitor.addMark( "Execute", 1 )
      gLogger.info( "TransferAgent.execute: Processing sub-request %s." % ind )
      subRequestAttributes = oRequest.getSubRequestAttributes( ind, 'transfer' )['Value']
      if subRequestAttributes['ExecutionOrder']:
        subExecutionOrder = int( subRequestAttributes['ExecutionOrder'] )
      else:
        subExecutionOrder = 0
      subStatus = subRequestAttributes['Status']
      # Only waiting sub-requests whose execution order has been reached are run.
      if subStatus == 'Waiting' and subExecutionOrder <= currentOrder:
        subRequestFiles = oRequest.getSubRequestFiles( ind, 'transfer' )['Value']
        operation = subRequestAttributes['Operation']
        subRequestError = ''

        ################################################
        # If the sub-request is a put and register operation
        if operation == 'putAndRegister' or operation == 'putAndRegisterAndRemove':
          gLogger.info( "TransferAgent.execute: Attempting to execute %s sub-request." % operation )
          diracSE = str( subRequestAttributes['TargetSE'] )
          catalog = ''
          if subRequestAttributes.has_key( 'Catalogue' ):
            catalog = subRequestAttributes['Catalogue']
          for subRequestFile in subRequestFiles:
            if subRequestFile['Status'] == 'Waiting':
              gMonitor.addMark( "Put and register", 1 )
              lfn = str( subRequestFile['LFN'] )
              pfn = subRequestFile['PFN']  # renamed from 'file' (shadowed builtin)
              guid = subRequestFile['GUID']
              addler = subRequestFile['Addler']
              res = self.ReplicaManager.putAndRegister( lfn, pfn, diracSE,
                                                        guid = guid,
                                                        checksum = addler,
                                                        catalog = catalog )
              if res['OK']:
                if lfn in res['Value']['Successful']:
                  if 'put' not in res['Value']['Successful'][lfn]:
                    # Put itself failed: nothing was registered.
                    gMonitor.addMark( "Put failed", 1 )
                    self.DataLog.addFileRecord( lfn, 'PutFail', diracSE, '', 'TransferAgent' )
                    gLogger.info( "TransferAgent.execute: Failed to put %s to %s." % ( lfn, diracSE ) )
                    subRequestError = "Put operation failed for %s to %s" % ( lfn, diracSE )
                    oRequest.setSubRequestFileAttributeValue( ind, 'transfer', lfn, 'Error', 'Put failed' )
                  elif 'register' not in res['Value']['Successful'][lfn]:
                    # Put succeeded but registration failed: mark the file Done and
                    # schedule a separate 'register' sub-request for the catalog entry.
                    gMonitor.addMark( "Put successful", 1 )
                    gMonitor.addMark( "File registration failed", 1 )
                    self.DataLog.addFileRecord( lfn, 'Put', diracSE, '', 'TransferAgent' )
                    self.DataLog.addFileRecord( lfn, 'RegisterFail', diracSE, '', 'TransferAgent' )
                    gLogger.info( "TransferAgent.execute: Successfully put %s to %s in %s seconds." % ( lfn, diracSE, res['Value']['Successful'][lfn]['put'] ) )
                    gLogger.info( "TransferAgent.execute: Failed to register %s to %s." % ( lfn, diracSE ) )
                    oRequest.setSubRequestFileAttributeValue( ind, 'transfer', lfn, 'Status', 'Done' )
                    oRequest.setSubRequestFileAttributeValue( ind, 'transfer', lfn, 'Error', 'Registration failed' )
                    subRequestError = "Registration failed for %s to %s" % ( lfn, diracSE )
                    fileDict = res['Value']['Failed'][lfn]['register']
                    registerRequestDict = { 'Attributes': { 'TargetSE': fileDict['TargetSE'],
                                                            'Operation': 'registerFile' },
                                            'Files': [ { 'LFN': fileDict['LFN'],
                                                         'PFN': fileDict['PFN'],
                                                         'Size': fileDict['Size'],
                                                         'Addler': fileDict['Addler'],
                                                         'GUID': fileDict['GUID'] } ] }
                    gLogger.info( "TransferAgent.execute: Setting registration request for failed file." )
                    oRequest.addSubRequest( registerRequestDict, 'register' )
                    modified = True
                  else:
                    # Both put and registration succeeded.
                    gMonitor.addMark( "Put successful", 1 )
                    gMonitor.addMark( "File registration successful", 1 )
                    self.DataLog.addFileRecord( lfn, 'Put', diracSE, '', 'TransferAgent' )
                    self.DataLog.addFileRecord( lfn, 'Register', diracSE, '', 'TransferAgent' )
                    gLogger.info( "TransferAgent.execute: Successfully put %s to %s in %s seconds." % ( lfn, diracSE, res['Value']['Successful'][lfn]['put'] ) )
                    gLogger.info( "TransferAgent.execute: Successfully registered %s to %s in %s seconds." % ( lfn, diracSE, res['Value']['Successful'][lfn]['register'] ) )
                    oRequest.setSubRequestFileAttributeValue( ind, 'transfer', lfn, 'Status', 'Done' )
                    modified = True
                else:
                  gMonitor.addMark( "Put failed", 1 )
                  self.DataLog.addFileRecord( lfn, 'PutFail', diracSE, '', 'TransferAgent' )
                  errStr = "TransferAgent.execute: Failed to put and register file."
                  gLogger.error( errStr, "%s %s %s" % ( lfn, diracSE, res['Value']['Failed'][lfn] ) )
                  oRequest.setSubRequestFileAttributeValue( ind, 'transfer', lfn, 'Error', 'Complete file failure' )
                  subRequestError = "Failed to put and register file"
              else:
                gMonitor.addMark( "Put failed", 1 )
                self.DataLog.addFileRecord( lfn, 'PutFail', diracSE, '', 'TransferAgent' )
                errStr = "TransferAgent.execute: Completely failed to put and register file."
                gLogger.error( errStr, res['Message'] )
                oRequest.setSubRequestFileAttributeValue( ind, 'transfer', lfn, 'Error', 'RM call failure' )
                # fixed typo: was "RM call file"; now consistent with the
                # replicateAndRegister branch below.
                subRequestError = operation + " RM call failed"
            else:
              gLogger.info( "TransferAgent.execute: File already completed." )

        ################################################
        # If the sub-request is a put operation
        elif operation == 'put':
          gLogger.info( "TransferAgent.execute: Attempting to execute %s sub-request." % operation )
          diracSE = subRequestAttributes['TargetSE']
          for subRequestFile in subRequestFiles:
            if subRequestFile['Status'] == 'Waiting':
              gMonitor.addMark( "Put", 1 )
              lfn = subRequestFile['LFN']
              pfn = subRequestFile['PFN']  # renamed from 'file' (shadowed builtin)
              res = self.ReplicaManager.put( lfn, pfn, diracSE )
              if res['OK']:
                if lfn in res['Value']['Successful']:
                  gMonitor.addMark( "Put successful", 1 )
                  self.DataLog.addFileRecord( lfn, 'Put', diracSE, '', 'TransferAgent' )
                  gLogger.info( "TransferAgent.execute: Successfully put %s to %s in %s seconds." % ( lfn, diracSE, res['Value']['Successful'][lfn] ) )
                  oRequest.setSubRequestFileAttributeValue( ind, 'transfer', lfn, 'Status', 'Done' )
                  modified = True
                else:
                  gMonitor.addMark( "Put failed", 1 )
                  self.DataLog.addFileRecord( lfn, 'PutFail', diracSE, '', 'TransferAgent' )
                  errStr = "TransferAgent.execute: Failed to put file."
                  gLogger.error( errStr, "%s %s %s" % ( lfn, diracSE, res['Value']['Failed'][lfn] ) )
                  subRequestError = "Put operation failed for %s to %s" % ( lfn, diracSE )
                  oRequest.setSubRequestFileAttributeValue( ind, 'transfer', lfn, 'Error', 'Put failed' )
              else:
                gMonitor.addMark( "Put failed", 1 )
                self.DataLog.addFileRecord( lfn, 'PutFail', diracSE, '', 'TransferAgent' )
                errStr = "TransferAgent.execute: Completely failed to put file."
                gLogger.error( errStr, res['Message'] )
                subRequestError = "Put RM call failed for %s to %s" % ( lfn, diracSE )
                oRequest.setSubRequestFileAttributeValue( ind, 'transfer', lfn, 'Error', 'Put RM call failed' )
            else:
              gLogger.info( "TransferAgent.execute: File already completed." )

        ################################################
        # If the sub-request is a replicate and register operation
        elif operation == 'replicateAndRegister' or operation == 'replicateAndRegisterAndRemove':
          gLogger.info( "TransferAgent.execute: Attempting to execute %s sub-request." % operation )
          targetSE = subRequestAttributes['TargetSE']
          sourceSE = subRequestAttributes['SourceSE']
          if sourceSE == "None":
            # 'None' string means no preferred source SE
            sourceSE = ''
          for subRequestFile in subRequestFiles:
            if subRequestFile['Status'] == 'Waiting':
              gMonitor.addMark( "Replicate and register", 1 )
              lfn = subRequestFile['LFN']
              res = self.ReplicaManager.replicateAndRegister( lfn, targetSE, sourceSE = sourceSE )
              if res['OK']:
                if lfn in res['Value']['Successful']:
                  if 'replicate' not in res['Value']['Successful'][lfn]:
                    gLogger.info( "TransferAgent.execute: Failed to replicate %s to %s." % ( lfn, targetSE ) )
                    gMonitor.addMark( "Replication failed", 1 )
                    oRequest.setSubRequestFileAttributeValue( ind, 'transfer', lfn, "Error", "Replication failed" )
                    subRequestError = "Replication failed for %s to %s" % ( lfn, targetSE )
                  elif 'register' not in res['Value']['Successful'][lfn]:
                    # Replica exists but is not catalogued: schedule a separate
                    # 'register' sub-request, as done for putAndRegister.
                    gMonitor.addMark( "Replication successful", 1 )
                    gMonitor.addMark( "Replica registration failed", 1 )
                    gLogger.info( "TransferAgent.execute: Successfully replicated %s to %s in %s seconds." % ( lfn, targetSE, res['Value']['Successful'][lfn]['replicate'] ) )
                    gLogger.info( "TransferAgent.execute: Failed to register %s to %s." % ( lfn, targetSE ) )
                    oRequest.setSubRequestFileAttributeValue( ind, 'transfer', lfn, 'Status', 'Done' )
                    oRequest.setSubRequestFileAttributeValue( ind, 'transfer', lfn, 'Error', 'Registration failed' )
                    subRequestError = "Registration failed for %s to %s" % ( lfn, targetSE )
                    fileDict = res['Value']['Failed'][lfn]['register']
                    registerRequestDict = { 'Attributes': { 'TargetSE': fileDict['TargetSE'],
                                                            'Operation': 'registerReplica' },
                                            'Files': [ { 'LFN': fileDict['LFN'],
                                                         'PFN': fileDict['PFN'] } ] }
                    gLogger.info( "TransferAgent.execute: Setting registration request for failed replica." )
                    oRequest.addSubRequest( registerRequestDict, 'register' )
                    modified = True
                  else:
                    gMonitor.addMark( "Replication successful", 1 )
                    gMonitor.addMark( "Replica registration successful", 1 )
                    gLogger.info( "TransferAgent.execute: Successfully replicated %s to %s in %s seconds." % ( lfn, targetSE, res['Value']['Successful'][lfn]['replicate'] ) )
                    gLogger.info( "TransferAgent.execute: Successfully registered %s to %s in %s seconds." % ( lfn, targetSE, res['Value']['Successful'][lfn]['register'] ) )
                    oRequest.setSubRequestFileAttributeValue( ind, 'transfer', lfn, 'Status', 'Done' )
                    modified = True
                else:
                  gMonitor.addMark( "Replication failed", 1 )
                  errStr = "TransferAgent.execute: Failed to replicate and register file."
                  gLogger.error( errStr, "%s %s %s" % ( lfn, targetSE, res['Value']['Failed'][lfn] ) )
              else:
                gMonitor.addMark( "Replication failed", 1 )
                errStr = "TransferAgent.execute: Completely failed to replicate and register file."
                gLogger.error( errStr, res['Message'] )
                oRequest.setSubRequestFileAttributeValue( ind, 'transfer', lfn, 'Error', 'RM call failure' )
                subRequestError = operation + " RM call failed"
            else:
              gLogger.info( "TransferAgent.execute: File already completed." )

        ################################################
        # If the sub-request is a replicate operation
        elif operation == 'replicate':
          gLogger.info( "TransferAgent.execute: Attempting to execute %s sub-request." % operation )
          targetSE = subRequestAttributes['TargetSE']
          sourceSE = subRequestAttributes['SourceSE']
          for subRequestFile in subRequestFiles:
            if subRequestFile['Status'] == 'Waiting':
              gMonitor.addMark( "Replicate", 1 )
              lfn = subRequestFile['LFN']
              res = self.ReplicaManager.replicate( lfn, targetSE, sourceSE = sourceSE )
              if res['OK']:
                if lfn in res['Value']['Successful']:
                  gMonitor.addMark( "Replication successful", 1 )
                  # BUGFIX: the success log formatted 'diracSE', which is unset
                  # (or stale from a previous put sub-request) in this branch.
                  gLogger.info( "TransferAgent.execute: Successfully replicated %s to %s in %s seconds." % ( lfn, targetSE, res['Value']['Successful'][lfn] ) )
                  oRequest.setSubRequestFileAttributeValue( ind, 'transfer', lfn, 'Status', 'Done' )
                  modified = True
                else:
                  gMonitor.addMark( "Replication failed", 1 )
                  errStr = "TransferAgent.execute: Failed to replicate file."
                  gLogger.error( errStr, "%s %s %s" % ( lfn, targetSE, res['Value']['Failed'][lfn] ) )
                  subRequestError = "Replicate operation failed for %s to %s" % ( lfn, targetSE )
                  # BUGFIX: error label said 'Put failed' (copy-paste from the put branch)
                  oRequest.setSubRequestFileAttributeValue( ind, 'transfer', lfn, 'Error', 'Replicate failed' )
              else:
                gMonitor.addMark( "Replication failed", 1 )
                errStr = "TransferAgent.execute: Completely failed to replicate file."
                gLogger.error( errStr, res['Message'] )
                subRequestError = "Replicate RM call failed for %s to %s" % ( lfn, targetSE )
                oRequest.setSubRequestFileAttributeValue( ind, 'transfer', lfn, 'Error', 'Replicate RM call failed' )
            else:
              gLogger.info( "TransferAgent.execute: File already completed." )

        ################################################
        # If the sub-request is a get operation
        elif operation == 'get':
          gLogger.info( "TransferAgent.execute: Attempting to execute %s sub-request." % operation )
          # NOTE(review): the source SE for a get is read from the 'TargetSE'
          # attribute — this mirrors the original code; confirm against the
          # request schema.
          sourceSE = subRequestAttributes['TargetSE']
          for subRequestFile in subRequestFiles:
            if subRequestFile['Status'] == 'Waiting':
              lfn = str( subRequestFile['LFN'] )
              pfn = str( subRequestFile['PFN'] )
              got = False
              # guard res['OK'] before touching res['Value'] (was a KeyError
              # on a completely failed RM call)
              if sourceSE and pfn:
                res = self.ReplicaManager.getStorageFile( pfn, sourceSE )
                if res['OK'] and pfn in res['Value']['Successful']:
                  got = True
              else:
                res = self.ReplicaManager.getFile( lfn )
                if res['OK'] and lfn in res['Value']['Successful']:
                  # BUGFIX: was 'got = False', so a successful getFile was
                  # always reported as a failure.
                  got = True
              if got:
                gLogger.info( "TransferAgent.execute: Successfully got %s." % lfn )
                oRequest.setSubRequestFileAttributeValue( ind, 'transfer', lfn, 'Status', 'Done' )
                modified = True
              else:
                errStr = "TransferAgent.execute: Failed to get file."
                gLogger.error( errStr, lfn )
            else:
              gLogger.info( "TransferAgent.execute: File already completed." )

        ################################################
        # If the sub-request is none of the above types
        else:
          gLogger.error( "TransferAgent.execute: Operation not supported.", operation )

        if subRequestError:
          oRequest.setSubRequestAttributeValue( ind, 'transfer', 'Error', subRequestError )

        ################################################
        # Determine whether there are any active files
        if oRequest.isSubRequestEmpty( ind, 'transfer' )['Value']:
          oRequest.setSubRequestStatus( ind, 'transfer', 'Done' )
          gMonitor.addMark( "Done", 1 )

      ################################################
      # If the sub-request is already in terminal state
      else:
        gLogger.info( "TransferAgent.execute: Sub-request %s is status '%s' and not to be executed." % ( ind, subRequestAttributes['Status'] ) )

    ################################################
    # Generate the new request string after operation
    requestString = oRequest.toXML()['Value']
    res = self.RequestDBClient.updateRequest( requestName, requestString, sourceServer )

    if modified and jobID:
      result = self.finalizeRequest( requestName, jobID, sourceServer )

    return S_OK()