def pushNewResults():
    """Scan the DIRAC storage-element results folder for newly added zip files.

    Each zip not yet known to the database (no ``AddedResults`` row with the
    same identifier) is downloaded locally and pushed via ``pushZip.pushThis``.
    Successfully handled zips are removed from the upload folder on the
    storage element, copied into the 'added' archive folder, and deleted from
    the local temp directory.

    Relies on module-level configuration: ``temp_save_path``,
    ``diracStorageElementName``, ``diracStorageElementFolder``,
    ``addedDiracStorageFolder``, ``logger``, ``pushZip`` and ``AddedResults``.
    """
    # cd to temp folder to temporarily save the zip files
    os.chdir(temp_save_path)
    logger.warning('Checking results directory for new added zip files...')

    # DIRAC must be initialised before its storage clients can be used
    from DIRAC.Core.Base.Script import initialize
    initialize(ignoreErrors=True, enableCommandLine=False)
    from DIRAC.Resources.Storage.StorageElement import StorageElement

    statSE = StorageElement(diracStorageElementName)
    print("Before listDirectory")
    dirDict = statSE.listDirectory(diracStorageElementFolder)
    print("After listDirectory")
    print(dirDict)

    for zipResult in dirDict['Value']['Successful'][diracStorageElementFolder]['Files']:
        fileName, fileExtension = os.path.splitext(zipResult)
        # get the file: copy it to the current local directory
        res = statSE.getFile(os.path.join(diracStorageElementFolder, zipResult))
        # a non-empty 'Failed' dict means the transfer did not succeed
        if not res['OK'] or res['Value']['Failed']:
            # FIX: was logger.errot(...), an AttributeError on every failed download
            logger.error("Failed download of " + zipResult)
            continue

        results_list = AddedResults.objects.filter(identifier__exact=fileName)
        res = True
        if not results_list:
            logger.info('New zip: {0}, found in results directory, calling pushZip command...'.format(zipResult))
            res = pushZip.pushThis(os.path.join(temp_save_path, zipResult))
        if not res:
            logger.error("Error pushing results, not removing")
            continue

        # remove it from the upload_test folder
        statSE.removeFile(os.path.join(diracStorageElementFolder, zipResult))
        # put the file into the added folder
        statSE.putFile({os.path.join(addedDiracStorageFolder, zipResult): zipResult})
        # also remove the file from the current directory
        os.remove(os.path.join(temp_save_path, zipResult))
def execute(self):
    """Process one 'integrity' request from the request database.

    For every waiting 'SEvsLFC' sub-request the storage namespace below the
    requested LFN is walked directory by directory; every file found on the
    storage element is cross-checked against the file catalog and any
    inconsistency (unparseable PFN, missing catalog entry, zero size, size
    mismatch, unregistered replica) is recorded in the
    DataManagement/DataIntegrity service.

    :return: S_OK() in all cases; failures are logged and the (possibly
             updated) request is written back to the request database.
    """
    integrityDB = RPCClient('DataManagement/DataIntegrity')
    res = self.RequestDBClient.getRequest('integrity')
    if not res['OK']:
        gLogger.info("SEvsLFCAgent.execute: Failed to get request from database.")
        return S_OK()
    elif not res['Value']:
        gLogger.info("SEvsLFCAgent.execute: No requests to be executed found.")
        return S_OK()
    requestString = res['Value']['requestString']
    requestName = res['Value']['requestName']
    sourceServer = res['Value']['Server']
    gLogger.info("SEvsLFCAgent.execute: Obtained request %s" % requestName)
    oRequest = RequestContainer(request=requestString)

    # Find the number of sub-requests from the request
    res = oRequest.getNumSubRequests('integrity')
    if not res['OK']:
        errStr = "SEvsLFCAgent.execute: Failed to obtain number of integrity subrequests."
        gLogger.error(errStr, res['Message'])
        return S_OK()
    gLogger.info("SEvsLFCAgent.execute: Found %s sub requests." % res['Value'])

    # For all the sub-requests in the request
    for ind in range(res['Value']):
        gLogger.info("SEvsLFCAgent.execute: Processing sub-request %s." % ind)
        subRequestAttributes = oRequest.getSubRequestAttributes(ind, 'integrity')['Value']
        if subRequestAttributes['Status'] != 'Waiting':
            # Sub-request already in a terminal state: nothing to execute
            gLogger.info("SEvsLFCAgent.execute: Sub-request %s is status '%s' and not to be executed." %
                         (ind, subRequestAttributes['Status']))
            continue
        subRequestFiles = oRequest.getSubRequestFiles(ind, 'integrity')['Value']
        operation = subRequestAttributes['Operation']
        if operation == 'SEvsLFC':
            gLogger.info("SEvsLFCAgent.execute: Attempting to execute %s sub-request." % operation)
            storageElementName = subRequestAttributes['StorageElement']
            for subRequestFile in subRequestFiles:
                if subRequestFile['Status'] != 'Waiting':
                    continue
                lfn = subRequestFile['LFN']
                storageElement = StorageElement(storageElementName)
                res = storageElement.isValid()
                if not res['OK']:
                    gLogger.error("SEvsLFCAgent.execute: Failed to instantiate destination StorageElement.",
                                  storageElement)
                    continue
                res = storageElement.getPfnForLfn(lfn)
                if not res['OK']:
                    # FIX: replaced unhelpful placeholder log message
                    gLogger.error("SEvsLFCAgent.execute: Failed to get PFN for LFN.", lfn)
                    continue
                self._checkDirectoryTree(integrityDB, storageElement, storageElementName, res['Value'])
                oRequest.setSubRequestFileAttributeValue(ind, 'integrity', lfn, 'Status', 'Done')
        else:
            gLogger.info("SEvsLFCAgent.execute: Operation not supported.", operation)
        # Determine whether there are any active files left
        if oRequest.isSubRequestEmpty(ind, 'integrity')['Value']:
            oRequest.setSubRequestStatus(ind, 'integrity', 'Done')

    # Generate the new request string after operation and persist it
    requestString = oRequest.toXML()['Value']
    res = self.RequestDBClient.updateRequest(requestName, requestString, sourceServer)
    return S_OK()

def _checkDirectoryTree(self, integrityDB, storageElement, storageElementName, startUrl):
    """Walk the storage namespace below startUrl with a NamespaceBrowser and
    cross-check every file found against the catalog, reporting problems to
    the integrity DB."""
    oNamespaceBrowser = NamespaceBrowser(startUrl)
    while oNamespaceBrowser.isActive():
        currentDir = oNamespaceBrowser.getActiveDir()
        gLogger.info("SEvsLFCAgent.execute: Attempting to list the contents of %s." % currentDir)
        res = storageElement.listDirectory(currentDir)
        if not res['OK'] or currentDir not in res['Value']['Successful']:
            # FIX: the original indexed res['Value']['Successful']['Failed'],
            # a nonexistent nested key that raised KeyError on the failure
            # path; the reason lives in res['Value']['Failed'] (or in
            # res['Message'] when the whole call failed), and res['OK'] was
            # never checked before touching res['Value'].
            errMsg = res['Message'] if not res['OK'] else res['Value']['Failed'].get(currentDir)
            gLogger.error("SEvsLFCAgent.execute: Failed to list the directory contents.",
                          "%s %s" % (currentDir, errMsg))
            subDirs = [currentDir]
        else:
            # Normalise every returned URL to the SRM2 form without port
            subDirs = []
            files = {}
            for surl, surlDict in res['Value']['Successful'][currentDir]['Files'].items():
                pfnRes = storageElement.getPfnForProtocol(surl, 'SRM2', withPort=False)
                files[pfnRes['Value']] = surlDict
            for surl, surlDict in res['Value']['Successful'][currentDir]['SubDirs'].items():
                pfnRes = storageElement.getPfnForProtocol(surl, 'SRM2', withPort=False)
                subDirs.append(pfnRes['Value'])
            gLogger.info("SEvsLFCAgent.execute: Successfully obtained %s sub-directories." % len(subDirs))
            gLogger.info("SEvsLFCAgent.execute: Successfully obtained %s files." % len(files))

            # Resolve each storage PFN back to its LFN
            selectedLfns = []
            lfnPfnDict = {}
            pfnSize = {}
            for pfn, pfnDict in files.items():
                res = storageElement.getPfnPath(pfn)
                if not res['OK']:
                    gLogger.error("SEvsLFCAgent.execute: Failed to get determine LFN from pfn.",
                                  "%s %s" % (pfn, res['Message']))
                    self._reportProblematic(integrityDB, 'NonConventionPfn', '', pfn,
                                            storageElementName, size=pfnDict['Size'])
                else:
                    lfn = res['Value']
                    selectedLfns.append(lfn)
                    lfnPfnDict[lfn] = pfn
                    pfnSize[pfn] = pfnDict['Size']

            # Compare storage sizes against catalog metadata
            res = self.ReplicaManager.getCatalogFileMetadata(selectedLfns)
            if not res['OK']:
                # Re-queue this directory so it is retried
                subDirs = [currentDir]
            else:
                for lfn in res['Value']['Failed'].keys():
                    gLogger.error("SEvsLFCAgent.execute: Failed to get metadata.",
                                  "%s %s" % (lfn, res['Value']['Failed'][lfn]))
                    self._reportProblematic(integrityDB, 'SEPfnNoLfn', lfn, lfnPfnDict[lfn],
                                            storageElementName, size=pfnSize[lfnPfnDict[lfn]])
                for lfn, lfnDict in res['Value']['Successful'].items():
                    pfn = lfnPfnDict[lfn]
                    storageSize = pfnSize[pfn]
                    catalogSize = lfnDict['Size']
                    if int(catalogSize) == int(storageSize):
                        gLogger.info("SEvsLFCAgent.execute: Catalog and storage sizes match.",
                                     "%s %s" % (pfn, storageElementName))
                        gLogger.info("Change the status in the LFC")
                    elif int(storageSize) == 0:
                        gLogger.error("SEvsLFCAgent.execute: Physical file size is 0.",
                                      "%s %s" % (pfn, storageElementName))
                        self._reportProblematic(integrityDB, 'ZeroSizePfn', lfn, pfn, storageElementName)
                    else:
                        gLogger.error("SEvsLFCAgent.execute: Catalog and storage size mis-match.",
                                      "%s %s" % (pfn, storageElementName))
                        self._reportProblematic(integrityDB, 'PfnSizeMismatch', lfn, pfn, storageElementName)

            # Verify that every storage PFN is registered as a replica
            res = self.ReplicaManager.getCatalogReplicas(selectedLfns)
            if not res['OK']:
                subDirs = [currentDir]
            else:
                for lfn in res['Value']['Failed'].keys():
                    gLogger.error("SEvsLFCAgent.execute: Failed to get replica information.",
                                  "%s %s" % (lfn, res['Value']['Failed'][lfn]))
                    self._reportProblematic(integrityDB, 'PfnNoReplica', lfn, lfnPfnDict[lfn],
                                            storageElementName, size=pfnSize[lfnPfnDict[lfn]])
                for lfn, repDict in res['Value']['Successful'].items():
                    pfn = lfnPfnDict[lfn]
                    if pfn not in repDict.values():
                        gLogger.error("SEvsLFCAgent.execute: SE PFN not registered.", "%s %s" % (lfn, pfn))
                        self._reportProblematic(integrityDB, 'PfnNoReplica', lfn, pfn, storageElementName)
                    else:
                        gLogger.info("SEvsLFCAgent.execute: SE Pfn verified.", pfn)
        oNamespaceBrowser.updateDirs(subDirs)

def _reportProblematic(self, integrityDB, prognosis, lfn, pfn, storageElementName, size=None):
    """Insert one problematic-file record into the DataIntegrity DB and log the outcome.

    :param size: optional file size; included in the record only when known.
    """
    fileMetadata = {'Prognosis': prognosis, 'LFN': lfn, 'PFN': pfn,
                    'StorageElement': storageElementName}
    if size is not None:
        fileMetadata['Size'] = size
    res = integrityDB.insertProblematic(AGENT_NAME, fileMetadata)
    if res['OK']:
        gLogger.info("SEvsLFCAgent.execute: Successfully added to IntegrityDB.")
        gLogger.error("Change the status in the LFC,ProcDB....")
    else:
        # FIX: replaced profane placeholder; failover handling is still TODO
        gLogger.error("SEvsLFCAgent.execute: Failed to insert problematic in IntegrityDB.", res['Message'])
    return res
def __getStorageDirectoryContents(self, lfnDir, storageElement):
    """Recursively collect the metadata of all files below the supplied
    directories on the given storage element.

    Returns S_OK({lfn: metadata}) on success; zero-size files are reported
    as problematic replicas before returning.
    """
    gLogger.info('Obtaining the contents for %s directories at %s' % (len(lfnDir), storageElement))
    se = StorageElement(storageElement)

    # Establish which of the requested directories actually exist
    res = se.exists(lfnDir)
    if not res['OK']:
        gLogger.error("Failed to obtain existance of directories", res['Message'])
        return res
    missing = res['Value']['Failed']
    for directory, error in missing.items():
        gLogger.error('Failed to determine existance of directory', '%s %s' % (directory, error))
    if missing:
        return S_ERROR('Failed to determine existance of directory')
    directoryExists = res['Value']['Successful']
    activeDirs = [d for d in sorted(directoryExists) if directoryExists[d]]

    # Breadth-first walk of the directory tree
    allFiles = {}
    while activeDirs:
        currentDir = activeDirs.pop(0)
        res = se.listDirectory(currentDir)
        if not res['OK']:
            gLogger.error('Failed to get directory contents', res['Message'])
            return res
        if currentDir in res['Value']['Failed']:
            reason = res['Value']['Failed'][currentDir]
            gLogger.error('Failed to get directory contents', '%s %s' % (currentDir, reason))
            return S_ERROR(reason)
        dirContents = res['Value']['Successful'][currentDir]
        # Queue sub-directories (as LFNs) for the next iterations
        activeDirs.extend(se.getLFNFromURL(dirContents['SubDirs']).get('Value', {}).get('Successful', []))
        fileURLMetadata = dirContents['Files']
        res = se.getLFNFromURL(fileURLMetadata)
        if not res['OK']:
            gLogger.error('Failed to get directory content LFNs', res['Message'])
            return res
        badUrls = res['Value']['Failed']
        for url, error in badUrls.items():
            gLogger.error("Failed to get LFN for URL", "%s %s" % (url, error))
        if badUrls:
            return S_ERROR("Failed to get LFNs for PFNs")
        for url, lfn in res['Value']['Successful'].items():
            allFiles[lfn] = fileURLMetadata[url]

    # Drop bookkeeping entries and flag empty files
    zeroSizeFiles = []
    for lfn in sorted(allFiles):
        if os.path.basename(lfn) == 'dirac_directory':
            allFiles.pop(lfn)
        elif allFiles[lfn]['Size'] == 0:
            zeroSizeFiles.append((lfn, 'deprecatedUrl', storageElement, 'PFNZeroSize'))
    if zeroSizeFiles:
        self.__reportProblematicReplicas(zeroSizeFiles, storageElement, 'PFNZeroSize')
    gLogger.info('Obtained at total of %s files for directories at %s' % (len(allFiles), storageElement))
    return S_OK(allFiles)
def getStorageDirectoryContents(self, lfnDir, storageElement):
    """Take the supplied LFN directories and recursively obtain the files
    stored under them on the given storage element.

    Returns S_OK({lfn: metadata}); zero-size files are reported as
    problematic replicas before returning.
    """
    gLogger.info('Obtaining the contents for %s directories at %s' % (len(lfnDir), storageElement))
    se = StorageElement(storageElement)

    # Establish which of the requested directories actually exist
    res = se.exists(lfnDir)
    if not res['OK']:
        gLogger.error("Failed to obtain existance of directories", res['Message'])
        return res
    missing = res['Value']['Failed']
    for directory, error in missing.iteritems():
        gLogger.error('Failed to determine existance of directory', '%s %s' % (directory, error))
    if missing:
        return S_ERROR(errno.ENOENT, 'Failed to determine existance of directory')
    directoryExists = res['Value']['Successful']
    activeDirs = [d for d in sorted(directoryExists) if directoryExists[d]]

    # Breadth-first walk of the directory tree
    allFiles = {}
    while activeDirs:
        currentDir = activeDirs.pop(0)
        res = se.listDirectory(currentDir)
        if not res['OK']:
            gLogger.error('Failed to get directory contents', res['Message'])
            return res
        if currentDir in res['Value']['Failed']:
            reason = res['Value']['Failed'][currentDir]
            gLogger.error('Failed to get directory contents', '%s %s' % (currentDir, reason))
            return S_ERROR(errno.ENOENT, reason)
        dirContents = res['Value']['Successful'][currentDir]
        # Queue sub-directories (as LFNs) for the next iterations
        activeDirs.extend(se.getLFNFromURL(dirContents['SubDirs']).get('Value', {}).get('Successful', []))
        fileURLMetadata = dirContents['Files']
        res = se.getLFNFromURL(fileURLMetadata)
        if not res['OK']:
            gLogger.error('Failed to get directory content LFNs', res['Message'])
            return res
        badURLs = res['Value']['Failed']
        for url, error in badURLs.iteritems():
            gLogger.error("Failed to get LFN for URL", "%s %s" % (url, error))
        if badURLs:
            return S_ERROR(errno.ENOENT, "Failed to get LFNs for PFNs")
        for urlLfn, lfn in res['Value']['Successful'].iteritems():
            allFiles[lfn] = fileURLMetadata[urlLfn]

    # Drop bookkeeping entries and flag empty files
    zeroSizeFiles = []
    for lfn in sorted(allFiles):
        if os.path.basename(lfn) == 'dirac_directory':
            allFiles.pop(lfn)
        elif not allFiles[lfn]['Size']:
            zeroSizeFiles.append((lfn, 'deprecatedUrl', storageElement, 'PFNZeroSize'))
    if zeroSizeFiles:
        self.dic.reportProblematicReplicas(zeroSizeFiles, storageElement, 'PFNZeroSize')
    gLogger.info('Obtained at total of %s files for directories at %s' % (len(allFiles), storageElement))
    return S_OK(allFiles)
def __getStorageDirectoryContents( self, lfnDir, storageElement ):
    """ Recursively collect the metadata of all files below the supplied LFN
        directories on the given storage element.

        Returns S_OK( { pfn : metadata } ) where each metadata dict is
        augmented with its 'LFN'; zero-size files are reported as problematic
        replicas before returning.
    """
    gLogger.info( 'Obtaining the contents for %s directories at %s' % ( len( lfnDir ), storageElement ) )
    se = StorageElement( storageElement )

    # Translate the LFN directories into storage PFNs
    res = se.getPfnForLfn( lfnDir )
    if not res['OK']:
        gLogger.error( "Failed to get PFNs for directories", res['Message'] )
        return res
    for directory, error in res['Value']['Failed'].items():
        gLogger.error( 'Failed to obtain directory PFN from LFNs', '%s %s' % ( directory, error ) )
    if res['Value']['Failed']:
        return S_ERROR( 'Failed to obtain directory PFN from LFNs' )
    storageDirectories = res['Value']['Successful'].values()

    # Keep only the directories that actually exist on the storage
    res = se.exists( storageDirectories )
    if not res['OK']:
        gLogger.error( "Failed to obtain existance of directories", res['Message'] )
        return res
    for directory, error in res['Value']['Failed'].items():
        gLogger.error( 'Failed to determine existance of directory', '%s %s' % ( directory, error ) )
    if res['Value']['Failed']:
        return S_ERROR( 'Failed to determine existance of directory' )
    directoryExists = res['Value']['Successful']
    activeDirs = [ directory for directory in sortList( directoryExists.keys() ) if directoryExists[directory] ]

    # Breadth-first walk of the directory tree
    allFiles = {}
    while activeDirs:
        currentDir = activeDirs.pop( 0 )
        res = se.listDirectory( currentDir )
        if not res['OK']:
            gLogger.error( 'Failed to get directory contents', res['Message'] )
            return res
        if currentDir in res['Value']['Failed']:
            gLogger.error( 'Failed to get directory contents', '%s %s' % ( currentDir, res['Value']['Failed'][currentDir] ) )
            return S_ERROR( res['Value']['Failed'][currentDir] )
        dirContents = res['Value']['Successful'][currentDir]
        activeDirs.extend( dirContents['SubDirs'] )
        fileMetadata = dirContents['Files']
        # Map each storage PFN back onto its LFN
        res = se.getLfnForPfn( fileMetadata.keys() )
        if not res['OK']:
            gLogger.error( 'Failed to get directory content LFNs', res['Message'] )
            return res
        for pfn, error in res['Value']['Failed'].items():
            gLogger.error( "Failed to get LFN for PFN", "%s %s" % ( pfn, error ) )
        if res['Value']['Failed']:
            return S_ERROR( "Failed to get LFNs for PFNs" )
        for pfn, lfn in res['Value']['Successful'].items():
            fileMetadata[pfn]['LFN'] = lfn
        allFiles.update( fileMetadata )

    # Drop bookkeeping entries and flag empty files; lost/unavailable
    # classification is not implemented, the lists stay empty.
    zeroSizeFiles = []
    lostFiles = []
    unavailableFiles = []
    for pfn in sortList( allFiles.keys() ):
        if os.path.basename( pfn ) == 'dirac_directory':
            allFiles.pop( pfn )
        elif allFiles[pfn]['Size'] == 0:
            zeroSizeFiles.append( ( allFiles[pfn]['LFN'], pfn, storageElement, 'PFNZeroSize' ) )
    if zeroSizeFiles:
        self.__reportProblematicReplicas( zeroSizeFiles, storageElement, 'PFNZeroSize' )
    if lostFiles:
        self.__reportProblematicReplicas( lostFiles, storageElement, 'PFNLost' )
    if unavailableFiles:
        self.__reportProblematicReplicas( unavailableFiles, storageElement, 'PFNUnavailable' )
    gLogger.info( 'Obtained at total of %s files for directories at %s' % ( len( allFiles ), storageElement ) )
    return S_OK( allFiles )
def execute(self):
    """Process one 'integrity' request from the request database.

    For every waiting 'SEvsLFC' sub-request the storage namespace below the
    requested LFN is walked directory by directory; every file found on the
    storage element is cross-checked against the file catalog and any
    inconsistency (unparseable PFN, missing catalog entry, zero size, size
    mismatch, unregistered replica) is recorded in the
    DataManagement/DataIntegrity service.

    :return: S_OK() in all cases; failures are logged and the (possibly
             updated) request is written back to the request database.
    """
    integrityDB = RPCClient('DataManagement/DataIntegrity')
    res = self.RequestDBClient.getRequest('integrity')
    if not res['OK']:
        gLogger.info("SEvsLFCAgent.execute: Failed to get request from database.")
        return S_OK()
    elif not res['Value']:
        gLogger.info("SEvsLFCAgent.execute: No requests to be executed found.")
        return S_OK()
    requestString = res['Value']['requestString']
    requestName = res['Value']['requestName']
    sourceServer = res['Value']['Server']
    gLogger.info("SEvsLFCAgent.execute: Obtained request %s" % requestName)
    oRequest = RequestContainer(request=requestString)

    # Find the number of sub-requests from the request
    res = oRequest.getNumSubRequests('integrity')
    if not res['OK']:
        errStr = "SEvsLFCAgent.execute: Failed to obtain number of integrity subrequests."
        gLogger.error(errStr, res['Message'])
        return S_OK()
    gLogger.info("SEvsLFCAgent.execute: Found %s sub requests." % res['Value'])

    # For all the sub-requests in the request
    for ind in range(res['Value']):
        gLogger.info("SEvsLFCAgent.execute: Processing sub-request %s." % ind)
        subRequestAttributes = oRequest.getSubRequestAttributes(ind, 'integrity')['Value']
        if subRequestAttributes['Status'] != 'Waiting':
            # Sub-request already in a terminal state: nothing to execute
            gLogger.info("SEvsLFCAgent.execute: Sub-request %s is status '%s' and not to be executed." %
                         (ind, subRequestAttributes['Status']))
            continue
        subRequestFiles = oRequest.getSubRequestFiles(ind, 'integrity')['Value']
        operation = subRequestAttributes['Operation']
        if operation == 'SEvsLFC':
            gLogger.info("SEvsLFCAgent.execute: Attempting to execute %s sub-request." % operation)
            storageElementName = subRequestAttributes['StorageElement']
            for subRequestFile in subRequestFiles:
                if subRequestFile['Status'] != 'Waiting':
                    continue
                lfn = subRequestFile['LFN']
                storageElement = StorageElement(storageElementName)
                res = storageElement.isValid()
                if not res['OK']:
                    gLogger.error("SEvsLFCAgent.execute: Failed to instantiate destination StorageElement.",
                                  storageElement)
                    continue
                res = storageElement.getPfnForLfn(lfn)
                if not res['OK']:
                    # FIX: replaced unhelpful placeholder log message
                    gLogger.error("SEvsLFCAgent.execute: Failed to get PFN for LFN.", lfn)
                    continue
                self._checkDirectoryTree(integrityDB, storageElement, storageElementName, res['Value'])
                oRequest.setSubRequestFileAttributeValue(ind, 'integrity', lfn, 'Status', 'Done')
        else:
            gLogger.info("SEvsLFCAgent.execute: Operation not supported.", operation)
        # Determine whether there are any active files left
        if oRequest.isSubRequestEmpty(ind, 'integrity')['Value']:
            oRequest.setSubRequestStatus(ind, 'integrity', 'Done')

    # Generate the new request string after operation and persist it
    requestString = oRequest.toXML()['Value']
    res = self.RequestDBClient.updateRequest(requestName, requestString, sourceServer)
    return S_OK()

def _checkDirectoryTree(self, integrityDB, storageElement, storageElementName, startUrl):
    """Walk the storage namespace below startUrl with a NamespaceBrowser and
    cross-check every file found against the catalog, reporting problems to
    the integrity DB."""
    oNamespaceBrowser = NamespaceBrowser(startUrl)
    while oNamespaceBrowser.isActive():
        currentDir = oNamespaceBrowser.getActiveDir()
        gLogger.info("SEvsLFCAgent.execute: Attempting to list the contents of %s." % currentDir)
        res = storageElement.listDirectory(currentDir)
        if not res['OK'] or currentDir not in res['Value']['Successful']:
            # FIX: the original indexed res['Value']['Successful']['Failed'],
            # a nonexistent nested key that raised KeyError on the failure
            # path; the reason lives in res['Value']['Failed'] (or in
            # res['Message'] when the whole call failed), and res['OK'] was
            # never checked before touching res['Value'].
            errMsg = res['Message'] if not res['OK'] else res['Value']['Failed'].get(currentDir)
            gLogger.error("SEvsLFCAgent.execute: Failed to list the directory contents.",
                          "%s %s" % (currentDir, errMsg))
            subDirs = [currentDir]
        else:
            # Normalise every returned URL to the SRM2 form without port
            subDirs = []
            files = {}
            for surl, surlDict in res['Value']['Successful'][currentDir]['Files'].items():
                pfnRes = storageElement.getPfnForProtocol(surl, 'SRM2', withPort=False)
                files[pfnRes['Value']] = surlDict
            for surl, surlDict in res['Value']['Successful'][currentDir]['SubDirs'].items():
                pfnRes = storageElement.getPfnForProtocol(surl, 'SRM2', withPort=False)
                subDirs.append(pfnRes['Value'])
            gLogger.info("SEvsLFCAgent.execute: Successfully obtained %s sub-directories." % len(subDirs))
            gLogger.info("SEvsLFCAgent.execute: Successfully obtained %s files." % len(files))

            # Resolve each storage PFN back to its LFN
            selectedLfns = []
            lfnPfnDict = {}
            pfnSize = {}
            for pfn, pfnDict in files.items():
                res = storageElement.getPfnPath(pfn)
                if not res['OK']:
                    gLogger.error("SEvsLFCAgent.execute: Failed to get determine LFN from pfn.",
                                  "%s %s" % (pfn, res['Message']))
                    self._reportProblematic(integrityDB, 'NonConventionPfn', '', pfn,
                                            storageElementName, size=pfnDict['Size'])
                else:
                    lfn = res['Value']
                    selectedLfns.append(lfn)
                    lfnPfnDict[lfn] = pfn
                    pfnSize[pfn] = pfnDict['Size']

            # Compare storage sizes against catalog metadata
            res = self.ReplicaManager.getCatalogFileMetadata(selectedLfns)
            if not res['OK']:
                # Re-queue this directory so it is retried
                subDirs = [currentDir]
            else:
                for lfn in res['Value']['Failed'].keys():
                    gLogger.error("SEvsLFCAgent.execute: Failed to get metadata.",
                                  "%s %s" % (lfn, res['Value']['Failed'][lfn]))
                    self._reportProblematic(integrityDB, 'SEPfnNoLfn', lfn, lfnPfnDict[lfn],
                                            storageElementName, size=pfnSize[lfnPfnDict[lfn]])
                for lfn, lfnDict in res['Value']['Successful'].items():
                    pfn = lfnPfnDict[lfn]
                    storageSize = pfnSize[pfn]
                    catalogSize = lfnDict['Size']
                    if int(catalogSize) == int(storageSize):
                        gLogger.info("SEvsLFCAgent.execute: Catalog and storage sizes match.",
                                     "%s %s" % (pfn, storageElementName))
                        gLogger.info("Change the status in the LFC")
                    elif int(storageSize) == 0:
                        gLogger.error("SEvsLFCAgent.execute: Physical file size is 0.",
                                      "%s %s" % (pfn, storageElementName))
                        self._reportProblematic(integrityDB, 'ZeroSizePfn', lfn, pfn, storageElementName)
                    else:
                        gLogger.error("SEvsLFCAgent.execute: Catalog and storage size mis-match.",
                                      "%s %s" % (pfn, storageElementName))
                        self._reportProblematic(integrityDB, 'PfnSizeMismatch', lfn, pfn, storageElementName)

            # Verify that every storage PFN is registered as a replica
            res = self.ReplicaManager.getCatalogReplicas(selectedLfns)
            if not res['OK']:
                subDirs = [currentDir]
            else:
                for lfn in res['Value']['Failed'].keys():
                    gLogger.error("SEvsLFCAgent.execute: Failed to get replica information.",
                                  "%s %s" % (lfn, res['Value']['Failed'][lfn]))
                    self._reportProblematic(integrityDB, 'PfnNoReplica', lfn, lfnPfnDict[lfn],
                                            storageElementName, size=pfnSize[lfnPfnDict[lfn]])
                for lfn, repDict in res['Value']['Successful'].items():
                    pfn = lfnPfnDict[lfn]
                    if pfn not in repDict.values():
                        gLogger.error("SEvsLFCAgent.execute: SE PFN not registered.", "%s %s" % (lfn, pfn))
                        self._reportProblematic(integrityDB, 'PfnNoReplica', lfn, pfn, storageElementName)
                    else:
                        gLogger.info("SEvsLFCAgent.execute: SE Pfn verified.", pfn)
        oNamespaceBrowser.updateDirs(subDirs)

def _reportProblematic(self, integrityDB, prognosis, lfn, pfn, storageElementName, size=None):
    """Insert one problematic-file record into the DataIntegrity DB and log the outcome.

    :param size: optional file size; included in the record only when known.
    """
    fileMetadata = {'Prognosis': prognosis, 'LFN': lfn, 'PFN': pfn,
                    'StorageElement': storageElementName}
    if size is not None:
        fileMetadata['Size'] = size
    res = integrityDB.insertProblematic(AGENT_NAME, fileMetadata)
    if res['OK']:
        gLogger.info("SEvsLFCAgent.execute: Successfully added to IntegrityDB.")
        gLogger.error("Change the status in the LFC,ProcDB....")
    else:
        # FIX: replaced profane placeholder; failover handling is still TODO
        gLogger.error("SEvsLFCAgent.execute: Failed to insert problematic in IntegrityDB.", res['Message'])
    return res
def __getStorageDirectoryContents(self, lfnDir, storageElement):
    """Recursively collect the metadata of all files below the supplied LFN
    directories on the given storage element.

    Returns S_OK({pfn: metadata}) where each metadata dict is augmented with
    its 'LFN'; zero-size files are reported as problematic replicas before
    returning.
    """
    gLogger.info('Obtaining the contents for %s directories at %s' % (len(lfnDir), storageElement))
    se = StorageElement(storageElement)

    # Translate the LFN directories into storage PFNs
    res = se.getPfnForLfn(lfnDir)
    if not res['OK']:
        gLogger.error("Failed to get PFNs for directories", res['Message'])
        return res
    badDirs = res['Value']['Failed']
    for directory, error in badDirs.items():
        gLogger.error('Failed to obtain directory PFN from LFNs', '%s %s' % (directory, error))
    if badDirs:
        return S_ERROR('Failed to obtain directory PFN from LFNs')
    storageDirectories = res['Value']['Successful'].values()

    # Keep only the directories that actually exist on the storage
    res = se.exists(storageDirectories)
    if not res['OK']:
        gLogger.error("Failed to obtain existance of directories", res['Message'])
        return res
    missing = res['Value']['Failed']
    for directory, error in missing.items():
        gLogger.error('Failed to determine existance of directory', '%s %s' % (directory, error))
    if missing:
        return S_ERROR('Failed to determine existance of directory')
    directoryExists = res['Value']['Successful']
    activeDirs = [d for d in sortList(directoryExists.keys()) if directoryExists[d]]

    # Breadth-first walk of the directory tree
    allFiles = {}
    while activeDirs:
        currentDir = activeDirs.pop(0)
        res = se.listDirectory(currentDir)
        if not res['OK']:
            gLogger.error('Failed to get directory contents', res['Message'])
            return res
        if currentDir in res['Value']['Failed']:
            reason = res['Value']['Failed'][currentDir]
            gLogger.error('Failed to get directory contents', '%s %s' % (currentDir, reason))
            return S_ERROR(reason)
        dirContents = res['Value']['Successful'][currentDir]
        activeDirs.extend(dirContents['SubDirs'])
        fileMetadata = dirContents['Files']
        # Map each storage PFN back onto its LFN
        res = se.getLfnForPfn(fileMetadata.keys())
        if not res['OK']:
            gLogger.error('Failed to get directory content LFNs', res['Message'])
            return res
        badPfns = res['Value']['Failed']
        for pfn, error in badPfns.items():
            gLogger.error("Failed to get LFN for PFN", "%s %s" % (pfn, error))
        if badPfns:
            return S_ERROR("Failed to get LFNs for PFNs")
        for pfn, lfn in res['Value']['Successful'].items():
            fileMetadata[pfn]['LFN'] = lfn
        allFiles.update(fileMetadata)

    # Drop bookkeeping entries and flag empty files; lost/unavailable
    # classification is not implemented, the lists stay empty.
    zeroSizeFiles = []
    lostFiles = []
    unavailableFiles = []
    for pfn in sortList(allFiles.keys()):
        if os.path.basename(pfn) == 'dirac_directory':
            allFiles.pop(pfn)
        elif allFiles[pfn]['Size'] == 0:
            zeroSizeFiles.append((allFiles[pfn]['LFN'], pfn, storageElement, 'PFNZeroSize'))
    if zeroSizeFiles:
        self.__reportProblematicReplicas(zeroSizeFiles, storageElement, 'PFNZeroSize')
    if lostFiles:
        self.__reportProblematicReplicas(lostFiles, storageElement, 'PFNLost')
    if unavailableFiles:
        self.__reportProblematicReplicas(unavailableFiles, storageElement, 'PFNUnavailable')
    gLogger.info('Obtained at total of %s files for directories at %s' % (len(allFiles), storageElement))
    return S_OK(allFiles)