def getSiteCEMapping():
    """ Return a dictionary of all sites and their CEs as a list,
        e.g. {'LCG.CERN.ch': ['ce101.cern.ch', ...]}

        Sites whose CE lookup or full-name resolution fails are skipped
        (best effort).

        :return: S_OK( dict ) mapping each site full name to its CE list |
                 S_ERROR if the eligible sites cannot be retrieved
    """
    siteCEMapping = {}
    resourceHelper = Resources()
    result = resourceHelper.getEligibleSites()
    if not result['OK']:
        return result
    sites = result['Value']
    for site in sites:
        # Computing resources (CEs) declared for this site
        result = resourceHelper.getEligibleResources('Computing', {'Site': site})
        if not result['OK']:
            continue
        ceList = result['Value']
        result = getSiteFullNames(site)
        if not result['OK']:
            continue
        # A site may have several full names (e.g. one per grid type);
        # each one points at the same CE list
        for sName in result['Value']:
            siteCEMapping[sName] = ceList
    return S_OK(siteCEMapping)
def getSiteCEMapping():
  """ Build a mapping from each known site (by full name) to its list of CEs,
      e.g. {'LCG.CERN.ch':['ce101.cern.ch',...]}

      Sites that fail either lookup are silently skipped.
  """
  mapping = {}
  helper = Resources()
  sitesRes = helper.getEligibleSites()
  if not sitesRes['OK']:
    return sitesRes
  for siteName in sitesRes['Value']:
    cesRes = helper.getEligibleResources( 'Computing', {'Site': siteName} )
    if not cesRes['OK']:
      continue
    namesRes = getSiteFullNames( siteName )
    if not namesRes['OK']:
      continue
    # every full name of the site shares the same CE list
    for fullName in namesRes['Value']:
      mapping[fullName] = cesRes['Value']
  return S_OK( mapping )
def getCEsForSite( siteName ):
  """ Return the list of Computing Elements configured for a DIRAC site.

      :param siteName: DIRAC site name
      :return: S_OK( list of CE names ) | S_ERROR
  """
  queryRes = Resources().getEligibleResources( 'Computing', {'Site': siteName} )
  if not queryRes['OK']:
    return queryRes
  return S_OK( queryRes['Value'] )
def getCEsForSite(siteName):
    """ Given a DIRAC site name, return the list of its Computing Elements.

        :param siteName: DIRAC site name
        :return: S_OK( list of CE names ) | S_ERROR
    """
    helper = Resources()
    lookup = helper.getEligibleResources('Computing', {'Site': siteName})
    if not lookup['OK']:
        return lookup
    ces = lookup['Value']
    return S_OK(ces)
def __checkSEs( self, seList ):
  # Validate that every StorageElement name in seList is a known (eligible)
  # Storage resource in the CS; logs an error per unknown SE.
  # :param seList: list of StorageElement names to validate
  # :return: S_OK() if all known, S_ERROR with a count of unknown SEs,
  #          or the error report if the Storage resources cannot be fetched
  resources = Resources()
  res = resources.getEligibleResources( 'Storage' )
  if not res['OK']:
    return self._errorReport( res, 'Failed to get possible StorageElements' )
  missing = []
  for se in seList:
    # NOTE(review): `not se in` is equivalent to the idiomatic `se not in`
    if not se in res['Value']:
      gLogger.error( "StorageElement %s is not known" % se )
      missing.append( se )
  if missing:
    return S_ERROR( "%d StorageElements not known" % len( missing ) )
  return S_OK()
def getCESiteMapping():
    """ Return a dictionary of all CEs and their associated site,
        e.g. {'ce101.cern.ch': 'LCG.CERN.ch', ...}

        CEs whose site lookup fails are skipped (best effort).

        :return: S_OK( dict ) | S_ERROR if the CE list cannot be retrieved
    """
    ceSiteMapping = {}
    resourceHelper = Resources()
    result = resourceHelper.getEligibleResources('Computing')
    if not result['OK']:
        return result
    ceList = result['Value']
    for ce in ceList:
        result = getSiteForCE(ce)
        if not result['OK']:
            continue
        site = result['Value']
        ceSiteMapping[ce] = site
    return S_OK(ceSiteMapping)
def getCESiteMapping():
  """ Build a mapping from every known CE to the site hosting it,
      e.g. {'ce101.cern.ch':'LCG.CERN.ch', ...}

      CEs whose site cannot be resolved are skipped.
  """
  mapping = {}
  allCEs = Resources().getEligibleResources( 'Computing' )
  if not allCEs['OK']:
    return allCEs
  for ceName in allCEs['Value']:
    siteRes = getSiteForCE( ceName )
    if not siteRes['OK']:
      continue
    mapping[ceName] = siteRes['Value']
  return S_OK( mapping )
def getSESiteMapping(gridName=''):
    """ Return a dictionary of all SEs and their associated site(s),
        e.g. {'CERN-RAW': 'LCG.CERN.ch', 'CERN-RDST': 'LCG.CERN.ch', ...}

        Although normally one site exists for a given SE, it is possible over
        all Grid types to have multiple entries.

        :param gridName: accepted for backward compatibility only — it is NOT
                         used by this implementation (no grid restriction is
                         applied)
        :return: S_OK( dict ) | S_ERROR if the SE list cannot be retrieved
    """
    seSiteMapping = {}
    resourceHelper = Resources()
    result = resourceHelper.getEligibleResources('Storage')
    if not result['OK']:
        return result
    seList = result['Value']
    for se in seList:
        result = getSitesForSE(se)
        if not result['OK']:
            # best effort: skip SEs whose site lookup fails
            continue
        site = result['Value']
        seSiteMapping[se] = site
    return S_OK(seSiteMapping)
def getSESiteMapping( gridName = '' ):
  """ Build a mapping from each SE to its associated site(s),
      e.g. {'CERN-RAW':'LCG.CERN.ch', ...}

      A given SE normally maps to one site, but multiple entries are possible
      across Grid types. The gridName argument is accepted but not consulted
      by this implementation.
  """
  mapping = {}
  allSEs = Resources().getEligibleResources( 'Storage' )
  if not allSEs['OK']:
    return allSEs
  for seName in allSEs['Value']:
    sitesRes = getSitesForSE( seName )
    if not sitesRes['OK']:
      continue
    mapping[seName] = sitesRes['Value']
  return S_OK( mapping )
def getSiteSEMapping():
    """ Return a dictionary of all sites and their local SEs as a list,
        e.g. {'LCG.CERN.ch': ['CERN-RAW', 'CERN-RDST', ...]}

        The mapping is built from the eligible Storage resources declared per
        site, then completed with the Operations SiteToLocalSEMapping section.

        :return: S_OK( dict ) | S_ERROR if the eligible sites cannot be
                 retrieved
    """
    siteSEMapping = {}
    resourceHelper = Resources()
    result = resourceHelper.getEligibleSites()
    if not result['OK']:
        return result
    sites = result['Value']
    for site in sites:
        result = resourceHelper.getEligibleResources('Storage', {'Site': site})
        if not result['OK']:
            continue
        seList = result['Value']
        result = getSiteFullNames(site)
        if not result['OK']:
            continue
        # every full name of the site shares the same SE list
        for sName in result['Value']:
            siteSEMapping[sName] = seList

    # Add Sites from the SiteToLocalSEMapping in the CS; merge without
    # duplicating SEs already found via the Resources section
    opsHelper = Operations()
    result = opsHelper.getSiteMapping('Storage', 'LocalSE')
    if result['OK']:
        mapping = result['Value']
        for site in mapping:
            if site not in siteSEMapping:
                siteSEMapping[site] = mapping[site]
            else:
                for se in mapping[site]:
                    if se not in siteSEMapping[site]:
                        siteSEMapping[site].append(se)

    return S_OK(siteSEMapping)
def getSiteSEMapping():
  """ Build a mapping from each site (by full name) to its local SEs,
      e.g. {'LCG.CERN.ch':['CERN-RAW','CERN-RDST',...]}

      The result combines the per-site Storage resources with the
      Operations SiteToLocalSEMapping section.
  """
  mapping = {}
  helper = Resources()
  sitesRes = helper.getEligibleSites()
  if not sitesRes['OK']:
    return sitesRes
  for siteName in sitesRes['Value']:
    sesRes = helper.getEligibleResources( 'Storage', {'Site': siteName} )
    if not sesRes['OK']:
      continue
    namesRes = getSiteFullNames( siteName )
    if not namesRes['OK']:
      continue
    for fullName in namesRes['Value']:
      mapping[fullName] = sesRes['Value']

  # Merge in the SiteToLocalSEMapping declared in the Operations CS section
  csMappingRes = Operations().getSiteMapping( 'Storage', 'LocalSE' )
  if csMappingRes['OK']:
    for siteName, seNames in csMappingRes['Value'].items():
      if siteName not in mapping:
        mapping[siteName] = seNames
      else:
        for seName in seNames:
          if seName not in mapping[siteName]:
            mapping[siteName].append( seName )

  return S_OK( mapping )
class Synchronizer( object ):
  '''
    Every time there is a successful write on the CS, Synchronizer().sync() is
    executed. It updates the database with the values on the CS.
  '''

  def __init__( self ):
    """ Constructor.

        examples:
          >>> s = Synchronizer()
    """
    self.log        = gLogger.getSubLogger( self.__class__.__name__ )
    self.operations = Operations()
    self.resources  = Resources()
    self.rStatus    = ResourceStatusClient.ResourceStatusClient()
    self.rssConfig  = RssConfiguration()
    self.diracAdmin = DiracAdmin()

  def sync( self, _eventName, _params ):
    ''' Main synchronizer method. It synchronizes the three types of elements:
        Sites, Resources and Nodes. Each _syncX method returns a dictionary
        with the additions and deletions.

        examples:
          >>> s.sync( None, None )
              S_OK()

        :Parameters:
          **_eventName** - any
            this parameter is ignored, but needed by caller function.
          **_params** - any
            this parameter is ignored, but needed by caller function.

        :return: S_OK
    '''
    defSyncResult = { 'added' : [], 'deleted' : [] }

    # Sites: on failure, log and fall back to the empty default result
    syncSites = self._syncSites()
    if not syncSites[ 'OK' ]:
      self.log.error( syncSites[ 'Message' ] )
    syncSites = ( syncSites[ 'OK' ] and syncSites[ 'Value' ] ) or defSyncResult

    # Resources
    syncResources = self._syncResources()
    if not syncResources[ 'OK' ]:
      self.log.error( syncResources[ 'Message' ] )
    syncResources = ( syncResources[ 'OK' ] and syncResources[ 'Value' ] ) or defSyncResult

    # Nodes
    syncNodes = self._syncNodes()
    if not syncNodes[ 'OK' ]:
      self.log.error( syncNodes[ 'Message' ] )
    syncNodes = ( syncNodes[ 'OK' ] and syncNodes[ 'Value' ] ) or defSyncResult

    # Notify via email to :
    self.notify( syncSites, syncResources, syncNodes )

    return S_OK()

  def notify( self, syncSites, syncResources, syncNodes ):
    """ Method sending email notification with the result of the
        synchronization. Email is sent to Operations( EMail/Production )
        email address.

        examples:
          >>> s.notify( {}, {}, {} )
          >>> s.notify( { 'Site' : { 'added' : [], 'deleted' : [ 'RubbishSite' ] } }, {}, {} )

        :Parameters:
          **syncSites** - dict() ( keys: added, deleted )
            dictionary with the sites added and deleted from the DB
          **syncResources** - dict() ( keys: added, deleted )
            dictionary with the resources added and deleted from the DB
          **syncNodes** - dict() ( keys: added, deleted )
            dictionary with the nodes added and deleted from the DB

        :return: S_OK
    """
    # Human readable summary
    msgBody = self.getBody( syncSites, syncResources, syncNodes )
    self.log.info( msgBody )

    # Email addresses
    toAddress   = self.operations.getValue( 'EMail/Production', '' )
    fromAddress = self.rssConfig.getConfigFromAddress( '' )

    # Only send if both addresses are configured and there is something to say
    if toAddress and fromAddress and msgBody:
      # Subject of the email
      setup = gConfig.getValue( 'DIRAC/Setup' )
      subject = '[RSS](%s) CS Synchronization' % setup
      self.diracAdmin.sendMail( toAddress, subject, msgBody, fromAddress = fromAddress )

  def getBody( self, syncSites, syncResources, syncNodes ):
    """ Method that given the outputs of the three synchronization methods
        builds a human readable string. Returns '' if nothing changed.

        examples:
          >>> s.getBody( {}, {}, {} )
              ''

        :Parameters:
          **syncSites** - dict() ( keys: added, deleted )
            dictionary with the sites added and deleted from the DB
          **syncResources** - dict() ( keys: added, deleted )
            dictionary with the resources added and deleted from the DB
          **syncNodes** - dict() ( keys: added, deleted )
            dictionary with the nodes added and deleted from the DB

        :return: str
    """
    syncMsg = ''

    for element, syncResult in [ ( 'SITES', syncSites ), ( 'RESOURCES', syncResources ),
                                 ( 'NODES', syncNodes ) ]:
      elementsMsg = ''
      for elementType, elements in syncResult.items():
        elementMsg = ''
        if elements[ 'added' ]:
          elementMsg += '\n %s added: %d \n' % ( elementType, len( elements[ 'added' ] ) )
          elementMsg += ' ' + '\n '.join( elements[ 'added' ] )
        if elements[ 'deleted' ]:
          elementMsg += '\n %s deleted: %d \n' % ( elementType, len( elements[ 'deleted' ] ) )
          elementMsg += ' ' + '\n '.join( elements[ 'deleted' ] )

        if elementMsg:
          elementsMsg += '\n\n%s:\n' % elementType
          elementsMsg += elementMsg

      if elementsMsg:
        syncMsg += '\n\n%s:' % element + elementsMsg

    return syncMsg

  #.............................................................................
  # Sync methods: Site, Resource & Node

  def _syncSites( self ):
    """ Method that synchronizes sites ( using their canonical name: CERN.ch )
        with elementType = 'Site'. It gets from the CS the eligible site names
        and then synchronizes them with the DB. If not on the DB, they are
        added. If in the DB but not on the CS, they are deleted.

        examples:
          >> s._syncSites()
             S_OK( { 'Site' : { 'added' : [], 'deleted' : [ 'RubbishSite' ] } } )

        :return: S_OK( { 'Site' : { 'added' : [], 'deleted' : [] }} ) | S_ERROR
    """
    # Get site names from the CS
    foundSites = self.resources.getEligibleSites()
    if not foundSites[ 'OK' ]:
      return foundSites

    sites = {}

    # Synchronize with the DB
    resSync = self.__dbSync( 'Site', 'Site', foundSites[ 'Value' ] )
    if not resSync[ 'OK' ]:
      self.log.error( 'Error synchronizing Sites' )
      self.log.error( resSync[ 'Message' ] )
    else:
      sites = resSync[ 'Value' ]

    return S_OK( { 'Site' : sites } )

  def _syncResources( self ):
    """ Method that synchronizes resources as defined on RESOURCE_NODE_MAPPING
        dictionary keys. It makes one sync round per key ( elementType ). Gets
        from the CS the eligible Resource/<elementType> names and then
        synchronizes them with the DB. If not on the DB, they are added. If in
        the DB but not on the CS, they are deleted.

        examples:
          >>> s._syncResources()
              S_OK( { 'Computing' : { 'added' : [ 'newCE01', 'newCE02' ], 'deleted' : [] },
                      'Storage'   : { 'added' : [], 'deleted' : [] }, ... } )

        :return: S_OK( { 'RESOURCE_NODE_MAPPINGKey1' : { 'added' : [], 'deleted' : [] }, ...} )
    """
    resources = {}

    # Iterate over the different elementTypes for Resource ( Computing, Storage... )
    for elementType in RESOURCE_NODE_MAPPING.keys():

      # Get Resource / <elementType> names from CS
      foundResources = self.resources.getEligibleResources( elementType )
      if not foundResources[ 'OK' ]:
        self.log.error( foundResources[ 'Message' ] )
        continue

      # Translate CS result into a list
      foundResources = foundResources[ 'Value' ]

      # Synchronize with the DB
      resSync = self.__dbSync( 'Resource', elementType, foundResources )
      if not resSync[ 'OK' ]:
        self.log.error( 'Error synchronizing %s %s' % ( 'Resource', elementType ) )
        self.log.error( resSync[ 'Message' ] )
      else:
        resources[ elementType ] = resSync[ 'Value' ]

    return S_OK( resources )

  def _syncNodes( self ):
    """ Method that synchronizes resources as defined on RESOURCE_NODE_MAPPING
        dictionary values. It makes one sync round per key ( elementType ).
        Gets from the CS the eligible Node/<elementType> names and then
        synchronizes them with the DB. If not on the DB, they are added. If in
        the DB but not on the CS, they are deleted.

        examples:
          >>> s._syncNodes()
              S_OK( { 'Queue' : { 'added' : [], 'deleted' : [] }, ... } )

        :return: S_OK( { 'RESOURCE_NODE_MAPPINGValue1' : { 'added' : [], 'deleted' : [] }, ...} )
    """
    nodes = {}

    # Iterate over the different elementTypes for Node ( Queue, AccessProtocol... )
    for elementType in RESOURCE_NODE_MAPPING.values():

      # Get Node / <elementType> names from CS
      foundNodes = self.resources.getEligibleNodes( elementType )
      if not foundNodes[ 'OK' ]:
        # BUGFIX: was foundNodes[ 'Value' ], which does not exist on a
        # failed result object -- the error message is under 'Message'
        self.log.error( foundNodes[ 'Message' ] )
        continue

      # Translate CS result into a list : maps NodeName to SiteName<>NodeName
      # to avoid duplicates
      # Looong list comprehension, sorry !
      foundNodes = [ '%s<>%s' % ( key, item ) for key, subDict in foundNodes[ 'Value' ].items()
                     for subList in subDict.values() for item in subList ]

      # Synchronize with the DB
      resSync = self.__dbSync( 'Node', elementType, foundNodes )
      if not resSync[ 'OK' ]:
        self.log.error( 'Error synchronizing %s %s' % ( 'Node', elementType ) )
        self.log.error( resSync[ 'Message' ] )
      else:
        nodes[ elementType ] = resSync[ 'Value' ]

    return S_OK( nodes )

  #.............................................................................
  # DB sync actions

  def __dbSync( self, elementFamily, elementType, elementsCS ):
    """ Method synchronizing CS and DB. Compares <elementsCS> with <elementsDB>
        given the elementFamily and elementType ( e.g. Resource / Computing ).
        If there are missing elements in the DB, they are inserted. If there
        are missing elements in the CS, they are deleted from the DB. Note that
        the logs from the RSS DB are kept ! ( just in case ).

        :Parameters:
          **elementFamily** - str
            any of the valid element families : Site, Resource, Node
          **elementType** - str
            any of the valid element types for <elementFamily>
          **elementsCS** - list
            list with the elements for <elementFamily>/<elementType> found in the CS

        :return: S_OK( { 'added' : [], 'deleted' : [] } ) | S_ERROR
    """
    # deleted, added default response
    syncRes = {
                'deleted' : [],
                'added'   : [],
              }

    # Gets <elementFamily>/<elementType> elements from DB
    elementsDB = self.rStatus.selectStatusElement( elementFamily, 'Status',
                                                   elementType = elementType,
                                                   meta = { 'columns' : [ 'name' ] } )
    if not elementsDB[ 'OK' ]:
      return elementsDB
    elementsDB = [ elementDB[ 0 ] for elementDB in elementsDB[ 'Value' ] ]

    # Elements in DB but not in CS -> to be deleted
    toBeDeleted = list( set( elementsDB ).difference( set( elementsCS ) ) )
    if toBeDeleted:
      resDelete = self.__dbDelete( elementFamily, elementType, toBeDeleted )
      if not resDelete[ 'OK' ]:
        return resDelete
      else:
        syncRes[ 'deleted' ] = toBeDeleted

    # Elements in CS but not in DB -> to be added
    toBeAdded = list( set( elementsCS ).difference( set( elementsDB ) ) )
    if toBeAdded:
      resInsert = self.__dbInsert( elementFamily, elementType, toBeAdded )
      if not resInsert[ 'OK' ]:
        return resInsert
      else:
        syncRes[ 'added' ] = toBeAdded

    return S_OK( syncRes )

  def __dbDelete( self, elementFamily, elementType, toBeDeleted ):
    """ Method that given the elementFamily and elementType, deletes all
        entries in the History and Status tables for the given elements in
        toBeDeleted ( all their status Types ).

        :Parameters:
          **elementFamily** - str
            any of the valid element families : Site, Resource, Node
          **elementType** - str
            any of the valid element types for <elementFamily>, just used for
            logging purposes.
          **toBeDeleted** - list
            list with the elements to be deleted

        :return: S_OK | S_ERROR
    """
    self.log.info( 'Deleting %s %s:' % ( elementFamily, elementType ) )
    self.log.info( toBeDeleted )

    return self.rStatus._extermineStatusElement( elementFamily, toBeDeleted )

  def __dbInsert( self, elementFamily, elementType, toBeAdded ):
    """ Method that given the elementFamily and elementType, adds all elements
        in toBeAdded with their respective statusTypes, obtained from the CS.
        They are synchronized with status 'Unknown' and reason 'Synchronized'.

        :Parameters:
          **elementFamily** - str
            any of the valid element families : Site, Resource, Node
          **elementType** - str
            any of the valid element types for <elementFamily>
          **toBeAdded** - list
            list with the elements to be added

        :return: S_OK | S_ERROR
    """
    self.log.info( 'Adding %s %s:' % ( elementFamily, elementType ) )
    self.log.info( toBeAdded )

    statusTypes = self.rssConfig.getConfigStatusType( elementType )

    for element in toBeAdded:
      # one row per ( element, statusType ); stop on first DB failure
      for statusType in statusTypes:
        resInsert = self.rStatus.addIfNotThereStatusElement( elementFamily, 'Status',
                                                             name = element,
                                                             statusType = statusType,
                                                             status = 'Unknown',
                                                             elementType = elementType,
                                                             reason = 'Synchronized')
        if not resInsert[ 'OK' ]:
          return resInsert

    return S_OK()
# # def _syncUsers( self ): # ''' # Sync Users: compares CS with DB and does the necessary modifications. # ''' # # gLogger.verbose( '-- Synchronizing users --') # # usersCS = CSHelpers.getRegistryUsers() # if not usersCS[ 'OK' ]: # return usersCS # usersCS = usersCS[ 'Value' ] # # gLogger.verbose( '%s users found in CS' % len( usersCS ) ) # # usersDB = self.rManagement.selectUserRegistryCache( meta = { 'columns' : [ 'login' ] } ) # if not usersDB[ 'OK' ]: # return usersDB # usersDB = [ userDB[0] for userDB in usersDB[ 'Value' ] ] # # # Users that are in DB but not in CS # toBeDeleted = list( set( usersDB ).difference( set( usersCS.keys() ) ) ) # gLogger.verbose( '%s users to be deleted' % len( toBeDeleted ) ) # # # Delete users # # FIXME: probably it is not needed since there is a DatabaseCleanerAgent # for userLogin in toBeDeleted: # # deleteQuery = self.rManagement.deleteUserRegistryCache( login = userLogin ) # # gLogger.verbose( '... %s' % userLogin ) # if not deleteQuery[ 'OK' ]: # return deleteQuery # # # AddOrModify Users # for userLogin, userDict in usersCS.items(): # # _name = userDict[ 'DN' ].split( '=' )[ -1 ] # _email = userDict[ 'Email' ] # # query = self.rManagement.addOrModifyUserRegistryCache( userLogin, _name, _email ) # gLogger.verbose( '-> %s' % userLogin ) # if not query[ 'OK' ]: # return query # # return S_OK() ################################################################################ #EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF
def _resolveCECandidates(self, taskQueueDict):
    """ Return a list of CE host names eligible for this TaskQueue.

        Resolution order:
          1. If the TaskQueue explicitly lists GridCEs, return those as-is.
        2. Otherwise start from the usable-site mask ('ComputingAccess'),
           remove BannedSites, intersect with an explicit Sites list if
           present, then collect the gLite LCG/CREAM CE hosts of the
           surviving sites.

        Returns [] on any lookup failure or when no candidate remains.

        NOTE(review): this body was reconstructed from collapsed source;
        the nesting of the final two log statements (inside vs outside the
        ``if not ceMask`` branch) should be confirmed against VCS history.
    """
    # assume user knows what they're doing and avoid site mask e.g. sam jobs
    if 'GridCEs' in taskQueueDict and taskQueueDict['GridCEs']:
        self.log.info(
            'CEs requested by TaskQueue %s:' % taskQueueDict['TaskQueueID'],
            ', '.join(taskQueueDict['GridCEs']))
        return taskQueueDict['GridCEs']

    # Get the mask
    siteStatus = SiteStatus()
    ret = siteStatus.getUsableSites('ComputingAccess')
    if not ret['OK']:
        self.log.error('Can not retrieve site Mask from DB:', ret['Message'])
        return []

    usableSites = ret['Value']
    if not usableSites:
        self.log.error('Site mask is empty')
        return []

    self.log.verbose('Site Mask: %s' % ', '.join(usableSites))

    # remove banned sites from siteMask
    if 'BannedSites' in taskQueueDict:
        for site in taskQueueDict['BannedSites']:
            if site in usableSites:
                usableSites.remove(site)
                self.log.verbose('Removing banned site %s from site Mask' % site)

    # remove from the mask if a Site is given
    siteMask = [
        site for site in usableSites
        if 'Sites' not in taskQueueDict or site in taskQueueDict['Sites']
    ]

    if not siteMask:
        # pilot can not be submitted
        self.log.info('No Valid Site Candidate in Mask for TaskQueue %s' %
                      taskQueueDict['TaskQueueID'])
        return []

    self.log.info(
        'Site Candidates for TaskQueue %s:' % taskQueueDict['TaskQueueID'],
        ', '.join(siteMask))

    # Get CE's associates to the given site Names
    ceMask = []

    resources = Resources(vo=self.virtualOrganization)
    result = resources.getEligibleResources(
        'Computing', {
            'Site': siteMask,
            'SubmissionMode': 'gLite',
            'CEType': ['LCG', 'CREAM']
        })
    if not result['OK']:
        self.log.error("Failed to get eligible ce's:", result['Message'])
        return []

    ces = result['Value']
    for ce in ces:
        # 'unknown' is the sentinel default; skip CEs with no Host defined
        ceHost = resources.getComputingElementValue(ce, 'Host', 'unknown')
        if ceHost != 'unknown':
            ceMask.append(ceHost)

    if not ceMask:
        self.log.info(
            'No CE Candidate found for TaskQueue %s:' % taskQueueDict['TaskQueueID'],
            ', '.join(siteMask))

    self.log.verbose(
        'CE Candidates for TaskQueue %s:' % taskQueueDict['TaskQueueID'],
        ', '.join(ceMask))

    return ceMask
class ResourcesTestCase( unittest.TestCase ):
  # Integration tests for the Resources() configuration helper.
  # NOTE: Python 2 syntax (bare ``print`` statements) -- these tests will not
  # run under Python 3 without conversion.

  def setUp( self ):
    # Run against the local configuration only: disable the remote CS,
    # then parse the command line as DIRAC scripts require.
    Script.disableCS( )
    Script.parseCommandLine()
    self.resources = Resources()

  def test_getSites( self ):
    # getSites and getEligibleSites should agree for the same name filter
    print
    result = self.resources.getSites( {'Name':['CERN','CPPM','PNPI']} )
    self.assertTrue( result['OK'], 'getSites' )
    sites = result['Value']
    print sites
    result = self.resources.getEligibleSites( {'Name':['CERN','CPPM','PNPI']} )
    self.assertTrue( result['OK'], 'getEligibleSites' )
    eligibleSites = result['Value']
    self.assertEqual(sites, eligibleSites, 'sites and eligible sites are the same')

  def test_getResources( self ):
    # Storage resources lookup for a single site
    print
    result = self.resources.getResources( 'CERN', 'Storage' )
    self.assertTrue( result['OK'], 'getResources' )
    ses = result['Value']
    print ses

  def test_getNodes( self ):
    # Queue nodes of a given CE
    print
    result = self.resources.getNodes( 'CERN::ce130', 'Queue' )
    self.assertTrue( result['OK'], 'getNodes' )
    nodes = result['Value']
    print nodes

  def test_getEligibleResources( self ):
    # Computing resources filtered by site list and submission mode
    print
    result = self.resources.getEligibleResources( 'Computing', { 'Site':['CERN','CPPM','Zurich'],'SubmissionMode':'Direct' } )
    self.assertTrue( result['OK'], 'getEligibleResources' )
    ces = result['Value']
    print ces

  def test_getEligibleNodes( self ):
    # AccessProtocol nodes filtered by site and protocol
    print
    result = self.resources.getEligibleNodes( 'AccessProtocol',
                                              { 'Site':['CERN','CPPM','Zurich'] },
                                              { 'Protocol':'srm' } )
    self.assertTrue( result['OK'], 'getEligibleNodes' )
    aps = result['Value']
    print aps

  def test_getEligibleComputingElements( self ):
    # gLite LCG/CREAM CEs for a site mask, resolving each CE's Host value
    siteMask = ['LCG.CERN.ch','LCG.CPPM.fr']
    result = self.resources.getEligibleResources( 'Computing', {'Site':siteMask,
                                                                'SubmissionMode':'gLite',
                                                                'CEType':['LCG','CREAM']} )
    self.assertTrue( result['OK'], 'getEligibleResources' )
    print
    for ce in result['Value']:
      # 'unknown' is the default returned when no Host is configured
      ceHost = self.resources.getComputingElementValue( ce, 'Host', 'unknown' )
      print ce, ceHost
class Synchronizer(object): ''' Every time there is a successful write on the CS, Synchronizer().sync() is executed. It updates the database with the values on the CS. ''' def __init__(self): """ Constructor. examples: >>> s = Synchronizer() """ self.log = gLogger.getSubLogger(self.__class__.__name__) self.operations = Operations() self.resources = Resources() self.rStatus = ResourceStatusClient.ResourceStatusClient() self.rssConfig = RssConfiguration() self.diracAdmin = DiracAdmin() def sync(self, _eventName, _params): ''' Main synchronizer method. It synchronizes the three types of elements: Sites, Resources and Nodes. Each _syncX method returns a dictionary with the additions and deletions. examples: >>> s.sync( None, None ) S_OK() :Parameters: **_eventName** - any this parameter is ignored, but needed by caller function. **_params** - any this parameter is ignored, but needed by caller function. :return: S_OK ''' defSyncResult = {'added': [], 'deleted': []} # Sites syncSites = self._syncSites() if not syncSites['OK']: self.log.error(syncSites['Message']) syncSites = (syncSites['OK'] and syncSites['Value']) or defSyncResult # Resources syncResources = self._syncResources() if not syncResources['OK']: self.log.error(syncResources['Message']) syncResources = (syncResources['OK'] and syncResources['Value']) or defSyncResult # Nodes syncNodes = self._syncNodes() if not syncNodes['OK']: self.log.error(syncNodes['Message']) syncNodes = (syncNodes['OK'] and syncNodes['Value']) or defSyncResult # Notify via email to : self.notify(syncSites, syncResources, syncNodes) return S_OK() def notify(self, syncSites, syncResources, syncNodes): """ Method sending email notification with the result of the synchronization. Email is sent to Operations( EMail/Production ) email address. 
examples: >>> s.notify( {}, {}, {} ) >>> s.notify( { 'Site' : { 'added' : [], 'deleted' : [ 'RubbishSite' ] }, {}, {} ) >>> s.notify( { 'Site' : { 'added' : [], 'deleted' : [ 'RubbishSite' ] }, { 'Computing : { 'added' : [ 'newCE01', 'newCE02' ], 'deleted' : [] }}, {} ) :Parameters: **syncSites** - dict() ( keys: added, deleted ) dictionary with the sites added and deleted from the DB **syncResources** - dict() ( keys: added, deleted ) dictionary with the resources added and deleted from the DB **syncNodes** - dict() ( keys: added, deleted ) dictionary with the nodes added and deleted from the DB :return: S_OK """ # Human readable summary msgBody = self.getBody(syncSites, syncResources, syncNodes) self.log.info(msgBody) # Email addresses toAddress = self.operations.getValue('EMail/Production', '') fromAddress = self.rssConfig.getConfigFromAddress('') if toAddress and fromAddress and msgBody: # Subject of the email setup = gConfig.getValue('DIRAC/Setup') subject = '[RSS](%s) CS Synchronization' % setup self.diracAdmin.sendMail(toAddress, subject, msgBody, fromAddress=fromAddress) def getBody(self, syncSites, syncResources, syncNodes): """ Method that given the outputs of the three synchronization methods builds a human readable string. 
examples: >>> s.getBody( {}, {}, {} ) '' >>> s.getBody( { 'Site' : { 'added' : [], 'deleted' : [ 'RubbishSite' ] }, {}, {} ) ''' SITES: Site: deleted:1 RubbishSite ''' >>> s.getBody( { 'Site' : { 'added' : [], 'deleted' : [ 'RubbishSite' ] }, { 'Computing : { 'added' : [ 'newCE01', 'newCE02' ], 'deleted' : [] }}, {} ) ''' SITES: Site: deleted:1 RubbishSite RESOURCES: Computing: added:2 newCE01 newCE02 ''' :Parameters: **syncSites** - dict() ( keys: added, deleted ) dictionary with the sites added and deleted from the DB **syncResources** - dict() ( keys: added, deleted ) dictionary with the resources added and deleted from the DB **syncNodes** - dict() ( keys: added, deleted ) dictionary with the nodes added and deleted from the DB :return: str """ syncMsg = '' for element, syncResult in [('SITES', syncSites), ('RESOURCES', syncResources), ('NODES', syncNodes)]: elementsMsg = '' for elementType, elements in syncResult.items(): elementMsg = '' if elements['added']: elementMsg += '\n %s added: %d \n' % ( elementType, len(elements['added'])) elementMsg += ' ' + '\n '.join(elements['added']) if elements['deleted']: elementMsg += '\n %s deleted: %d \n' % ( elementType, len(elements['deleted'])) elementMsg += ' ' + '\n '.join(elements['deleted']) if elementMsg: elementsMsg += '\n\n%s:\n' % elementType elementsMsg += elementMsg if elementsMsg: syncMsg += '\n\n%s:' % element + elementsMsg return syncMsg #............................................................................. # Sync methods: Site, Resource & Node def _syncSites(self): """ Method that synchronizes sites ( using their canonical name: CERN.ch ) with elementType = 'Site'. It gets from the CS the eligible site names and then synchronizes them with the DB. If not on the DB, they are added. If in the DB but not on the CS, they are deleted. 
examples: >> s._syncSites() S_OK( { 'Site' : { 'added' : [], 'deleted' : [ 'RubbishSite' ] } } ) :return: S_OK( { 'Site' : { 'added' : [], 'deleted' : [] }} ) | S_ERROR """ # Get site names from the CS foundSites = self.resources.getEligibleSites() if not foundSites['OK']: return foundSites sites = {} # Synchronize with the DB resSync = self.__dbSync('Site', 'Site', foundSites['Value']) if not resSync['OK']: self.log.error('Error synchronizing Sites') self.log.error(resSync['Message']) else: sites = resSync['Value'] return S_OK({'Site': sites}) def _syncResources(self): """ Method that synchronizes resources as defined on RESOURCE_NODE_MAPPING dictionary keys. It makes one sync round per key ( elementType ). Gets from the CS the eligible Resource/<elementType> names and then synchronizes them with the DB. If not on the DB, they are added. If in the DB but not on the CS, they are deleted. examples: >>> s._syncResources() S_OK( { 'Computing' : { 'added' : [ 'newCE01', 'newCE02' ], 'deleted' : [] }, 'Storage' : { 'added' : [], 'deleted' : [] }, ... } ) :return: S_OK( { 'RESOURCE_NODE_MAPPINGKey1' : { 'added' : [], 'deleted' : [] }, ...} ) """ resources = {} # Iterate over the different elementTypes for Resource ( Computing, Storage... ) for elementType in RESOURCE_NODE_MAPPING.keys(): # Get Resource / <elementType> names from CS foundResources = self.resources.getEligibleResources(elementType) if not foundResources['OK']: self.log.error(foundResources['Message']) continue # Translate CS result into a list foundResources = foundResources['Value'] # Synchronize with the DB resSync = self.__dbSync('Resource', elementType, foundResources) if not resSync['OK']: self.log.error('Error synchronizing %s %s' % ('Resource', elementType)) self.log.error(resSync['Message']) else: resources[elementType] = resSync['Value'] return S_OK(resources) def _syncNodes(self): """ Method that synchronizes resources as defined on RESOURCE_NODE_MAPPING dictionary values. 
It makes one sync round per key ( elementType ). Gets from the CS the eligible Node/<elementType> names and then synchronizes them with the DB. If not on the DB, they are added. If in the DB but not on the CS, they are deleted. examples: >>> s._syncNodes() S_OK( { 'Queue' : { 'added' : [], 'deleted' : [] }, ... } ) :return: S_OK( { 'RESOURCE_NODE_MAPPINGValue1' : { 'added' : [], 'deleted' : [] }, ...} ) """ nodes = {} # Iterate over the different elementTypes for Node ( Queue, AccessProtocol... ) for elementType in RESOURCE_NODE_MAPPING.values(): # Get Node / <elementType> names from CS foundNodes = self.resources.getEligibleNodes(elementType) if not foundNodes['OK']: self.log.error(foundNodes['Value']) continue # Translate CS result into a list : maps NodeName to SiteName<>NodeName to # avoid duplicates # Looong list comprehension, sorry ! foundNodes = [ '%s<>%s' % (key, item) for key, subDict in foundNodes['Value'].items() for subList in subDict.values() for item in subList ] # Synchronize with the DB resSync = self.__dbSync('Node', elementType, foundNodes) if not resSync['OK']: self.log.error('Error synchronizing %s %s' % ('Node', elementType)) self.log.error(resSync['Message']) else: nodes[elementType] = resSync['Value'] return S_OK(nodes) #............................................................................. # DB sync actions def __dbSync(self, elementFamily, elementType, elementsCS): """ Method synchronizing CS and DB. Compares <elementsCS> with <elementsDB> given the elementFamily and elementType ( e.g. Resource / Computing ). If there are missing elements in the DB, are inserted. If are missing elements in the CS, are deleted from the DB. Note that the logs from the RSS DB are kept ! ( just in case ). 
:Parameters:
      **elementFamily** - str
        any of the valid element families : Site, Resource, Node
      **elementType** - str
        any of the valid element types for <elementFamily>
      **elementsCS** - list
        list with the elements for <elementFamily>/<elementType> found in the CS

    :return: S_OK( { 'added' : [], 'deleted' : [] } ) | S_ERROR
    """

    # deleted, added default response
    syncRes = {
        'deleted': [],
        'added': [],
    }

    # Gets <elementFamily>/<elementType> elements from DB ( names only )
    elementsDB = self.rStatus.selectStatusElement(
        elementFamily, 'Status', elementType=elementType,
        meta={'columns': ['name']})
    if not elementsDB['OK']:
      return elementsDB
    # Each DB row is a one-column tuple; flatten to a list of names.
    elementsDB = [elementDB[0] for elementDB in elementsDB['Value']]

    # Elements in DB but not in CS -> to be deleted
    toBeDeleted = list(set(elementsDB).difference(set(elementsCS)))
    if toBeDeleted:
      resDelete = self.__dbDelete(elementFamily, elementType, toBeDeleted)
      if not resDelete['OK']:
        return resDelete
      else:
        syncRes['deleted'] = toBeDeleted

    # Elements in CS but not in DB -> to be added
    toBeAdded = list(set(elementsCS).difference(set(elementsDB)))
    if toBeAdded:
      resInsert = self.__dbInsert(elementFamily, elementType, toBeAdded)
      if not resInsert['OK']:
        return resInsert
      else:
        syncRes['added'] = toBeAdded

    return S_OK(syncRes)

  def __dbDelete(self, elementFamily, elementType, toBeDeleted):
    """
    Method that given the elementFamily and elementType, deletes all entries
    in the History and Status tables for the given elements in toBeDeleted
    ( all their status Types ).

    :Parameters:
      **elementFamily** - str
        any of the valid element families : Site, Resource, Node
      **elementType** - str
        any of the valid element types for <elementFamily>, just used for
        logging purposes.
**toBeDeleted** - list
        list with the elements to be deleted

    :return: S_OK | S_ERROR
    """

    self.log.info('Deleting %s %s:' % (elementFamily, elementType))
    self.log.info(toBeDeleted)

    # NOTE(review): relies on the protected RSS DB helper that wipes Status
    # and History entries for every statusType of the given elements.
    return self.rStatus._extermineStatusElement(elementFamily, toBeDeleted)

  def __dbInsert(self, elementFamily, elementType, toBeAdded):
    """
    Method that given the elementFamily and elementType, adds all elements in
    toBeAdded with their respective statusTypes, obtained from the CS. They
    are synchronized with status 'Unknown' and reason 'Synchronized'.

    :Parameters:
      **elementFamily** - str
        any of the valid element families : Site, Resource, Node
      **elementType** - str
        any of the valid element types for <elementFamily>
      **toBeAdded** - list
        list with the elements to be added

    :return: S_OK | S_ERROR
    """

    self.log.info('Adding %s %s:' % (elementFamily, elementType))
    self.log.info(toBeAdded)

    # One Status row per ( element, statusType ) pair; the status types for
    # this elementType come from the RSS configuration.
    statusTypes = self.rssConfig.getConfigStatusType(elementType)

    for element in toBeAdded:
      for statusType in statusTypes:
        resInsert = self.rStatus.addIfNotThereStatusElement(
            elementFamily, 'Status', name=element, statusType=statusType,
            status='Unknown', elementType=elementType, reason='Synchronized')
        if not resInsert['OK']:
          return resInsert

    return S_OK()

  #...............................................................................
  #
  #  def _syncUsers( self ):
  #    '''
  #      Sync Users: compares CS with DB and does the necessary modifications.
  #    '''
  #
  #    gLogger.verbose( '-- Synchronizing users --')
  #
  #    usersCS = CSHelpers.getRegistryUsers()
  #    if not usersCS[ 'OK' ]:
  #      return usersCS
  #    usersCS = usersCS[ 'Value' ]
  #
  #    gLogger.verbose( '%s users found in CS' % len( usersCS ) )
  #
  #    usersDB = self.rManagement.selectUserRegistryCache( meta = { 'columns' : [ 'login' ] } )
  #    if not usersDB[ 'OK' ]:
  #      return usersDB
  #    usersDB = [ userDB[0] for userDB in usersDB[ 'Value' ] ]
  #
  #    # Users that are in DB but not in CS
  #    toBeDeleted = list( set( usersDB ).difference( set( usersCS.keys() ) ) )
  #    gLogger.verbose( '%s users to be deleted' % len( toBeDeleted ) )
  #
  #    # Delete users
  #    # FIXME: probably it is not needed since there is a DatabaseCleanerAgent
  #    for userLogin in toBeDeleted:
  #
  #      deleteQuery = self.rManagement.deleteUserRegistryCache( login = userLogin )
  #
  #      gLogger.verbose( '... %s' % userLogin )
  #      if not deleteQuery[ 'OK' ]:
  #        return deleteQuery
  #
  #    # AddOrModify Users
  #    for userLogin, userDict in usersCS.items():
  #
  #      _name  = userDict[ 'DN' ].split( '=' )[ -1 ]
  #      _email = userDict[ 'Email' ]
  #
  #      query = self.rManagement.addOrModifyUserRegistryCache( userLogin, _name, _email )
  #      gLogger.verbose( '-> %s' % userLogin )
  #      if not query[ 'OK' ]:
  #        return query
  #
  #    return S_OK()

################################################################################
#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF
def __lookForCE(self): knownces = self.am_getOption('BannedCEs', []) resources = Resources(self.voName) result = resources.getEligibleResources('Computing', {'CEType': ['LCG', 'CREAM']}) if not result['OK']: return result siteDict = result['Value'] for site in siteDict: knownces += siteDict[site] # result = gConfig.getSections( '/Resources/Sites' ) # if not result['OK']: # return # grids = result['Value'] # # for grid in grids: # # result = gConfig.getSections( '/Resources/Sites/%s' % grid ) # if not result['OK']: # return # sites = result['Value'] # # for site in sites: # opt = gConfig.getOptionsDict( '/Resources/Sites/%s/%s' % ( grid, site ) )['Value'] # ces = List.fromChar( opt.get( 'CE', '' ) ) # knownces += ces response = ldapCEState('', vo=self.voName) if not response['OK']: self.log.error("Error during BDII request", response['Message']) response = self.__checkAlternativeBDIISite(ldapCEState, '', self.voName) return response newces = {} for queue in response['Value']: try: queuename = queue['GlueCEUniqueID'] except: continue cename = queuename.split(":")[0] if not cename in knownces: newces[cename] = None self.log.debug("newce", cename) body = "" possibleNewSites = [] for ce in newces.iterkeys(): response = ldapCluster(ce) if not response['OK']: self.log.warn("Error during BDII request", response['Message']) response = self.__checkAlternativeBDIISite(ldapCluster, ce) continue clusters = response['Value'] if len(clusters) != 1: self.log.warn("Error in cluster length", " CE %s Length %d" % (ce, len(clusters))) if len(clusters) == 0: continue cluster = clusters[0] fkey = cluster.get('GlueForeignKey', []) if type(fkey) == type(''): fkey = [fkey] nameBDII = None for entry in fkey: if entry.count('GlueSiteUniqueID'): nameBDII = entry.split('=')[1] break if not nameBDII: continue cestring = "CE: %s, GOCDB Name: %s" % (ce, nameBDII) self.log.info(cestring) response = ldapCE(ce) if not response['OK']: self.log.warn("Error during BDII request", response['Message']) 
response = self.__checkAlternativeBDIISite(ldapCE, ce) continue ceinfos = response['Value'] if len(ceinfos): ceinfo = ceinfos[0] systemName = ceinfo.get('GlueHostOperatingSystemName', 'Unknown') systemVersion = ceinfo.get('GlueHostOperatingSystemVersion', 'Unknown') systemRelease = ceinfo.get('GlueHostOperatingSystemRelease', 'Unknown') else: systemName = "Unknown" systemVersion = "Unknown" systemRelease = "Unknown" osstring = "SystemName: %s, SystemVersion: %s, SystemRelease: %s" % ( systemName, systemVersion, systemRelease) self.log.info(osstring) response = ldapCEState(ce, vo=self.voName) if not response['OK']: self.log.warn("Error during BDII request", response['Message']) response = self.__checkAlternativeBDIISite( ldapCEState, ce, self.voName) continue newcestring = "\n\n%s\n%s" % (cestring, osstring) usefull = False cestates = response['Value'] for cestate in cestates: queuename = cestate.get('GlueCEUniqueID', 'UnknownName') queuestatus = cestate.get('GlueCEStateStatus', 'UnknownStatus') queuestring = "%s %s" % (queuename, queuestatus) self.log.info(queuestring) newcestring += "\n%s" % queuestring if queuestatus.count('Production'): usefull = True if usefull: body += newcestring possibleNewSites.append( 'dirac-admin-add-site DIRACSiteName %s %s' % (nameBDII, ce)) if body: body = "We are glade to inform You about new CE(s) possibly suitable for %s:\n" % self.voName + body body += "\n\nTo suppress information about CE add its name to BannedCEs list." for possibleNewSite in possibleNewSites: body = "%s\n%s" % (body, possibleNewSite) self.log.info(body) if self.addressTo and self.addressFrom: notification = NotificationClient() result = notification.sendMail(self.addressTo, self.subject, body, self.addressFrom, localAttempt=False) return S_OK()
  def _resolveCECandidates( self, taskQueueDict ):
    """ Return a list of CE hosts able to serve this TaskQueue.

        Order of resolution:
          1. explicit 'GridCEs' in the TaskQueue win outright;
          2. otherwise the usable-site mask is fetched, 'BannedSites' are
             removed and, if 'Sites' is given, the mask is restricted to it;
          3. the surviving sites are mapped to their gLite LCG/CREAM CE hosts.

        Returns [] on any failure or when no candidate remains.
    """
    # assume user knows what they're doing and avoid site mask e.g. sam jobs
    if 'GridCEs' in taskQueueDict and taskQueueDict['GridCEs']:
      self.log.info( 'CEs requested by TaskQueue %s:' % taskQueueDict['TaskQueueID'],
                     ', '.join( taskQueueDict['GridCEs'] ) )
      return taskQueueDict['GridCEs']

    # Get the mask
    siteStatus = SiteStatus()
    ret = siteStatus.getUsableSites( 'ComputingAccess' )
    if not ret['OK']:
      self.log.error( 'Can not retrieve site Mask from DB:', ret['Message'] )
      return []

    usableSites = ret['Value']
    if not usableSites:
      self.log.error( 'Site mask is empty' )
      return []

    self.log.verbose( 'Site Mask: %s' % ', '.join( usableSites ) )

    # remove banned sites from siteMask
    if 'BannedSites' in taskQueueDict:
      for site in taskQueueDict['BannedSites']:
        if site in usableSites:
          usableSites.remove( site )
          self.log.verbose( 'Removing banned site %s from site Mask' % site )

    # remove from the mask if a Site is given
    siteMask = [ site for site in usableSites
                 if 'Sites' not in taskQueueDict or site in taskQueueDict['Sites'] ]

    if not siteMask:
      # pilot can not be submitted
      self.log.info( 'No Valid Site Candidate in Mask for TaskQueue %s' % taskQueueDict['TaskQueueID'] )
      return []

    self.log.info( 'Site Candidates for TaskQueue %s:' % taskQueueDict['TaskQueueID'],
                   ', '.join( siteMask ) )

    # Get CE's associates to the given site Names
    ceMask = []

    resources = Resources( vo = self.virtualOrganization )
    result = resources.getEligibleResources( 'Computing', {'Site':siteMask,
                                                           'SubmissionMode':'gLite',
                                                           'CEType':['LCG','CREAM']} )
    if not result['OK']:
      self.log.error( "Failed to get eligible ce's:", result['Message'] )
      return []

    ces = result['Value']

    # Translate CE names into host names; entries without a Host are dropped.
    for ce in ces:
      ceHost = resources.getComputingElementValue( ce, 'Host', 'unknown' )
      if ceHost != 'unknown':
        ceMask.append( ceHost )

    if not ceMask:
      self.log.info( 'No CE Candidate found for TaskQueue %s:' % taskQueueDict['TaskQueueID'],
                     ', '.join( siteMask ) )

    self.log.verbose( 'CE Candidates for TaskQueue %s:' % taskQueueDict['TaskQueueID'],
                      ', '.join( ceMask ) )

    return ceMask
class ResourcesTestCase(unittest.TestCase): def setUp(self): Script.disableCS() Script.parseCommandLine() self.resources = Resources() def test_getSites(self): print result = self.resources.getSites({'Name': ['CERN', 'CPPM', 'PNPI']}) self.assertTrue(result['OK'], 'getSites') sites = result['Value'] print sites result = self.resources.getEligibleSites( {'Name': ['CERN', 'CPPM', 'PNPI']}) self.assertTrue(result['OK'], 'getEligibleSites') eligibleSites = result['Value'] self.assertEqual(sites, eligibleSites, 'sites and eligible sites are the same') def test_getResources(self): print result = self.resources.getResources('CERN', 'Storage') self.assertTrue(result['OK'], 'getResources') ses = result['Value'] print ses def test_getNodes(self): print result = self.resources.getNodes('CERN::ce130', 'Queue') self.assertTrue(result['OK'], 'getNodes') nodes = result['Value'] print nodes def test_getEligibleResources(self): print result = self.resources.getEligibleResources( 'Computing', { 'Site': ['CERN', 'CPPM', 'Zurich'], 'SubmissionMode': 'Direct' }) self.assertTrue(result['OK'], 'getEligibleResources') ces = result['Value'] print ces def test_getEligibleNodes(self): print result = self.resources.getEligibleNodes( 'AccessProtocol', {'Site': ['CERN', 'CPPM', 'Zurich']}, {'Protocol': 'srm'}) self.assertTrue(result['OK'], 'getEligibleNodes') aps = result['Value'] print aps def test_getEligibleComputingElements(self): siteMask = ['LCG.CERN.ch', 'LCG.CPPM.fr'] result = self.resources.getEligibleResources( 'Computing', { 'Site': siteMask, 'SubmissionMode': 'gLite', 'CEType': ['LCG', 'CREAM'] }) self.assertTrue(result['OK'], 'getEligibleResources') print for ce in result['Value']: ceHost = self.resources.getComputingElementValue( ce, 'Host', 'unknown') print ce, ceHost
class FileCatalog:
  """ Dispatcher that fans file-catalog operations out to one or more
      catalog back-ends.

      Method names listed in ro_methods are routed through r_execute to the
      read catalogs; names in write_methods go through w_execute to the write
      catalogs. Attribute access for any of these names returns the matching
      executor, so fc.addFile( ... ) transparently runs on every configured
      catalog. Check isOK() after construction.
  """

  ro_methods = ['exists', 'isLink', 'readLink', 'isFile', 'getFileMetadata',
                'getReplicas', 'getReplicaStatus', 'getFileSize', 'isDirectory',
                'getDirectoryReplicas', 'listDirectory', 'getDirectoryMetadata',
                'getDirectorySize', 'getDirectoryContents', 'resolveDataset',
                'getPathPermissions', 'getLFNForPFN', 'getUsers', 'getGroups',
                'getFileUserMetadata']

  write_methods = ['createLink', 'removeLink', 'addFile', 'setFileStatus',
                   'addReplica', 'removeReplica', 'removeFile',
                   'setReplicaStatus', 'setReplicaHost', 'createDirectory',
                   'setDirectoryStatus', 'removeDirectory', 'removeDataset',
                   'removeFileFromDataset', 'createDataset']

  def __init__(self, catalogs=None, vo=None):
    """ Default constructor.

        :param catalogs: catalog name or list of names; empty selects all
                         catalogs configured in Operations or Resources
        :param vo: VO name; resolved from the proxy group when not given
    """
    self.valid = True
    self.timeout = 180
    self.readCatalogs = []
    self.writeCatalogs = []
    self.vo = vo
    if not vo:
      result = getVOfromProxyGroup()
      if not result['OK']:
        # BUGFIX: the original did "return result" here, but returning a
        # value from __init__ raises TypeError at runtime. Flag the object
        # as invalid instead; callers are expected to check isOK().
        gLogger.error("FileCatalog.__init__: %s" % result['Message'])
        self.valid = False
        return
      self.vo = result['Value']
    self.opHelper = Operations(vo=self.vo)
    self.reHelper = Resources(vo=self.vo)

    # Normalize the catalogs argument ( avoids the mutable [] default ).
    if catalogs is None:
      catalogs = []
    if type(catalogs) in types.StringTypes:
      catalogs = [catalogs]
    if catalogs:
      res = self._getSelectedCatalogs(catalogs)
    else:
      res = self._getCatalogs()
    if not res['OK']:
      self.valid = False
    elif (len(self.readCatalogs) == 0) and (len(self.writeCatalogs) == 0):
      self.valid = False

  def isOK(self):
    """ True when construction succeeded and at least one catalog is usable. """
    return self.valid

  def getReadCatalogs(self):
    """ Return the list of ( name, object, master ) read-catalog tuples. """
    return self.readCatalogs

  def getWriteCatalogs(self):
    """ Return the list of ( name, object, master ) write-catalog tuples. """
    return self.writeCatalogs

  def __getattr__(self, name):
    # Route known catalog method names to the proper executor; self.call
    # records which catalog method the executor must invoke.
    self.call = name
    if name in FileCatalog.write_methods:
      return self.w_execute
    elif name in FileCatalog.ro_methods:
      return self.r_execute
    else:
      raise AttributeError(name)

  def __checkArgumentFormat(self, path):
    # Normalize str / list / dict input into a { path : False } dict.
    if type(path) in types.StringTypes:
      urls = {path: False}
    elif type(path) == types.ListType:
      urls = {}
      for url in path:
        urls[url] = False
    elif type(path) == types.DictType:
      urls = path
    else:
      return S_ERROR("FileCatalog.__checkArgumentFormat: Supplied path is not of the correct format.")
    return S_OK(urls)

  def w_execute(self, *parms, **kws):
    """ Write method executor.

        Runs self.call on every write catalog. A failure of the master
        catalog aborts immediately; failures of secondary catalogs are
        recorded per LFN in the returned 'Failed' dict.
    """
    successful = {}
    failed = {}
    failedCatalogs = []

    fileInfo = parms[0]
    res = self.__checkArgumentFormat(fileInfo)
    if not res['OK']:
      return res
    fileInfo = res['Value']
    allLfns = fileInfo.keys()

    for catalogName, oCatalog, master in self.writeCatalogs:
      method = getattr(oCatalog, self.call)
      res = method(fileInfo, **kws)
      if not res['OK']:
        if master:
          # If this is the master catalog and it fails we dont want to continue with the other catalogs
          gLogger.error("FileCatalog.w_execute: Failed to execute %s on master catalog %s." %
                        (self.call, catalogName), res['Message'])
          return res
        else:
          # Otherwise we keep the failed catalogs so we can update their state later
          failedCatalogs.append((catalogName, res['Message']))
      else:
        for lfn, message in res['Value']['Failed'].items():
          # Save the error message for the failed operations
          if lfn not in failed:
            failed[lfn] = {}
          failed[lfn][catalogName] = message
          if master:
            # If this is the master catalog then we should not attempt the operation on other catalogs
            fileInfo.pop(lfn)
        for lfn, result in res['Value']['Successful'].items():
          # Save the result return for each file for the successful operations
          if lfn not in successful:
            successful[lfn] = {}
          successful[lfn][catalogName] = result

    # This recovers the states of the files that completely failed i.e. when S_ERROR is returned by a catalog
    for catalogName, errorMessage in failedCatalogs:
      for lfn in allLfns:
        if lfn not in failed:
          failed[lfn] = {}
        failed[lfn][catalogName] = errorMessage

    resDict = {'Failed': failed, 'Successful': successful}
    return S_OK(resDict)

  def r_execute(self, *parms, **kws):
    """ Read method executor.

        Tries self.call on each read catalog in turn, merging per-LFN results
        until every LFN has succeeded or all catalogs are exhausted.
    """
    successful = {}
    failed = {}
    for catalogTuple in self.readCatalogs:
      oCatalog = catalogTuple[1]
      method = getattr(oCatalog, self.call)
      res = method(*parms, **kws)
      if res['OK']:
        if 'Successful' in res['Value']:
          for key, item in res['Value']['Successful'].items():
            if key not in successful:
              successful[key] = item
            if key in failed:
              failed.pop(key)
          for key, item in res['Value']['Failed'].items():
            if key not in successful:
              failed[key] = item
          if len(failed) == 0:
            # Everything resolved: no need to query further catalogs.
            resDict = {'Failed': failed, 'Successful': successful}
            return S_OK(resDict)
        else:
          # Non-bulk method: first catalog that answers wins.
          return res
    if (len(successful) == 0) and (len(failed) == 0):
      return S_ERROR('Failed to perform %s from any catalog' % self.call)
    resDict = {'Failed': failed, 'Successful': successful}
    return S_OK(resDict)

  ###########################################################################################
  #
  # Below is the method for obtaining the objects instantiated for a provided catalogue configuration
  #

  def addCatalog(self, catalogName, mode="Write", master=False):
    """ Add a new catalog with catalogName to the pool of catalogs in mode:
        "Read","Write" or "ReadWrite"
    """
    result = self._generateCatalogObject(catalogName)
    if not result['OK']:
      return result
    oCatalog = result['Value']
    if mode.lower().find("read") != -1:
      self.readCatalogs.append((catalogName, oCatalog, master))
    if mode.lower().find("write") != -1:
      self.writeCatalogs.append((catalogName, oCatalog, master))
    return S_OK()

  def removeCatalog(self, catalogName):
    """ Remove the specified catalog from the internal pool """
    catalog_removed = False
    for i in range(len(self.readCatalogs)):
      catalog = self.readCatalogs[i][0]
      if catalog == catalogName:
        del self.readCatalogs[i]
        catalog_removed = True
        break
    for i in range(len(self.writeCatalogs)):
      catalog = self.writeCatalogs[i][0]
      if catalog == catalogName:
        del self.writeCatalogs[i]
        catalog_removed = True
        break
    if catalog_removed:
      return S_OK()
    else:
      # NOTE(review): still S_OK when nothing was removed; kept for backward
      # compatibility with existing callers.
      return S_OK('Catalog does not exist')

  def _getSelectedCatalogs(self, desiredCatalogs):
    """ Instantiate exactly the requested catalogs as read+write masters. """
    for catalogName in desiredCatalogs:
      res = self._generateCatalogObject(catalogName)
      if not res['OK']:
        return res
      oCatalog = res['Value']
      self.readCatalogs.append((catalogName, oCatalog, True))
      self.writeCatalogs.append((catalogName, oCatalog, True))
    return S_OK()

  def _getCatalogs(self):
    """ Populate readCatalogs/writeCatalogs from configuration.

        Catalog names come from Operations /Services/Catalogs when defined,
        otherwise from the Resources Catalog section; master catalogs are
        placed first in each list.
    """
    # Get the eligible catalogs first
    # First, look in the Operations, if nothing defined look in /Resources
    result = self.opHelper.getSections('/Services/Catalogs')
    fileCatalogs = []
    operationsFlag = False
    if result['OK']:
      fileCatalogs = result['Value']
      operationsFlag = True
    else:
      res = self.reHelper.getEligibleResources('Catalog')
      if not res['OK']:
        errStr = "FileCatalog._getCatalogs: Failed to get file catalog configuration."
        gLogger.error(errStr, res['Message'])
        return S_ERROR(errStr)
      fileCatalogs = res['Value']

    # Get the catalogs now
    for catalogName in fileCatalogs:
      res = self._getCatalogConfigDetails(catalogName)
      if not res['OK']:
        return res
      catalogConfig = res['Value']
      if operationsFlag:
        # Operations options override the Resources-level ones.
        result = self.opHelper.getOptionsDict('/Services/Catalogs/%s' % catalogName)
        if not result['OK']:
          return result
        catalogConfig.update(result['Value'])
      if catalogConfig['Status'] == 'Active':
        res = self._generateCatalogObject(catalogName)
        if not res['OK']:
          return res
        oCatalog = res['Value']
        master = catalogConfig['Master']
        # If the catalog is read type
        if re.search('Read', catalogConfig['AccessType']):
          if master:
            self.readCatalogs.insert(0, (catalogName, oCatalog, master))
          else:
            self.readCatalogs.append((catalogName, oCatalog, master))
        # If the catalog is write type
        if re.search('Write', catalogConfig['AccessType']):
          if master:
            self.writeCatalogs.insert(0, (catalogName, oCatalog, master))
          else:
            self.writeCatalogs.append((catalogName, oCatalog, master))
    return S_OK()

  def _getCatalogConfigDetails(self, catalogName):
    """ Return the configuration dict for catalogName, with the 'Status',
        'AccessType' and 'Master' options normalized.
    """
    # First obtain the options that are available
    result = self.reHelper.getCatalogOptionsDict(catalogName)
    if not result['OK']:
      errStr = "FileCatalog._getCatalogConfigDetails: Failed to get catalog options"
      gLogger.error(errStr, catalogName)
      return S_ERROR(errStr)
    catalogConfig = result['Value']
    # The 'Status' option should be defined (default = 'Active')
    if 'Status' not in catalogConfig:
      warnStr = "FileCatalog._getCatalogConfigDetails: 'Status' option not defined"
      gLogger.warn(warnStr, catalogName)
      catalogConfig['Status'] = 'Active'
    # The 'AccessType' option must be defined
    if 'AccessType' not in catalogConfig:
      errStr = "FileCatalog._getCatalogConfigDetails: Required option 'AccessType' not defined"
      gLogger.error(errStr, catalogName)
      return S_ERROR(errStr)
    # Anything other than 'True' in the 'Master' option means it is not
    catalogConfig['Master'] = (catalogConfig.get('Master') == 'True')
    return S_OK(catalogConfig)

  def _generateCatalogObject(self, catalogName):
    """ Create a file catalog object from its name and CS description """
    useProxy = gConfig.getValue('/LocalSite/Catalogs/%s/UseProxy' % catalogName, False)
    if not useProxy:
      useProxy = self.opHelper.getValue('/Services/Catalogs/%s/UseProxy' % catalogName, False)
    return FileCatalogFactory().createCatalog(catalogName, useProxy)
def __lookForCE( self ): knownces = self.am_getOption( 'BannedCEs', [] ) resources = Resources( self.voName ) result = resources.getEligibleResources( 'Computing', {'CEType':['LCG','CREAM'] } ) if not result['OK']: return result knownces = [ resources.getComputingElementValue( x, 'Host' ) for x in result['Value'] ] # result = gConfig.getSections( '/Resources/Sites' ) # if not result['OK']: # return # grids = result['Value'] # # for grid in grids: # # result = gConfig.getSections( '/Resources/Sites/%s' % grid ) # if not result['OK']: # return # sites = result['Value'] # # for site in sites: # opt = gConfig.getOptionsDict( '/Resources/Sites/%s/%s' % ( grid, site ) )['Value'] # ces = List.fromChar( opt.get( 'CE', '' ) ) # knownces += ces response = ldapCEState( '', vo = self.voName ) if not response['OK']: self.log.error( "Error during BDII request", response['Message'] ) response = self.__checkAlternativeBDIISite( ldapCEState, '', self.voName ) return response newces = {} for queue in response['Value']: try: queuename = queue['GlueCEUniqueID'] except: continue cename = queuename.split( ":" )[0] if not cename in knownces: newces[cename] = None self.log.debug( "newce", cename ) body = "" possibleNewSites = [] for ce in newces.iterkeys(): response = ldapCluster( ce ) if not response['OK']: self.log.warn( "Error during BDII request", response['Message'] ) response = self.__checkAlternativeBDIISite( ldapCluster, ce ) continue clusters = response['Value'] if len( clusters ) != 1: self.log.warn( "Error in cluster length", " CE %s Length %d" % ( ce, len( clusters ) ) ) if len( clusters ) == 0: continue cluster = clusters[0] fkey = cluster.get( 'GlueForeignKey', [] ) if type( fkey ) == type( '' ): fkey = [fkey] nameBDII = None for entry in fkey: if entry.count( 'GlueSiteUniqueID' ): nameBDII = entry.split( '=' )[1] break if not nameBDII: continue cestring = "CE: %s, GOCDB Name: %s" % ( ce, nameBDII ) self.log.info( cestring ) response = ldapCE( ce ) if not response['OK']: 
self.log.warn( "Error during BDII request", response['Message'] ) response = self.__checkAlternativeBDIISite( ldapCE, ce ) continue ceinfos = response['Value'] if len( ceinfos ): ceinfo = ceinfos[0] systemName = ceinfo.get( 'GlueHostOperatingSystemName', 'Unknown' ) systemVersion = ceinfo.get( 'GlueHostOperatingSystemVersion', 'Unknown' ) systemRelease = ceinfo.get( 'GlueHostOperatingSystemRelease', 'Unknown' ) else: systemName = "Unknown" systemVersion = "Unknown" systemRelease = "Unknown" osstring = "SystemName: %s, SystemVersion: %s, SystemRelease: %s" % ( systemName, systemVersion, systemRelease ) self.log.info( osstring ) response = ldapCEState( ce, vo = self.voName ) if not response['OK']: self.log.warn( "Error during BDII request", response['Message'] ) response = self.__checkAlternativeBDIISite( ldapCEState, ce, self.voName ) continue newcestring = "\n\n%s\n%s" % ( cestring, osstring ) usefull = False cestates = response['Value'] for cestate in cestates: queuename = cestate.get( 'GlueCEUniqueID', 'UnknownName' ) queuestatus = cestate.get( 'GlueCEStateStatus', 'UnknownStatus' ) queuestring = "%s %s" % ( queuename, queuestatus ) self.log.info( queuestring ) newcestring += "\n%s" % queuestring if queuestatus.count( 'Production' ): usefull = True if usefull: body += newcestring possibleNewSites.append( 'dirac-admin-add-site DIRACSiteName %s %s' % ( nameBDII, ce ) ) if body: body = "We are glad to inform You about new CE(s) possibly suitable for %s:\n" % self.voName + body body += "\n\nTo suppress information about CE add its name to BannedCEs list." for possibleNewSite in possibleNewSites: body = "%s\n%s" % ( body, possibleNewSite ) self.log.info( body ) if self.addressTo and self.addressFrom: notification = NotificationClient() result = notification.sendMail( self.addressTo, self.subject, body, self.addressFrom, localAttempt = False ) return S_OK()