def export_getTestHistory(self, elementType, element, fromDate, toDate):
    """
    Return the SAM test history of an element within [fromDate, toDate].

    The result maps '<elementName>-<testType>' to a list of
    (lastCheckTime, status) tuples.
    """
    gLogger.info('getTestHistory')

    if fromDate > toDate:
        return S_ERROR('from date can not be after the to date.')

    # Resolve the element into the concrete resources to query.
    if elementType == 'Site':
        if element.split('.')[0] == 'CLOUD':
            # CLOUD sites are queried directly by their site name.
            targets = [element]
        else:
            targets = CSHelpers.getSiteComputingElements(element)
            targets += CSHelpers.getSiteStorageElements(element)
    else:
        targets = [element]

    queryRes = rmClient.selectSAMResultLog(
        elementName=targets,
        meta={'newer': ['LastCheckTime', fromDate],
              'older': ['LastCheckTime', toDate],
              'columns': ['ElementName', 'TestType', 'Status', 'LastCheckTime']})
    if not queryRes['OK']:
        return queryRes

    testHistory = {}
    for elementName, testType, status, lastCheckTime in queryRes['Value']:
        key = elementName + '-' + testType
        testHistory.setdefault(key, []).append((lastCheckTime, status))

    return S_OK(testHistory)
def export_getSAMSummary(self, siteName, vo):
    """ Returns SAM tests status for the elements of the given site.

        :return: S_OK( { element : { 'ElementType' : 'WMSTest' : 'CVMFSTest' : 'BOSSTest' : 'SETest' : } } ) / S_ERROR
    """
    gLogger.info('getSAMSummary')

    # CLOUD sites are represented by a single pseudo-CE (the site itself).
    if siteName.split('.')[0] == 'CLOUD':
        ces = [siteName]
    else:
        ces = CSHelpers.getSiteComputingElements(siteName)
    ses = CSHelpers.getSiteStorageElements(siteName)

    samSummary = {}
    for ce in ces:
        samSummary[ce] = {'ElementType': 'ComputingElement'}
    for se in ses:
        samSummary[se] = {'ElementType': 'StorageElement'}

    # Only consider results from the last 24 hours.
    lastCheckTime = datetime.utcnow().replace(microsecond=0) - timedelta(hours=24)

    queryRes = rmClient.selectResourceSAMStatus(
        elementName=ces, vO=vo,
        meta={'newer': ['LastCheckTime', lastCheckTime]})
    if not queryRes['OK']:
        return queryRes
    records = queryRes['Value']
    # NOTE: the column layout of the first query is reused for the SE query below.
    columns = queryRes['Columns']

    if ses != []:
        queryRes = rmClient.selectResourceSAMStatus(
            elementName=ses,
            meta={'newer': ['LastCheckTime', lastCheckTime]})
        if not queryRes['OK']:
            return queryRes
        records += queryRes['Value']

    for record in records:
        samDict = dict(zip(columns, record))
        elementName = samDict['ElementName']
        samSummary[elementName]['Status'] = samDict['Status']

        # Per-test statuses for this element.
        tests = [test.strip() for test in samDict['Tests'].split(',')]
        queryRes = rmClient.selectSAMResult(
            elementName=elementName, testType=tests,
            meta={'newer': ['LastCheckTime', lastCheckTime]})
        if not queryRes['OK']:
            return queryRes
        for testRecord in queryRes['Value']:
            testDict = dict(zip(queryRes['Columns'], testRecord))
            samSummary[elementName][testDict['TestType']] = testDict['Status']

    return S_OK(samSummary)
def __init__( self ):
    # Warm up local CS
    # I am not sure whether it is needed but
    # it was used in DIRAC.ResourceStatusSystem.Utilities.Synchronizer
    CSHelpers.warmUp()

    # Only users belonging to a group with this property are allowed to connect.
    self._accessUserGroup = getAllowedGroupName()
    # Only hosts with this property are allowed to connect.
    self._accessProperty = getAllowedHostProperty()
def export_getTree(self, elementType, elementName):
    """
    Given an element type and name, finds its parent site and returns all
    descendants of that site.
    """
    gLogger.info('getTree')

    site = self.getSite(elementType, elementName)
    if not site:
        return S_ERROR('No site')

    siteStatus = rsClient.selectStatusElement(
        'Site', 'Status', name=site,
        meta={'columns': ['StatusType', 'Status']})
    if not siteStatus['OK']:
        return siteStatus

    tree = {site: {'statusTypes': dict(siteStatus['Value'])}}

    cesStatus = rsClient.selectStatusElement(
        'Resource', 'Status', name=CSHelpers.getSiteComputingElements(site),
        meta={'columns': ['Name', 'StatusType', 'Status']})
    if not cesStatus['OK']:
        return cesStatus

    sesStatus = rsClient.selectStatusElement(
        'Resource', 'Status', name=CSHelpers.getSiteStorageElements(site),
        meta={'columns': ['Name', 'StatusType', 'Status']})
    if not sesStatus['OK']:
        return sesStatus

    def feedTree(elementsList):
        # Fold (name, statusType, status) tuples into {name: {statusType: status}}.
        elements = {}
        for name, statusType, status in elementsList['Value']:
            elements.setdefault(name, {})[statusType] = status
        return elements

    tree[site]['ces'] = feedTree(cesStatus)
    tree[site]['ses'] = feedTree(sesStatus)

    return S_OK(tree)
def __init__(self, rStatus=None, rManagement=None):
    """
    Constructor.

    :param rStatus: optional ResourceStatusClient instance; a fresh one is
        created when None
    :param rManagement: optional ResourceManagementClient instance; a fresh
        one is created when None
    """
    # Warm up local CS
    CSHelpers.warmUp()

    # Bug fix: a caller-supplied client used to be silently ignored and the
    # attribute was left unset; honour injected clients instead.
    self.rStatus = rStatus if rStatus is not None else ResourceStatusClient.ResourceStatusClient()
    self.rManagement = rManagement if rManagement is not None else ResourceManagementClient()

    self.rssConfig = RssConfiguration()
def __init__(self, rStatus=None, rManagement=None):
    """
    Constructor.

    :param rStatus: optional ResourceStatusClient instance; a fresh one is
        created when None
    :param rManagement: optional ResourceManagementClient instance; a fresh
        one is created when None
    """
    # Warm up local CS
    CSHelpers.warmUp()

    # Bug fix: a caller-supplied client used to be silently ignored and the
    # attribute was left unset; honour injected clients instead.
    self.rStatus = rStatus if rStatus is not None else ResourceStatusClient.ResourceStatusClient()
    self.rManagement = rManagement if rManagement is not None else ResourceManagementClient.ResourceManagementClient()

    self.rssConfig = RssConfiguration()
def doMaster(self):
    '''
    Master method, which looks little bit spaguetti code, sorry !
    - It gets all sites and transforms them into gocSites.
    - It gets all the storage elements and transforms them into their hosts
    - It gets the fts, the ces and file catalogs.
    '''
    gocSites = CSHelpers.getGOCSites()
    if not gocSites['OK']:
        return gocSites
    gocSites = gocSites['Value']

    sesHosts = CSHelpers.getStorageElementsHosts()
    if not sesHosts['OK']:
        return sesHosts

    resources = sesHosts['Value']

    # FIXME: file catalogs need also to use their hosts;
    # something similar applies to FTS Channels.

    ce = CSHelpers.getComputingElements()
    if ce['OK']:
        resources = resources + ce['Value']

    gLogger.verbose('Processing Sites: %s' % ', '.join(gocSites))
    siteRes = self.doNew(('Site', gocSites))
    if not siteRes['OK']:
        self.metrics['failed'].append(siteRes['Message'])

    gLogger.verbose('Processing Resources: %s' % ', '.join(resources))
    resourceRes = self.doNew(('Resource', resources))
    if not resourceRes['OK']:
        self.metrics['failed'].append(resourceRes['Message'])

    return S_OK(self.metrics)
def doMaster( self ):
    '''
    Master method, which looks little bit spaguetti code, sorry !
    - It gets all sites and transforms them into gocSites.
    - It gets all the storage elements and transforms them into their hosts
    - It gets the fts, the ces and file catalogs.
    '''
    gocSites = CSHelpers.getGOCSites()
    if not gocSites[ 'OK' ]:
        return gocSites
    gocSites = gocSites[ 'Value' ]

    sesHosts = CSHelpers.getStorageElementsHosts()
    if not sesHosts[ 'OK' ]:
        return sesHosts

    resources = sesHosts[ 'Value' ]

    # FIXME: file catalogs need also to use their hosts;
    # something similar applies to FTS Channels.

    ce = CSHelpers.getComputingElements()
    if ce[ 'OK' ]:
        resources = resources + ce[ 'Value' ]

    # Run the Site and Resource batches through doNew, collecting failures.
    for label, task in ( ( 'Sites', ( 'Site', gocSites ) ),
                         ( 'Resources', ( 'Resource', resources ) ) ):
        gLogger.info( 'Processing %s: %s' % ( label, ', '.join( task[ 1 ] ) ) )
        res = self.doNew( task )
        if not res[ 'OK' ]:
            self.metrics[ 'failed' ].append( res[ 'Message' ] )

    return S_OK( self.metrics )
def doMaster( self ):
    '''
    Master method, which looks little bit spaghetti code, sorry !
    - It gets all sites and transforms them into gocSites.
    - It gets all the storage elements and transforms them into their hosts
    - It gets the the CEs (FTS and file catalogs will come).
    '''
    gocSites = CSHelpers.getGOCSites()
    if not gocSites[ 'OK' ]:
        return gocSites
    gocSites = gocSites[ 'Value' ]

    sesHosts = CSHelpers.getStorageElementsHosts()
    if not sesHosts[ 'OK' ]:
        return sesHosts

    resources = sesHosts[ 'Value' ]

    ftsServer = getFTS3Servers()
    if ftsServer[ 'OK' ]:
        resources.extend( ftsServer[ 'Value' ] )

    # TODO: file catalogs need also to use their hosts
    ce = CSHelpers.getComputingElements()
    if ce[ 'OK' ]:
        resources.extend( ce[ 'Value' ] )

    gLogger.verbose( 'Processing Sites: %s' % ', '.join( gocSites ) )
    siteRes = self.doNew( ( 'Site', gocSites ) )
    if not siteRes[ 'OK' ]:
        self.metrics[ 'failed' ].append( siteRes[ 'Message' ] )

    gLogger.verbose( 'Processing Resources: %s' % ', '.join( resources ) )
    resourceRes = self.doNew( ( 'Resource', resources ) )
    if not resourceRes[ 'OK' ]:
        self.metrics[ 'failed' ].append( resourceRes[ 'Message' ] )

    return S_OK( self.metrics )
def doMaster(self):
    """
    Master method, which looks little bit spaguetti code, sorry !
    - It gets all sites and transforms them into gocSites.
    - It gets all the storage elements and transforms them into their hosts
    - It gets the fts, the ces and file catalogs.
    """
    gocSites = CSHelpers.getGOCSites()
    if not gocSites["OK"]:
        return gocSites
    gocSites = gocSites["Value"]

    sesHosts = CSHelpers.getStorageElementsHosts()
    if not sesHosts["OK"]:
        return sesHosts

    resources = sesHosts["Value"]

    # FIXME: file catalogs need also to use their hosts;
    # something similar applies to FTS Channels.

    ce = CSHelpers.getComputingElements()
    if ce["OK"]:
        resources = resources + ce["Value"]

    # Fan out over the Site and Resource batches, recording any failure.
    for label, task in (("Sites", ("Site", gocSites)),
                        ("Resources", ("Resource", resources))):
        gLogger.verbose("Processing %s: %s" % (label, ", ".join(task[1])))
        res = self.doNew(task)
        if not res["OK"]:
            self.metrics["failed"].append(res["Message"])

    return S_OK(self.metrics)
def export_getFreeDiskSpace(self, site, token):
    """
    Export the cached space-token occupancy, optionally filtered by site
    and/or token, decorating each record with its site name.
    """
    # Map each SE endpoint to a site name (first '-' token of the SE name).
    endpoint2Site = {}
    ses = CSHelpers.getStorageElements()
    if not ses['OK']:
        gLogger.error(ses['Message'])
        return ses
    for seName in ses['Value']:
        res = CSHelpers.getStorageElementEndpoint(seName)
        if not res['OK']:
            continue
        endpoint2Site.setdefault(res['Value'], seName.split('-', 1)[0])

    # Restrict the query to endpoints belonging to the requested site(s).
    endpoint = None
    if site:
        if isinstance(site, basestring):
            site = [site]
        selected = [ep for ep, siteName in endpoint2Site.items() if siteName in site]
        if selected:
            endpoint = selected

    res = rmClient.selectSpaceTokenOccupancyCache(endpoint=endpoint, token=token)
    if not res['OK']:
        return res

    spList = [dict(zip(res['Columns'], sp)) for sp in res['Value']]
    for spd in spList:
        spd['Site'] = endpoint2Site.get(spd['Endpoint'], 'Unknown')

    return S_OK(spList)
def _cleanCommand(self, toDelete=None):
    """ Clean the spaceTokenOccupancy table from old endpoints

    :param tuple toDelete: endpoint to remove (endpoint, storage_element_name),
       e.g. ('httpg://srm-lhcb.cern.ch:8443/srm/managerv2', CERN-RAW)
    """
    if not toDelete:
        # No explicit target: drop every cached (endpoint, SE) pair that is
        # no longer present in the CS.
        res = self.rmClient.selectSpaceTokenOccupancyCache()
        if not res['OK']:
            return res
        storedSEsSet = set((sse[0], sse[1]) for sse in res['Value'])

        currentSEsSet = set()
        for cse in DMSHelpers().getStorageElements():
            res = CSHelpers.getStorageElementEndpoint(cse)
            if not res['OK']:
                self.log.warn("Could not get endpoint", res['Message'])
                continue
            # NOTE(review): here the endpoint is taken as res['Value'][0] —
            # the helper seems to return a sequence; confirm against its API.
            currentSEsSet.add((res['Value'][0], cse))

        toDelete = list(storedSEsSet - currentSEsSet)
    else:
        toDelete = [toDelete]

    for endpoint, seName in toDelete:
        res = self.rmClient.deleteSpaceTokenOccupancyCache(endpoint, seName)
        if not res['OK']:
            self.log.warn("Could not delete entry from SpaceTokenOccupancyCache", res['Message'])

    return S_OK()
def doCache(self):
    """
    Compute the aggregated status of a site from the statuses of its
    elements: Active if any element is Active, else Degraded if any element
    is Degraded, else Banned.

    :return: S_OK( { 'Status' : ..., 'Reason' : ... } ) / S_ERROR
    """
    if not self.args['site']:
        return S_ERROR('site was not found in args')
    site = self.args['site']

    elements = CSHelpers.getSiteElements(site)
    if not elements['OK']:
        # Bug fix: a failed CS query used to be silently swallowed, leaving
        # statusList empty and wrongly reporting the site as Banned.
        return elements

    statusList = []
    for element in elements['Value']:
        status = self.rssClient.selectStatusElement("Resource", "Status", element,
                                                    meta={'columns': ['Status']})
        if not status['OK']:
            return status
        if status['Value']:
            statusList.append(status['Value'][0][0])
        else:
            # forcing in the case the resource has no status (yet)
            statusList.append('Active')

    if 'Active' in statusList:
        return S_OK({'Status': 'Active', 'Reason': 'An element that belongs to the site is Active'})
    if 'Degraded' in statusList:
        return S_OK({'Status': 'Degraded', 'Reason': 'An element that belongs to the site is Degraded'})
    return S_OK({'Status': 'Banned', 'Reason': 'There is no Active element in the site'})
def export_getCachedDowntimes( self, element, elementType, name, severity, startDate, endDate ):
    """
    Return cached downtimes for an element, keeping only those whose
    [StartDate, EndDate] interval overlaps [startDate, endDate].
    """
    # Storage elements are cached by their host name.
    if elementType == 'StorageElement':
        name = CSHelpers.getSEHost( name )
        if not name['OK']:
            return name
        name = name['Value']

    if startDate > endDate:
        return S_ERROR( 'startDate > endDate' )

    res = rmClient.selectDowntimeCache(
        element = element, name = name, severity = severity,
        meta = { 'columns' : [ 'Element', 'Name', 'StartDate', 'EndDate',
                               'Severity', 'Description', 'Link' ] } )
    if not res[ 'OK' ]:
        return res

    columns = res[ 'Columns' ]
    downtimes = []
    for dt in res[ 'Value' ]:
        dtDict = dict( zip( columns, dt ) )
        # Keep downtimes overlapping the requested window.
        if dtDict[ 'StartDate' ] < endDate and dtDict[ 'EndDate' ] > startDate:
            downtimes.append( dt )

    result = S_OK( downtimes )
    result[ 'Columns' ] = columns
    return result
def doMaster( self ):
    '''
    Master method, which looks little bit spaguetti code, sorry !
    - It gets all gocSites.
    As there is no bulk query, it compares with what we have on the
    database. It queries a portion of them.
    '''
    gocSites = CSHelpers.getGOCSites()
    if not gocSites[ 'OK' ]:
        return gocSites
    gocSites = gocSites[ 'Value' ]

    self.log.info( 'Processing %s' % ', '.join( gocSites ) )

    # Query each GOC site individually, collecting failures in the metrics.
    for gocSite in gocSites:
        result = self.doNew( gocSite )
        if not result[ 'OK' ]:
            self.metrics[ 'failed' ].append( result )

    return S_OK( self.metrics )
def doCommand(self):
    """
    Returns failed jobs using the DIRAC accounting system for every site
    for the last self.args[0] hours

    :params:
      :attr:`sites`: list of sites (when not given, take every site)

    :returns: S_OK with a dict of per-site accounting plots
    """
    if 'hours' not in self.args:
        return S_ERROR('Number of hours not specified')
    hours = self.args['hours']

    sites = self.args.get('sites')
    if sites is None:
        # FIXME: pointing to the CSHelper instead
        sites = CSHelpers.getSites()
        if not sites['OK']:
            return sites
        sites = sites['Value']

    if not sites:
        return S_ERROR('Sites is empty')

    fromD = datetime.utcnow() - timedelta(hours=hours)
    toD = datetime.utcnow()

    failedPilots = self.rClient.getReport('Pilot', 'NumberOfPilots', fromD, toD,
                                          {'GridStatus': ['Aborted'], 'Site': sites},
                                          'Site')
    if not failedPilots['OK']:
        return failedPilots
    failedPilots = failedPilots['Value']

    if 'data' not in failedPilots:
        return S_ERROR('Missing data key')
    if 'granularity' not in failedPilots:
        return S_ERROR('Missing granularity key')

    granularity = failedPilots['granularity']
    singlePlots = {}
    for site, value in failedPilots['data'].items():
        if site in sites:
            singlePlots[site] = {'data': {site: value}, 'granularity': granularity}

    return S_OK(singlePlots)
def doCommand( self ):
    """
    Returns running and runned jobs, querying the WMSHistory
    for the last self.args[0] hours

    :params:
      :attr:`sites`: list of sites (when not given, take every sites)

    :returns: S_OK with a dict of per-site WMSHistory plots
    """
    if 'hours' not in self.args:
        return S_ERROR( 'Number of hours not specified' )
    hours = self.args[ 'hours' ]

    sites = self.args.get( 'sites' )
    if sites is None:
        # FIXME: pointing to the CSHelper instead
        sites = CSHelpers.getSites()
        if not sites[ 'OK' ]:
            return sites
        sites = sites[ 'Value' ]

    if not sites:
        return S_ERROR( 'Sites is empty' )

    fromD = datetime.utcnow() - timedelta( hours = hours )
    toD = datetime.utcnow()

    runJobs = self.rClient.getReport( 'WMSHistory', 'NumberOfJobs', fromD, toD,
                                      {}, 'Site' )
    if not runJobs[ 'OK' ]:
        return runJobs
    runJobs = runJobs[ 'Value' ]

    if 'data' not in runJobs:
        return S_ERROR( 'Missing data key' )
    if 'granularity' not in runJobs:
        return S_ERROR( 'Missing granularity key' )

    granularity = runJobs[ 'granularity' ]
    singlePlots = {}
    for site, value in runJobs[ 'data' ].items():
        if site in sites:
            singlePlots[ site ] = { 'data' : { site: value },
                                    'granularity' : granularity }

    return S_OK( singlePlots )
def doMaster(self):
    """
    Query every site, then every computing element, through doNew,
    collecting failures in the metrics.
    """
    siteNames = getSites()
    if not siteNames['OK']:
        return siteNames

    ces = CSHelpers.getComputingElements()
    if not ces['OK']:
        return ces

    # NOTE(review): the original comment claimed a '2 hours' window, but the
    # window argument passed to doNew is 1 — confirm which is intended.
    for site in siteNames['Value']:
        result = self.doNew((site, None, 1))
        if not result['OK']:
            self.metrics['failed'].append(result)

    for ce in ces['Value']:
        result = self.doNew((None, ce, 1))
        if not result['OK']:
            self.metrics['failed'].append(result)

    return S_OK(self.metrics)
def export_getSitesResources( self, siteNames ):
    """
    For each site, return its eligible computing resources and the hosts of
    its eligible storage elements.

    :param siteNames: site name, list of site names, or None for all sites
    :return: S_OK( { site : { 'ces' : [ ... ], 'ses' : [ ... ] } } ) / S_ERROR
    """
    resources = Resources.Resources()

    if siteNames is None:
        res = Resources.getSites()
        if not res[ 'OK' ]:
            return res
        siteNames = res[ 'Value' ]
    if isinstance( siteNames, str ):
        siteNames = [ siteNames ]

    sitesRes = {}
    for siteName in siteNames:
        ses = resources.getEligibleStorageElements( { 'Site': siteName } )
        sesHosts = CSHelpers.getStorageElementsHosts( ses )
        if not sesHosts[ 'OK' ]:
            return sesHosts
        sitesRes[ siteName ] = {
            'ces' : resources.getEligibleResources( 'Computing', { 'Site': siteName } ),
            # de-duplicate the SE hosts
            'ses' : list( set( sesHosts[ 'Value' ] ) ),
        }

    return S_OK( sitesRes )
def export_getCachedDowntimes( self, element, elementType, elementName, severity, startDate, endDate ):
    """
    Return cached downtimes for an element, keeping only those whose
    [StartDate, EndDate] interval overlaps [startDate, endDate].
    """
    # Bug fix: 'name' used to be assigned only in the StorageElement branch,
    # raising NameError for any other element type; default to elementName.
    name = elementName
    if elementType == 'StorageElement':
        # Storage elements are cached by their host name.
        result = CSHelpers.getSEProtocolOption( elementName, 'Host' )
        if not result['OK']:
            return S_ERROR( 'StorageElement %s host not found' % elementName )
        name = result['Value']

    if startDate > endDate:
        return S_ERROR( 'startDate > endDate' )

    res = rmClient.selectDowntimeCache(
        element = element, name = name, severity = severity,
        meta = { 'columns' : [ 'Element', 'Name', 'StartDate', 'EndDate',
                               'Severity', 'Description', 'Link' ] } )
    if not res[ 'OK' ]:
        return res

    downtimes = []
    for dt in res[ 'Value' ]:
        dtDict = dict( zip( res[ 'Columns' ], dt ) )
        # Keep downtimes overlapping the requested window.
        if dtDict[ 'StartDate' ] < endDate and dtDict[ 'EndDate' ] > startDate:
            downtimes.append( dt )

    result = S_OK( downtimes )
    result[ 'Columns' ] = res[ 'Columns' ]
    return result
def __init__(self, rStatus=None, rManagement=None, defaultStatus="Unknown"):
    """
    Constructor.

    :param rStatus: optional ResourceStatusClient instance; a fresh one is
        created when None
    :param rManagement: optional ResourceManagementClient instance; a fresh
        one is created when None
    :param defaultStatus: status used when no other status can be determined
    """
    # Warm up local CS
    CSHelpers.warmUp()

    # Bug fix: a caller-supplied client used to be silently ignored and the
    # attribute was left unset; honour injected clients instead.
    self.rStatus = rStatus if rStatus is not None else ResourceStatusClient()
    self.rManagement = rManagement if rManagement is not None else ResourceManagementClient()
    self.defaultStatus = defaultStatus

    self.rssConfig = RssConfiguration()

    # Token owner defaults to the service identity and is replaced by the
    # proxy username when a proxy is available.
    self.tokenOwner = "rs_svc"
    result = getProxyInfo()
    if result['OK']:
        self.tokenOwner = result['Value']['username']
def doMaster(self):
    """
    Master method, which looks little bit spaguetti code, sorry !
    - It gets all gocSites.
    As there is no bulk query, it compares with what we have on the
    database. It queries a portion of them.
    """
    gocSites = CSHelpers.getGOCSites()
    if not gocSites["OK"]:
        return gocSites
    gocSites = gocSites["Value"]

    gLogger.info("Processing %s" % ", ".join(gocSites))

    # Query each GOC site individually, collecting failures in the metrics.
    for gocSite in gocSites:
        result = self.doNew(gocSite)
        if not result["OK"]:
            self.metrics["failed"].append(result)

    return S_OK(self.metrics)
def doMaster( self ):
    '''
    Master method, which looks little bit spaghetti code, sorry !
    - It gets all sites and transforms them into gocSites.
    - It gets all the storage elements and transforms them into their hosts
    - It gets the the CEs (FTS and file catalogs will come).
    '''
    gocSites = CSHelpers.getGOCSites()
    if not gocSites[ 'OK' ]:
        return gocSites
    gocSites = gocSites[ 'Value' ]

    sesHosts = CSHelpers.getStorageElementsHosts()
    if not sesHosts[ 'OK' ]:
        return sesHosts

    resources = sesHosts[ 'Value' ]

    ftsServer = getFTS3Servers()
    if ftsServer[ 'OK' ]:
        resources.extend( ftsServer[ 'Value' ] )

    # TODO: file catalogs need also to use their hosts
    ce = CSHelpers.getComputingElements()
    if ce[ 'OK' ]:
        resources.extend( ce[ 'Value' ] )

    # Run the Site and Resource batches through doNew, collecting failures.
    for label, task in ( ( 'Sites', ( 'Site', gocSites ) ),
                         ( 'Resources', ( 'Resource', resources ) ) ):
        self.log.verbose( 'Processing %s: %s' % ( label, ', '.join( task[ 1 ] ) ) )
        res = self.doNew( task )
        if not res[ 'OK' ]:
            self.metrics[ 'failed' ].append( res[ 'Message' ] )

    return S_OK( self.metrics )
def export_getTree(self, elementType, elementName):
    """
    Given an element type and name, finds its parent site and returns all
    descendants of that site.
    """
    gLogger.info('getTree')

    site = self.getSite(elementType, elementName)
    if not site:
        return S_ERROR('No site')

    siteStatus = rsClient.selectStatusElement('Site', 'Status', name=site,
                                              meta={'columns': ['StatusType',
                                                                'Status']})
    if not siteStatus['OK']:
        return siteStatus

    tree = {site: {'statusTypes': dict(siteStatus['Value'])}}

    ces = CSHelpers.getSiteComputingElements(site)
    cesStatus = rsClient.selectStatusElement('Resource', 'Status', name=ces,
                                             meta={'columns': ['Name',
                                                               'StatusType',
                                                               'Status']})
    if not cesStatus['OK']:
        return cesStatus

    ses = CSHelpers.getSiteStorageElements(site)
    sesStatus = rsClient.selectStatusElement('Resource', 'Status', name=ses,
                                             meta={'columns': ['Name',
                                                               'StatusType',
                                                               'Status']})
    if not sesStatus['OK']:
        return sesStatus

    def feedTree(elementsList):
        # Fold (name, statusType, status) tuples into {name: {statusType: status}}.
        elements = {}
        for name, statusType, status in elementsList['Value']:
            if name not in elements:
                elements[name] = {}
            elements[name][statusType] = status
        return elements

    tree[site]['ces'] = feedTree(cesStatus)
    tree[site]['ses'] = feedTree(sesStatus)

    return S_OK(tree)
def doCommand( self ):
    """
    Returns failed jobs using the DIRAC accounting system for every site
    for the last self.args[0] hours

    :params:
      :attr:`sites`: list of sites (when not given, take every site)

    :returns: S_OK with a dict of per-site accounting plots
    """
    if 'hours' not in self.args:
        return S_ERROR( 'Number of hours not specified' )
    hours = self.args[ 'hours' ]

    sites = self.args.get( 'sites' )
    if sites is None:
        # FIXME: pointing to the CSHelper instead
        sites = CSHelpers.getSites()
        if not sites[ 'OK' ]:
            return sites
        sites = sites[ 'Value' ]

    if not sites:
        return S_ERROR( 'Sites is empty' )

    fromD = datetime.utcnow() - timedelta( hours = hours )
    toD = datetime.utcnow()

    failedPilots = self.rClient.getReport( 'Pilot', 'NumberOfPilots', fromD, toD,
                                           { 'GridStatus' : [ 'Aborted' ],
                                             'Site' : sites },
                                           'Site' )
    if not failedPilots[ 'OK' ]:
        return failedPilots
    failedPilots = failedPilots[ 'Value' ]

    if 'data' not in failedPilots:
        return S_ERROR( 'Missing data key' )
    if 'granularity' not in failedPilots:
        return S_ERROR( 'Missing granularity key' )

    granularity = failedPilots[ 'granularity' ]
    singlePlots = {}
    for site, value in failedPilots[ 'data' ].items():
        if site in sites:
            singlePlots[ site ] = { 'data' : { site: value },
                                    'granularity' : granularity }

    return S_OK( singlePlots )
def doMaster(self):
    """
    Master method, which looks little bit spaghetti code, sorry !
    - It gets all sites and transforms them into gocSites.
    - It gets all the storage elements and transforms them into their hosts
    - It gets the the CEs (FTS and file catalogs will come).
    """
    gocSites = CSHelpers.getGOCSites()
    if not gocSites["OK"]:
        return gocSites
    gocSites = gocSites["Value"]

    sesHosts = CSHelpers.getStorageElementsHosts()
    if not sesHosts["OK"]:
        return sesHosts

    resources = sesHosts["Value"]

    ftsServer = getFTS3Servers()
    if ftsServer["OK"]:
        resources.extend(ftsServer["Value"])

    # TODO: file catalogs need also to use their hosts
    ce = CSHelpers.getComputingElements()
    if ce["OK"]:
        resources.extend(ce["Value"])

    self.log.verbose("Processing Sites: %s" % ", ".join(gocSites))
    siteRes = self.doNew(("Site", gocSites))
    if not siteRes["OK"]:
        self.metrics["failed"].append(siteRes["Message"])

    self.log.verbose("Processing Resources: %s" % ", ".join(resources))
    resourceRes = self.doNew(("Resource", resources))
    if not resourceRes["OK"]:
        self.metrics["failed"].append(resourceRes["Message"])

    return S_OK(self.metrics)
def export_getComputingElements(self):
    """
    Returns the list of all CEs known to the CS.
    """
    gLogger.info('getComputingElements')
    return CSHelpers.getComputingElements()
def export_getStorageElements(self):
    """
    Returns the list of all SEs known to the CS.
    """
    gLogger.info('getStorageElements')
    return CSHelpers.getStorageElements()
def export_getSites(self):
    """
    Returns list of all sites considered by RSS

    :return: S_OK( [ sites ] ) | S_ERROR
    """
    gLogger.info('getSites')
    return CSHelpers.getSites()
def doCommand(self):
    """
    Returns running and runned jobs, querying the WMSHistory
    for the last self.args[0] hours

    :params:
      :attr:`sites`: list of sites (when not given, take every sites)

    :returns: S_OK with a dict of per-site WMSHistory plots
    """
    if 'hours' not in self.args:
        return S_ERROR('Number of hours not specified')
    hours = self.args['hours']

    sites = self.args.get('sites')
    if sites is None:
        # FIXME: pointing to the CSHelper instead
        sites = CSHelpers.getSites()
        if not sites['OK']:
            return sites
        sites = sites['Value']

    if not sites:
        return S_ERROR('Sites is empty')

    fromD = datetime.utcnow() - timedelta(hours=hours)
    toD = datetime.utcnow()

    runJobs = self.rClient.getReport('WMSHistory', 'NumberOfJobs', fromD, toD,
                                     {}, 'Site')
    if not runJobs['OK']:
        return runJobs
    runJobs = runJobs['Value']

    if 'data' not in runJobs:
        return S_ERROR('Missing data key')
    if 'granularity' not in runJobs:
        return S_ERROR('Missing granularity key')

    granularity = runJobs['granularity']
    singlePlots = {}
    for site, value in runJobs['data'].items():
        if site in sites:
            singlePlots[site] = {'data': {site: value}, 'granularity': granularity}

    return S_OK(singlePlots)
def export_getDowntimes( self, element, elementType, name ):
    """
    Return the cached downtimes for the given element.
    """
    if elementType == 'StorageElement':
        # Bug fix: the S_OK/S_ERROR dict returned by getSEHost was passed on
        # unchecked as the element name; unwrap it and propagate errors
        # (same pattern as export_getCachedDowntimes).
        res = CSHelpers.getSEHost( name )
        if not res[ 'OK' ]:
            return res
        name = res[ 'Value' ]

    return rmClient.selectDowntimeCache( element = element, name = name,
                                         meta = { 'columns' : [ 'StartDate',
                                                                'EndDate',
                                                                'Link',
                                                                'Description',
                                                                'Severity' ] } )
def doMaster(self):
    '''
    Master method. Gets all endpoints from the storage elements and all
    the spaceTokens. Could have taken from Shares/Disk as well.
    It queries for all their possible combinations, unless there are
    records in the database for those combinations, which then are not
    queried.
    '''
    spaceTokens = CSHelpers.getSpaceTokens()
    if not spaceTokens['OK']:
        return spaceTokens

    seEndpoints = CSHelpers.getStorageElementEndpoints()
    if not seEndpoints['OK']:
        return seEndpoints

    # Cartesian product of endpoints and space tokens.
    elementsToCheck = [(endpoint, token)
                       for endpoint in seEndpoints['Value']
                       for token in spaceTokens['Value']]

    gLogger.verbose('Processing %s' % elementsToCheck)

    for elementToQuery in elementsToCheck:
        result = self.doNew(elementToQuery)
        if not result['OK']:
            self.metrics['failed'].append(result)

    return S_OK(self.metrics)
def doMaster( self ):
    '''
    Master method. Gets all endpoints from the storage elements and all
    the spaceTokens. Could have taken from Shares/Disk as well.
    It queries for all their possible combinations, unless there are
    records in the database for those combinations, which then are not
    queried.
    '''
    spaceTokens = CSHelpers.getSpaceTokens()
    if not spaceTokens[ 'OK' ]:
        return spaceTokens

    seEndpoints = CSHelpers.getStorageElementEndpoints()
    if not seEndpoints[ 'OK' ]:
        return seEndpoints

    # Cartesian product of endpoints and space tokens.
    elementsToCheck = []
    for seEndpoint in seEndpoints[ 'Value' ]:
        for spaceToken in spaceTokens[ 'Value' ]:
            elementsToCheck.append( ( seEndpoint, spaceToken ) )

    gLogger.verbose( 'Processing %s' % elementsToCheck )

    for elementToQuery in elementsToCheck:
        result = self.doNew( elementToQuery )
        if not result[ 'OK' ]:
            self.metrics[ 'failed' ].append( result )

    return S_OK( self.metrics )
def export_getSiteResource(self, siteName):
    """
    Returns the dictionary with CEs and SEs for the given site.

    :return: S_OK( { 'ComputingElement' : celist, 'StorageElement' : selist } ) | S_ERROR
    """
    gLogger.info('getSiteResource')

    # No computing elements are looked up for CLOUD sites.
    if siteName.split('.')[0] == 'CLOUD':
        ces = []
    else:
        ces = CSHelpers.getSiteComputingElements(siteName)

    ses = CSHelpers.getSiteStorageElements(siteName)

    return S_OK({'ComputingElement': ces, 'StorageElement': ses})
def __removeNonExistingResourcesFromRM(self):
    '''
    Remove resources from DowntimeCache table that no longer exist in the CS.
    '''
    if not getServiceURL("ResourceStatus/ResourceManagement"):
        gLogger.verbose('ResourceManagement is not installed, skipping removal of non existing resources...')
        return S_OK()

    # Build the list of resources currently known to the CS:
    # SE hosts, FTS3 servers and computing elements.
    sesHosts = CSHelpers.getStorageElementsHosts()
    if not sesHosts['OK']:
        return sesHosts
    resources = sesHosts['Value']

    ftsServer = getFTS3Servers()
    if ftsServer['OK']:
        resources.extend(ftsServer['Value'])

    ce = CSHelpers.getComputingElements()
    if ce['OK']:
        resources.extend(ce['Value'])

    downtimes = self.rManagement.selectDowntimeCache()
    if not downtimes['OK']:
        return downtimes

    # Remove cached hosts that no longer exist in the CS.
    for host in downtimes['Value']:
        hostName = host[0]
        gLogger.verbose('Checking if %s is still in the CS' % hostName)
        if hostName not in resources:
            gLogger.verbose('%s is no longer in CS, removing entry...' % hostName)
            result = self.rManagement.deleteDowntimeCache(name=hostName)
            if not result['OK']:
                return result

    return S_OK()
def doMaster(self):
    """Master method.

    Gets all endpoints from the storage elements and all the spaceTokens.
    Could have taken from Shares/Disk as well.
    It queries for all their possible combinations, unless there are records
    in the database for those combinations, which then are not queried.
    """
    self.log.verbose("Getting all SEs defined in the CS")
    seNamesRes = CSHelpers.getStorageElements()
    if not seNamesRes["OK"]:
        self.log.warn(seNamesRes["Message"])
        return seNamesRes

    # Deduplicated (endpoint, spaceToken) pairs; SEs whose endpoint or token
    # cannot be resolved are logged and skipped.
    pairs = set()
    for seName in seNamesRes["Value"]:
        endpointRes = CSHelpers.getStorageElementEndpoint(seName)
        if not endpointRes["OK"]:
            self.log.warn(endpointRes["Message"])
            continue

        tokenRes = CSHelpers.getSEToken(seName)
        if not tokenRes["OK"]:
            self.log.warn(tokenRes["Message"])
            continue

        pairs.add((endpointRes["Value"], tokenRes["Value"]))

    self.log.verbose("Processing %s" % pairs)

    for pair in pairs:
        result = self.doNew(pair)
        if not result["OK"]:
            self.metrics["failed"].append(result)

    return S_OK(self.metrics)
def doMaster(self):
    """Master method.

    Gets all Sites and all StorageElements and, as there is no bulk query,
    runs `doNew` for every (window, elements, direction, metric) combination
    over a 2-hour window. Failed results are collected in
    ``self.metrics['failed']``.

    (A previous, commented-out version compared against TransferCache to
    query only a portion of the elements; that dead code has been removed.)

    :return: S_OK( self.metrics ) / S_ERROR
    """
    sites = CSHelpers.getSites()
    if not sites['OK']:
        return sites
    sites = sites['Value']

    ses = CSHelpers.getStorageElements()
    if not ses['OK']:
        return ses
    ses = ses['Value']

    elementNames = sites + ses

    gLogger.info('Processing %s' % ', '.join(elementNames))

    for metric in ['Quality', 'FailedTransfers']:
        for direction in ['Source', 'Destination']:
            # 2 hours of window
            result = self.doNew((2, elementNames, direction, metric))
            if not result['OK']:
                self.metrics['failed'].append(result)

    return S_OK(self.metrics)

################################################################################
#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF
def __removeNonExistingResourcesFromRM(self):
    """
    Remove resources from DowntimeCache table that no longer exist in the CS.
    """
    if not getServiceURL("ResourceStatus/ResourceManagement"):
        gLogger.verbose(
            'ResourceManagement is not installed, skipping removal of non existing resources...')
        return S_OK()

    seHostsRes = CSHelpers.getStorageElementsHosts()
    if not seHostsRes['OK']:
        return seHostsRes

    validResources = seHostsRes['Value']

    # FTS servers and CEs are added best-effort: a failed lookup is ignored.
    ftsRes = getFTS3Servers()
    if ftsRes['OK']:
        validResources.extend(ftsRes['Value'])

    ceRes = CSHelpers.getComputingElements()
    if ceRes['OK']:
        validResources.extend(ceRes['Value'])

    cachedDowntimes = self.rManagement.selectDowntimeCache()
    if not cachedDowntimes['OK']:
        return cachedDowntimes

    # Remove hosts that no longer exist in the CS
    for row in cachedDowntimes['Value']:
        hostName = row[0]
        gLogger.verbose('Checking if %s is still in the CS' % hostName)
        if hostName in validResources:
            continue
        gLogger.verbose('%s is no longer in CS, removing entry...' % hostName)
        removal = self.rManagement.deleteDowntimeCache(name=hostName)
        if not removal['OK']:
            return removal

    return S_OK()
def doMaster(self):
    '''
    Master method.

    Gets all endpoints from the storage elements and all the spaceTokens.
    Could have taken from Shares/Disk as well.
    It queries for all their possible combinations, unless there are records
    in the database for those combinations, which then are not queried.
    '''
    self.log.verbose("Getting all SEs defined in the CS")
    sesRes = CSHelpers.getStorageElements()
    if not sesRes['OK']:
        self.log.warn(sesRes['Message'])
        return sesRes

    # Build the set of unique (endpoint, spaceToken) combinations.
    combinations = set()
    for seName in sesRes['Value']:
        endpointRes = CSHelpers.getStorageElementEndpoint(seName)
        if not endpointRes['OK']:
            # Unresolvable endpoint: warn and move on to the next SE.
            self.log.warn(endpointRes['Message'])
            continue

        tokenRes = CSHelpers.getSEToken(seName)
        if not tokenRes['OK']:
            self.log.warn(tokenRes['Message'])
            continue

        combinations.add((endpointRes['Value'], tokenRes['Value']))

    self.log.verbose('Processing %s' % combinations)

    for combination in combinations:
        queryRes = self.doNew(combination)
        if not queryRes['OK']:
            self.metrics['failed'].append(queryRes)

    return S_OK(self.metrics)
def _prepareCommand(self):
    '''
    DowntimeCommand requires four arguments:
    - name : <str>
    - element : Site / Resource
    - elementType: <str>

    If the elements are Site(s), we need to get their GOCDB names. They may
    not have, so we ignore them if they do not have.
    '''
    if 'name' not in self.args:
        return S_ERROR('"name" not found in self.args')
    elementName = self.args['name']

    if 'element' not in self.args:
        return S_ERROR('"element" not found in self.args')
    element = self.args['element']

    if 'elementType' not in self.args:
        return S_ERROR('"elementType" not found in self.args')
    elementType = self.args['elementType']

    if element not in ('Site', 'Resource'):
        return S_ERROR('element is not Site nor Resource')

    hours = self.args.get('hours')

    gocdbServiceType = None

    # Transform DIRAC site names into GOCDB topics
    if element == 'Site':
        gocSite = getGOCSiteName(elementName)
        if not gocSite['OK']:
            return gocSite
        elementName = gocSite['Value']

    # The DIRAC se names mean nothing on the grid, but their hosts do mean.
    elif elementType == 'StorageElement':
        # We need to distinguish if it's tape or disk.
        # FIX: the options were previously fetched twice and 'Value' accessed
        # without checking 'OK' (a failed lookup would raise); fetch once and
        # propagate the error instead.
        seOptions = getStorageElementOptions(elementName)
        if not seOptions['OK']:
            return seOptions
        seOptions = seOptions['Value']

        if seOptions['TapeSE']:
            gocdbServiceType = "srm.nearline"
        elif seOptions['DiskSE']:
            gocdbServiceType = "srm"

        seHost = CSHelpers.getSEHost(elementName)
        if not seHost:
            return S_ERROR('No seHost for %s' % elementName)
        elementName = seHost

    return S_OK((element, elementName, hours, gocdbServiceType))
def _prepareCommand(self):
    '''
    DowntimeCommand requires four arguments:
    - name : <str>
    - element : Site / Resource
    - elementType: <str>

    If the elements are Site(s), we need to get their GOCDB names. They may
    not have, so we ignore them if they do not have.
    '''
    if 'name' not in self.args:
        return S_ERROR('"name" not found in self.args')
    elementName = self.args['name']

    if 'element' not in self.args:
        return S_ERROR('"element" not found in self.args')
    element = self.args['element']

    if 'elementType' not in self.args:
        return S_ERROR('"elementType" not found in self.args')
    elementType = self.args['elementType']

    if element not in ('Site', 'Resource'):
        return S_ERROR('element is not Site nor Resource')

    hours = self.args.get('hours')

    gocdbServiceType = None

    # Transform DIRAC site names into GOCDB topics
    if element == 'Site':
        gocSite = getGOCSiteName(elementName)
        if not gocSite['OK']:
            return gocSite
        elementName = gocSite['Value']

    # The DIRAC se names mean nothing on the grid, but their hosts do mean.
    elif elementType == 'StorageElement':
        # We need to distinguish if it's tape or disk.
        # BUGFIX: the mapping was inverted (tape -> "srm", disk ->
        # "srm.nearline"); in GOCDB the nearline (tape) service type is
        # "srm.nearline" and disk is "srm", matching the sibling
        # implementation of this command elsewhere in this code base.
        # Also fetch the SE options once and check 'OK' before use.
        seOptions = getStorageElementOptions(elementName)
        if not seOptions['OK']:
            return seOptions
        seOptions = seOptions['Value']

        if seOptions['TapeSE']:
            gocdbServiceType = "srm.nearline"
        elif seOptions['DiskSE']:
            gocdbServiceType = "srm"

        seHost = CSHelpers.getSEHost(elementName)
        if not seHost:
            return S_ERROR('No seHost for %s' % elementName)
        elementName = seHost

    return S_OK((element, elementName, hours, gocdbServiceType))
def doMaster(self):
    """Master method.

    Gets all Sites and all StorageElements and, as there is no bulk query,
    runs `doNew` for every (window, elements, direction, metric) combination
    over a 2-hour window. Failed results are collected in
    ``self.metrics['failed']``.

    (A previous, commented-out version compared against TransferCache to
    query only a portion of the elements; that dead code has been removed.)

    :return: S_OK( self.metrics ) / S_ERROR
    """
    sites = CSHelpers.getSites()
    if not sites['OK']:
        return sites
    sites = sites['Value']

    ses = CSHelpers.getStorageElements()
    if not ses['OK']:
        return ses
    ses = ses['Value']

    elementNames = sites + ses

    gLogger.info('Processing %s' % ', '.join(elementNames))

    for metric in ['Quality', 'FailedTransfers']:
        for direction in ['Source', 'Destination']:
            # 2 hours of window
            result = self.doNew((2, elementNames, direction, metric))
            if not result['OK']:
                self.metrics['failed'].append(result)

    return S_OK(self.metrics)

################################################################################
#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF
def __init__(self, rStatus=None, rManagement=None, defaultStatus="Unknown"):
    """
    :param rStatus: optional ResourceStatusClient instance; a fresh one is
                    created when None
    :param rManagement: optional ResourceManagementClient instance; a fresh
                        one is created when None
    :param defaultStatus: status assigned to newly synchronized elements
    """
    # Warm up local CS
    CSHelpers.warmUp()

    # BUGFIX: the passed-in clients were previously ignored -- the attributes
    # were assigned only when the parameter was None, so injecting a client
    # left self.rStatus / self.rManagement unset (AttributeError later).
    self.rStatus = rStatus if rStatus is not None else ResourceStatusClient()
    self.rManagement = rManagement if rManagement is not None else ResourceManagementClient()
    self.defaultStatus = defaultStatus

    self.rssConfig = RssConfiguration()

    # this just sets the main owner, "rs_svc" just mean "RSS service"
    self.tokenOwner = "rs_svc"

    # if we are running this script as a user (from a CLI),
    # the username found the proxy will be used as tokenOwner
    result = getProxyInfo()
    if result['OK']:
        self.tokenOwner = result['Value']['username']
def export_getDowntimes(self, element, elementType, name):
    """
    Returns the downtime cache rows (start, end, link, description, severity)
    for the given element.
    """
    # The cache is keyed by host for storage elements, so resolve the SE
    # name first.
    # NOTE(review): the return of getSEHost is used as-is -- confirm it
    # yields a plain host string here and not an S_OK/S_ERROR dict.
    if elementType == 'StorageElement':
        name = CSHelpers.getSEHost(name)

    columns = ['StartDate', 'EndDate', 'Link', 'Description', 'Severity']
    return rmClient.selectDowntimeCache(element=element, name=name,
                                        meta={'columns': columns})
def doMaster(self):
    """
    Runs the pilot command once for all sites and once for all computing
    elements; failed runs have their message collected in
    self.metrics['failed'].
    """
    siteNames = CSHelpers.getSites()
    if not siteNames['OK']:
        return siteNames

    ces = CSHelpers.getComputingElements()
    if not ces['OK']:
        return ces

    for granularity, elements in (('Site', siteNames['Value']),
                                  ('Resource', ces['Value'])):
        pilotResults = self.doNew((granularity, elements))
        if not pilotResults['OK']:
            self.metrics['failed'].append(pilotResults['Message'])

    return S_OK(self.metrics)
def export_getDowntimes(self, element, elementType, elementName):
    """
    Returns the downtime cache rows (start, end, link, description, severity)
    for the given element.

    Storage elements are resolved to their host name first, since the cache
    is keyed by host.
    """
    name = elementName
    if elementType == 'StorageElement':
        hostRes = CSHelpers.getSEProtocolOption(elementName, 'Host')
        if not hostRes['OK']:
            return S_ERROR('StorageElement %s host not found' % elementName)
        name = hostRes['Value']

    return rmClient.selectDowntimeCache(
        element=element,
        name=name,
        meta={'columns': ['StartDate', 'EndDate', 'Link', 'Description', 'Severity']})
def doMaster(self):
    """
    Runs the pilot command for all sites, then for all computing elements.
    Any failed run gets its message appended to self.metrics['failed'].
    """
    sitesRes = CSHelpers.getSites()
    if not sitesRes['OK']:
        return sitesRes

    cesRes = CSHelpers.getComputingElements()
    if not cesRes['OK']:
        return cesRes

    siteRun = self.doNew(('Site', sitesRes['Value']))
    if not siteRun['OK']:
        self.metrics['failed'].append(siteRun['Message'])

    ceRun = self.doNew(('Resource', cesRes['Value']))
    if not ceRun['OK']:
        self.metrics['failed'].append(ceRun['Message'])

    return S_OK(self.metrics)
def doCommand(self):
    """
    Returns simple jobs efficiency

    :attr:`args`:
       - args[0]: string: should be a ValidElement

       - args[1]: string should be the name of the ValidElement

    returns:
      {
        'Result': 'Good'|'Fair'|'Poor'|'Idle'|'Bad'
      }
    """
    if 'siteName' not in self.args:
        return self.returnERROR(S_ERROR('siteName is missing'))
    siteName = self.args['siteName']

    # If siteName is None, we take all sites
    if siteName is None:
        siteName = CSHelpers.getSites()
        if not siteName['OK']:
            return self.returnERROR(siteName)
        siteName = siteName['Value']

    results = self.wmsAdmin.getSiteSummaryWeb({'Site': siteName}, [], 0, 500)
    if not results['OK']:
        return self.returnERROR(results)
    results = results['Value']

    if 'ParameterNames' not in results:
        return self.returnERROR(S_ERROR('Malformed result dictionary'))
    params = results['ParameterNames']

    if 'Records' not in results:
        return self.returnERROR(S_ERROR('Malformed result dictionary'))
    records = results['Records']

    jobResults = []

    for record in records:
        jobDict = dict(zip(params, record))
        try:
            jobDict['Efficiency'] = float(jobDict['Efficiency'])
        # FIX: replaced the Python-2-only "except X, e" syntax with
        # "except ... as", valid on Python 2.6+ and Python 3; both
        # exception types were handled identically, so they are merged.
        except (KeyError, ValueError) as e:
            return self.returnERROR(S_ERROR(e))
    # NOTE(review): the visible block ends here -- jobResults is built but
    # never filled or returned, which looks like a truncated extraction.
    # Confirm against the full original file.
def _syncUsers(self):
    '''
    Sync Users: compares CS with DB and does the necessary modifications.
    '''
    gLogger.verbose('-- Synchronizing users --')

    usersCS = CSHelpers.getRegistryUsers()
    if not usersCS['OK']:
        return usersCS
    usersCS = usersCS['Value']
    gLogger.verbose('%s users found in CS' % len(usersCS))

    usersDB = self.rManagement.selectUserRegistryCache(meta={'columns': ['login']})
    if not usersDB['OK']:
        return usersDB
    dbLogins = [row[0] for row in usersDB['Value']]

    # Users that are in DB but not in CS
    staleLogins = list(set(dbLogins).difference(set(usersCS.keys())))
    gLogger.verbose('%s users to be deleted' % len(staleLogins))

    # Delete users
    # FIXME: probably it is not needed since there is a DatabaseCleanerAgent
    for login in staleLogins:
        deleteQuery = self.rManagement.deleteUserRegistryCache(login=login)
        gLogger.verbose('... %s' % login)
        if not deleteQuery['OK']:
            return deleteQuery

    # AddOrModify Users
    for login, userDict in usersCS.items():
        # The CS stores the DN and email; the displayed name is the last
        # component of the DN.
        _name = userDict['DN'].split('=')[-1]
        _email = userDict['Email']
        query = self.rManagement.addOrModifyUserRegistryCache(login, _name, _email)
        gLogger.verbose('-> %s' % login)
        if not query['OK']:
            return query

    return S_OK()

################################################################################
#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF
def export_getSitesResources(self, siteNames):
    """
    Returns dictionary with SEs and CEs for the given site(s). If siteNames is
    None, all sites are taken into account.

    :return: S_OK( { site1 : { 'ces' : [ ces ], 'ses' : [ ses ] },... } ) | S_ERROR
    """
    gLogger.info('getSitesResources')

    if siteNames is None:
        siteNames = getSites()
        if not siteNames['OK']:
            return siteNames
        siteNames = siteNames['Value']

    if isinstance(siteNames, basestring):
        siteNames = [siteNames]

    # The SE mapping does not depend on the site being processed: fetch it
    # once, outside the loop.
    seMapping = DMSHelpers().getSiteSEMapping()
    if not seMapping['OK']:
        return seMapping
    siteToSEs = seMapping['Value'][1]

    sitesRes = {}

    for siteName in siteNames:
        # BUGFIX: the CE list was previously stored in a dict that was
        # immediately overwritten by the getSiteSEMapping() result, so the
        # 'ces' key never reached the returned structure.
        siteRes = {'ces': CSHelpers.getSiteComputingElements(siteName)}

        # Convert StorageElements to host names
        ses = siteToSEs.get(siteName, [])
        sesHosts = CSHelpers.getStorageElementsHosts(ses)
        if not sesHosts['OK']:
            return sesHosts
        # Remove duplicates
        siteRes['ses'] = list(set(sesHosts['Value']))

        sitesRes[siteName] = siteRes

    return S_OK(sitesRes)
def doNew(self, masterParams=None):
    """
    Gets the parameters to run, either from the master method or from its
    own arguments.

    Gets the total and the free disk space of a storage element
    and inserts the results in the SpaceTokenOccupancyCache table
    of ResourceManagementDB database.

    The result is also returned to the caller, not only inserted.
    What is inserted in the DB will normally be in MB,
    what is returned will be in the specified unit.
    """
    if masterParams is None:
        params = self._prepareCommand()
        if not params['OK']:
            return params
        elementName, unit = params['Value']
    else:
        elementName, unit = masterParams

    endpointResult = CSHelpers.getStorageElementEndpoint(elementName)
    if not endpointResult['OK']:
        return endpointResult

    occupancyResult = StorageElement(elementName).getOccupancy(unit=unit)
    if not occupancyResult['OK']:
        return occupancyResult
    occupancy = occupancyResult['Value']

    free = occupancy['Free']
    total = occupancy['Total']

    # We only take the first endpoint, in case there are several.
    # Most probably not ideal, because it would be nice to stay
    # consistent, but well...
    results = {
        'Endpoint': endpointResult['Value'][0],
        'Free': free,
        'Total': total,
        'SpaceReservation': occupancy.get('SpaceReservation', ''),
        'ElementName': elementName,
    }

    storeResult = self._storeCommand(results)
    if not storeResult['OK']:
        return storeResult

    return S_OK({'Free': free, 'Total': total})
def doCommand(self):
    """
    Returns simple jobs efficiency

    :attr:`args`:
       - args[0]: string: should be a ValidElement

       - args[1]: string should be the name of the ValidElement

    returns:
      {
        'Result': 'Good'|'Fair'|'Poor'|'Idle'|'Bad'
      }
    """
    if 'siteName' not in self.args:
        return self.returnERROR(S_ERROR('siteName is missing'))
    siteName = self.args['siteName']

    # If siteName is None, we take all sites
    if siteName is None:
        siteName = CSHelpers.getSites()
        if not siteName['OK']:
            return self.returnERROR(siteName)
        siteName = siteName['Value']

    results = self.wmsAdmin.getSiteSummaryWeb({'Site': siteName}, [], 0, 500)
    if not results['OK']:
        return self.returnERROR(results)
    results = results['Value']

    if 'ParameterNames' not in results:
        return self.returnERROR(S_ERROR('Malformed result dictionary'))
    params = results['ParameterNames']

    if 'Records' not in results:
        return self.returnERROR(S_ERROR('Malformed result dictionary'))
    records = results['Records']

    jobResults = []

    for record in records:
        jobDict = dict(zip(params, record))
        try:
            jobDict['Efficiency'] = float(jobDict['Efficiency'])
        # FIX: replaced the Python-2-only "except X, e" syntax with
        # "except ... as", valid on Python 2.6+ and Python 3; both
        # exception types were handled identically, so they are merged.
        except (KeyError, ValueError) as e:
            return self.returnERROR(S_ERROR(e))
    # NOTE(review): the visible block ends here -- jobResults is built but
    # never filled or returned, which looks like a truncated extraction.
    # Confirm against the full original file.
def _syncSites(self):
    '''
    Sync sites: compares CS with DB and does the necessary modifications.
    '''
    gLogger.info('-- Synchronizing sites --')

    # sites in CS
    csRes = CSHelpers.getSites()
    if not csRes['OK']:
        return csRes
    sitesCS = csRes['Value']
    gLogger.verbose('%s sites found in CS' % len(sitesCS))

    # sites in RSS
    dbRes = self.rStatus.selectStatusElement('Site', 'Status', meta={'columns': ['Name']})
    if not dbRes['OK']:
        return dbRes
    sitesDB = [row[0] for row in dbRes['Value']]

    # Sites that are in DB but not (anymore) in CS
    obsolete = list(set(sitesDB).difference(set(sitesCS)))
    gLogger.verbose('%s sites to be deleted' % len(obsolete))

    # Delete sites
    for siteName in obsolete:
        deleteQuery = self.rStatus._extermineStatusElement('Site', siteName)
        gLogger.verbose('Deleting site %s' % siteName)
        if not deleteQuery['OK']:
            return deleteQuery

    # Sites that are in CS but not (anymore) in DB
    missing = list(set(sitesCS).difference(set(sitesDB)))
    gLogger.verbose('%s site entries to be added' % len(missing))

    for site in missing:
        addQuery = self.rStatus.addIfNotThereStatusElement('Site', 'Status',
                                                           name=site,
                                                           statusType='all',
                                                           status=self.defaultStatus,
                                                           elementType='Site',
                                                           tokenOwner=self.tokenOwner,
                                                           reason='Synchronized')
        if not addQuery['OK']:
            return addQuery

    return S_OK()
def _prepareCommand(self):
    '''
    SpaceTokenOccupancy requires one argument:
    - elementName : <str>

    Given a (storage)elementName, we calculate its endpoint and spaceToken,
    which are used to query the srm interface.
    '''
    if 'name' not in self.args:
        return S_ERROR('"name" not found in self.args')
    seName = self.args['name']

    endpointRes = CSHelpers.getStorageElementEndpoint(seName)
    if not endpointRes['OK']:
        return endpointRes

    tokenRes = CSHelpers.getSEToken(seName)
    if not tokenRes['OK']:
        return tokenRes

    return S_OK((endpointRes['Value'], tokenRes['Value']))