def initializeLesHouchesFileManagerHandler(serviceInfo):
  """ Initialize the service """
  ops = Operations()
  res = ops.getOptionsDict("/Models")
  if not res["OK"]:
    return res
  templates = res["Value"]
  cfgPath = serviceInfo["serviceSectionPath"]
  location = ops.getValue("%s/BasePath" % cfgPath, "")
  if not location:
    gLogger.error("Path to LesHouches files not defined")
    return S_ERROR("Path to LesHouches files not defined in CS")
  missing = False
  global ModelsDict
  for template, tfile in templates.items():
    ModelsDict[template] = {}
    ModelsDict[template]["file"] = tfile
    if not tfile:
      ModelsDict[template]["content"] = [""]
      continue
    # os.path.join takes separate path components, not a list
    file_path = os.path.join(location, tfile)
    if not os.path.exists(file_path):
      gLogger.error("Missing %s" % file_path)
      missing = True
      break
    LesHouchesFile = open(file_path, "r")
    ModelsDict[template]["content"] = LesHouchesFile.readlines()
    LesHouchesFile.close()
  if missing:
    return S_ERROR("File missing")
  return S_OK()
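# The file-loading loop above is easier to exercise in isolation; below is a minimal,
# self-contained sketch of the same logic, assuming nothing beyond the standard library.
# The model names and base path in the commented usage are hypothetical examples.
import os

def build_models_dict(templates, location):
  """Sketch of the loop in initializeLesHouchesFileManagerHandler: map each model
  name to {'file': ..., 'content': ...}. `templates` mirrors the value returned by
  Operations().getOptionsDict('/Models')."""
  models = {}
  for template, tfile in templates.items():
    models[template] = {"file": tfile}
    if not tfile:
      models[template]["content"] = [""]
      continue
    file_path = os.path.join(location, tfile)  # separate components, not a list
    with open(file_path, "r") as les_houches:
      models[template]["content"] = les_houches.readlines()
  return models

# Hypothetical usage: build_models_dict({"sm": "LesHouches_sm.in", "empty": ""}, "/opt/leshouches")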
def web_getLaunchpadSetupWithLFNs(self):
  """ Obtain a launchpad setup with the pre-selected LFNs as the InputData parameter;
      the calling JS client uses this setup to open a new Launchpad.
  """
  userData = self.getSessionData()
  group = str(userData["user"]["group"])
  vo = getVOForGroup(group)
  # On-the-fly file catalog for the advanced launchpad
  if not hasattr(self, 'fc'):
    self.fc = FileCatalog(vo=vo)

  self.set_header('Content-type', 'text/plain')
  arguments = self.request.arguments
  gLogger.always("submit: incoming arguments %s to getLaunchpadSetupWithLFNs" % arguments)
  lfnList = str(arguments['path'][0]).split(',')

  # Modified for Eiscat
  # Check whether the experiment folder of each LFN has a rtg_def.m file in some subfolder
  gLogger.always("submit: checking for rtg_def.m", arguments)
  processed = []
  metaDict = {'type': 'info'}
  for lfn in lfnList:
    # Locate the fifth '/' to extract the experiment folder from the LFN
    pos_relative = lfn.find("/")
    pos_relative = lfn.find("/", pos_relative + 1)
    pos_relative = lfn.find("/", pos_relative + 1)
    pos_relative = lfn.find("/", pos_relative + 1)
    pos_relative = lfn.find("/", pos_relative + 1)
    experiment_lfn = lfn[0:pos_relative]
    if experiment_lfn in processed:
      continue
    processed.append(experiment_lfn)
    gLogger.always("checking rtg_def.m in %s" % experiment_lfn)
    result = self.fc.findFilesByMetadata(metaDict, path=str(experiment_lfn))
    if not result['OK'] or not result['Value']:
      gLogger.error("Failed to get type info from %s, %s" % (experiment_lfn, result["Message"]))
      continue
    for candidate_lfn in result['Value']:
      if candidate_lfn.find('rtg_def.m') > 0:
        lfnList.append(candidate_lfn)
  # End modified

  ptlfn = ''
  for lfn in lfnList:
    ptlfn += (', ' + lfn) if ptlfn else lfn

  params = self.defaultParams.copy()
  params["InputData"] = [1, ptlfn]

  obj = Operations(vo=vo)
  predefinedSets = {}
  launchpadSections = obj.getSections("Launchpad")
  if launchpadSections['OK']:
    for section in launchpadSections["Value"]:
      predefinedSets[section] = {}
      sectionOptions = obj.getOptionsDict("Launchpad/" + section)
      pprint.pprint(sectionOptions)
      if sectionOptions['OK']:
        predefinedSets[section] = sectionOptions["Value"]

  self.write({"success": "true", "result": params, "predefinedSets": predefinedSets})
def web_getLaunchpadOpts(self): defaultParams = {"JobName" : [1, 'DIRAC'], "Executable" : [1, "/bin/ls"], "Arguments" : [1, "-ltrA"], "OutputSandbox" : [1, "std.out, std.err"], "InputData" : [0, ""], "OutputData" : [0, ""], "OutputSE" : [0, "DIRAC-USER"], "OutputPath": [0, ""], "CPUTime" : [0, "86400"], "Site" : [0, ""], "BannedSite" : [0, ""], "Platform" : [0, "Linux_x86_64_glibc-2.5"], "Priority" : [0, "5"], "StdError" : [0, "std.err"], "StdOutput" : [0, "std.out"], "Parameters" : [0, "0"], "ParameterStart" : [0, "0"], "ParameterStep" : [0, "1"]} delimiter = gConfig.getValue("/Website/Launchpad/ListSeparator" , ',') options = self.__getOptionsFromCS(delimiter=delimiter) # platform = self.__getPlatform() # if platform and options: # if not options.has_key("Platform"): # options[ "Platform" ] = platform # else: # csPlatform = list(options[ "Platform" ]) # allPlatforms = csPlatform + platform # platform = uniqueElements(allPlatforms) # options[ "Platform" ] = platform gLogger.debug("Combined options from CS: %s" % options) override = gConfig.getValue("/Website/Launchpad/OptionsOverride" , False) gLogger.info("end __getLaunchpadOpts") # Updating the default values from OptionsOverride configuration branch for key in options: if key not in defaultParams: defaultParams[key] = [ 0, "" ] defaultParams[key][1] = options[key][0] # Reading of the predefined sets of launchpad parameters values obj = Operations() predefinedSets = {} launchpadSections = obj.getSections("Launchpad") import pprint if launchpadSections['OK']: for section in launchpadSections["Value"]: predefinedSets[section] = {} sectionOptions = obj.getOptionsDict("Launchpad/" + section) pprint.pprint(sectionOptions) if sectionOptions['OK']: predefinedSets[section] = sectionOptions["Value"] self.write({"success":"true", "result":defaultParams, "predefinedSets":predefinedSets})
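# The predefined-sets loop above reads every section under "Launchpad" in the Operations
# part of the CS. A minimal sketch of that lookup on its own; the section and option names
# in the final comment are hypothetical, and the import path is the standard DIRAC one.
from DIRAC.ConfigurationSystem.Client.Helpers.Operations import Operations

def getLaunchpadPredefinedSets():
  """Collect {sectionName: {option: value}} for every section under Launchpad."""
  ops = Operations()
  predefinedSets = {}
  sections = ops.getSections("Launchpad")
  if not sections['OK']:
    return predefinedSets
  for section in sections['Value']:
    options = ops.getOptionsDict("Launchpad/" + section)
    predefinedSets[section] = options['Value'] if options['OK'] else {}
  return predefinedSets

# Assuming a CS section Launchpad/MCExample with Executable and Arguments options, this
# would return something like {'MCExample': {'Executable': '/bin/echo', 'Arguments': 'hello'}}.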
class RssConfiguration:
  '''
  RssConfiguration:
  {
    Config:
    {
      State          : Active | InActive,
      Cache          : 300,
      FromAddress    : '*****@*****.**'
      StatusType     :
      {
        default        : all,
        StorageElement : ReadAccess, WriteAccess, CheckAccess, RemoveAccess
      }
    }
  }
  '''

  def __init__(self):
    self.opsHelper = Operations()

  def getConfigCache(self, default=300):
    ''' Gets from <pathToRSSConfiguration>/Config the value of Cache '''
    return self.opsHelper.getValue('%s/Config/Cache' % _rssConfigPath, default)

  def getConfigFromAddress(self, default=None):
    ''' Gets from <pathToRSSConfiguration>/Config the value of FromAddress '''
    return self.opsHelper.getValue('%s/Config/FromAddress' % _rssConfigPath, default)

  def getConfigStatusType(self, elementType=None):
    ''' Gets all the status types per elementType; if not given, it takes the
        default from the CS. If that is missing too, the hardcoded DEFAULT is used.
    '''
    _DEFAULTS = ('all', )
    res = self.opsHelper.getOptionsDict('%s/Config/StatusTypes' % _rssConfigPath)
    if res['OK']:
      if elementType in res['Value']:
        return List.fromChar(res['Value'][elementType])
      if 'default' in res['Value']:
        return List.fromChar(res['Value']['default'])
    return _DEFAULTS
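# A short usage sketch of RssConfiguration; the element types below and the values in the
# comments are illustrative only, and _rssConfigPath is assumed to be defined at module
# level as in the original code.
rssConfig = RssConfiguration()

cacheSeconds = rssConfig.getConfigCache()       # 300 unless Config/Cache overrides it
notifyFrom = rssConfig.getConfigFromAddress()   # None unless Config/FromAddress is set

# With the CS layout shown in the class docstring, a StorageElement would yield
# ['ReadAccess', 'WriteAccess', 'CheckAccess', 'RemoveAccess']; anything without an
# entry falls back to 'default' and finally to ('all', ).
seStatusTypes = rssConfig.getConfigStatusType('StorageElement')
otherStatusTypes = rssConfig.getConfigStatusType('ComputingElement')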
def __setupManagerProxies(self):
  """ setup grid proxy for all defined managers """
  oHelper = Operations()
  shifters = oHelper.getSections("Shifter")
  if not shifters["OK"]:
    self.log.error(shifters["Message"])
    return shifters
  shifters = shifters["Value"]
  for shifter in shifters:
    shifterDict = oHelper.getOptionsDict("Shifter/%s" % shifter)
    if not shifterDict["OK"]:
      self.log.error(shifterDict["Message"])
      continue
    userName = shifterDict["Value"].get("User", "")
    userGroup = shifterDict["Value"].get("Group", "")
    userDN = CS.getDNForUsername(userName)
    if not userDN["OK"]:
      self.log.error(userDN["Message"])
      continue
    userDN = userDN["Value"][0]
    vomsAttr = CS.getVOMSAttributeForGroup(userGroup)
    if vomsAttr:
      self.log.debug("getting VOMS [%s] proxy for shifter %s@%s (%s)" % (vomsAttr, userName, userGroup, userDN))
      getProxy = gProxyManager.downloadVOMSProxyToFile(userDN, userGroup,
                                                       requiredTimeLeft=1200,
                                                       cacheTime=4 * 43200)
    else:
      self.log.debug("getting proxy for shifter %s@%s (%s)" % (userName, userGroup, userDN))
      getProxy = gProxyManager.downloadProxyToFile(userDN, userGroup,
                                                   requiredTimeLeft=1200,
                                                   cacheTime=4 * 43200)
    if not getProxy["OK"]:
      self.log.error(getProxy["Message"])
      return S_ERROR("unable to setup shifter proxy for %s: %s" % (shifter, getProxy["Message"]))
    chain = getProxy["chain"]
    fileName = getProxy["Value"]
    self.log.debug("got %s: %s %s" % (shifter, userName, userGroup))
    self.__managersDict[shifter] = {"ShifterDN": userDN,
                                    "ShifterName": userName,
                                    "ShifterGroup": userGroup,
                                    "Chain": chain,
                                    "ProxyFile": fileName}
  return S_OK()
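# For context, a sketch of the Operations "Shifter" layout that __setupManagerProxies
# iterates over; the shifter roles, user and group names below are hypothetical, and the
# import path is the standard DIRAC one.
from DIRAC.ConfigurationSystem.Client.Helpers.Operations import Operations

def listShifterDefinitions():
  """Print the User/Group pair for every role defined under Operations/Shifter,
  mirroring the lookups done by __setupManagerProxies()."""
  ops = Operations()
  sections = ops.getSections("Shifter")
  if not sections["OK"]:
    return sections
  for shifter in sections["Value"]:
    options = ops.getOptionsDict("Shifter/%s" % shifter)
    if options["OK"]:
      print(shifter, options["Value"].get("User", ""), options["Value"].get("Group", ""))
  return sections

# Hypothetical CS content this would walk over:
#   Shifter/DataManager       -> User = datamgr, Group = dirac_data
#   Shifter/ProductionManager -> User = prodmgr, Group = dirac_prod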
def _getCatalogs( self ): # Get the eligible catalogs first # First, look in the Operations, if nothing defined look in /Resources for backward compatibility result = getVOfromProxyGroup() if not result['OK']: return result vo = result['Value'] opHelper = Operations( vo = vo ) result = opHelper.getSections( '/Services/FileCatalogs' ) fileCatalogs = [] operationsFlag = False if result['OK']: fileCatalogs = result['Value'] operationsFlag = True else: res = gConfig.getSections( self.rootConfigPath, listOrdered = True ) if not res['OK']: errStr = "FileCatalog._getCatalogs: Failed to get file catalog configuration." gLogger.error( errStr, res['Message'] ) return S_ERROR( errStr ) fileCatalogs = res['Value'] # Get the catalogs now for catalogName in fileCatalogs: res = self._getCatalogConfigDetails( catalogName ) if not res['OK']: return res catalogConfig = res['Value'] if operationsFlag: result = opHelper.getOptionsDict( '/Services/FileCatalogs/%s' % catalogName ) if not result['OK']: return result catalogConfig.update( result['Value'] ) if catalogConfig['Status'] == 'Active': res = self._generateCatalogObject( catalogName ) if not res['OK']: return res oCatalog = res['Value'] master = catalogConfig['Master'] # If the catalog is read type if re.search( 'Read', catalogConfig['AccessType'] ): if master: self.readCatalogs.insert( 0, ( catalogName, oCatalog, master ) ) else: self.readCatalogs.append( ( catalogName, oCatalog, master ) ) # If the catalog is write type if re.search( 'Write', catalogConfig['AccessType'] ): if master: self.writeCatalogs.insert( 0, ( catalogName, oCatalog, master ) ) else: self.writeCatalogs.append( ( catalogName, oCatalog, master ) ) return S_OK()
def getSiteSEMapping(gridName=''):
  """ Returns a dictionary of all sites and their localSEs as a list, e.g.
      {'LCG.CERN.ch':['CERN-RAW','CERN-RDST',...]}
      If gridName is specified, the result is restricted to that Grid type.
  """
  siteSEMapping = {}
  gridTypes = gConfig.getSections('Resources/Sites/')
  if not gridTypes['OK']:
    gLogger.warn('Problem retrieving sections in /Resources/Sites')
    return gridTypes
  gridTypes = gridTypes['Value']

  if gridName:
    if gridName not in gridTypes:
      return S_ERROR('Could not get sections for /Resources/Sites/%s' % gridName)
    gridTypes = [gridName]

  gLogger.debug('Grid Types are: %s' % (', '.join(gridTypes)))
  for grid in gridTypes:
    sites = gConfig.getSections('/Resources/Sites/%s' % grid)
    if not sites['OK']:
      gLogger.warn('Problem retrieving /Resources/Sites/%s section' % grid)
      return sites
    for candidate in sites['Value']:
      candidateSEs = gConfig.getValue('/Resources/Sites/%s/%s/SE' % (grid, candidate), [])
      if candidateSEs:
        siteSEMapping[candidate] = candidateSEs
      else:
        gLogger.debug('No SEs defined for site %s' % candidate)

  # Add Sites from the SiteLocalSEMapping in the CS
  cfgLocalSEPath = cfgPath('SiteLocalSEMapping')
  opsHelper = Operations()
  result = opsHelper.getOptionsDict(cfgLocalSEPath)
  if result['OK']:
    mapping = result['Value']
    for site in mapping:
      ses = opsHelper.getValue(cfgPath(cfgLocalSEPath, site), [])
      if not ses:
        continue
      if gridName:
        if gridName != site.split('.')[0]:
          continue
      if site not in siteSEMapping:
        siteSEMapping[site] = []
      for se in ses:
        if se not in siteSEMapping[site]:
          siteSEMapping[site].append(se)

  return S_OK(siteSEMapping)
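# A brief usage sketch of getSiteSEMapping(); it assumes the function above is in scope,
# and the site/SE names in the comment come from the docstring example rather than a real CS.
res = getSiteSEMapping(gridName='LCG')
if res['OK']:
  siteSEs = res['Value']
  # e.g. siteSEs.get('LCG.CERN.ch') -> ['CERN-RAW', 'CERN-RDST'] (illustrative values)
  for site, ses in siteSEs.items():
    gLogger.info('%s -> %s' % (site, ', '.join(ses)))
else:
  gLogger.error('Could not build the site/SE mapping: %s' % res['Message'])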
def getSEsForCountry(country):
  """ Determines the associated SEs from the country code """
  mappedCountries = [country]
  opsHelper = Operations()
  while True:
    mappedCountry = opsHelper.getValue('/Countries/%s/AssignedTo' % country, country)
    if mappedCountry == country:
      break
    elif mappedCountry in mappedCountries:
      return S_ERROR('Circular mapping detected for %s' % country)
    else:
      country = mappedCountry
      mappedCountries.append(mappedCountry)
  res = opsHelper.getOptionsDict('/Countries/%s/AssociatedSEs' % country)
  if not res['OK']:
    return S_ERROR('Failed to obtain AssociatedSEs for %s' % country)
  return S_OK(res['Value'].values())
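# A usage sketch of getSEsForCountry(); the country codes and SE name below are made up,
# but they follow the /Countries layout the function reads (an optional AssignedTo alias
# plus an AssociatedSEs option dictionary).
#   /Countries/uk/AssignedTo          = gb
#   /Countries/gb/AssociatedSEs/Tier1 = RAL-SE
res = getSEsForCountry('uk')
if res['OK']:
  ses = res['Value']   # e.g. ['RAL-SE'] under the assumptions above
else:
  gLogger.error(res['Message'])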
class FileCatalog: ro_methods = [ "exists", "isLink", "readLink", "isFile", "getFileMetadata", "getReplicas", "getReplicaStatus", "getFileSize", "isDirectory", "getDirectoryReplicas", "listDirectory", "getDirectoryMetadata", "getDirectorySize", "getDirectoryContents", "resolveDataset", "getPathPermissions", "getLFNForPFN", "getUsers", "getGroups", "getFileUserMetadata", ] write_methods = [ "createLink", "removeLink", "addFile", "setFileStatus", "addReplica", "removeReplica", "removeFile", "setReplicaStatus", "setReplicaHost", "createDirectory", "setDirectoryStatus", "removeDirectory", "removeDataset", "removeFileFromDataset", "createDataset", ] def __init__(self, catalogs=[], vo=None): """ Default constructor """ self.valid = True self.timeout = 180 self.readCatalogs = [] self.writeCatalogs = [] self.rootConfigPath = "/Resources/FileCatalogs" self.vo = vo if not vo: result = getVOfromProxyGroup() if not result["OK"]: return result self.vo = result["Value"] self.opHelper = Operations(vo=self.vo) if type(catalogs) in types.StringTypes: catalogs = [catalogs] if catalogs: res = self._getSelectedCatalogs(catalogs) else: res = self._getCatalogs() if not res["OK"]: self.valid = False elif (len(self.readCatalogs) == 0) and (len(self.writeCatalogs) == 0): self.valid = False def isOK(self): return self.valid def getReadCatalogs(self): return self.readCatalogs def getWriteCatalogs(self): return self.writeCatalogs def __getattr__(self, name): self.call = name if name in FileCatalog.write_methods: return self.w_execute elif name in FileCatalog.ro_methods: return self.r_execute else: raise AttributeError def __checkArgumentFormat(self, path): if type(path) in types.StringTypes: urls = {path: False} elif type(path) == types.ListType: urls = {} for url in path: urls[url] = False elif type(path) == types.DictType: urls = path else: return S_ERROR("FileCatalog.__checkArgumentFormat: Supplied path is not of the correct format.") return S_OK(urls) def w_execute(self, *parms, **kws): """ Write method executor. """ successful = {} failed = {} failedCatalogs = [] fileInfo = parms[0] res = self.__checkArgumentFormat(fileInfo) if not res["OK"]: return res fileInfo = res["Value"] allLfns = fileInfo.keys() for catalogName, oCatalog, master in self.writeCatalogs: method = getattr(oCatalog, self.call) res = method(fileInfo, **kws) if not res["OK"]: if master: # If this is the master catalog and it fails we dont want to continue with the other catalogs gLogger.error( "FileCatalog.w_execute: Failed to execute %s on master catalog %s." % (self.call, catalogName), res["Message"], ) return res else: # Otherwise we keep the failed catalogs so we can update their state later failedCatalogs.append((catalogName, res["Message"])) else: for lfn, message in res["Value"]["Failed"].items(): # Save the error message for the failed operations if not failed.has_key(lfn): failed[lfn] = {} failed[lfn][catalogName] = message if master: # If this is the master catalog then we should not attempt the operation on other catalogs fileInfo.pop(lfn) for lfn, result in res["Value"]["Successful"].items(): # Save the result return for each file for the successful operations if not successful.has_key(lfn): successful[lfn] = {} successful[lfn][catalogName] = result # This recovers the states of the files that completely failed i.e. 
when S_ERROR is returned by a catalog for catalogName, errorMessage in failedCatalogs: for lfn in allLfns: if not failed.has_key(lfn): failed[lfn] = {} failed[lfn][catalogName] = errorMessage resDict = {"Failed": failed, "Successful": successful} return S_OK(resDict) def r_execute(self, *parms, **kws): """ Read method executor. """ successful = {} failed = {} for _catalogName, oCatalog, _master in self.readCatalogs: method = getattr(oCatalog, self.call) res = method(*parms, **kws) if res["OK"]: if "Successful" in res["Value"]: for key, item in res["Value"]["Successful"].items(): if not successful.has_key(key): successful[key] = item if failed.has_key(key): failed.pop(key) for key, item in res["Value"]["Failed"].items(): if not successful.has_key(key): failed[key] = item if len(failed) == 0: resDict = {"Failed": failed, "Successful": successful} return S_OK(resDict) else: return res if (len(successful) == 0) and (len(failed) == 0): return S_ERROR("Failed to perform %s from any catalog" % self.call) resDict = {"Failed": failed, "Successful": successful} return S_OK(resDict) ########################################################################################### # # Below is the method for obtaining the objects instantiated for a provided catalogue configuration # def addCatalog(self, catalogName, mode="Write", master=False): """ Add a new catalog with catalogName to the pool of catalogs in mode: "Read","Write" or "ReadWrite" """ result = self._generateCatalogObject(catalogName) if not result["OK"]: return result oCatalog = result["Value"] if mode.lower().find("read") != -1: self.readCatalogs.append((catalogName, oCatalog, master)) if mode.lower().find("write") != -1: self.writeCatalogs.append((catalogName, oCatalog, master)) return S_OK() def removeCatalog(self, catalogName): """ Remove the specified catalog from the internal pool """ catalog_removed = False for i in range(len(self.readCatalogs)): catalog, _object, _master = self.readCatalogs[i] if catalog == catalogName: del self.readCatalogs[i] catalog_removed = True break for i in range(len(self.writeCatalogs)): catalog, _object, _master = self.writeCatalogs[i] if catalog == catalogName: del self.writeCatalogs[i] catalog_removed = True break if catalog_removed: return S_OK() else: return S_OK("Catalog does not exist") def _getSelectedCatalogs(self, desiredCatalogs): for catalogName in desiredCatalogs: res = self._generateCatalogObject(catalogName) if not res["OK"]: return res oCatalog = res["Value"] self.readCatalogs.append((catalogName, oCatalog, True)) self.writeCatalogs.append((catalogName, oCatalog, True)) return S_OK() def _getCatalogs(self): # Get the eligible catalogs first # First, look in the Operations, if nothing defined look in /Resources for backward compatibility result = self.opHelper.getSections("/Services/Catalogs") fileCatalogs = [] operationsFlag = False if result["OK"]: fileCatalogs = result["Value"] operationsFlag = True else: res = gConfig.getSections(self.rootConfigPath, listOrdered=True) if not res["OK"]: errStr = "FileCatalog._getCatalogs: Failed to get file catalog configuration." 
gLogger.error(errStr, res["Message"]) return S_ERROR(errStr) fileCatalogs = res["Value"] # Get the catalogs now for catalogName in fileCatalogs: res = self._getCatalogConfigDetails(catalogName) if not res["OK"]: return res catalogConfig = res["Value"] if operationsFlag: result = self.opHelper.getOptionsDict("/Services/Catalogs/%s" % catalogName) if not result["OK"]: return result catalogConfig.update(result["Value"]) if catalogConfig["Status"] == "Active": res = self._generateCatalogObject(catalogName) if not res["OK"]: return res oCatalog = res["Value"] master = catalogConfig["Master"] # If the catalog is read type if re.search("Read", catalogConfig["AccessType"]): if master: self.readCatalogs.insert(0, (catalogName, oCatalog, master)) else: self.readCatalogs.append((catalogName, oCatalog, master)) # If the catalog is write type if re.search("Write", catalogConfig["AccessType"]): if master: self.writeCatalogs.insert(0, (catalogName, oCatalog, master)) else: self.writeCatalogs.append((catalogName, oCatalog, master)) return S_OK() def _getCatalogConfigDetails(self, catalogName): # First obtain the options that are available catalogConfigPath = "%s/%s" % (self.rootConfigPath, catalogName) res = gConfig.getOptions(catalogConfigPath) if not res["OK"]: errStr = "FileCatalog._getCatalogConfigDetails: Failed to get catalog options." gLogger.error(errStr, catalogName) return S_ERROR(errStr) catalogConfig = {} for option in res["Value"]: configPath = "%s/%s" % (catalogConfigPath, option) optionValue = gConfig.getValue(configPath) catalogConfig[option] = optionValue # The 'Status' option should be defined (default = 'Active') if not catalogConfig.has_key("Status"): warnStr = "FileCatalog._getCatalogConfigDetails: 'Status' option not defined." gLogger.warn(warnStr, catalogName) catalogConfig["Status"] = "Active" # The 'AccessType' option must be defined if not catalogConfig.has_key("AccessType"): errStr = "FileCatalog._getCatalogConfigDetails: Required option 'AccessType' not defined." gLogger.error(errStr, catalogName) return S_ERROR(errStr) # Anything other than 'True' in the 'Master' option means it is not if not catalogConfig.has_key("Master"): catalogConfig["Master"] = False elif catalogConfig["Master"] == "True": catalogConfig["Master"] = True else: catalogConfig["Master"] = False return S_OK(catalogConfig) def _generateCatalogObject(self, catalogName): """ Create a file catalog object from its name and CS description """ useProxy = gConfig.getValue("/LocalSite/Catalogs/%s/UseProxy" % catalogName, False) if not useProxy: useProxy = self.opHelper.getValue("/Services/Catalogs/%s/UseProxy" % catalogName, False) return FileCatalogFactory().createCatalog(catalogName, useProxy)
class Whizard2( LCApplication ): """ Whizard2 Application Class """ def __init__(self, paramdict = None): self.randomSeed = -1 self.eventType = '' self.whizard2SinFile = '' super(Whizard2, self).__init__( paramdict ) ##Those 5 need to come after default constructor self._modulename = 'Whizard2Analysis' self._moduledescription = 'Module to run Whizard2' self.appname = 'whizard2' self.datatype = 'GEN' self._paramsToExclude.extend( [ "outputDstPath", "outputRecPath", "OutputDstFile", "OutputRecFile" ] ) self._ops = Operations() self._decayProc = ['decay_proc'] self._integratedProcess = '' def setRandomSeed(self, randomSeed): """ Optional: Define random seed to use. Default is the jobID. :param int randomSeed: Seed to use during generation. """ self._checkArgs( { 'randomSeed' : types.IntType } ) self.randomSeed = randomSeed def setEvtType(self, evttype): """ Define process. If the process given is not found, when calling :func:`UserJob.append() <ILCDIRAC.Interfaces.API.NewInterface.UserJob.UserJob.append>` a full list is printed. :param str evttype: Process to generate """ self._checkArgs( { 'evttype' : types.StringTypes } ) if self.addedtojob: return self._reportError("Cannot modify this attribute once application has been added to Job") self.eventType = evttype return S_OK() def setProcessVariables(self, processes): """ Set the list of processes to simulate The sindarin file will be modified later on to call **simulate (proc_a, proc_b)**. The process variables have to be defined in the sindarin file:: process proc_a ... process proc_b ... .. versionadded:: v28r0p6 :param processes: which processes to call simulate on, by default 'decay_proc' :type processes: list, str """ if isinstance(processes, basestring): self._decayProc = [proc.strip() for proc in processes.split(',')] return S_OK() elif isinstance(processes, (set, list, tuple)): self._decayProc = [proc.strip() for proc in processes] return S_OK() return self._reportError("Cannot handle this argument type") def setSinFile(self, whizard2SinFilePath): """ Set the Whizard2 options to be used Usage: - Give path to the Whizard2 steering file. - IMPORTANT: set **seed** via iLCDirac API -> `Whizard2.setRandomSeed` - IMPORTANT: set **n_events** via iLCDirac API -> `Whizard2.setNumberOfEvents` - IMPORTANT: set **OutputFile** via iLCDirac API -> `Whizard2.setOutputFile` - The variables in which processes are defined which should be simulated can be set via `Whizard2.setProcessVariables` :param str whizard2SinFilePath: Path to the whizard2 sin file. """ self._checkArgs( { 'whizard2SinFilePath' : types.StringType } ) # Chech if file exist if not os.path.isfile(whizard2SinFilePath): return self._reportError('Whizard2 Sin file does not exist!') # Read file self.whizard2SinFile = open(whizard2SinFilePath).read() if "n_events" in self.whizard2SinFile: return self._reportError('Do not set n_events in the sin file, set it via the iLCDirac API') if "seed" in self.whizard2SinFile: return self._reportError('Do not set seed in the sin file, set it via the iLCDirac API') if "simulate(" in self.whizard2SinFile.replace(" ", ""): return self._reportError('Do not call "simulate ()" in the sin file, this is done by iLCDirac') return None def setIntegratedProcess(self, integrationTarball): """Make whizard2 use an already integrated process. .. 
warning :: It is the responsibility of the user to ensure that the sindarin file is compatible with the integrated process The integrationTarball has to be a tarball (zip, tar.gz, tgz), either an LFN, or a process defined in the configuration system. Use `getKnownProcesses` to see the list of defined processes >>> whizard2.setIntegratedProcess('bbcbbc_3tev_negPol') # processes defined in the configuration >>> whizard2.setIntegratedProcess('LFN:/ilc/user/u/username/bbcbbc_3tev_negPol.tar.gz') # tarball on the grid :param str integrationTarball: integrated process to be used for event generation """ self._checkArgs({'integrationTarball': types.StringTypes}) # file on the grid if integrationTarball.lower().startswith('lfn:'): LOG.info('Integrated process file is an LFN, adding it to the sandbox') self.inputSB.append(integrationTarball) # as the tarball is automatically extracted during the workflow, we do not have to do anything self._integratedProcess = '' return S_OK() knownProcesses = self.getKnownProcesses() if not knownProcesses['OK']: return self._reportError('Failed to get known integrated processes: %s' % knownProcesses['Message']) elif integrationTarball in knownProcesses['Value']: self._integratedProcess = integrationTarball else: return self._reportError('Unknown integrated process in %s: %s (available are: %s)' % (self.appname, integrationTarball, ', '.join(knownProcesses['Value'].keys()))) return S_OK() def getKnownProcesses(self, version=None): """Return a list of known integrated processes. :param str version: Optional: Software version for which to print the integrated processes. If not given the version of the application instance is used. :returns: S_OK with list of integrated processes known for this software version, S_ERROR """ if version is None and not self.version: return S_ERROR('No software version defined') version = self.version if version is None else version processes = self._ops.getOptionsDict('/AvailableTarBalls/%s/whizard2/%s/integrated_processes/processes' % ('x86_64-slc5-gcc43-opt', self.version)) return processes def _userjobmodules(self, stepdefinition): res1 = self._setApplicationModuleAndParameters(stepdefinition) res2 = self._setUserJobFinalization(stepdefinition) if not res1["OK"] or not res2["OK"] : return S_ERROR('userjobmodules failed') return S_OK() def _prodjobmodules(self, stepdefinition): res1 = self._setApplicationModuleAndParameters(stepdefinition) res2 = self._setOutputComputeDataList(stepdefinition) if not res1["OK"] or not res2["OK"] : return S_ERROR('prodjobmodules failed') return S_OK() def _checkConsistency(self, job=None): """ FIXME Check consistency of the Whizard2 application, this is called from the `Job` instance :param job: The instance of the job :type job: ~ILCDIRAC.Interfaces.API.NewInterface.Job.Job :returns: S_OK/S_ERROR """ if not self.version: return S_ERROR('No version found!') if not self.whizard2SinFile: return S_ERROR('No sin file set!') if not self.numberOfEvents : return S_ERROR('Number of events not set!') for process in self._decayProc: if process not in self.whizard2SinFile: return S_ERROR('Process "%s" not found in sindarin file, please check your inputs' % process) if self._jobtype != 'User': self._listofoutput.append({"outputFile":"@{OutputFile}", "outputPath":"@{OutputPath}", "outputDataSE":'@{OutputSE}'}) if self.eventType != '': self.prodparameters['Process'] = self.eventType else: return S_ERROR('evttype not set, please set event type!') self.prodparameters['nbevts'] = self.numberOfEvents parsedString = 
self.whizard2SinFile.replace(" ", "").split() sqrtMatches = [ x for x in parsedString if x.startswith('sqrts=') and x.endswith('GeV') ] if not sqrtMatches: return S_ERROR('No energy set in sin file, please set "sqrts=...GeV"') elif len(sqrtMatches) != 1: return S_ERROR('Multiple instances of "sqrts=..GeV" detected, only one can be processed') if not self.energy: self.prodparameters['Energy'] = sqrtMatches[0].replace("sqrts=", "").replace("GeV", "") self.energy = float(self.prodparameters['Energy']) else: self.whizard2SinFile = re.sub(r"sqrts *= *[0-9.]* *GeV", "sqrts = %s GeV" % self.energy, self.whizard2SinFile) self.prodparameters['Energy'] = str(self.energy) self.prodparameters['SinFile'] = self.whizard2SinFile modelMatches = [ x for x in parsedString if x.startswith('model=') ] if not modelMatches: return S_ERROR('No model set in sin file, please set "model=..."') elif len(modelMatches) != 1: return S_ERROR('Multiple instances of "model=..." detected, only one can be processed') self.prodparameters['Model'] = modelMatches[0].replace("model=", "") return S_OK() def _applicationModule(self): md1 = self._createModuleDefinition() md1.addParameter(Parameter("randomSeed", 0, "int", "", "", False, False, "Random seed for the generator")) md1.addParameter(Parameter("debug", False, "bool", "", "", False, False, "debug mode")) md1.addParameter(Parameter("whizard2SinFile", '', "string", "", "", False, False, "Whizard2 steering options")) md1.addParameter(Parameter("decayProc", [], "list", "", "", False, False, "processses to simulate")) md1.addParameter(Parameter('integratedProcess', '', 'string', '', '', False, False, 'Integrated Process to use')) return md1 def _applicationModuleValues(self, moduleinstance): moduleinstance.setValue("randomSeed", self.randomSeed) moduleinstance.setValue("debug", self.debug) moduleinstance.setValue("whizard2SinFile", self.whizard2SinFile) moduleinstance.setValue("decayProc", self._decayProc) moduleinstance.setValue('integratedProcess', self._integratedProcess) def _checkWorkflowConsistency(self): return self._checkRequiredApp()
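# A hedged usage sketch of the Whizard2 application defined above. The import path is
# assumed to be ILCDIRAC.Interfaces.API.NewInterface.Applications, setVersion /
# setNumberOfEvents / setOutputFile are assumed to come from the base application class,
# and the version, file name and process names are illustrative only.
from ILCDIRAC.Interfaces.API.NewInterface.Applications import Whizard2

whiz = Whizard2()
whiz.setVersion('2.8.2')                 # hypothetical software version
whiz.setRandomSeed(12345)
whiz.setNumberOfEvents(1000)
whiz.setEvtType('bbcbbc')                # illustrative process name
whiz.setSinFile('my_process.sin')        # local sindarin file; do not set seed/n_events/simulate() in it
whiz.setProcessVariables('decay_proc')   # process variables passed to "simulate (...)"
whiz.setOutputFile('events.slcio')

# An already integrated process can be reused, either from the CS or as an LFN tarball:
# whiz.setIntegratedProcess('bbcbbc_3tev_negPol')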
class InputDataResolution: """ ILC specific input data resolution, imported from DIRAC """ ############################################################################# def __init__(self, argumentsDict): """ Standard constructor """ self.arguments = argumentsDict self.name = COMPONENT_NAME self.log = gLogger.getSubLogger(self.name) self.ops = Operations() ############################################################################# def execute(self): """Given the arguments from the Job Wrapper, this function calls existing utilities in DIRAC to resolve input data according to LHCb VO policy. """ result = self.__resolveInputData() if not result['OK']: self.log.error( 'InputData resolution failed with result:\n%s' % (result)) #For local running of this module we can expose an option to ignore missing files ignoreMissing = False if self.arguments.has_key('IgnoreMissing'): ignoreMissing = self.arguments['IgnoreMissing'] #For LHCb original policy was as long as one TURL exists, this can be conveyed to the application #this breaks due to the stripping so the policy has been changed. if result.has_key('Failed'): failedReplicas = result['Failed'] if failedReplicas and not ignoreMissing: self.log.error( 'Failed to obtain access to the following files:\n%s' % (string.join(failedReplicas, '\n'))) return S_ERROR('Failed to access all of requested input data') if not result.has_key('Successful'): return result if not result['Successful']: return S_ERROR('Could not access any requested input data') return result ############################################################################# def __resolveInputData(self): """This method controls the execution of the DIRAC input data modules according to the ILC VO policy defined in the configuration service. """ if self.arguments['Configuration'].has_key('SiteName'): site = self.arguments['Configuration']['SiteName'] else: site = DIRAC.siteName() policy = [] if not self.arguments.has_key('Job'): self.arguments['Job'] = {} if self.arguments['Job'].has_key('InputDataPolicy'): policy = self.arguments['Job']['InputDataPolicy'] #In principle this can be a list of modules with the first taking precedence if type(policy) in types.StringTypes: policy = [policy] self.log.info('Job has a specific policy setting: %s' % (string.join(policy, ', '))) else: self.log.verbose( 'Attempting to resolve input data policy for site %s' % site) inputDataPolicy = self.ops.getOptionsDict('/InputDataPolicy') if not inputDataPolicy: return S_ERROR( 'Could not resolve InputDataPolicy from /InputDataPolicy') options = inputDataPolicy['Value'] if options.has_key(site): policy = options[site] policy = [x.strip() for x in string.split(policy, ',')] self.log.info( 'Found specific input data policy for site %s:\n%s' % (site, string.join(policy, ',\n'))) elif options.has_key('Default'): policy = options['Default'] policy = [x.strip() for x in string.split(policy, ',')] self.log.info( 'Applying default input data policy for site %s:\n%s' % (site, string.join(policy, ',\n'))) dataToResolve = None #if none, all supplied input data is resolved allDataResolved = False successful = {} failedReplicas = [] for modulePath in policy: if not allDataResolved: result = self.__runModule(modulePath, dataToResolve) if not result['OK']: self.log.warn('Problem during %s execution' % modulePath) return result if result.has_key('Failed'): failedReplicas = result['Failed'] if failedReplicas: self.log.info( '%s failed for the following files:\n%s' % (modulePath, string.join(failedReplicas, '\n'))) dataToResolve = 
failedReplicas else: self.log.info('All replicas resolved after %s execution' % (modulePath)) allDataResolved = True successful.update(result['Successful']) self.log.verbose(successful) result = S_OK() result['Successful'] = successful result['Failed'] = failedReplicas return result ############################################################################# def __runModule(self, modulePath, remainingReplicas): """This method provides a way to run the modules specified by the VO that govern the input data access policy for the current site. For LHCb the standard WMS modules are applied in a different order depending on the site. """ self.log.info('Attempting to run %s' % (modulePath)) moduleFactory = ModuleFactory() moduleInstance = moduleFactory.getModule(modulePath, self.arguments) if not moduleInstance['OK']: return moduleInstance module = moduleInstance['Value'] result = module.execute(remainingReplicas) return result
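# A sketch of the argumentsDict shape that InputDataResolution expects, inferred from the
# keys read above; the site name and policy module are placeholders, not values from the
# source, and in production the Job Wrapper builds this dictionary.
argumentsDict = {
  'Configuration': {'SiteName': 'LCG.Example.org'},
  'Job': {'InputDataPolicy': 'DIRAC.WorkloadManagementSystem.Client.DownloadInputData'},
  'IgnoreMissing': False,
}

resolver = InputDataResolution(argumentsDict)
result = resolver.execute()
if result['OK']:
  resolved = result['Successful']   # per-LFN access information from the policy modules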
class FileCatalog( object ): def __init__( self, catalogs = None, vo = None ): """ Default constructor """ self.valid = True self.timeout = 180 self.ro_methods = set() self.write_methods = set() self.no_lfn_methods = set() self.readCatalogs = [] self.writeCatalogs = [] self.rootConfigPath = '/Resources/FileCatalogs' self.vo = vo if vo else getVOfromProxyGroup().get( 'Value', None ) self.log = gLogger.getSubLogger( "FileCatalog" ) self.opHelper = Operations( vo = self.vo ) catalogList = [] if isinstance( catalogs, basestring ): catalogList = [catalogs] elif isinstance( catalogs, ( list, tuple ) ): catalogList = list( catalogs ) if catalogList: result = self._getEligibleCatalogs() if not result['OK']: self.log.error( "Failed to get eligible catalog" ) return eligibleFileCatalogs = result['Value'] catalogCheck = True for catalog in catalogList: if catalog not in eligibleFileCatalogs: self.log.error( "Specified catalog is not eligible", catalog ) catalogCheck = False if catalogCheck: result = self._getSelectedCatalogs( catalogList ) else: result = S_ERROR( "Specified catalog is not eligible" ) else: result = self._getCatalogs() if not result['OK']: self.log.error( "Failed to create catalog objects" ) self.valid = False elif ( len( self.readCatalogs ) == 0 ) and ( len( self.writeCatalogs ) == 0 ): self.log.error( "No catalog object created" ) self.valid = False result = self.getMasterCatalogNames() masterCatalogs = result['Value'] # There can not be more than one master catalog haveMaster = False if len( masterCatalogs ) > 1: self.log.error( "More than one master catalog created" ) self.valid = False elif len( masterCatalogs ) == 1: haveMaster = True # Get the list of write methods if haveMaster: # All the write methods must be present in the master _catalogName, oCatalog, _master = self.writeCatalogs[0] _roList, writeList, nolfnList = oCatalog.getInterfaceMethods() self.write_methods.update( writeList ) self.no_lfn_methods.update( nolfnList ) else: for _catalogName, oCatalog, _master in self.writeCatalogs: _roList, writeList, nolfnList = oCatalog.getInterfaceMethods() self.write_methods.update( writeList ) self.no_lfn_methods.update( nolfnList ) # Get the list of read methods for _catalogName, oCatalog, _master in self.readCatalogs: roList, _writeList, nolfnList = oCatalog.getInterfaceMethods() self.ro_methods.update( roList ) self.no_lfn_methods.update( nolfnList ) self.condParser = FCConditionParser( vo = self.vo, ro_methods = self.ro_methods ) def isOK( self ): return self.valid def getReadCatalogs( self ): return self.readCatalogs def getWriteCatalogs( self ): return self.writeCatalogs def getMasterCatalogNames( self ): """ Returns the list of names of the Master catalogs """ masterNames = [catalogName for catalogName, oCatalog, master in self.writeCatalogs if master] return S_OK( masterNames ) def __getattr__( self, name ): self.call = name if name in self.write_methods: return self.w_execute elif name in self.ro_methods: return self.r_execute else: raise AttributeError def w_execute( self, *parms, **kws ): """ Write method executor. If one of the LFNs given as input does not pass a condition defined for the master catalog, we return S_ERROR without trying anything else :param fcConditions: either a dict or a string, to be propagated to the FCConditionParser If it is a string, it is given for all catalogs If it is a dict, it has to be { catalogName: condition}, and only the specific condition for the catalog will be given CAUTION !!! 
If the method is a write no_lfn method, then the return value are completely different We only return the result of the master catalog """ successful = {} failed = {} failedCatalogs = {} successfulCatalogs = {} specialConditions = kws.pop( 'fcConditions' ) if 'fcConditions' in kws else None allLfns = [] lfnMapDict = {} masterResult = {} parms1 = [] if self.call not in self.no_lfn_methods: fileInfo = parms[0] result = checkArgumentFormat( fileInfo, generateMap = True ) if not result['OK']: return result fileInfo, lfnMapDict = result['Value'] # No need to check the LFNs again in the clients kws['LFNChecking'] = False allLfns = fileInfo.keys() parms1 = parms[1:] for catalogName, oCatalog, master in self.writeCatalogs: # Skip if the method is not implemented in this catalog # NOTE: it is impossible for the master since the write method list is populated # only from the master catalog, and if the method is not there, __getattr__ # would raise an exception if not oCatalog.hasCatalogMethod( self.call ): continue method = getattr( oCatalog, self.call ) if self.call in self.no_lfn_methods: result = method( *parms, **kws ) else: if isinstance( specialConditions, dict ): condition = specialConditions.get( catalogName ) else: condition = specialConditions # Check whether this catalog should be used for this method res = self.condParser( catalogName, self.call, fileInfo, condition = condition ) # condParser never returns S_ERROR condEvals = res['Value']['Successful'] # For a master catalog, ALL the lfns should be valid if master: if any([not valid for valid in condEvals.values()]): gLogger.error( "The master catalog is not valid for some LFNS", condEvals ) return S_ERROR( "The master catalog is not valid for some LFNS %s" % condEvals ) validLFNs = dict( ( lfn, fileInfo[lfn] ) for lfn in condEvals if condEvals[lfn] ) invalidLFNs = [lfn for lfn in condEvals if not condEvals[lfn]] if invalidLFNs: gLogger.debug( "Some LFNs are not valid for operation '%s' on catalog '%s' : %s" % ( self.call, catalogName, invalidLFNs ) ) result = method( validLFNs, *parms1, **kws ) if master: masterResult = result if not result['OK']: if master: # If this is the master catalog and it fails we don't want to continue with the other catalogs self.log.error( "Failed to execute call on master catalog", "%s on %s: %s" % ( self.call, catalogName, result['Message'] ) ) return result else: # Otherwise we keep the failed catalogs so we can update their state later failedCatalogs[catalogName] = result['Message'] else: successfulCatalogs[catalogName] = result['Value'] if allLfns: if result['OK']: for lfn, message in result['Value']['Failed'].items(): # Save the error message for the failed operations failed.setdefault( lfn, {} )[catalogName] = message if master: # If this is the master catalog then we should not attempt the operation on other catalogs fileInfo.pop( lfn, None ) for lfn, result in result['Value']['Successful'].items(): # Save the result return for each file for the successful operations successful.setdefault( lfn, {} )[catalogName] = result if allLfns: # This recovers the states of the files that completely failed i.e. 
when S_ERROR is returned by a catalog for catalogName, errorMessage in failedCatalogs.items(): for lfn in allLfns: failed.setdefault( lfn, {} )[catalogName] = errorMessage # Restore original lfns if they were changed by normalization if lfnMapDict: for lfn in failed.keys(): failed[lfnMapDict.get( lfn, lfn )] = failed.pop( lfn ) for lfn in successful.keys(): successful[lfnMapDict.get( lfn, lfn )] = successful.pop( lfn ) resDict = {'Failed':failed, 'Successful':successful} return S_OK( resDict ) else: # FIXME: Return just master result here. This is temporary as more detailed # per catalog result needs multiple fixes in various client calls return masterResult def r_execute( self, *parms, **kws ): """ Read method executor. """ successful = {} failed = {} for _catalogName, oCatalog, _master in self.readCatalogs: # Skip if the method is not implemented in this catalog if not oCatalog.hasCatalogMethod( self.call ): continue method = getattr( oCatalog, self.call ) res = method( *parms, **kws ) if res['OK']: if 'Successful' in res['Value']: for key, item in res['Value']['Successful'].items(): successful.setdefault( key, item ) failed.pop( key, None ) for key, item in res['Value']['Failed'].items(): if key not in successful: failed[key] = item else: return res if not successful and not failed: return S_ERROR( DErrno.EFCERR, "Failed to perform %s from any catalog" % self.call ) return S_OK( {'Failed':failed, 'Successful':successful} ) ########################################################################################### # # Below is the method for obtaining the objects instantiated for a provided catalogue configuration # def addCatalog( self, catalogName, mode = "Write", master = False ): """ Add a new catalog with catalogName to the pool of catalogs in mode: "Read","Write" or "ReadWrite" """ result = self._generateCatalogObject( catalogName ) if not result['OK']: return result oCatalog = result['Value'] if mode.lower().find( "read" ) != -1: self.readCatalogs.append( ( catalogName, oCatalog, master ) ) if mode.lower().find( "write" ) != -1: self.writeCatalogs.append( ( catalogName, oCatalog, master ) ) return S_OK() def removeCatalog( self, catalogName ): """ Remove the specified catalog from the internal pool """ catalog_removed = False for i in range( len( self.readCatalogs ) ): catalog, _object, _master = self.readCatalogs[i] if catalog == catalogName: del self.readCatalogs[i] catalog_removed = True break for i in range( len( self.writeCatalogs ) ): catalog, _object, _master = self.writeCatalogs[i] if catalog == catalogName: del self.writeCatalogs[i] catalog_removed = True break if catalog_removed: return S_OK() else: return S_OK( 'Catalog does not exist' ) def _getSelectedCatalogs( self, desiredCatalogs ): for catalogName in desiredCatalogs: result = self._getCatalogConfigDetails( catalogName ) if not result['OK']: return result catalogConfig = result['Value'] result = self._generateCatalogObject( catalogName ) if not result['OK']: return result oCatalog = result['Value'] if re.search( 'Read', catalogConfig['AccessType'] ): if catalogConfig['Master']: self.readCatalogs.insert( 0, ( catalogName, oCatalog, catalogConfig['Master'] ) ) else: self.readCatalogs.append( ( catalogName, oCatalog, catalogConfig['Master'] ) ) if re.search( 'Write', catalogConfig['AccessType'] ): if catalogConfig['Master']: self.writeCatalogs.insert( 0, ( catalogName, oCatalog, catalogConfig['Master'] ) ) else: self.writeCatalogs.append( ( catalogName, oCatalog, catalogConfig['Master'] ) ) return S_OK() def 
_getEligibleCatalogs( self ): """ Get a list of eligible catalogs :return: S_OK/S_ERROR, Value - a list of catalog names """ # First, look in the Operations, if nothing defined look in /Resources for backward compatibility fileCatalogs = self.opHelper.getValue( '/Services/Catalogs/CatalogList', [] ) if not fileCatalogs: result = self.opHelper.getSections( '/Services/Catalogs' ) if result['OK']: fileCatalogs = result['Value'] else: res = gConfig.getSections( self.rootConfigPath, listOrdered = True ) if not res['OK']: errStr = "FileCatalog._getEligibleCatalogs: Failed to get file catalog configuration." self.log.error( errStr, res['Message'] ) return S_ERROR( errStr ) fileCatalogs = res['Value'] return S_OK( fileCatalogs ) def _getCatalogs( self ): """ Updates self.readCatalogs and self.writeCatalogs with list of catalog objects as found in the CS """ # Get the eligible catalogs first result = self._getEligibleCatalogs() if not result['OK']: return result fileCatalogs = result['Value'] # Get the catalog objects now for catalogName in fileCatalogs: res = self._getCatalogConfigDetails( catalogName ) if not res['OK']: return res catalogConfig = res['Value'] if catalogConfig['Status'] == 'Active': res = self._generateCatalogObject( catalogName ) if not res['OK']: return res oCatalog = res['Value'] master = catalogConfig['Master'] # If the catalog is read type if re.search( 'Read', catalogConfig['AccessType'] ): if master: self.readCatalogs.insert( 0, ( catalogName, oCatalog, master ) ) else: self.readCatalogs.append( ( catalogName, oCatalog, master ) ) # If the catalog is write type if re.search( 'Write', catalogConfig['AccessType'] ): if master: self.writeCatalogs.insert( 0, ( catalogName, oCatalog, master ) ) else: self.writeCatalogs.append( ( catalogName, oCatalog, master ) ) return S_OK() def _getCatalogConfigDetails( self, catalogName ): # First obtain the options that are available catalogConfigPath = '%s/%s' % ( self.rootConfigPath, catalogName ) result = gConfig.getOptionsDict( catalogConfigPath ) if not result['OK']: errStr = "FileCatalog._getCatalogConfigDetails: Failed to get catalog options." self.log.error( errStr, catalogName ) return S_ERROR( errStr ) catalogConfig = result['Value'] result = self.opHelper.getOptionsDict( '/Services/Catalogs/%s' % catalogName ) if result['OK']: catalogConfig.update( result['Value'] ) # The 'Status' option should be defined (default = 'Active') if 'Status' not in catalogConfig: warnStr = "FileCatalog._getCatalogConfigDetails: 'Status' option not defined." self.log.warn( warnStr, catalogName ) catalogConfig['Status'] = 'Active' # The 'AccessType' option must be defined if 'AccessType' not in catalogConfig: errStr = "FileCatalog._getCatalogConfigDetails: Required option 'AccessType' not defined." self.log.error( errStr, catalogName ) return S_ERROR( errStr ) # Anything other than 'True' in the 'Master' option means it is not catalogConfig['Master'] = ( catalogConfig.setdefault( 'Master', False ) == 'True' ) return S_OK( catalogConfig ) def _generateCatalogObject( self, catalogName ): """ Create a file catalog object from its name and CS description """ useProxy = gConfig.getValue( '/LocalSite/Catalogs/%s/UseProxy' % catalogName, False ) if not useProxy: useProxy = self.opHelper.getValue( '/Services/Catalogs/%s/UseProxy' % catalogName, False ) return FileCatalogFactory().createCatalog( catalogName, useProxy )
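# A brief usage sketch of the FileCatalog facade above: read methods fan out over all read
# catalogs, write methods go to every write catalog with the master first. The import path
# is the standard DIRAC one, and the catalog name and LFN are placeholders.
from DIRAC.Resources.Catalog.FileCatalog import FileCatalog

fc = FileCatalog()                 # or FileCatalog(catalogs=['FileCatalog'], vo='myvo')
if fc.isOK():
  lfn = '/myvo/user/s/someuser/example.txt'   # placeholder LFN
  res = fc.getReplicas(lfn)        # read method, dispatched via __getattr__ -> r_execute
  if res['OK']:
    replicas = res['Value']['Successful'].get(lfn, {})
  # Write methods such as addFile or removeReplica go through w_execute and are attempted
  # on every write catalog; a failure on the master catalog aborts the whole call.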
class FileCatalog: ro_methods = [ 'exists', 'isLink', 'readLink', 'isFile', 'getFileMetadata', 'getReplicas', 'getReplicaStatus', 'getFileSize', 'isDirectory', 'getDirectoryReplicas', 'listDirectory', 'getDirectoryMetadata', 'getDirectorySize', 'getDirectoryContents', 'resolveDataset', 'getPathPermissions', 'getLFNForPFN', 'getUsers', 'getGroups', 'getFileUserMetadata' ] write_methods = [ 'createLink', 'removeLink', 'addFile', 'setFileStatus', 'addReplica', 'removeReplica', 'removeFile', 'setReplicaStatus', 'setReplicaHost', 'createDirectory', 'setDirectoryStatus', 'removeDirectory', 'removeDataset', 'removeFileFromDataset', 'createDataset' ] def __init__(self, catalogs=[], vo=None): """ Default constructor """ self.valid = True self.timeout = 180 self.readCatalogs = [] self.writeCatalogs = [] self.vo = vo if not vo: result = getVOfromProxyGroup() if not result['OK']: return result self.vo = result['Value'] self.opHelper = Operations(vo=self.vo) self.reHelper = Resources(vo=self.vo) if type(catalogs) in types.StringTypes: catalogs = [catalogs] if catalogs: res = self._getSelectedCatalogs(catalogs) else: res = self._getCatalogs() if not res['OK']: self.valid = False elif (len(self.readCatalogs) == 0) and (len(self.writeCatalogs) == 0): self.valid = False def isOK(self): return self.valid def getReadCatalogs(self): return self.readCatalogs def getWriteCatalogs(self): return self.writeCatalogs def __getattr__(self, name): self.call = name if name in FileCatalog.write_methods: return self.w_execute elif name in FileCatalog.ro_methods: return self.r_execute else: raise AttributeError def w_execute(self, *parms, **kws): """ Write method executor. """ successful = {} failed = {} failedCatalogs = [] fileInfo = parms[0] res = checkArgumentFormat(fileInfo) if not res['OK']: return res fileInfo = res['Value'] allLfns = fileInfo.keys() for catalogName, oCatalog, master in self.writeCatalogs: method = getattr(oCatalog, self.call) res = method(fileInfo, **kws) if not res['OK']: if master: # If this is the master catalog and it fails we dont want to continue with the other catalogs gLogger.error( "FileCatalog.w_execute: Failed to execute %s on master catalog %s." % (self.call, catalogName), res['Message']) return res else: # Otherwise we keep the failed catalogs so we can update their state later failedCatalogs.append((catalogName, res['Message'])) else: for lfn, message in res['Value']['Failed'].items(): # Save the error message for the failed operations if not failed.has_key(lfn): failed[lfn] = {} failed[lfn][catalogName] = message if master: # If this is the master catalog then we should not attempt the operation on other catalogs fileInfo.pop(lfn) for lfn, result in res['Value']['Successful'].items(): # Save the result return for each file for the successful operations if not successful.has_key(lfn): successful[lfn] = {} successful[lfn][catalogName] = result # This recovers the states of the files that completely failed i.e. when S_ERROR is returned by a catalog for catalogName, errorMessage in failedCatalogs: for lfn in allLfns: if not failed.has_key(lfn): failed[lfn] = {} failed[lfn][catalogName] = errorMessage resDict = {'Failed': failed, 'Successful': successful} return S_OK(resDict) def r_execute(self, *parms, **kws): """ Read method executor. 
""" successful = {} failed = {} for _catalogName, oCatalog, _master in self.readCatalogs: method = getattr(oCatalog, self.call) res = method(*parms, **kws) if res['OK']: if 'Successful' in res['Value']: for key, item in res['Value']['Successful'].items(): if not successful.has_key(key): successful[key] = item if failed.has_key(key): failed.pop(key) for key, item in res['Value']['Failed'].items(): if not successful.has_key(key): failed[key] = item if len(failed) == 0: resDict = {'Failed': failed, 'Successful': successful} return S_OK(resDict) else: return res if (len(successful) == 0) and (len(failed) == 0): return S_ERROR("Failed to perform %s from any catalog" % self.call) resDict = {'Failed': failed, 'Successful': successful} return S_OK(resDict) ########################################################################################### # # Below is the method for obtaining the objects instantiated for a provided catalogue configuration # def addCatalog(self, catalogName, mode="Write", master=False): """ Add a new catalog with catalogName to the pool of catalogs in mode: "Read","Write" or "ReadWrite" """ result = self._generateCatalogObject(catalogName) if not result['OK']: return result oCatalog = result['Value'] if mode.lower().find("read") != -1: self.readCatalogs.append((catalogName, oCatalog, master)) if mode.lower().find("write") != -1: self.writeCatalogs.append((catalogName, oCatalog, master)) return S_OK() def removeCatalog(self, catalogName): """ Remove the specified catalog from the internal pool """ catalog_removed = False for i in range(len(self.readCatalogs)): catalog, _object, _master = self.readCatalogs[i] if catalog == catalogName: del self.readCatalogs[i] catalog_removed = True break for i in range(len(self.writeCatalogs)): catalog, _object, _master = self.writeCatalogs[i] if catalog == catalogName: del self.writeCatalogs[i] catalog_removed = True break if catalog_removed: return S_OK() else: return S_OK('Catalog does not exist') def _getSelectedCatalogs(self, desiredCatalogs): for catalogName in desiredCatalogs: res = self._generateCatalogObject(catalogName) if not res['OK']: return res oCatalog = res['Value'] self.readCatalogs.append((catalogName, oCatalog, True)) self.writeCatalogs.append((catalogName, oCatalog, True)) return S_OK() def _getCatalogs(self): # Get the eligible catalogs first # First, look in the Operations, if nothing defined look in /Resources result = self.opHelper.getSections('/Services/Catalogs') fileCatalogs = [] operationsFlag = False optCatalogDict = {} if result['OK']: fcs = result['Value'] for fc in fcs: fName = self.opHelper.getValue( '/Services/Catalogs/%s/CatalogName' % fc, fc) fileCatalogs.append(fName) optCatalogDict[fName] = fc operationsFlag = True else: res = self.reHelper.getEligibleResources('Catalog') if not res['OK']: errStr = "FileCatalog._getCatalogs: Failed to get file catalog configuration." 
gLogger.error(errStr, res['Message']) return S_ERROR(errStr) fileCatalogs = res['Value'] # Get the catalogs now for catalogName in fileCatalogs: res = self._getCatalogConfigDetails(catalogName) if not res['OK']: return res catalogConfig = res['Value'] if operationsFlag: result = self.opHelper.getOptionsDict( '/Services/Catalogs/%s' % optCatalogDict[catalogName]) if not result['OK']: return result catalogConfig.update(result['Value']) if catalogConfig['Status'] == 'Active': res = self._generateCatalogObject(catalogName) if not res['OK']: return res oCatalog = res['Value'] master = catalogConfig['Master'] # If the catalog is read type if re.search('Read', catalogConfig['AccessType']): if master: self.readCatalogs.insert( 0, (catalogName, oCatalog, master)) else: self.readCatalogs.append( (catalogName, oCatalog, master)) # If the catalog is write type if re.search('Write', catalogConfig['AccessType']): if master: self.writeCatalogs.insert( 0, (catalogName, oCatalog, master)) else: self.writeCatalogs.append( (catalogName, oCatalog, master)) return S_OK() def _getCatalogConfigDetails(self, catalogName): # First obtain the options that are available result = self.reHelper.getCatalogOptionsDict(catalogName) if not result['OK']: errStr = "FileCatalog._getCatalogConfigDetails: Failed to get catalog options" gLogger.error(errStr, catalogName) return S_ERROR(errStr) catalogConfig = result['Value'] # The 'Status' option should be defined (default = 'Active') if not catalogConfig.has_key('Status'): warnStr = "FileCatalog._getCatalogConfigDetails: 'Status' option not defined" gLogger.warn(warnStr, catalogName) catalogConfig['Status'] = 'Active' # The 'AccessType' option must be defined if not catalogConfig.has_key('AccessType'): errStr = "FileCatalog._getCatalogConfigDetails: Required option 'AccessType' not defined" gLogger.error(errStr, catalogName) return S_ERROR(errStr) # Anything other than 'True' in the 'Master' option means it is not if not catalogConfig.has_key('Master'): catalogConfig['Master'] = False elif catalogConfig['Master'] == 'True': catalogConfig['Master'] = True else: catalogConfig['Master'] = False return S_OK(catalogConfig) def _generateCatalogObject(self, catalogName): """ Create a file catalog object from its name and CS description """ useProxy = gConfig.getValue( '/LocalSite/Catalogs/%s/UseProxy' % catalogName, False) if not useProxy: useProxy = self.opHelper.getValue( '/Services/Catalogs/%s/UseProxy' % catalogName, False) return FileCatalogFactory().createCatalog(catalogName, useProxy)
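A minimal usage sketch for the catalog aggregator above: read method names are dispatched to r_execute and write method names to w_execute through __getattr__, and every call returns the usual S_OK({'Successful': ..., 'Failed': ...}) structure. The import path, LFN and file metadata below are illustrative and assume a configured DIRAC client with a valid proxy.

from DIRAC.Resources.Catalog.FileCatalog import FileCatalog

fc = FileCatalog()  # catalogs are resolved from the CS for the proxy VO
if not fc.isOK():
    raise RuntimeError("No valid file catalog could be configured")

lfn = '/vo.example/user/s/someuser/test.txt'  # hypothetical LFN

# Read call: aggregated over all read catalogs
res = fc.getReplicas(lfn)
if res['OK']:
    print(res['Value']['Successful'])
    print(res['Value']['Failed'])

# Write call: the master catalog is tried first; a failure there aborts the operation
fileDict = {lfn: {'PFN': '', 'Size': 1024, 'SE': 'EXAMPLE-SE',
                  'GUID': '0A0A0A0A-0A0A-0A0A-0A0A-0A0A0A0A0A0A', 'Checksum': ''}}
res = fc.addFile(fileDict)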
class FileCatalog(object): ro_methods = [ 'exists', 'isLink', 'readLink', 'isFile', 'getFileMetadata', 'getReplicas', 'getReplicaStatus', 'getFileSize', 'isDirectory', 'getDirectoryReplicas', 'listDirectory', 'getDirectoryMetadata', 'getDirectorySize', 'getDirectoryContents', 'resolveDataset', 'getPathPermissions', 'getLFNForPFN', 'getUsers', 'getGroups', 'getLFNForGUID' ] ro_meta_methods = [ 'getFileUserMetadata', 'getMetadataFields', 'findFilesByMetadata', 'getFileUserMetadata', 'findDirectoriesByMetadata', 'getReplicasByMetadata', 'findFilesByMetadataDetailed', 'findFilesByMetadataWeb', 'getCompatibleMetadata', 'getMetadataSet' ] ro_methods += ro_meta_methods write_methods = [ 'createLink', 'removeLink', 'addFile', 'setFileStatus', 'addReplica', 'removeReplica', 'removeFile', 'setReplicaStatus', 'setReplicaHost', 'setReplicaProblematic', 'createDirectory', 'setDirectoryStatus', 'removeDirectory', 'removeDataset', 'removeFileFromDataset', 'createDataset', 'changePathMode', 'changePathOwner', 'changePathGroup' ] write_meta_methods = [ 'addMetadataField', 'deleteMetadataField', 'setMetadata', 'setMetadataBulk', 'removeMetadata', 'addMetadataSet' ] write_methods += write_meta_methods def __init__(self, catalogs=None, vo=None): """ Default constructor """ self.valid = True self.timeout = 180 self.readCatalogs = [] self.writeCatalogs = [] self.metaCatalogs = [] self.rootConfigPath = '/Resources/FileCatalogs' self.vo = vo if vo else getVOfromProxyGroup().get('Value', None) self.opHelper = Operations(vo=self.vo) if catalogs is None: catalogList = [] elif type(catalogs) in types.StringTypes: catalogList = [catalogs] else: catalogList = catalogs if catalogList: res = self._getSelectedCatalogs(catalogList) else: res = self._getCatalogs() if not res['OK']: self.valid = False elif (len(self.readCatalogs) == 0) and (len(self.writeCatalogs) == 0): self.valid = False def isOK(self): return self.valid def getReadCatalogs(self): return self.readCatalogs def getWriteCatalogs(self): return self.writeCatalogs def getMasterCatalogNames(self): """ Returns the list of names of the Master catalogs """ masterNames = [ catalogName for catalogName, oCatalog, master in self.writeCatalogs if master ] return S_OK(masterNames) def __getattr__(self, name): self.call = name if name in FileCatalog.write_methods: return self.w_execute elif name in FileCatalog.ro_methods: return self.r_execute else: raise AttributeError def w_execute(self, *parms, **kws): """ Write method executor. """ successful = {} failed = {} failedCatalogs = [] fileInfo = parms[0] res = checkArgumentFormat(fileInfo) if not res['OK']: return res fileInfo = res['Value'] allLfns = fileInfo.keys() parms = parms[1:] for catalogName, oCatalog, master in self.writeCatalogs: # Skip if metadata related method on pure File Catalog if self.call in FileCatalog.write_meta_methods and not catalogName in self.metaCatalogs: continue method = getattr(oCatalog, self.call) res = method(fileInfo, *parms, **kws) if not res['OK']: if master: # If this is the master catalog and it fails we dont want to continue with the other catalogs gLogger.error( "FileCatalog.w_execute: Failed to execute %s on master catalog %s." 
% (self.call, catalogName), res['Message']) return res else: # Otherwise we keep the failed catalogs so we can update their state later failedCatalogs.append((catalogName, res['Message'])) else: for lfn, message in res['Value']['Failed'].items(): # Save the error message for the failed operations failed.setdefault(lfn, {})[catalogName] = message if master: # If this is the master catalog then we should not attempt the operation on other catalogs fileInfo.pop(lfn, None) for lfn, result in res['Value']['Successful'].items(): # Save the result return for each file for the successful operations successful.setdefault(lfn, {})[catalogName] = result # This recovers the states of the files that completely failed i.e. when S_ERROR is returned by a catalog for catalogName, errorMessage in failedCatalogs: for lfn in allLfns: failed.setdefault(lfn, {})[catalogName] = errorMessage resDict = {'Failed': failed, 'Successful': successful} return S_OK(resDict) def r_execute(self, *parms, **kws): """ Read method executor. """ successful = {} failed = {} for catalogName, oCatalog, _master in self.readCatalogs: # Skip if metadata related method on pure File Catalog if self.call in FileCatalog.ro_meta_methods and not catalogName in self.metaCatalogs: continue method = getattr(oCatalog, self.call) res = method(*parms, **kws) if res['OK']: if 'Successful' in res['Value']: for key, item in res['Value']['Successful'].items(): successful.setdefault(key, item) failed.pop(key, None) for key, item in res['Value']['Failed'].items(): if key not in successful: failed[key] = item else: return res if not successful and not failed: return S_ERROR("Failed to perform %s from any catalog" % self.call) return S_OK({'Failed': failed, 'Successful': successful}) ########################################################################################### # # Below is the method for obtaining the objects instantiated for a provided catalogue configuration # def addCatalog(self, catalogName, mode="Write", master=False): """ Add a new catalog with catalogName to the pool of catalogs in mode: "Read","Write" or "ReadWrite" """ result = self._generateCatalogObject(catalogName) if not result['OK']: return result oCatalog = result['Value'] if mode.lower().find("read") != -1: self.readCatalogs.append((catalogName, oCatalog, master)) if mode.lower().find("write") != -1: self.writeCatalogs.append((catalogName, oCatalog, master)) return S_OK() def removeCatalog(self, catalogName): """ Remove the specified catalog from the internal pool """ catalog_removed = False for i in range(len(self.readCatalogs)): catalog, _object, _master = self.readCatalogs[i] if catalog == catalogName: del self.readCatalogs[i] catalog_removed = True break for i in range(len(self.writeCatalogs)): catalog, _object, _master = self.writeCatalogs[i] if catalog == catalogName: del self.writeCatalogs[i] catalog_removed = True break if catalog_removed: return S_OK() else: return S_OK('Catalog does not exist') def _getSelectedCatalogs(self, desiredCatalogs): for catalogName in desiredCatalogs: res = self._getCatalogConfigDetails(catalogName) if not res['OK']: return res catalogConfig = res['Value'] res = self._generateCatalogObject(catalogName) if not res['OK']: return res oCatalog = res['Value'] self.readCatalogs.append((catalogName, oCatalog, True)) self.writeCatalogs.append((catalogName, oCatalog, True)) if catalogConfig.get('MetaCatalog') == 'True': self.metaCatalogs.append(catalogName) return S_OK() def _getCatalogs(self): # Get the eligible catalogs first # First, look in 
the Operations, if nothing defined look in /Resources for backward compatibility operationsFlag = False fileCatalogs = self.opHelper.getValue('/Services/Catalogs/CatalogList', []) if fileCatalogs: operationsFlag = True else: result = self.opHelper.getSections('/Services/Catalogs') fileCatalogs = [] operationsFlag = False if result['OK']: fileCatalogs = result['Value'] operationsFlag = True else: res = gConfig.getSections(self.rootConfigPath, listOrdered=True) if not res['OK']: errStr = "FileCatalog._getCatalogs: Failed to get file catalog configuration." gLogger.error(errStr, res['Message']) return S_ERROR(errStr) fileCatalogs = res['Value'] # Get the catalogs now for catalogName in fileCatalogs: res = self._getCatalogConfigDetails(catalogName) if not res['OK']: return res catalogConfig = res['Value'] if operationsFlag: result = self.opHelper.getOptionsDict('/Services/Catalogs/%s' % catalogName) if not result['OK']: return result catalogConfig.update(result['Value']) if catalogConfig['Status'] == 'Active': res = self._generateCatalogObject(catalogName) if not res['OK']: return res oCatalog = res['Value'] master = catalogConfig['Master'] # If the catalog is read type if re.search('Read', catalogConfig['AccessType']): if master: self.readCatalogs.insert( 0, (catalogName, oCatalog, master)) else: self.readCatalogs.append( (catalogName, oCatalog, master)) # If the catalog is write type if re.search('Write', catalogConfig['AccessType']): if master: self.writeCatalogs.insert( 0, (catalogName, oCatalog, master)) else: self.writeCatalogs.append( (catalogName, oCatalog, master)) if catalogConfig.get('MetaCatalog') == 'True': self.metaCatalogs.append(catalogName) return S_OK() def _getCatalogConfigDetails(self, catalogName): # First obtain the options that are available catalogConfigPath = '%s/%s' % (self.rootConfigPath, catalogName) res = gConfig.getOptions(catalogConfigPath) if not res['OK']: errStr = "FileCatalog._getCatalogConfigDetails: Failed to get catalog options." gLogger.error(errStr, catalogName) return S_ERROR(errStr) catalogConfig = {} for option in res['Value']: configPath = '%s/%s' % (catalogConfigPath, option) optionValue = gConfig.getValue(configPath) catalogConfig[option] = optionValue # The 'Status' option should be defined (default = 'Active') if 'Status' not in catalogConfig: warnStr = "FileCatalog._getCatalogConfigDetails: 'Status' option not defined." gLogger.warn(warnStr, catalogName) catalogConfig['Status'] = 'Active' # The 'AccessType' option must be defined if 'AccessType' not in catalogConfig: errStr = "FileCatalog._getCatalogConfigDetails: Required option 'AccessType' not defined." gLogger.error(errStr, catalogName) return S_ERROR(errStr) # Anything other than 'True' in the 'Master' option means it is not catalogConfig['Master'] = (catalogConfig.setdefault('Master', False) == 'True') return S_OK(catalogConfig) def _generateCatalogObject(self, catalogName): """ Create a file catalog object from its name and CS description """ useProxy = gConfig.getValue( '/LocalSite/Catalogs/%s/UseProxy' % catalogName, False) if not useProxy: useProxy = self.opHelper.getValue( '/Services/Catalogs/%s/UseProxy' % catalogName, False) return FileCatalogFactory().createCatalog(catalogName, useProxy)
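The discovery logic above reads the catalog list from the Operations helper and only falls back to /Resources/FileCatalogs for backward compatibility. A hedged sketch of how the same section can be inspected directly; the section layout in the comment and the VO name are illustrative.

from DIRAC.ConfigurationSystem.Client.Helpers.Operations import Operations

# Hypothetical Operations layout consumed by _getCatalogs():
#   /Services/Catalogs/FileCatalog : AccessType = Read-Write, Status = Active, Master = True
#   /Services/Catalogs/TSCatalog   : AccessType = Write, Status = Active, MetaCatalog = True
ops = Operations(vo='vo.example')
res = ops.getSections('/Services/Catalogs')
if res['OK']:
    for catalogName in res['Value']:
        print('%s: %s' % (catalogName, ops.getOptionsDict('/Services/Catalogs/%s' % catalogName)))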
class Limiter(object): # static variables shared between all instances of this class csDictCache = DictCache() condCache = DictCache() delayMem = {} def __init__(self, jobDB=None, opsHelper=None): """ Constructor """ self.__runningLimitSection = "JobScheduling/RunningLimit" self.__matchingDelaySection = "JobScheduling/MatchingDelay" if jobDB: self.jobDB = jobDB else: self.jobDB = JobDB() self.log = gLogger.getSubLogger("Limiter") if opsHelper: self.__opsHelper = opsHelper else: self.__opsHelper = Operations() def getNegativeCond(self): """ Get negative condition for ALL sites """ orCond = self.condCache.get("GLOBAL") if orCond: return orCond negCond = {} # Run Limit result = self.__opsHelper.getSections(self.__runningLimitSection) sites = [] if result['OK']: sites = result['Value'] for siteName in sites: result = self.__getRunningCondition(siteName) if not result['OK']: continue data = result['Value'] if data: negCond[siteName] = data # Delay limit result = self.__opsHelper.getSections(self.__matchingDelaySection) sites = [] if result['OK']: sites = result['Value'] for siteName in sites: result = self.__getDelayCondition(siteName) if not result['OK']: continue data = result['Value'] if not data: continue if siteName in negCond: negCond[siteName] = self.__mergeCond(negCond[siteName], data) else: negCond[siteName] = data orCond = [] for siteName in negCond: negCond[siteName]['Site'] = siteName orCond.append(negCond[siteName]) self.condCache.add("GLOBAL", 10, orCond) return orCond def getNegativeCondForSite(self, siteName): """ Generate a negative query based on the limits set on the site """ # Check if Limits are imposed onto the site negativeCond = {} if self.__opsHelper.getValue("JobScheduling/CheckJobLimits", True): result = self.__getRunningCondition(siteName) if result['OK']: negativeCond = result['Value'] self.log.verbose('Negative conditions for site', '%s after checking limits are: %s' % (siteName, str(negativeCond))) if self.__opsHelper.getValue("JobScheduling/CheckMatchingDelay", True): result = self.__getDelayCondition(siteName) if result['OK']: delayCond = result['Value'] self.log.verbose('Negative conditions for site', '%s after delay checking are: %s' % (siteName, str(delayCond))) negativeCond = self.__mergeCond(negativeCond, delayCond) if negativeCond: self.log.info('Negative conditions for site', '%s are: %s' % (siteName, str(negativeCond))) return negativeCond def __mergeCond(self, negCond, addCond): """ Merge two negative dicts """ # Merge both negative dicts for attr in addCond: if attr not in negCond: negCond[attr] = [] for value in addCond[attr]: if value not in negCond[attr]: negCond[attr].append(value) return negCond def __extractCSData(self, section): """ Extract limiting information from the CS in the form: { 'JobType' : { 'Merge' : 20, 'MCGen' : 1000 } } """ stuffDict = self.csDictCache.get(section) if stuffDict: return S_OK(stuffDict) result = self.__opsHelper.getSections(section) if not result['OK']: return result attribs = result['Value'] stuffDict = {} for attName in attribs: result = self.__opsHelper.getOptionsDict("%s/%s" % (section, attName)) if not result['OK']: return result attLimits = result['Value'] try: attLimits = dict([(k, int(attLimits[k])) for k in attLimits]) except Exception as excp: errMsg = "%s/%s has to contain numbers: %s" % (section, attName, str(excp)) self.log.error(errMsg) return S_ERROR(errMsg) stuffDict[attName] = attLimits self.csDictCache.add(section, 300, stuffDict) return S_OK(stuffDict) def __getRunningCondition(self, siteName): """ 
Get extra conditions allowing site throttling """ siteSection = "%s/%s" % (self.__runningLimitSection, siteName) result = self.__extractCSData(siteSection) if not result['OK']: return result limitsDict = result['Value'] # limitsDict is something like { 'JobType' : { 'Merge' : 20, 'MCGen' : 1000 } } if not limitsDict: return S_OK({}) # Check if the site exceeding the given limits negCond = {} for attName in limitsDict: if attName not in self.jobDB.jobAttributeNames: self.log.error("Attribute does not exist", "(%s). Check the job limits" % attName) continue cK = "Running:%s:%s" % (siteName, attName) data = self.condCache.get(cK) if not data: result = self.jobDB.getCounters( 'Jobs', [attName], { 'Site': siteName, 'Status': [ 'Running', 'Matched', 'Stalled']}) if not result['OK']: return result data = result['Value'] data = dict([(k[0][attName], k[1]) for k in data]) self.condCache.add(cK, 10, data) for attValue in limitsDict[attName]: limit = limitsDict[attName][attValue] running = data.get(attValue, 0) if running >= limit: self.log.verbose('Job Limit imposed', 'at %s on %s/%s=%d, %d jobs already deployed' % (siteName, attName, attValue, limit, running)) if attName not in negCond: negCond[attName] = [] negCond[attName].append(attValue) # negCond is something like : {'JobType': ['Merge']} return S_OK(negCond) def updateDelayCounters(self, siteName, jid): # Get the info from the CS siteSection = "%s/%s" % (self.__matchingDelaySection, siteName) result = self.__extractCSData(siteSection) if not result['OK']: return result delayDict = result['Value'] # limitsDict is something like { 'JobType' : { 'Merge' : 20, 'MCGen' : 1000 } } if not delayDict: return S_OK() attNames = [] for attName in delayDict: if attName not in self.jobDB.jobAttributeNames: self.log.error("Attribute does not exist in the JobDB. Please fix it!", "(%s)" % attName) else: attNames.append(attName) result = self.jobDB.getJobAttributes(jid, attNames) if not result['OK']: self.log.error("Error while retrieving attributes", "coming from %s: %s" % (siteSection, result['Message'])) return result atts = result['Value'] # Create the DictCache if not there if siteName not in self.delayMem: self.delayMem[siteName] = DictCache() # Update the counters delayCounter = self.delayMem[siteName] for attName in atts: attValue = atts[attName] if attValue in delayDict[attName]: delayTime = delayDict[attName][attValue] self.log.notice("Adding delay for %s/%s=%s of %s secs" % (siteName, attName, attValue, delayTime)) delayCounter.add((attName, attValue), delayTime) return S_OK() def __getDelayCondition(self, siteName): """ Get extra conditions allowing matching delay """ if siteName not in self.delayMem: return S_OK({}) lastRun = self.delayMem[siteName].getKeys() negCond = {} for attName, attValue in lastRun: if attName not in negCond: negCond[attName] = [] negCond[attName].append(attValue) return S_OK(negCond)
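A sketch of how the limiter above is typically exercised; the site name and the Operations values in the comment are illustrative, and a server-side installation with access to the JobDB is assumed.

# Hypothetical Operations content read through __extractCSData():
#   JobScheduling/RunningLimit/LCG.Example.org/JobType/MCGen = 100
#   JobScheduling/MatchingDelay/LCG.Example.org/JobType/Merge = 60
limiter = Limiter()  # builds its own JobDB() and Operations() helpers by default
negCond = limiter.getNegativeCondForSite('LCG.Example.org')
# e.g. {'JobType': ['MCGen']} once 100 MCGen jobs are Running/Matched/Stalled at the site
print(negCond)

# After dispatching job 12345 to the site, record its matching-delay counters
limiter.updateDelayCounters('LCG.Example.org', 12345)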
class DMSHelpers(object): """ This class is used to get information about sites, SEs and their interrelations """ def __init__(self, vo=False): self.siteSEMapping = {} self.storageElementSet = set() self.siteSet = set() self.__opsHelper = Operations(vo=vo) self.failoverSEs = None self.archiveSEs = None self.notForJobSEs = None def getSiteSEMapping(self): """ Returns a dictionary of all sites and their localSEs as a list, e.g. {'LCG.CERN.ch':['CERN-RAW','CERN-RDST',...]} """ if self.siteSEMapping: return S_OK(self.siteSEMapping) # Get the list of SEs and keep a mapping of those using an Alias or a # BaseSE storageElements = gConfig.getSections('Resources/StorageElements') if not storageElements['OK']: gLogger.warn('Problem retrieving storage elements', storageElements['Message']) return storageElements storageElements = storageElements['Value'] equivalentSEs = {} for se in storageElements: for option in ('BaseSE', 'Alias'): originalSE = gConfig.getValue( 'Resources/StorageElements/%s/%s' % (se, option)) if originalSE: equivalentSEs.setdefault(originalSE, []).append(se) break siteSEMapping = {} gridTypes = gConfig.getSections('Resources/Sites/') if not gridTypes['OK']: gLogger.warn( 'Problem retrieving sections in /Resources/Sites', gridTypes['Message']) return gridTypes gridTypes = gridTypes['Value'] gLogger.debug('Grid Types are: %s' % (', '.join(gridTypes))) # Get a list of sites and their local SEs siteSet = set() storageElementSet = set() siteSEMapping[LOCAL] = {} for grid in gridTypes: result = gConfig.getSections('/Resources/Sites/%s' % grid) if not result['OK']: gLogger.warn('Problem retrieving /Resources/Sites/%s section' % grid) return result sites = result['Value'] siteSet.update(sites) for site in sites: candidateSEs = gConfig.getValue( '/Resources/Sites/%s/%s/SE' % (grid, site), []) if candidateSEs: candidateSEs += [ eqSE for se in candidateSEs for eqSE in equivalentSEs.get(se, [])] siteSEMapping[LOCAL].setdefault(site, set()).update(candidateSEs) storageElementSet.update(candidateSEs) # Add Sites from the SiteSEMappingByProtocol in the CS siteSEMapping[PROTOCOL] = {} cfgLocalSEPath = cfgPath('SiteSEMappingByProtocol') result = self.__opsHelper.getOptionsDict(cfgLocalSEPath) if result['OK']: sites = result['Value'] for site in sites: candidates = set(self.__opsHelper.getValue( cfgPath(cfgLocalSEPath, site), [])) ses = set(resolveSEGroup(candidates - siteSet) ) | (candidates & siteSet) # If a candidate is a site, then all local SEs are eligible for candidate in ses & siteSet: ses.remove(candidate) ses.update(siteSEMapping[LOCAL][candidate]) siteSEMapping[PROTOCOL].setdefault(site, set()).update(ses) # Add Sites from the SiteSEMappingByDownload in the CS, else # SiteLocalSEMapping (old convention) siteSEMapping[DOWNLOAD] = {} cfgLocalSEPath = cfgPath('SiteSEMappingByDownload') result = self.__opsHelper.getOptionsDict(cfgLocalSEPath) if not result['OK']: cfgLocalSEPath = cfgPath('SiteLocalSEMapping') result = self.__opsHelper.getOptionsDict(cfgLocalSEPath) if result['OK']: sites = result['Value'] for site in sites: candidates = set(self.__opsHelper.getValue( cfgPath(cfgLocalSEPath, site), [])) ses = set(resolveSEGroup(candidates - siteSet) ) | (candidates & siteSet) # If a candidate is a site, then all local SEs are eligible for candidate in ses & siteSet: ses.remove(candidate) ses.update(siteSEMapping[LOCAL][candidate]) siteSEMapping[DOWNLOAD].setdefault(site, set()).update(ses) self.siteSEMapping = siteSEMapping # Add storage elements that may not be associated with a site result 
= gConfig.getSections('/Resources/StorageElements') if not result['OK']: gLogger.warn( 'Problem retrieving /Resources/StorageElements section', result['Message']) return result self.storageElementSet = storageElementSet | set(result['Value']) self.siteSet = siteSet return S_OK(siteSEMapping) def getSites(self): """ Get the list of known sites """ self.getSiteSEMapping() return sorted(self.siteSet) def getTiers(self, withStorage=False, tier=None): """ Get the list of sites for a given (list of) Tier level """ sites = sorted(self.getShortSiteNames( withStorage=withStorage, tier=tier).values()) if sites and isinstance(sites[0], list): # List of lists, flatten it sites = [s for sl in sites for s in sl] return sites def getShortSiteNames(self, withStorage=True, tier=None): """ Create a directory of short site names pointing to full site names """ siteDict = {} result = self.getSiteSEMapping() if result['OK']: for site in self.siteSEMapping[LOCAL] if withStorage else self.siteSet: grid, shortSite, _country = site.split('.') if isinstance(tier, (int, long)) and \ (grid != 'LCG' or gConfig.getValue('/Resources/Sites/%s/%s/MoUTierLevel' % (grid, site), 999) != tier): continue if isinstance(tier, (list, tuple, dict, set)) and \ (grid != 'LCG' or gConfig.getValue('/Resources/Sites/%s/%s/MoUTierLevel' % (grid, site), 999) not in tier): continue if withStorage or tier is not None: siteDict[shortSite] = site else: siteDict.setdefault(shortSite, []).append(site) return siteDict def getStorageElements(self): """ Get the list of known SEs """ self.getSiteSEMapping() return sorted(self.storageElementSet) def isSEFailover(self, storageElement): """ Is this SE a failover SE """ if self.failoverSEs is None: seList = resolveSEGroup(self.__opsHelper.getValue( 'DataManagement/SEsUsedForFailover', [])) self.failoverSEs = resolveSEGroup(seList) # FIXME: remove string test at some point return storageElement in self.failoverSEs or (not self.failoverSEs and isinstance(storageElement, basestring) and 'FAILOVER' in storageElement.upper()) def isSEForJobs(self, storageElement, checkSE=True): """ Is this SE suitable for making jobs """ if checkSE: self.getSiteSEMapping() if storageElement not in self.storageElementSet: return False if self.notForJobSEs is None: seList = resolveSEGroup(self.__opsHelper.getValue( 'DataManagement/SEsNotToBeUsedForJobs', [])) self.notForJobSEs = resolveSEGroup(seList) return storageElement not in self.notForJobSEs def isSEArchive(self, storageElement): """ Is this SE an archive SE """ if self.archiveSEs is None: seList = resolveSEGroup(self.__opsHelper.getValue( 'DataManagement/SEsUsedForArchive', [])) self.archiveSEs = resolveSEGroup(seList) # FIXME: remove string test at some point return storageElement in self.archiveSEs or (not self.archiveSEs and isinstance(storageElement, basestring) and 'ARCHIVE' in storageElement.upper()) def getSitesForSE(self, storageElement, connectionLevel=None): """ Get the (list of) sites for a given SE and a given connctivity """ connectionIndex = _getConnectionIndex(connectionLevel, default=DOWNLOAD) if connectionIndex == LOCAL: return self._getLocalSitesForSE(storageElement) if connectionIndex == PROTOCOL: return self.getProtocolSitesForSE(storageElement) if connectionIndex == DOWNLOAD: return self.getDownloadSitesForSE(storageElement) return S_ERROR("Unknown connection level") def getLocalSiteForSE(self, se): """ Get the site at which the SE is """ sites = self._getLocalSitesForSE(se) if not sites['OK']: return sites if not sites['Value']: return S_OK(None) 
return S_OK(sites['Value'][0]) def _getLocalSitesForSE(self, se): """ Extract the list of sites that declare this SE """ mapping = self.getSiteSEMapping() if not mapping['OK']: return mapping if se not in self.storageElementSet: return S_ERROR('Non-existing SE') mapping = mapping['Value'][LOCAL] sites = [site for site in mapping if se in mapping[site]] if len(sites) > 1 and self.__opsHelper.getValue('DataManagement/ForceSingleSitePerSE', True): return S_ERROR('SE is at more than one site') return S_OK(sites) def getProtocolSitesForSE(self, se): """ Get sites that can access the SE by protocol """ mapping = self.getSiteSEMapping() if not mapping['OK']: return mapping if se not in self.storageElementSet: return S_ERROR('Non-existing SE') mapping = mapping['Value'][PROTOCOL] sites = self._getLocalSitesForSE(se) if not sites['OK']: return sites sites = set(sites['Value']) sites.update([site for site in mapping if se in mapping[site]]) return S_OK(sorted(sites)) def getDownloadSitesForSE(self, se): """ Get the list of sites that are allowed to download files """ mapping = self.getSiteSEMapping() if not mapping['OK']: return mapping if se not in self.storageElementSet: return S_ERROR('Non-existing SE') mapping = mapping['Value'][DOWNLOAD] sites = self.getProtocolSitesForSE(se) if not sites['OK']: return sites sites = set(sites['Value']) sites.update([site for site in mapping if se in mapping[site]]) return S_OK(sorted(sites)) def getSEsForSite(self, site, connectionLevel=None): """ Get all SEs accessible from a site, given a connectivity """ connectionIndex = _getConnectionIndex(connectionLevel, default=DOWNLOAD) if connectionIndex is None: return S_ERROR("Unknown connection level") if not self.siteSet: self.getSiteSEMapping() if site not in self.siteSet: siteList = [s for s in self.siteSet if '.%s.' 
% site in s] else: siteList = [site] if not siteList: return S_ERROR("Unknown site") return self._getSEsForSItes(siteList, connectionIndex=connectionIndex) def _getSEsForSItes(self, siteList, connectionIndex): """ Extract list of SEs for a connectivity """ mapping = self.getSiteSEMapping() if not mapping['OK']: return mapping ses = [] for index in range(LOCAL, connectionIndex + 1): for site in siteList: ses += mapping['Value'][index].get(site, []) if not ses: return S_ERROR('No SE found') return S_OK(sorted(ses)) def getSEsAtSite(self, site): """ Get local SEs """ return self.getSEsForSite(site, connectionLevel=LOCAL) def isSameSiteSE(self, se1, se2): """ Are these 2 SEs at the same site """ res = self.getLocalSiteForSE(se1) if not res['OK']: return res site1 = res['Value'] res = self.getLocalSiteForSE(se2) if not res['OK']: return res site2 = res['Value'] return S_OK(site1 == site2) def getSEsAtCountry(self, country, connectionLevel=None): """ Get all SEs at a given country """ connectionIndex = _getConnectionIndex(connectionLevel, default=DOWNLOAD) if connectionIndex is None: return S_ERROR("Unknown connection level") if not self.siteSet: self.getSiteSEMapping() siteList = [site for site in self.siteSet if siteCountryName( site) == country.lower()] if not siteList: return S_ERROR("No SEs found in country") return self._getSEsForSItes(siteList, connectionIndex) def getSEInGroupAtSite(self, seGroup, site): """ Get the SE in a group or list of SEs that is present at a site """ seList = self.getAllSEsInGroupAtSite(seGroup, site) if not seList['OK'] or seList['Value'] is None: return seList return S_OK(seList['Value'][0]) def getAllSEsInGroupAtSite(self, seGroup, site): """ Get all SEs in a group or list of SEs that are present at a site """ seList = resolveSEGroup(seGroup) if not seList: return S_ERROR('SEGroup does not exist') sesAtSite = self.getSEsAtSite(site) if not sesAtSite['OK']: return sesAtSite foundSEs = set(seList) & set(sesAtSite['Value']) if not foundSEs: gLogger.warn('No SE found at that site', 'in group %s at %s' % (seGroup, site)) return S_OK() return S_OK(sorted(foundSEs)) def getRegistrationProtocols(self): """ Returns the Favorite registration protocol defined in the CS, or 'srm' as default """ return self.__opsHelper.getValue('DataManagement/RegistrationProtocols', ['srm', 'dips']) def getThirdPartyProtocols(self): """ Returns the Favorite third party protocol defined in the CS, or 'srm' as default """ return self.__opsHelper.getValue('DataManagement/ThirdPartyProtocols', ['srm']) def getAccessProtocols(self): """ Returns the Favorite access protocol defined in the CS, or 'srm' as default """ return self.__opsHelper.getValue('DataManagement/AccessProtocols', ['srm', 'dips']) def getWriteProtocols(self): """ Returns the Favorite Write protocol defined in the CS, or 'srm' as default """ return self.__opsHelper.getValue('DataManagement/WriteProtocols', ['srm', 'dips'])
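A short usage sketch for the helper above; the module path, VO and SE names are assumptions, and a populated /Resources/Sites and /Resources/StorageElements section is required.

from DIRAC.DataManagementSystem.Utilities.DMSHelpers import DMSHelpers  # path assumed

dms = DMSHelpers(vo='vo.example')
print(dms.getSites())                    # all known sites
print(dms.getStorageElements())          # all known SEs
res = dms.getSitesForSE('EXAMPLE-DST')   # DOWNLOAD connectivity by default
if res['OK']:
    print(res['Value'])
print(dms.isSEFailover('EXAMPLE-FAILOVER'))
print(dms.isSEArchive('EXAMPLE-ARCHIVE'))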
class InputDataResolution(object): """ ILC specific input data resolution, imported from DIRAC """ ############################################################################# def __init__(self, argumentsDict): """ Standard constructor """ self.arguments = argumentsDict self.name = COMPONENT_NAME self.log = gLogger.getSubLogger(self.name) self.ops = Operations() ############################################################################# def execute(self): """Given the arguments from the Job Wrapper, this function calls existing utilities in DIRAC to resolve input data according to LHCb VO policy. """ result = self.__resolveInputData() if not result['OK']: self.log.error('InputData resolution failed with result:\n%s' % (result)) #For local running of this module we can expose an option to ignore missing files ignoreMissing = False if 'IgnoreMissing' in self.arguments: ignoreMissing = self.arguments['IgnoreMissing'] #For LHCb original policy was as long as one TURL exists, this can be conveyed to the application #this breaks due to the stripping so the policy has been changed. if 'Failed' in result: failedReplicas = result['Failed'] if failedReplicas and not ignoreMissing: self.log.error( 'Failed to obtain access to the following files:\n%s' % (string.join(failedReplicas, '\n'))) return S_ERROR('Failed to access all of requested input data') if 'Successful' not in result: return result if not result['Successful']: return S_ERROR('Could not access any requested input data') return result ############################################################################# def __resolveInputData(self): """This method controls the execution of the DIRAC input data modules according to the ILC VO policy defined in the configuration service. """ if 'SiteName' in self.arguments['Configuration']: site = self.arguments['Configuration']['SiteName'] else: site = DIRAC.siteName() policy = [] if 'Job' not in self.arguments: self.arguments['Job'] = {} if 'InputDataPolicy' in self.arguments['Job']: policy = self.arguments['Job']['InputDataPolicy'] #In principle this can be a list of modules with the first taking precedence if type(policy) in types.StringTypes: policy = [policy] self.log.info('Job has a specific policy setting: %s' % (string.join(policy, ', '))) else: self.log.verbose( 'Attempting to resolve input data policy for site %s' % site) inputDataPolicy = self.ops.getOptionsDict('/InputDataPolicy') if not inputDataPolicy: return S_ERROR( 'Could not resolve InputDataPolicy from /InputDataPolicy') options = inputDataPolicy['Value'] if site in options: policy = options[site] policy = [x.strip() for x in string.split(policy, ',')] self.log.info( 'Found specific input data policy for site %s:\n%s' % (site, string.join(policy, ',\n'))) elif 'Default' in options: policy = options['Default'] policy = [x.strip() for x in string.split(policy, ',')] self.log.info( 'Applying default input data policy for site %s:\n%s' % (site, string.join(policy, ',\n'))) dataToResolve = None #if none, all supplied input data is resolved allDataResolved = False successful = {} failedReplicas = [] for modulePath in policy: if not allDataResolved: result = self.__runModule(modulePath, dataToResolve) if not result['OK']: self.log.warn('Problem during %s execution' % modulePath) return result if 'Failed' in result: failedReplicas = result['Failed'] if failedReplicas: self.log.info( '%s failed for the following files:\n%s' % (modulePath, string.join(failedReplicas, '\n'))) dataToResolve = failedReplicas else: self.log.info('All 
replicas resolved after %s execution' % (modulePath)) allDataResolved = True successful.update(result['Successful']) self.log.verbose(successful) result = S_OK() result['Successful'] = successful result['Failed'] = failedReplicas return result ############################################################################# def __runModule(self, modulePath, remainingReplicas): """This method provides a way to run the modules specified by the VO that govern the input data access policy for the current site. For LHCb the standard WMS modules are applied in a different order depending on the site. """ self.log.info('Attempting to run %s' % (modulePath)) moduleFactory = ModuleFactory() moduleInstance = moduleFactory.getModule(modulePath, self.arguments) if not moduleInstance['OK']: return moduleInstance module = moduleInstance['Value'] result = module.execute(remainingReplicas) return result
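A rough sketch of the arguments dictionary the resolver above expects from the Job Wrapper; every key and value shown is illustrative, and in production the wrapper builds this structure itself.

argumentsDict = {
    'FileCatalog': None,
    'Configuration': {'SiteName': 'LCG.Example.org', 'LocalSEList': ['EXAMPLE-SE']},
    'InputData': ['/vo.example/prod/00001/example_gen.slcio'],
    'Job': {'InputDataPolicy': 'DIRAC.WorkloadManagementSystem.Client.InputDataByProtocol'},
}
resolver = InputDataResolution(argumentsDict)  # class defined above
result = resolver.execute()
if result['OK']:
    print(result['Successful'])   # per-LFN resolution results
else:
    print(result['Message'])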
class FileCatalog(object): def __init__(self, catalogs=None, vo=None): """Default constructor""" self.valid = True self.timeout = 180 self.ro_methods = set() self.write_methods = set() self.no_lfn_methods = set() self.readCatalogs = [] self.writeCatalogs = [] self.rootConfigPath = "/Resources/FileCatalogs" self.vo = vo if vo else getVOfromProxyGroup().get("Value", None) self.log = gLogger.getSubLogger(self.__class__.__name__) self.opHelper = Operations(vo=self.vo) catalogList = [] if isinstance(catalogs, six.string_types): catalogList = [catalogs] elif isinstance(catalogs, (list, tuple)): catalogList = list(catalogs) if catalogList: result = self._getEligibleCatalogs() if not result["OK"]: self.log.error("Failed to get eligible catalog") return eligibleFileCatalogs = result["Value"] catalogCheck = True for catalog in catalogList: if catalog not in eligibleFileCatalogs: self.log.error("Specified catalog is not eligible", catalog) catalogCheck = False if catalogCheck: result = self._getSelectedCatalogs(catalogList) else: result = S_ERROR("Specified catalog is not eligible") else: result = self._getCatalogs() if not result["OK"]: self.log.error("Failed to create catalog objects") self.valid = False elif (len(self.readCatalogs) == 0) and (len(self.writeCatalogs) == 0): self.log.error("No catalog object created") self.valid = False result = self.getMasterCatalogNames() masterCatalogs = result["Value"] # There can not be more than one master catalog haveMaster = False if len(masterCatalogs) > 1: self.log.error("More than one master catalog created") self.valid = False elif len(masterCatalogs) == 1: haveMaster = True # Get the list of write methods if haveMaster: # All the write methods must be present in the master _catalogName, oCatalog, _master = self.writeCatalogs[0] _roList, writeList, nolfnList = oCatalog.getInterfaceMethods() self.write_methods.update(writeList) self.no_lfn_methods.update(nolfnList) else: for _catalogName, oCatalog, _master in self.writeCatalogs: _roList, writeList, nolfnList = oCatalog.getInterfaceMethods() self.write_methods.update(writeList) self.no_lfn_methods.update(nolfnList) # Get the list of read methods for _catalogName, oCatalog, _master in self.readCatalogs: roList, _writeList, nolfnList = oCatalog.getInterfaceMethods() self.ro_methods.update(roList) self.no_lfn_methods.update(nolfnList) self.condParser = FCConditionParser(vo=self.vo, ro_methods=self.ro_methods) def isOK(self): return self.valid def getReadCatalogs(self): return self.readCatalogs def getWriteCatalogs(self): return self.writeCatalogs def getMasterCatalogNames(self): """Returns the list of names of the Master catalogs""" masterNames = [catalogName for catalogName, oCatalog, master in self.writeCatalogs if master] return S_OK(masterNames) def __getattr__(self, name): self.call = name if name in self.write_methods: return self.w_execute elif name in self.ro_methods: return self.r_execute else: raise AttributeError def w_execute(self, *parms, **kws): """Write method executor. If one of the LFNs given as input does not pass a condition defined for the master catalog, we return S_ERROR without trying anything else :param fcConditions: either a dict or a string, to be propagated to the FCConditionParser * If it is a string, it is given for all catalogs * If it is a dict, it has to be { catalogName: condition}, and only the specific condition for the catalog will be given .. warning :: If the method is a write no_lfn method, then the return value are completely different. 
We only return the result of the master catalog """ successful = {} failed = {} failedCatalogs = {} successfulCatalogs = {} specialConditions = kws.pop("fcConditions") if "fcConditions" in kws else None allLfns = [] lfnMapDict = {} masterResult = {} parms1 = [] if self.call not in self.no_lfn_methods: fileInfo = parms[0] result = checkArgumentFormat(fileInfo, generateMap=True) if not result["OK"]: return result fileInfo, lfnMapDict = result["Value"] # No need to check the LFNs again in the clients kws["LFNChecking"] = False allLfns = list(fileInfo) parms1 = parms[1:] for catalogName, oCatalog, master in self.writeCatalogs: # Skip if the method is not implemented in this catalog # NOTE: it is impossible for the master since the write method list is populated # only from the master catalog, and if the method is not there, __getattr__ # would raise an exception if not oCatalog.hasCatalogMethod(self.call): continue method = getattr(oCatalog, self.call) if self.call in self.no_lfn_methods: result = method(*parms, **kws) else: if isinstance(specialConditions, dict): condition = specialConditions.get(catalogName) else: condition = specialConditions # Check whether this catalog should be used for this method res = self.condParser(catalogName, self.call, fileInfo, condition=condition) # condParser never returns S_ERROR condEvals = res["Value"]["Successful"] # For a master catalog, ALL the lfns should be valid if master: if any([not valid for valid in condEvals.values()]): gLogger.error("The master catalog is not valid for some LFNS", condEvals) return S_ERROR("The master catalog is not valid for some LFNS %s" % condEvals) validLFNs = dict((lfn, fileInfo[lfn]) for lfn in condEvals if condEvals[lfn]) # We can skip the execution without worry, # since at this level it is for sure not a master catalog if not validLFNs: gLogger.debug("No valid LFN, skipping the call") continue invalidLFNs = [lfn for lfn in condEvals if not condEvals[lfn]] if invalidLFNs: gLogger.debug( "Some LFNs are not valid for operation '%s' on catalog '%s' : %s" % (self.call, catalogName, invalidLFNs) ) result = method(validLFNs, *parms1, **kws) if master: masterResult = result if not result["OK"]: if master: # If this is the master catalog and it fails we don't want to continue with the other catalogs self.log.error( "Failed to execute call on master catalog", "%s on %s: %s" % (self.call, catalogName, result["Message"]), ) return result else: # Otherwise we keep the failed catalogs so we can update their state later failedCatalogs[catalogName] = result["Message"] else: successfulCatalogs[catalogName] = result["Value"] if allLfns: if result["OK"]: for lfn, message in result["Value"]["Failed"].items(): # Save the error message for the failed operations failed.setdefault(lfn, {})[catalogName] = message if master: # If this is the master catalog then we should not attempt the operation on other catalogs fileInfo.pop(lfn, None) for lfn, result in result["Value"]["Successful"].items(): # Save the result return for each file for the successful operations successful.setdefault(lfn, {})[catalogName] = result if allLfns: # This recovers the states of the files that completely failed i.e. 
when S_ERROR is returned by a catalog for catalogName, errorMessage in failedCatalogs.items(): for lfn in allLfns: failed.setdefault(lfn, {})[catalogName] = errorMessage # Restore original lfns if they were changed by normalization if lfnMapDict: for lfn in list(failed): failed[lfnMapDict.get(lfn, lfn)] = failed.pop(lfn) for lfn in list(successful): successful[lfnMapDict.get(lfn, lfn)] = successful.pop(lfn) resDict = {"Failed": failed, "Successful": successful} return S_OK(resDict) else: # FIXME: Return just master result here. This is temporary as more detailed # per catalog result needs multiple fixes in various client calls return masterResult def r_execute(self, *parms, **kws): """Read method executor.""" successful = {} failed = {} for _catalogName, oCatalog, _master in self.readCatalogs: # Skip if the method is not implemented in this catalog if not oCatalog.hasCatalogMethod(self.call): continue method = getattr(oCatalog, self.call) res = method(*parms, **kws) if res["OK"]: if "Successful" in res["Value"]: for key, item in res["Value"]["Successful"].items(): successful.setdefault(key, item) failed.pop(key, None) for key, item in res["Value"]["Failed"].items(): if key not in successful: failed[key] = item else: return res if not successful and not failed: return S_ERROR(DErrno.EFCERR, "Failed to perform %s from any catalog" % self.call) return S_OK({"Failed": failed, "Successful": successful}) ########################################################################################### # # Below is the method for obtaining the objects instantiated for a provided catalogue configuration # def addCatalog(self, catalogName, mode="Write", master=False): """Add a new catalog with catalogName to the pool of catalogs in mode: "Read","Write" or "ReadWrite" """ result = self._generateCatalogObject(catalogName) if not result["OK"]: return result oCatalog = result["Value"] if mode.lower().find("read") != -1: self.readCatalogs.append((catalogName, oCatalog, master)) if mode.lower().find("write") != -1: self.writeCatalogs.append((catalogName, oCatalog, master)) return S_OK() def removeCatalog(self, catalogName): """Remove the specified catalog from the internal pool""" catalog_removed = False for i in range(len(self.readCatalogs)): catalog, _object, _master = self.readCatalogs[i] if catalog == catalogName: del self.readCatalogs[i] catalog_removed = True break for i in range(len(self.writeCatalogs)): catalog, _object, _master = self.writeCatalogs[i] if catalog == catalogName: del self.writeCatalogs[i] catalog_removed = True break if catalog_removed: return S_OK() else: return S_OK("Catalog does not exist") def _getSelectedCatalogs(self, desiredCatalogs): for catalogName in desiredCatalogs: result = self._getCatalogConfigDetails(catalogName) if not result["OK"]: return result catalogConfig = result["Value"] result = self._generateCatalogObject(catalogName) if not result["OK"]: return result oCatalog = result["Value"] if re.search("Read", catalogConfig["AccessType"]): if catalogConfig["Master"]: self.readCatalogs.insert(0, (catalogName, oCatalog, catalogConfig["Master"])) else: self.readCatalogs.append((catalogName, oCatalog, catalogConfig["Master"])) if re.search("Write", catalogConfig["AccessType"]): if catalogConfig["Master"]: self.writeCatalogs.insert(0, (catalogName, oCatalog, catalogConfig["Master"])) else: self.writeCatalogs.append((catalogName, oCatalog, catalogConfig["Master"])) return S_OK() def _getEligibleCatalogs(self): """Get a list of eligible catalogs :return: S_OK/S_ERROR, Value - a 
list of catalog names """ # First, look in the Operations, if nothing defined look in /Resources for backward compatibility fileCatalogs = self.opHelper.getValue("/Services/Catalogs/CatalogList", []) if not fileCatalogs: result = self.opHelper.getSections("/Services/Catalogs") if result["OK"]: fileCatalogs = result["Value"] else: res = gConfig.getSections(self.rootConfigPath, listOrdered=True) if not res["OK"]: errStr = "FileCatalog._getEligibleCatalogs: Failed to get file catalog configuration." self.log.error(errStr, res["Message"]) return S_ERROR(errStr) fileCatalogs = res["Value"] return S_OK(fileCatalogs) def _getCatalogs(self): """Updates self.readCatalogs and self.writeCatalogs with list of catalog objects as found in the CS""" # Get the eligible catalogs first result = self._getEligibleCatalogs() if not result["OK"]: return result fileCatalogs = result["Value"] # Get the catalog objects now for catalogName in fileCatalogs: res = self._getCatalogConfigDetails(catalogName) if not res["OK"]: return res catalogConfig = res["Value"] if catalogConfig["Status"] == "Active": res = self._generateCatalogObject(catalogName) if not res["OK"]: return res oCatalog = res["Value"] master = catalogConfig["Master"] # If the catalog is read type if re.search("Read", catalogConfig["AccessType"]): if master: self.readCatalogs.insert(0, (catalogName, oCatalog, master)) else: self.readCatalogs.append((catalogName, oCatalog, master)) # If the catalog is write type if re.search("Write", catalogConfig["AccessType"]): if master: self.writeCatalogs.insert(0, (catalogName, oCatalog, master)) else: self.writeCatalogs.append((catalogName, oCatalog, master)) return S_OK() def _getCatalogConfigDetails(self, catalogName): # First obtain the options that are available catalogConfigPath = "%s/%s" % (self.rootConfigPath, catalogName) result = gConfig.getOptionsDict(catalogConfigPath) if not result["OK"]: errStr = "FileCatalog._getCatalogConfigDetails: Failed to get catalog options." self.log.error(errStr, catalogName) return S_ERROR(errStr) catalogConfig = result["Value"] result = self.opHelper.getOptionsDict("/Services/Catalogs/%s" % catalogName) if result["OK"]: catalogConfig.update(result["Value"]) # The 'Status' option should be defined (default = 'Active') if "Status" not in catalogConfig: warnStr = "FileCatalog._getCatalogConfigDetails: 'Status' option not defined." self.log.warn(warnStr, catalogName) catalogConfig["Status"] = "Active" # The 'AccessType' option must be defined if "AccessType" not in catalogConfig: errStr = "FileCatalog._getCatalogConfigDetails: Required option 'AccessType' not defined." self.log.error(errStr, catalogName) return S_ERROR(errStr) # Anything other than 'True' in the 'Master' option means it is not catalogConfig["Master"] = catalogConfig.setdefault("Master", False) == "True" return S_OK(catalogConfig) def _generateCatalogObject(self, catalogName): """Create a file catalog object from its name and CS description""" useProxy = gConfig.getValue("/LocalSite/Catalogs/%s/UseProxy" % catalogName, False) if not useProxy: useProxy = self.opHelper.getValue("/Services/Catalogs/%s/UseProxy" % catalogName, False) return FileCatalogFactory().createCatalog(catalogName, useProxy)
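The executor above optionally takes an fcConditions keyword that is handed to FCConditionParser, either as a single condition string applied to every catalog or as a {catalogName: condition} dict. A hedged sketch; the LFN, SE, catalog name and condition expressions are illustrative, and the exact condition syntax depends on the FCConditionParser plugins available in the installation.

fc = FileCatalog(vo='vo.example')
lfn = '/vo.example/user/s/someuser/file.txt'
fileDict = {lfn: {'PFN': '', 'Size': 1024, 'SE': 'EXAMPLE-SE', 'GUID': '...', 'Checksum': ''}}

# Same condition string evaluated for every write catalog
res = fc.addFile(fileDict, fcConditions="Filename=startswith('/vo.example')")

# Per-catalog condition: only the named catalog is filtered, the others always run
res = fc.removeFile(lfn, fcConditions={'TSCatalog': "Filename=find('/prod/')"})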
class RssConfiguration(object): """ RssConfiguration:: { Config: { State : Active | InActive, Cache : 300, FromAddress : '*****@*****.**' StatusType : { default : all, StorageElement: ReadAccess, WriteAccess, CheckAccess, RemoveAccess } } } """ def __init__(self): self.opsHelper = Operations() def getConfigState(self, default="InActive"): """ Gets from <pathToRSSConfiguration>/Config the value of State """ return self.opsHelper.getValue("%s/Config/State" % _rssConfigPath, default) def getConfigCache(self, default=300): """ Gets from <pathToRSSConfiguration>/Config the value of Cache """ return self.opsHelper.getValue("%s/Config/Cache" % _rssConfigPath, default) def getConfigFromAddress(self, default=None): """ Gets from <pathToRSSConfiguration>/Config the value of FromAddress """ return self.opsHelper.getValue("%s/Config/FromAddress" % _rssConfigPath, default) def getConfigStatusType(self, elementType=None): """ Gets all the status types per elementType, if not given, it takes default from CS. If not, hardcoded variable DEFAULT. """ _DEFAULTS = ("all",) res = self.opsHelper.getOptionsDict("%s/Config/StatusTypes" % _rssConfigPath) if res["OK"]: if elementType in res["Value"]: return List.fromChar(res["Value"][elementType]) if "default" in res["Value"]: return List.fromChar(res["Value"]["default"]) return _DEFAULTS
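A minimal usage sketch for the configuration helper above; the values in the comment are illustrative and live under the _rssConfigPath Operations section.

# Hypothetical CS content under <_rssConfigPath>/Config:
#   State = Active
#   Cache = 600
#   StatusTypes/StorageElement = ReadAccess, WriteAccess, CheckAccess, RemoveAccess
rssConfig = RssConfiguration()
print(rssConfig.getConfigState())        # 'Active', or the 'InActive' default
print(rssConfig.getConfigCache())        # 600, or the 300 default
print(rssConfig.getConfigStatusType('StorageElement'))
# ['ReadAccess', 'WriteAccess', 'CheckAccess', 'RemoveAccess'], falling back to ('all',)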
class GeneratorModels(object): """ Contains the list of known models """ def __init__(self): self.ops = Operations() self.models = {} res = self.ops.getOptionsDict("/Models") if res['OK']: self.models = res['Value'] def hasModel(self, model): """ Check that specified model exists """ if model in self.models: return S_OK() else: return S_ERROR("Model %s is not defined, use any of %s" % (model, self.models.keys())) def getFile(self, model): """ Return the proper model file (usually LesHouches) """ res = self.hasModel(model) if not res['OK']: return res if not self.models[model]: return S_ERROR("No file attached to model %s" % model) return S_OK(self.models[model]) def getParamsForWhizard(self, model): """ When creating the final file, this is needed to get the parameters for the SM """ params = '' if model == 'sm': params = """<GF type="float" value="1.16639E-5"> <!-- Fermi constant --> </GF> <mZ type="float" value="91.1882"> <!-- Z-boson mass --> </mZ> <mW type="float" value="80.419"> <!-- W-boson mass --> </mW> <mH type="float" value="120"> <!-- Higgs mass --> </mH> <alphas type="float" value="0.1178"> <!-- Strong coupling constant alpha_s(MZ) --> </alphas> <me type="float" value="0."> <!-- electron mass --> </me> <mmu type="float" value="0.1066"> <!-- muon mass --> </mmu> <mtau type="float" value="1.777"> <!-- tau-lepton mass --> </mtau> <ms type="float" value="0."> <!-- s-quark mass --> </ms> <mc type="float" value="0.54"> <!-- c-quark mass --> </mc> <mb type="float" value="2.9"> <!-- b-quark mass --> </mb> <mtop type="float" value="174"> <!-- t-quark mass --> </mtop> <wtop type="float" value="1.523"> <!-- t-quark width --> </wtop> <wZ type="float" value="2.443"> <!-- Z-boson width --> </wZ> <wW type="float" value="2.049"> <!-- W-boson width --> </wW> <wH type="float" value="0.3605E-02"> <!-- Higgs width --> </wH> <vckm11 type="float" value="0.97383"> <!-- Vud --> </vckm11> <vckm12 type="float" value="0.2272"> <!-- Vus --> </vckm12> <vckm13 type="float" value="0.00396"> <!-- Vub --> </vckm13> <vckm21 type="float" value="-0.2271"> <!-- Vcd --> </vckm21> <vckm22 type="float" value="0.97296"> <!-- Vcs --> </vckm22> <vckm23 type="float" value="0.04221"> <!-- Vcb --> </vckm23> <vckm31 type="float" value="0.00814"> <!-- Vtd --> </vckm31> <vckm32 type="float" value="-0.04161"> <!-- Vts --> </vckm32> <vckm33 type="float" value="0.99910"> <!-- Vtb --> </vckm33> <khgaz type="float" value="1.000"> <!-- anomaly Higgs coupling K factors --> </khgaz> <khgaga type="float" value="1.000"> <!-- anomaly Higgs coupling K factors --> </khgaga> <khgg type="float" value="1.000"> <!-- anomaly Higgs coupling K factors --> </khgg> """ else: params = """ """ return S_OK(params)
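A short usage sketch for the model registry above; model names and any attached LesHouches files depend entirely on the /Models section of the VO configuration, so the 'sm' entry is only an example.

models = GeneratorModels()               # reads /Models via Operations
res = models.hasModel('sm')
if res['OK']:
    print(models.getFile('sm'))          # S_OK(file name) or S_ERROR if no file is attached
    print(models.getParamsForWhizard('sm')['Value'][:120])  # start of the SM parameter block
else:
    print(res['Message'])                # lists the known models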
def web_getLaunchpadOpts(self): defaultParams = {"JobName" : [1, 'DIRAC'], "Executable" : [1, "/bin/ls"], "Arguments" : [1, "-ltrA"], "OutputSandbox" : [1, "std.out, std.err"], "JobGroup" : [0, "Unknown"], "InputData" : [0, ""], "OutputData" : [0, ""], "OutputSE" : [0, "DIRAC-USER"], "OutputPath": [0, ""], "CPUTime" : [0, "86400"], "Site" : [0, ""], "BannedSite" : [0, ""], "Platform" : [0, "Linux_x86_64_glibc-2.12"], "Priority" : [0, "5"], "StdError" : [0, "std.err"], "StdOutput" : [0, "std.out"], "Parameters" : [0, "0"], "ParameterStart" : [0, "0"], "ParameterStep" : [0, "1"], "ParameterFactor": [0, "0"]} delimiter = gConfig.getValue("/WebApp/Launchpad/ListSeparator" , ',') options = self.__getOptionsFromCS(delimiter=delimiter) # platform = self.__getPlatform() # if platform and options: # if not options.has_key("Platform"): # options[ "Platform" ] = platform # else: # csPlatform = list(options[ "Platform" ]) # allPlatforms = csPlatform + platform # platform = uniqueElements(allPlatforms) # options[ "Platform" ] = platform gLogger.debug("Combined options from CS: %s" % options) override = gConfig.getValue("/WebApp/Launchpad/OptionsOverride" , False) gLogger.info("end __getLaunchpadOpts") # Updating the default values from OptionsOverride configuration branch for key in options: if key not in defaultParams: defaultParams[key] = [ 0, "" ] defaultParams[key][1] = options[key][0] # Reading of the predefined sets of launchpad parameters values obj = Operations( vo = self.vo ) predefinedSets = {} launchpadSections = obj.getSections("Launchpad") import pprint if launchpadSections['OK']: for section in launchpadSections["Value"]: predefinedSets[section] = {} sectionOptions = obj.getOptionsDict("Launchpad/" + section) pprint.pprint(sectionOptions) if sectionOptions['OK']: predefinedSets[section] = sectionOptions["Value"] self.write({"success":"true", "result":defaultParams, "predefinedSets":predefinedSets})
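The same predefined-set lookup can be performed outside the web handler; a small sketch, with an illustrative VO name and Operations layout.

from DIRAC.ConfigurationSystem.Client.Helpers.Operations import Operations

# Hypothetical Operations layout:
#   Launchpad/MonteCarlo/Executable = /bin/my_mc.sh
#   Launchpad/MonteCarlo/CPUTime = 172800
ops = Operations(vo='vo.example')
predefinedSets = {}
launchpadSections = ops.getSections('Launchpad')
if launchpadSections['OK']:
    for section in launchpadSections['Value']:
        sectionOptions = ops.getOptionsDict('Launchpad/' + section)
        predefinedSets[section] = sectionOptions['Value'] if sectionOptions['OK'] else {}
print(predefinedSets)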
class DDSim( LCApplication ): """ DDSim Application Class """ def __init__(self, paramdict = None): self.startFrom = 0 self.randomSeed = 0 self.detectorModel = '' super(DDSim, self).__init__( paramdict ) ##Those 5 need to come after default constructor self._modulename = 'DDSimAnalysis' self._moduledescription = 'Module to run DDSim' self.appname = 'ddsim' self.datatype = 'SIM' self.detectortype = '' self._paramsToExclude.extend( [ "outputDstPath", "outputRecPath", "OutputDstFile", "OutputRecFile" ] ) self._ops = Operations() def setRandomSeed(self, randomSeed): """ Optional: Define random seed to use. Default is the jobID. :param int randomSeed: Seed to use during simulation. """ self._checkArgs( { 'randomSeed' : types.IntType } ) self.randomSeed = randomSeed def setDetectorModel(self, detectorModel): """Define detector model to use for ddsim simulation The detector model can be a collection of XML files Either one has to use a detector model provided by LCGeo or DD4hep, which can be found on CVMFS or the complete XML needs to be passed as a tarball in the input sandbox or on the grid The tarball name must be detectorModel plus extension The tarball must contain all xml files inside a folder called detectorModel. That is the main file is located in detectorModel/detectorModel.xml :param string detectorModel: Detector Model to use for DDSim simulation. Can be on CVMFS, tarball LFN or inputSandbox tarball """ self._checkArgs( { 'detectorModel' : types.StringTypes } ) extensions = (".zip", ".tar.gz", ".tgz") ## file on the grid if detectorModel.lower().startswith("lfn:"): self.inputSB.append(detectorModel) self.detectorModel = os.path.basename(detectorModel) for ext in extensions: if detectorModel.endswith(ext): self.detectorModel = os.path.basename(detectorModel).replace( ext, '' ) return S_OK() ## local file elif detectorModel.endswith( extensions ): for ext in extensions: if detectorModel.endswith(ext): self.detectorModel = os.path.basename(detectorModel).replace( ext, '' ) break if os.path.exists(detectorModel): self.inputSB.append(detectorModel) else: self._log.notice("Specified detector model file does not exist locally, I hope you know what you're doing") return S_OK() ## DetectorModel is part of the software else: knownDetectors = self.getKnownDetectorModels() if not knownDetectors['OK']: self._log.error("Failed to get knownDetectorModels", knownDetectors["Message"] ) return knownDetectors elif detectorModel in knownDetectors['Value']: self.detectorModel = detectorModel else: self._log.error("Unknown detector model: ", detectorModel ) return S_ERROR( "Unknown detector model in ddsim: %s" % detectorModel ) return S_OK() def setStartFrom(self, startfrom): """ Optional: Define from where ddsim starts to read in the input file :param int startfrom: from where ddsim starts to read the input file """ self._checkArgs( { 'startfrom' : types.IntType } ) self.startFrom = startfrom def _userjobmodules(self, stepdefinition): res1 = self._setApplicationModuleAndParameters(stepdefinition) res2 = self._setUserJobFinalization(stepdefinition) if not res1["OK"] or not res2["OK"] : return S_ERROR('userjobmodules failed') return S_OK() def _prodjobmodules(self, stepdefinition): res1 = self._setApplicationModuleAndParameters(stepdefinition) res2 = self._setOutputComputeDataList(stepdefinition) if not res1["OK"] or not res2["OK"] : return S_ERROR('prodjobmodules failed') return S_OK() def _checkConsistency(self): """ FIXME """ if not self.version: return S_ERROR('No version found') if self.steeringFile: 
if not os.path.exists(self.steeringFile) and not self.steeringFile.lower().count("lfn:"): res = Exists(self.steeringFile) if not res['OK']: return res if not self.detectorModel: return S_ERROR("No detectorModel set") #res = self._checkRequiredApp() #if not res['OK']: # return res if self._jobtype != 'User': self._listofoutput.append({"outputFile":"@{OutputFile}", "outputPath":"@{OutputPath}", "outputDataSE":'@{OutputSE}'}) self.prodparameters['detectorType'] = self.detectortype if self.detectorModel: self.prodparameters['slic_detectormodel'] = self.detectorModel if not self.startFrom : self._log.info('No startFrom defined for DDSim : start from the beginning') return S_OK() def _applicationModule(self): md1 = self._createModuleDefinition() md1.addParameter(Parameter("randomSeed", 0, "int", "", "", False, False, "Random seed for the generator")) md1.addParameter(Parameter("detectorModel", "", "string", "", "", False, False, "Detector model for simulation")) md1.addParameter(Parameter("startFrom", 0, "int", "", "", False, False, "From where DDSim starts to read the input file")) md1.addParameter(Parameter("debug", False, "bool", "", "", False, False, "debug mode")) return md1 def _applicationModuleValues(self, moduleinstance): moduleinstance.setValue("randomSeed", self.randomSeed) moduleinstance.setValue("detectorModel", self.detectorModel) moduleinstance.setValue("startFrom", self.startFrom) moduleinstance.setValue("debug", self.debug) def _checkWorkflowConsistency(self): return self._checkRequiredApp() def _resolveLinkedStepParameters(self, stepinstance): if type(self._linkedidx) == types.IntType: self._inputappstep = self._jobsteps[self._linkedidx] if self._inputappstep: stepinstance.setLink("InputFile", self._inputappstep.getType(), "OutputFile") return S_OK() def getKnownDetectorModels( self, version=None ): """return a list of known detectorModels Depends on the version of the software though... :param string version: Optional: Software version for which to print the detector models. If not given the version of the application instance is used. :returns: S_OK with list of detector models known for this software version, S_ERROR """ if version is None and not self.version: return S_ERROR( "No software version defined" ) detectorModels = self._ops.getOptionsDict("/DDSimDetectorModels/%s" % (version if version is not None else self.version)) return detectorModels
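# Hedged usage sketch for the DDSim application defined above; version, detector model and
# seed values are illustrative, and setVersion() is assumed to be inherited from the base
# (LC)Application class rather than defined here.
ddsim = DDSim()
ddsim.setVersion('ILCSoft-2017-05-30')      # assumed base-class setter
ddsim.setDetectorModel('CLIC_o3_v08')       # must be known under /DDSimDetectorModels/<version>
ddsim.setRandomSeed(12345)
ddsim.setStartFrom(0)
res = ddsim.getKnownDetectorModels()
if res['OK']:
    print(sorted(res['Value']))             # detector models listed in the Operations CS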
class RssConfiguration: """ RssConfiguration:: { Config: { State : Active | InActive, Cache : 300, FromAddress : '*****@*****.**' StatusType : { default : all, StorageElement: ReadAccess, WriteAccess, CheckAccess, RemoveAccess } } } """ def __init__(self): self.opsHelper = Operations() def getConfigState(self, default="InActive"): """ Gets from <pathToRSSConfiguration>/Config the value of State """ return self.opsHelper.getValue("%s/Config/State" % _rssConfigPath, default) def getConfigCache(self, default=300): """ Gets from <pathToRSSConfiguration>/Config the value of Cache """ return self.opsHelper.getValue("%s/Config/Cache" % _rssConfigPath, default) def getConfigFromAddress(self, default=None): """ Gets from <pathToRSSConfiguration>/Config the value of FromAddress """ return self.opsHelper.getValue("%s/Config/FromAddress" % _rssConfigPath, default) def getConfigStatusType(self, elementType=None): """ Gets all the status types per elementType, if not given, it takes default from CS. If not, hardcoded variable DEFAULT. """ _DEFAULTS = ("all",) res = self.opsHelper.getOptionsDict("%s/Config/StatusTypes" % _rssConfigPath) if res["OK"]: if elementType in res["Value"]: return List.fromChar(res["Value"][elementType]) if "default" in res["Value"]: return List.fromChar(res["Value"]["default"]) return _DEFAULTS
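# Illustration of the lookup order implemented by getConfigStatusType() above, using the
# Config section sketched in the class docstring: an entry matching the elementType wins,
# otherwise the "default" entry is used, otherwise the hard-coded ("all",) fallback.
rssConfig = RssConfiguration()
print(rssConfig.getConfigStatusType('StorageElement'))   # e.g. ['ReadAccess', 'WriteAccess', ...]
print(rssConfig.getConfigStatusType('ComputingElement')) # falls back to 'default', else ('all',)
print(rssConfig.getConfigState())                        # 'Active' or 'InActive' (default)
print(rssConfig.getConfigCache())                        # cache lifetime in seconds, default 300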
class Limiter(object): def __init__(self, jobDB=None, opsHelper=None): """ Constructor """ self.__runningLimitSection = "JobScheduling/RunningLimit" self.__matchingDelaySection = "JobScheduling/MatchingDelay" self.csDictCache = DictCache() self.condCache = DictCache() self.delayMem = {} if jobDB: self.jobDB = jobDB else: self.jobDB = JobDB() self.log = gLogger.getSubLogger("Limiter") if opsHelper: self.__opsHelper = opsHelper else: self.__opsHelper = Operations() def getNegativeCond(self): """ Get negative condition for ALL sites """ orCond = self.condCache.get("GLOBAL") if orCond: return orCond negCond = {} # Run Limit result = self.__opsHelper.getSections(self.__runningLimitSection) sites = [] if result['OK']: sites = result['Value'] for siteName in sites: result = self.__getRunningCondition(siteName) if not result['OK']: continue data = result['Value'] if data: negCond[siteName] = data # Delay limit result = self.__opsHelper.getSections(self.__matchingDelaySection) sites = [] if result['OK']: sites = result['Value'] for siteName in sites: result = self.__getDelayCondition(siteName) if not result['OK']: continue data = result['Value'] if not data: continue if siteName in negCond: negCond[siteName] = self.__mergeCond(negCond[siteName], data) else: negCond[siteName] = data orCond = [] for siteName in negCond: negCond[siteName]['Site'] = siteName orCond.append(negCond[siteName]) self.condCache.add("GLOBAL", 10, orCond) return orCond def getNegativeCondForSite(self, siteName): """ Generate a negative query based on the limits set on the site """ # Check if Limits are imposed onto the site negativeCond = {} if self.__opsHelper.getValue("JobScheduling/CheckJobLimits", True): result = self.__getRunningCondition(siteName) if result['OK']: negativeCond = result['Value'] self.log.verbose('Negative conditions for site %s after checking limits are: %s' % (siteName, str(negativeCond))) if self.__opsHelper.getValue("JobScheduling/CheckMatchingDelay", True): result = self.__getDelayCondition(siteName) if result['OK']: delayCond = result['Value'] self.log.verbose('Negative conditions for site %s after delay checking are: %s' % (siteName, str(delayCond))) negativeCond = self.__mergeCond(negativeCond, delayCond) if negativeCond: self.log.info('Negative conditions for site %s are: %s' % (siteName, str(negativeCond))) return negativeCond def __mergeCond(self, negCond, addCond): """ Merge two negative dicts """ # Merge both negative dicts for attr in addCond: if attr not in negCond: negCond[attr] = [] for value in addCond[attr]: if value not in negCond[attr]: negCond[attr].append(value) return negCond def __extractCSData(self, section): """ Extract limiting information from the CS in the form: { 'JobType' : { 'Merge' : 20, 'MCGen' : 1000 } } """ stuffDict = self.csDictCache.get(section) if stuffDict: return S_OK(stuffDict) result = self.__opsHelper.getSections(section) if not result['OK']: return result attribs = result['Value'] stuffDict = {} for attName in attribs: result = self.__opsHelper.getOptionsDict("%s/%s" % (section, attName)) if not result['OK']: return result attLimits = result['Value'] try: attLimits = dict([(k, int(attLimits[k])) for k in attLimits]) except Exception as excp: errMsg = "%s/%s has to contain numbers: %s" % (section, attName, str(excp)) self.log.error(errMsg) return S_ERROR(errMsg) stuffDict[attName] = attLimits self.csDictCache.add(section, 300, stuffDict) return S_OK(stuffDict) def __getRunningCondition(self, siteName): """ Get extra conditions allowing site throttling """ 
siteSection = "%s/%s" % (self.__runningLimitSection, siteName) result = self.__extractCSData(siteSection) if not result['OK']: return result limitsDict = result['Value'] # limitsDict is something like { 'JobType' : { 'Merge' : 20, 'MCGen' : 1000 } } if not limitsDict: return S_OK({}) # Check if the site exceeding the given limits negCond = {} for attName in limitsDict: if attName not in self.jobDB.jobAttributeNames: self.log.error("Attribute %s does not exist. Check the job limits" % attName) continue cK = "Running:%s:%s" % (siteName, attName) data = self.condCache.get(cK) if not data: result = self.jobDB.getCounters( 'Jobs', [attName], { 'Site': siteName, 'Status': [ 'Running', 'Matched', 'Stalled']}) if not result['OK']: return result data = result['Value'] data = dict([(k[0][attName], k[1]) for k in data]) self.condCache.add(cK, 10, data) for attValue in limitsDict[attName]: limit = limitsDict[attName][attValue] running = data.get(attValue, 0) if running >= limit: self.log.verbose('Job Limit imposed at %s on %s/%s=%d,' ' %d jobs already deployed' % (siteName, attName, attValue, limit, running)) if attName not in negCond: negCond[attName] = [] negCond[attName].append(attValue) # negCond is something like : {'JobType': ['Merge']} return S_OK(negCond) def updateDelayCounters(self, siteName, jid): # Get the info from the CS siteSection = "%s/%s" % (self.__matchingDelaySection, siteName) result = self.__extractCSData(siteSection) if not result['OK']: return result delayDict = result['Value'] # limitsDict is something like { 'JobType' : { 'Merge' : 20, 'MCGen' : 1000 } } if not delayDict: return S_OK() attNames = [] for attName in delayDict: if attName not in self.jobDB.jobAttributeNames: self.log.error("Attribute %s does not exist in the JobDB. Please fix it!" % attName) else: attNames.append(attName) result = self.jobDB.getJobAttributes(jid, attNames) if not result['OK']: self.log.error("While retrieving attributes coming from %s: %s" % (siteSection, result['Message'])) return result atts = result['Value'] # Create the DictCache if not there if siteName not in self.delayMem: self.delayMem[siteName] = DictCache() # Update the counters delayCounter = self.delayMem[siteName] for attName in atts: attValue = atts[attName] if attValue in delayDict[attName]: delayTime = delayDict[attName][attValue] self.log.notice("Adding delay for %s/%s=%s of %s secs" % (siteName, attName, attValue, delayTime)) delayCounter.add((attName, attValue), delayTime) return S_OK() def __getDelayCondition(self, siteName): """ Get extra conditions allowing matching delay """ if siteName not in self.delayMem: return S_OK({}) lastRun = self.delayMem[siteName].getKeys() negCond = {} for attName, attValue in lastRun: if attName not in negCond: negCond[attName] = [] negCond[attName].append(attValue) return S_OK(negCond)
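# Sketch of the Operations CS layout the Limiter above consumes (paths and numbers are
# examples only):
#   JobScheduling/RunningLimit/LCG.CERN.ch/JobType/Merge = 20
#   JobScheduling/MatchingDelay/LCG.CERN.ch/JobType/Merge = 60
# If 25 'Merge' jobs are already Running/Matched/Stalled at that site, the returned negative
# condition excludes further Merge jobs from being matched there.
limiter = Limiter()
negCond = limiter.getNegativeCondForSite('LCG.CERN.ch')
print(negCond)   # {} when no limit is hit, e.g. {'JobType': ['Merge']} when one is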
class FileCatalog: ro_methods = ['exists', 'isLink', 'readLink', 'isFile', 'getFileMetadata', 'getReplicas', 'getReplicaStatus', 'getFileSize', 'isDirectory', 'getDirectoryReplicas', 'listDirectory', 'getDirectoryMetadata', 'getDirectorySize', 'getDirectoryContents', 'resolveDataset', 'getPathPermissions', 'getLFNForPFN', 'getUsers', 'getGroups', 'getFileUserMetadata'] write_methods = ['createLink', 'removeLink', 'addFile', 'setFileStatus', 'addReplica', 'removeReplica', 'removeFile', 'setReplicaStatus', 'setReplicaHost', 'createDirectory', 'setDirectoryStatus', 'removeDirectory', 'removeDataset', 'removeFileFromDataset', 'createDataset'] def __init__( self, catalogs = [], vo = None ): """ Default constructor """ self.valid = True self.timeout = 180 self.readCatalogs = [] self.writeCatalogs = [] self.rootConfigPath = '/Resources/FileCatalogs' self.vo = vo if vo else getVOfromProxyGroup().get( 'Value', None ) self.opHelper = Operations( vo = self.vo ) if type( catalogs ) in types.StringTypes: catalogs = [catalogs] if catalogs: res = self._getSelectedCatalogs( catalogs ) else: res = self._getCatalogs() if not res['OK']: self.valid = False elif ( len( self.readCatalogs ) == 0 ) and ( len( self.writeCatalogs ) == 0 ): self.valid = False def isOK( self ): return self.valid def getReadCatalogs( self ): return self.readCatalogs def getWriteCatalogs( self ): return self.writeCatalogs def __getattr__( self, name ): self.call = name if name in FileCatalog.write_methods: return self.w_execute elif name in FileCatalog.ro_methods: return self.r_execute else: raise AttributeError def w_execute( self, *parms, **kws ): """ Write method executor. """ successful = {} failed = {} failedCatalogs = [] fileInfo = parms[0] res = checkArgumentFormat( fileInfo ) if not res['OK']: return res fileInfo = res['Value'] allLfns = fileInfo.keys() for catalogName, oCatalog, master in self.writeCatalogs: method = getattr( oCatalog, self.call ) res = method( fileInfo, **kws ) if not res['OK']: if master: # If this is the master catalog and it fails we dont want to continue with the other catalogs gLogger.error( "FileCatalog.w_execute: Failed to execute %s on master catalog %s." % ( self.call, catalogName ), res['Message'] ) return res else: # Otherwise we keep the failed catalogs so we can update their state later failedCatalogs.append( ( catalogName, res['Message'] ) ) else: for lfn, message in res['Value']['Failed'].items(): # Save the error message for the failed operations failed.setdefault( lfn, {} )[catalogName] = message if master: # If this is the master catalog then we should not attempt the operation on other catalogs fileInfo.pop( lfn, None ) for lfn, result in res['Value']['Successful'].items(): # Save the result return for each file for the successful operations successful.setdefault( lfn, {} )[catalogName] = result # This recovers the states of the files that completely failed i.e. when S_ERROR is returned by a catalog for catalogName, errorMessage in failedCatalogs: for lfn in allLfns: failed.setdefault( lfn, {} )[catalogName] = errorMessage resDict = {'Failed':failed, 'Successful':successful} return S_OK( resDict ) def r_execute( self, *parms, **kws ): """ Read method executor. 
""" successful = {} failed = {} for _catalogName, oCatalog, _master in self.readCatalogs: method = getattr( oCatalog, self.call ) res = method( *parms, **kws ) if res['OK']: if 'Successful' in res['Value']: for key, item in res['Value']['Successful'].items(): successful.setdefault( key, item ) failed.pop( key, None ) for key, item in res['Value']['Failed'].items(): if key not in successful: failed[key] = item else: return res if not successful and not failed: return S_ERROR( "Failed to perform %s from any catalog" % self.call ) return S_OK( {'Failed':failed, 'Successful':successful} ) ########################################################################################### # # Below is the method for obtaining the objects instantiated for a provided catalogue configuration # def addCatalog( self, catalogName, mode = "Write", master = False ): """ Add a new catalog with catalogName to the pool of catalogs in mode: "Read","Write" or "ReadWrite" """ result = self._generateCatalogObject( catalogName ) if not result['OK']: return result oCatalog = result['Value'] if mode.lower().find( "read" ) != -1: self.readCatalogs.append( ( catalogName, oCatalog, master ) ) if mode.lower().find( "write" ) != -1: self.writeCatalogs.append( ( catalogName, oCatalog, master ) ) return S_OK() def removeCatalog( self, catalogName ): """ Remove the specified catalog from the internal pool """ catalog_removed = False for i in range( len( self.readCatalogs ) ): catalog, _object, _master = self.readCatalogs[i] if catalog == catalogName: del self.readCatalogs[i] catalog_removed = True break for i in range( len( self.writeCatalogs ) ): catalog, _object, _master = self.writeCatalogs[i] if catalog == catalogName: del self.writeCatalogs[i] catalog_removed = True break if catalog_removed: return S_OK() else: return S_OK( 'Catalog does not exist' ) def _getSelectedCatalogs( self, desiredCatalogs ): for catalogName in desiredCatalogs: res = self._generateCatalogObject( catalogName ) if not res['OK']: return res oCatalog = res['Value'] self.readCatalogs.append( ( catalogName, oCatalog, True ) ) self.writeCatalogs.append( ( catalogName, oCatalog, True ) ) return S_OK() def _getCatalogs( self ): # Get the eligible catalogs first # First, look in the Operations, if nothing defined look in /Resources for backward compatibility result = self.opHelper.getSections( '/Services/Catalogs' ) fileCatalogs = [] operationsFlag = False if result['OK']: fileCatalogs = result['Value'] operationsFlag = True else: res = gConfig.getSections( self.rootConfigPath, listOrdered = True ) if not res['OK']: errStr = "FileCatalog._getCatalogs: Failed to get file catalog configuration." 
gLogger.error( errStr, res['Message'] ) return S_ERROR( errStr ) fileCatalogs = res['Value'] # Get the catalogs now for catalogName in fileCatalogs: res = self._getCatalogConfigDetails( catalogName ) if not res['OK']: return res catalogConfig = res['Value'] if operationsFlag: result = self.opHelper.getOptionsDict( '/Services/Catalogs/%s' % catalogName ) if not result['OK']: return result catalogConfig.update( result['Value'] ) if catalogConfig['Status'] == 'Active': res = self._generateCatalogObject( catalogName ) if not res['OK']: return res oCatalog = res['Value'] master = catalogConfig['Master'] # If the catalog is read type if re.search( 'Read', catalogConfig['AccessType'] ): if master: self.readCatalogs.insert( 0, ( catalogName, oCatalog, master ) ) else: self.readCatalogs.append( ( catalogName, oCatalog, master ) ) # If the catalog is write type if re.search( 'Write', catalogConfig['AccessType'] ): if master: self.writeCatalogs.insert( 0, ( catalogName, oCatalog, master ) ) else: self.writeCatalogs.append( ( catalogName, oCatalog, master ) ) return S_OK() def _getCatalogConfigDetails( self, catalogName ): # First obtain the options that are available catalogConfigPath = '%s/%s' % ( self.rootConfigPath, catalogName ) res = gConfig.getOptions( catalogConfigPath ) if not res['OK']: errStr = "FileCatalog._getCatalogConfigDetails: Failed to get catalog options." gLogger.error( errStr, catalogName ) return S_ERROR( errStr ) catalogConfig = {} for option in res['Value']: configPath = '%s/%s' % ( catalogConfigPath, option ) optionValue = gConfig.getValue( configPath ) catalogConfig[option] = optionValue # The 'Status' option should be defined (default = 'Active') if 'Status' not in catalogConfig: warnStr = "FileCatalog._getCatalogConfigDetails: 'Status' option not defined." gLogger.warn( warnStr, catalogName ) catalogConfig['Status'] = 'Active' # The 'AccessType' option must be defined if 'AccessType' not in catalogConfig: errStr = "FileCatalog._getCatalogConfigDetails: Required option 'AccessType' not defined." gLogger.error( errStr, catalogName ) return S_ERROR( errStr ) # Anything other than 'True' in the 'Master' option means it is not catalogConfig['Master'] = ( catalogConfig.setdefault( 'Master', False ) == 'True' ) return S_OK( catalogConfig ) def _generateCatalogObject( self, catalogName ): """ Create a file catalog object from its name and CS description """ useProxy = gConfig.getValue( '/LocalSite/Catalogs/%s/UseProxy' % catalogName, False ) if not useProxy: useProxy = self.opHelper.getValue( '/Services/Catalogs/%s/UseProxy' % catalogName, False ) return FileCatalogFactory().createCatalog( catalogName, useProxy )
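# Minimal usage sketch for the FileCatalog facade above; the VO and LFN are placeholders.
# Read methods fan out over all read catalogs, write methods over all write catalogs, and a
# failing master catalog aborts the whole write operation.
fc = FileCatalog(vo='myVO')
if fc.isOK():
    res = fc.exists('/myVO/user/s/someuser/some_file.txt')   # 'exists' is one of ro_methods
    if res['OK']:
        print(res['Value']['Successful'])
        print(res['Value']['Failed'])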
class DMSHelpers(object): """ This class is used to get information about sites, SEs and their interrelations """ def __init__(self, vo=False): self.siteSEMapping = {} self.storageElementSet = set() self.siteSet = set() self.__opsHelper = Operations(vo=vo) self.failoverSEs = None self.archiveSEs = None self.notForJobSEs = None def getSiteSEMapping(self): """ Returns a dictionary of all sites and their localSEs as a list, e.g. {'LCG.CERN.ch':['CERN-RAW','CERN-RDST',...]} """ if self.siteSEMapping: return S_OK(self.siteSEMapping) # Get the list of SEs and keep a mapping of those using an Alias or a # BaseSE storageElements = gConfig.getSections('Resources/StorageElements') if not storageElements['OK']: gLogger.warn('Problem retrieving storage elements', storageElements['Message']) return storageElements storageElements = storageElements['Value'] equivalentSEs = {} for se in storageElements: for option in ('BaseSE', 'Alias'): originalSE = gConfig.getValue( 'Resources/StorageElements/%s/%s' % (se, option)) if originalSE: equivalentSEs.setdefault(originalSE, []).append(se) break siteSEMapping = {} gridTypes = gConfig.getSections('Resources/Sites/') if not gridTypes['OK']: gLogger.warn( 'Problem retrieving sections in /Resources/Sites', gridTypes['Message']) return gridTypes gridTypes = gridTypes['Value'] gLogger.debug('Grid Types are: %s' % (', '.join(gridTypes))) # Get a list of sites and their local SEs siteSet = set() storageElementSet = set() siteSEMapping[LOCAL] = {} for grid in gridTypes: result = gConfig.getSections('/Resources/Sites/%s' % grid) if not result['OK']: gLogger.warn('Problem retrieving /Resources/Sites/%s section' % grid) return result sites = result['Value'] siteSet.update(sites) for site in sites: candidateSEs = gConfig.getValue( '/Resources/Sites/%s/%s/SE' % (grid, site), []) if candidateSEs: candidateSEs += [ eqSE for se in candidateSEs for eqSE in equivalentSEs.get(se, [])] siteSEMapping[LOCAL].setdefault(site, set()).update(candidateSEs) storageElementSet.update(candidateSEs) # Add Sites from the SiteSEMappingByProtocol in the CS siteSEMapping[PROTOCOL] = {} cfgLocalSEPath = cfgPath('SiteSEMappingByProtocol') result = self.__opsHelper.getOptionsDict(cfgLocalSEPath) if result['OK']: sites = result['Value'] for site in sites: candidates = set(self.__opsHelper.getValue( cfgPath(cfgLocalSEPath, site), [])) ses = set(resolveSEGroup(candidates - siteSet) ) | (candidates & siteSet) # If a candidate is a site, then all local SEs are eligible for candidate in ses & siteSet: ses.remove(candidate) ses.update(siteSEMapping[LOCAL][candidate]) siteSEMapping[PROTOCOL].setdefault(site, set()).update(ses) # Add Sites from the SiteSEMappingByDownload in the CS, else # SiteLocalSEMapping (old convention) siteSEMapping[DOWNLOAD] = {} cfgLocalSEPath = cfgPath('SiteSEMappingByDownload') result = self.__opsHelper.getOptionsDict(cfgLocalSEPath) if not result['OK']: cfgLocalSEPath = cfgPath('SiteLocalSEMapping') result = self.__opsHelper.getOptionsDict(cfgLocalSEPath) if result['OK']: sites = result['Value'] for site in sites: candidates = set(self.__opsHelper.getValue( cfgPath(cfgLocalSEPath, site), [])) ses = set(resolveSEGroup(candidates - siteSet) ) | (candidates & siteSet) # If a candidate is a site, then all local SEs are eligible for candidate in ses & siteSet: ses.remove(candidate) ses.update(siteSEMapping[LOCAL][candidate]) siteSEMapping[DOWNLOAD].setdefault(site, set()).update(ses) self.siteSEMapping = siteSEMapping # Add storage elements that may not be associated with a site result 
= gConfig.getSections('/Resources/StorageElements') if not result['OK']: gLogger.warn( 'Problem retrieving /Resources/StorageElements section', result['Message']) return result self.storageElementSet = storageElementSet | set(result['Value']) self.siteSet = siteSet return S_OK(siteSEMapping) def getSites(self): """ Get the list of known sites """ self.getSiteSEMapping() return sorted(self.siteSet) def getTiers(self, withStorage=False, tier=None): """ Get the list of sites for a given (list of) Tier level """ sites = sorted(self.getShortSiteNames( withStorage=withStorage, tier=tier).values()) if sites and isinstance(sites[0], list): # List of lists, flatten it sites = [s for sl in sites for s in sl] return sites def getShortSiteNames(self, withStorage=True, tier=None): """ Create a directory of short site names pointing to full site names """ siteDict = {} result = self.getSiteSEMapping() if result['OK']: for site in self.siteSEMapping[LOCAL] if withStorage else self.siteSet: grid, shortSite, _country = site.split('.') if isinstance(tier, (int, long)) and \ (grid != 'LCG' or gConfig.getValue('/Resources/Sites/%s/%s/MoUTierLevel' % (grid, site), 999) != tier): continue if isinstance(tier, (list, tuple, dict, set)) and \ (grid != 'LCG' or gConfig.getValue('/Resources/Sites/%s/%s/MoUTierLevel' % (grid, site), 999) not in tier): continue if withStorage or tier is not None: siteDict[shortSite] = site else: siteDict.setdefault(shortSite, []).append(site) return siteDict def getStorageElements(self): """ Get the list of known SEs """ self.getSiteSEMapping() return sorted(self.storageElementSet) def isSEFailover(self, storageElement): """ Is this SE a failover SE """ if self.failoverSEs is None: seList = resolveSEGroup(self.__opsHelper.getValue( 'DataManagement/SEsUsedForFailover', [])) self.failoverSEs = resolveSEGroup(seList) # FIXME: remove string test at some point return storageElement in self.failoverSEs or (not self.failoverSEs and isinstance(storageElement, basestring) and 'FAILOVER' in storageElement.upper()) def isSEForJobs(self, storageElement, checkSE=True): """ Is this SE suitable for making jobs """ if checkSE: self.getSiteSEMapping() if storageElement not in self.storageElementSet: return False if self.notForJobSEs is None: seList = resolveSEGroup(self.__opsHelper.getValue( 'DataManagement/SEsNotToBeUsedForJobs', [])) self.notForJobSEs = resolveSEGroup(seList) return storageElement not in self.notForJobSEs def isSEArchive(self, storageElement): """ Is this SE an archive SE """ if self.archiveSEs is None: seList = resolveSEGroup(self.__opsHelper.getValue( 'DataManagement/SEsUsedForArchive', [])) self.archiveSEs = resolveSEGroup(seList) # FIXME: remove string test at some point return storageElement in self.archiveSEs or (not self.archiveSEs and isinstance(storageElement, basestring) and 'ARCHIVE' in storageElement.upper()) def getSitesForSE(self, storageElement, connectionLevel=None): """ Get the (list of) sites for a given SE and a given connctivity """ connectionIndex = _getConnectionIndex(connectionLevel, default=DOWNLOAD) if connectionIndex == LOCAL: return self._getLocalSitesForSE(storageElement) if connectionIndex == PROTOCOL: return self.getProtocolSitesForSE(storageElement) if connectionIndex == DOWNLOAD: return self.getDownloadSitesForSE(storageElement) return S_ERROR("Unknown connection level") def getLocalSiteForSE(self, se): """ Get the site at which the SE is """ sites = self._getLocalSitesForSE(se) if not sites['OK'] or not sites['Value']: return sites return 
S_OK(sites['Value'][0]) def _getLocalSitesForSE(self, se): """ Extract the list of sites that declare this SE """ mapping = self.getSiteSEMapping() if not mapping['OK']: return mapping if se not in self.storageElementSet: return S_ERROR('Non-existing SE') mapping = mapping['Value'][LOCAL] sites = [site for site in mapping if se in mapping[site]] if len(sites) > 1 and self.__opsHelper.getValue('DataManagement/ForceSingleSitePerSE', True): return S_ERROR('SE is at more than one site') return S_OK(sites) def getProtocolSitesForSE(self, se): """ Get sites that can access the SE by protocol """ mapping = self.getSiteSEMapping() if not mapping['OK']: return mapping if se not in self.storageElementSet: return S_ERROR('Non-existing SE') mapping = mapping['Value'][PROTOCOL] sites = self._getLocalSitesForSE(se) if not sites['OK']: return sites sites = set(sites['Value']) sites.update([site for site in mapping if se in mapping[site]]) return S_OK(sorted(sites)) def getDownloadSitesForSE(self, se): """ Get the list of sites that are allowed to download files """ mapping = self.getSiteSEMapping() if not mapping['OK']: return mapping if se not in self.storageElementSet: return S_ERROR('Non-existing SE') mapping = mapping['Value'][DOWNLOAD] sites = self.getProtocolSitesForSE(se) if not sites['OK']: return sites sites = set(sites['Value']) sites.update([site for site in mapping if se in mapping[site]]) return S_OK(sorted(sites)) def getSEsForSite(self, site, connectionLevel=None): """ Get all SEs accessible from a site, given a connectivity """ connectionIndex = _getConnectionIndex(connectionLevel, default=DOWNLOAD) if connectionIndex is None: return S_ERROR("Unknown connection level") if not self.siteSet: self.getSiteSEMapping() if site not in self.siteSet: siteList = [s for s in self.siteSet if '.%s.' 
% site in s] else: siteList = [site] if not siteList: return S_ERROR("Unknown site") return self._getSEsForSItes(siteList, connectionIndex=connectionIndex) def _getSEsForSItes(self, siteList, connectionIndex): """ Extract list of SEs for a connectivity """ mapping = self.getSiteSEMapping() if not mapping['OK']: return mapping ses = [] for index in range(LOCAL, connectionIndex + 1): for site in siteList: ses += mapping['Value'][index].get(site, []) if not ses: return S_ERROR('No SE found') return S_OK(sorted(ses)) def getSEsAtSite(self, site): """ Get local SEs """ return self.getSEsForSite(site, connectionLevel=LOCAL) def isSameSiteSE(self, se1, se2): """ Are these 2 SEs at the same site """ res = self.getLocalSiteForSE(se1) if not res['OK']: return res site1 = res['Value'] res = self.getLocalSiteForSE(se2) if not res['OK']: return res site2 = res['Value'] return S_OK(site1 == site2) def getSEsAtCountry(self, country, connectionLevel=None): """ Get all SEs at a given country """ connectionIndex = _getConnectionIndex(connectionLevel, default=DOWNLOAD) if connectionIndex is None: return S_ERROR("Unknown connection level") if not self.siteSet: self.getSiteSEMapping() siteList = [site for site in self.siteSet if siteCountryName( site) == country.lower()] if not siteList: return S_ERROR("No SEs found in country") return self._getSEsForSItes(siteList, connectionIndex) def getSEInGroupAtSite(self, seGroup, site): """ Get the SE in a group or list of SEs that is present at a site """ seList = self.getAllSEsInGroupAtSite(seGroup, site) if not seList['OK'] or seList['Value'] is None: return seList return S_OK(seList['Value'][0]) def getAllSEsInGroupAtSite(self, seGroup, site): """ Get all SEs in a group or list of SEs that are present at a site """ seList = resolveSEGroup(seGroup) if not seList: return S_ERROR('SEGroup does not exist') sesAtSite = self.getSEsAtSite(site) if not sesAtSite['OK']: return sesAtSite foundSEs = set(seList) & set(sesAtSite['Value']) if not foundSEs: gLogger.warn('No SE found at that site', 'in group %s at %s' % (seGroup, site)) return S_OK() return S_OK(sorted(foundSEs)) def getRegistrationProtocols(self): """ Returns the Favorite registration protocol defined in the CS, or 'srm' as default """ return self.__opsHelper.getValue('DataManagement/RegistrationProtocols', ['srm', 'dips']) def getThirdPartyProtocols(self): """ Returns the Favorite third party protocol defined in the CS, or 'srm' as default """ return self.__opsHelper.getValue('DataManagement/ThirdPartyProtocols', ['srm']) def getAccessProtocols(self): """ Returns the Favorite access protocol defined in the CS, or 'srm' as default """ return self.__opsHelper.getValue('DataManagement/AccessProtocols', ['srm', 'dips']) def getWriteProtocols(self): """ Returns the Favorite Write protocol defined in the CS, or 'srm' as default """ return self.__opsHelper.getValue('DataManagement/WriteProtocols', ['srm', 'dips'])
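# Usage sketch for the DMSHelpers above; the site and SE names are invented, and LOCAL is the
# module-level connection constant already used throughout the class.
dmsHelper = DMSHelpers(vo='myVO')
print(dmsHelper.getSites())                              # all sites found under /Resources/Sites
print(dmsHelper.getSitesForSE('CERN-DST'))               # DOWNLOAD connectivity by default
print(dmsHelper.getSEsForSite('LCG.CERN.ch', connectionLevel=LOCAL))
print(dmsHelper.isSameSiteSE('CERN-DST', 'CERN-RAW'))    # S_OK(True/False) or S_ERROR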
def addShifter( self, shifters = None ): """ Adds or modify one or more shifters. Also, adds the shifter section in case this is not present. Shifter identities are used in several places, mostly for running agents shifters should be in the form {'ShifterRole':{'User':'******', 'Group':'aDIRACGroup'}} :return: S_OK/S_ERROR """ def getOpsSection(): """ Where is the shifters section? """ vo = CSGlobals.getVO() setup = CSGlobals.getSetup() if vo: res = gConfig.getSections( '/Operations/%s/%s/Shifter' % (vo, setup) ) if res['OK']: return S_OK( '/Operations/%s/%s/Shifter' % ( vo, setup ) ) res = gConfig.getSections( '/Operations/%s/Defaults/Shifter' % vo ) if res['OK']: return S_OK( '/Operations/%s/Defaults/Shifter' % vo ) else: res = gConfig.getSections( '/Operations/%s/Shifter' % setup ) if res['OK']: return S_OK( '/Operations/%s/Shifter' % setup ) res = gConfig.getSections( '/Operations/Defaults/Shifter' ) if res['OK']: return S_OK( '/Operations/Defaults/Shifter' ) return S_ERROR( "No shifter section" ) if shifters is None: shifters = {} if not self.__initialized['OK']: return self.__initialized # get current shifters opsH = Operations( ) currentShifterRoles = opsH.getSections( 'Shifter' ) if not currentShifterRoles['OK']: # we assume the shifter section is not present currentShifterRoles = [] else: currentShifterRoles = currentShifterRoles['Value'] currentShiftersDict = {} for currentShifterRole in currentShifterRoles: currentShifter = opsH.getOptionsDict( 'Shifter/%s' % currentShifterRole ) if not currentShifter['OK']: return currentShifter currentShifter = currentShifter['Value'] currentShiftersDict[currentShifterRole] = currentShifter # Removing from shifters what does not need to be changed for sRole in shifters: if sRole in currentShiftersDict: if currentShiftersDict[sRole] == shifters[sRole]: shifters.pop( sRole ) # get shifters section to modify section = getOpsSection() # Is this section present? if not section['OK']: if section['Message'] == "No shifter section": gLogger.warn( section['Message'] ) gLogger.info( "Adding shifter section" ) vo = CSGlobals.getVO() if vo: section = '/Operations/%s/Defaults/Shifter' % vo else: section = '/Operations/Defaults/Shifter' res = self.__csMod.createSection( section ) if not res: gLogger.error( "Section %s not created" % section ) return S_ERROR( "Section %s not created" % section ) else: gLogger.error( section['Message'] ) return section else: section = section['Value'] #add or modify shifters for shifter in shifters: self.__csMod.removeSection( section + '/' + shifter ) self.__csMod.createSection( section + '/' + shifter ) self.__csMod.createSection( section + '/' + shifter + '/' + 'User' ) self.__csMod.createSection( section + '/' + shifter + '/' + 'Group' ) self.__csMod.setOptionValue( section + '/' + shifter + '/' + 'User', shifters[shifter]['User'] ) self.__csMod.setOptionValue( section + '/' + shifter + '/' + 'Group', shifters[shifter]['Group'] ) self.__csModified = True return S_OK( True )
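# Hedged example of the argument expected by addShifter() above; the role, user and group
# names are placeholders, and csAPI stands for whatever CSAPI-like object hosts this method
# (the commit step is assumed from the usual CSAPI workflow; it is not shown above).
shifters = {
    'DataManager': {'User': 'some_dirac_user', 'Group': 'dirac_prod'},
    'ProductionManager': {'User': 'another_user', 'Group': 'dirac_prod'},
}
res = csAPI.addShifter(shifters)
if res['OK']:
    csAPI.commit()   # persist the modified Shifter section to the CS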
def web_getLaunchpadSetupWithLFNs(self): #on the fly file catalog for advanced launchpad if not hasattr(self, 'fc'): userData = self.getSessionData() group = str(userData["user"]["group"]) vo = getVOForGroup(group) self.fc = FileCatalog(vo=vo) self.set_header('Content-type', 'text/plain') lfnList = [] arguments = self.request.arguments gLogger.always( "submit: incoming arguments %s to getLaunchpadSetupWithLFNs" % arguments) lfnStr = str(arguments['path'][0]) lfnList = lfnStr.split(',') #checks if the experiments folder in lfn list has a rtg_def.m file at some subfolder gLogger.always("submit: checking if some rtg_def.m" % arguments) processed = [] metaDict = {'type': 'info'} for lfn in lfnStr.split(','): pos_relative = lfn.find("/") pos_relative = lfn.find("/", pos_relative + 1) pos_relative = lfn.find("/", pos_relative + 1) pos_relative = lfn.find("/", pos_relative + 1) pos_relative = lfn.find("/", pos_relative + 1) experiment_lfn = lfn[0:pos_relative] if experiment_lfn in processed: continue processed.append(experiment_lfn) gLogger.always("checking rtg_def.m in %s" % experiment_lfn) result = self.fc.findFilesByMetadata(metaDict, path=str(experiment_lfn)) print "result" print result if not result['OK'] or not result['Value']: gLogger.error("Failed to get type info from $s, %s" % (experiment_lfn, result["Message"])) continue for candidate_lfn in result['Value']: if candidate_lfn.find('rtg_def.m') > 0: lfnList.append(candidate_lfn) totalfn = len(lfnList) ptlfn = '' current = 1 for lfn in lfnList: ptlfn = ptlfn + lfn if current < totalfn: ptlfn = ptlfn + ', ' current = current + 1 defaultParams = { "JobName": [1, 'Eiscat'], "Executable": [1, "/bin/ls"], "Arguments": [1, "-ltrA"], "OutputSandbox": [1, "std.out, std.err"], "InputData": [1, ptlfn], "OutputData": [0, ""], "OutputSE": [1, "EISCAT-disk"], "OutputPath": [0, ""], "CPUTime": [0, "86400"], "Site": [0, ""], "BannedSite": [0, ""], "Platform": [0, "Linux_x86_64_glibc-2.5"], "Priority": [0, "5"], "StdError": [0, "std.err"], "StdOutput": [0, "std.out"], "Parameters": [0, "0"], "ParameterStart": [0, "0"], "ParameterStep": [0, "1"] } delimiter = gConfig.getValue("/Website/Launchpad/ListSeparator", ',') options = self.__getOptionsFromCS(delimiter=delimiter) # platform = self.__getPlatform() # if platform and options: # if not options.has_key("Platform"): # options[ "Platform" ] = platform # else: # csPlatform = list(options[ "Platform" ]) # allPlatforms = csPlatform + platform # platform = uniqueElements(allPlatforms) # options[ "Platform" ] = platform gLogger.debug("Options from CS: %s" % options) override = gConfig.getValue("/Website/Launchpad/OptionsOverride", False) gLogger.info("end __getLaunchpadOpts") # Updating the default values from OptionsOverride configuration branch, for key in options: if key not in defaultParams: defaultParams[key] = [0, ""] defaultParams[key][1] = options[key][0] gLogger.info( "Default params + override from /Website/Launchpad/OptionsOverride -> %s" % defaultParams) # Reading of the predefined sets of launchpad parameters values obj = Operations() predefinedSets = {} launchpadSections = obj.getSections("Launchpad") import pprint if launchpadSections['OK']: for section in launchpadSections["Value"]: predefinedSets[section] = {} sectionOptions = obj.getOptionsDict("Launchpad/" + section) pprint.pprint(sectionOptions) if sectionOptions['OK']: predefinedSets[section] = sectionOptions["Value"] self.write({ "success": "true", "result": defaultParams, "predefinedSets": predefinedSets })
def addShifter(self, shifters=None): """ Adds or modify one or more shifters. Also, adds the shifter section in case this is not present. Shifter identities are used in several places, mostly for running agents :param dict shifters: has to be in the form {'ShifterRole':{'User':'******', 'Group':'aDIRACGroup'}} :return: S_OK/S_ERROR """ def getOpsSection(): """ Where is the shifters section? """ vo = CSGlobals.getVO() setup = CSGlobals.getSetup() if vo: res = gConfig.getSections('/Operations/%s/%s/Shifter' % (vo, setup)) if res['OK']: return S_OK('/Operations/%s/%s/Shifter' % (vo, setup)) res = gConfig.getSections('/Operations/%s/Defaults/Shifter' % vo) if res['OK']: return S_OK('/Operations/%s/Defaults/Shifter' % vo) else: res = gConfig.getSections('/Operations/%s/Shifter' % setup) if res['OK']: return S_OK('/Operations/%s/Shifter' % setup) res = gConfig.getSections('/Operations/Defaults/Shifter') if res['OK']: return S_OK('/Operations/Defaults/Shifter') return S_ERROR("No shifter section") if shifters is None: shifters = {} if not self.__initialized['OK']: return self.__initialized # get current shifters opsH = Operations() currentShifterRoles = opsH.getSections('Shifter') if not currentShifterRoles['OK']: # we assume the shifter section is not present currentShifterRoles = [] else: currentShifterRoles = currentShifterRoles['Value'] currentShiftersDict = {} for currentShifterRole in currentShifterRoles: currentShifter = opsH.getOptionsDict('Shifter/%s' % currentShifterRole) if not currentShifter['OK']: return currentShifter currentShifter = currentShifter['Value'] currentShiftersDict[currentShifterRole] = currentShifter # Removing from shifters what does not need to be changed for sRole in shifters.keys(): # note the pop below if sRole in currentShiftersDict: if currentShiftersDict[sRole] == shifters[sRole]: shifters.pop(sRole) # get shifters section to modify section = getOpsSection() # Is this section present? if not section['OK']: if section['Message'] == "No shifter section": gLogger.warn(section['Message']) gLogger.info("Adding shifter section") vo = CSGlobals.getVO() if vo: section = '/Operations/%s/Defaults/Shifter' % vo else: section = '/Operations/Defaults/Shifter' res = self.__csMod.createSection(section) if not res: gLogger.error("Section %s not created" % section) return S_ERROR("Section %s not created" % section) else: gLogger.error(section['Message']) return section else: section = section['Value'] # add or modify shifters for shifter in shifters: self.__csMod.removeSection(section + '/' + shifter) self.__csMod.createSection(section + '/' + shifter) self.__csMod.createSection(section + '/' + shifter + '/' + 'User') self.__csMod.createSection(section + '/' + shifter + '/' + 'Group') self.__csMod.setOptionValue(section + '/' + shifter + '/' + 'User', shifters[shifter]['User']) self.__csMod.setOptionValue( section + '/' + shifter + '/' + 'Group', shifters[shifter]['Group']) self.csModified = True return S_OK(True)
class GeneratorModels(object): """ Contains the list of known models """ def __init__(self): self.ops = Operations() self.models = {} res = self.ops.getOptionsDict("/Models") if res['OK']: self.models = res['Value'] def hasModel(self, model): """ Check that specified model exists """ if model in self.models: return S_OK() else: return S_ERROR("Model %s is not defined, use any of %s" % (model, self.models.keys())) def getFile(self, model): """ Return the proper model file (usually LesHouches) """ res = self.hasModel(model) if not res['OK']: return res if not self.models[model]: return S_ERROR("No file attached to model %s" % model) return S_OK(self.models[model]) def getParamsForWhizard(self, model): """ When creating the final file, this is needed to get the parameters for the SM """ params = '' if model == 'sm': params = """<GF type="float" value="1.16639E-5"> <!-- Fermi constant --> </GF> <mZ type="float" value="91.1882"> <!-- Z-boson mass --> </mZ> <mW type="float" value="80.419"> <!-- W-boson mass --> </mW> <mH type="float" value="120"> <!-- Higgs mass --> </mH> <alphas type="float" value="0.1178"> <!-- Strong coupling constant alpha_s(MZ) --> </alphas> <me type="float" value="0."> <!-- electron mass --> </me> <mmu type="float" value="0.1066"> <!-- muon mass --> </mmu> <mtau type="float" value="1.777"> <!-- tau-lepton mass --> </mtau> <ms type="float" value="0."> <!-- s-quark mass --> </ms> <mc type="float" value="0.54"> <!-- c-quark mass --> </mc> <mb type="float" value="2.9"> <!-- b-quark mass --> </mb> <mtop type="float" value="174"> <!-- t-quark mass --> </mtop> <wtop type="float" value="1.523"> <!-- t-quark width --> </wtop> <wZ type="float" value="2.443"> <!-- Z-boson width --> </wZ> <wW type="float" value="2.049"> <!-- W-boson width --> </wW> <wH type="float" value="0.3605E-02"> <!-- Higgs width --> </wH> <vckm11 type="float" value="0.97383"> <!-- Vud --> </vckm11> <vckm12 type="float" value="0.2272"> <!-- Vus --> </vckm12> <vckm13 type="float" value="0.00396"> <!-- Vub --> </vckm13> <vckm21 type="float" value="-0.2271"> <!-- Vcd --> </vckm21> <vckm22 type="float" value="0.97296"> <!-- Vcs --> </vckm22> <vckm23 type="float" value="0.04221"> <!-- Vcb --> </vckm23> <vckm31 type="float" value="0.00814"> <!-- Vtd --> </vckm31> <vckm32 type="float" value="-0.04161"> <!-- Vts --> </vckm32> <vckm33 type="float" value="0.99910"> <!-- Vtb --> </vckm33> <khgaz type="float" value="1.000"> <!-- anomaly Higgs coupling K factors --> </khgaz> <khgaga type="float" value="1.000"> <!-- anomaly Higgs coupling K factors --> </khgaga> <khgg type="float" value="1.000"> <!-- anomaly Higgs coupling K factors --> </khgg> """ else: params = """ """ return S_OK(params)
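# Usage sketch for GeneratorModels above; any model other than 'sm' must be declared under
# /Models in the Operations CS, and only 'sm' has Whizard parameters filled in.
genModels = GeneratorModels()
if genModels.hasModel('sm')['OK']:
    print(genModels.getFile('sm'))                            # S_OK(file) or S_ERROR if none attached
    print(genModels.getParamsForWhizard('sm')['Value'][:80])  # start of the SM parameter block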
class DMSHelpers( object ): def __init__( self ): self.siteSEMapping = {} self.storageElementSet = set() self.siteSet = set() self.__opsHelper = Operations() def getSiteSEMapping( self ): """ Returns a dictionary of all sites and their localSEs as a list, e.g. {'LCG.CERN.ch':['CERN-RAW','CERN-RDST',...]} """ if self.siteSEMapping: return S_OK( self.siteSEMapping ) # Get the list of SEs and keep a mapping of those using an Alias or a BaseSE storageElements = gConfig.getSections( 'Resources/StorageElements' ) if not storageElements['OK']: gLogger.warn( 'Problem retrieving storage elements', storageElements['Message'] ) return storageElements storageElements = storageElements['Value'] equivalentSEs = {} for se in storageElements: for option in ( 'BaseSE', 'Alias' ): originalSE = gConfig.getValue( 'Resources/StorageElements/%s/%s' % ( se, option ) ) if originalSE: equivalentSEs.setdefault( originalSE, [] ).append( se ) break siteSEMapping = {} gridTypes = gConfig.getSections( 'Resources/Sites/' ) if not gridTypes['OK']: gLogger.warn( 'Problem retrieving sections in /Resources/Sites', gridTypes['Message'] ) return gridTypes gridTypes = gridTypes['Value'] gLogger.debug( 'Grid Types are: %s' % ( ', '.join( gridTypes ) ) ) # Get a list of sites and their local SEs siteSet = set() storageElementSet = set() siteSEMapping[LOCAL] = {} for grid in gridTypes: result = gConfig.getSections( '/Resources/Sites/%s' % grid ) if not result['OK']: gLogger.warn( 'Problem retrieving /Resources/Sites/%s section' % grid ) return result sites = result['Value'] siteSet.update( sites ) for site in sites: candidateSEs = gConfig.getValue( '/Resources/Sites/%s/%s/SE' % ( grid, site ), [] ) if candidateSEs: candidateSEs += [eqSE for se in candidateSEs for eqSE in equivalentSEs.get( se, [] )] siteSEMapping[LOCAL].setdefault( site, set() ).update( candidateSEs ) storageElementSet.update( candidateSEs ) # Add Sites from the SiteSEMappingByProtocol in the CS siteSEMapping[PROTOCOL] = {} cfgLocalSEPath = cfgPath( 'SiteSEMappingByProtocol' ) result = self.__opsHelper.getOptionsDict( cfgLocalSEPath ) if result['OK']: sites = result['Value'] for site in sites: candidates = set( self.__opsHelper.getValue( cfgPath( cfgLocalSEPath, site ), [] ) ) ses = set( resolveSEGroup( candidates - siteSet ) ) | ( candidates & siteSet ) # If a candidate is a site, then all local SEs are eligible for candidate in ses & siteSet: ses.remove( candidate ) ses.update( siteSEMapping[LOCAL][candidate] ) siteSEMapping[PROTOCOL].setdefault( site, set() ).update( ses ) # Add Sites from the SiteSEMappingByDownload in the CS, else SiteLocalSEMapping (old convention) siteSEMapping[DOWNLOAD] = {} cfgLocalSEPath = cfgPath( 'SiteSEMappingByDownload' ) result = self.__opsHelper.getOptionsDict( cfgLocalSEPath ) if not result['OK']: cfgLocalSEPath = cfgPath( 'SiteLocalSEMapping' ) result = self.__opsHelper.getOptionsDict( cfgLocalSEPath ) if result['OK']: sites = result['Value'] for site in sites: candidates = set( self.__opsHelper.getValue( cfgPath( cfgLocalSEPath, site ), [] ) ) ses = set( resolveSEGroup( candidates - siteSet ) ) | ( candidates & siteSet ) # If a candidate is a site, then all local SEs are eligible for candidate in ses & siteSet: ses.remove( candidate ) ses.update( siteSEMapping[LOCAL][candidate] ) siteSEMapping[DOWNLOAD].setdefault( site, set() ).update( ses ) self.siteSEMapping = siteSEMapping self.storageElementSet = storageElementSet self.siteSet = siteSet return S_OK( siteSEMapping ) def getSites( self ): self.getSiteSEMapping() return 
sorted( self.siteSet ) def getStorageElements( self ): self.getSiteSEMapping() return sorted( self.storageElementSet ) def isSEFailover( self, storageElement ): seList = resolveSEGroup( self.__opsHelper.getValue( 'DataManagement/SEsUsedForFailover', [] ) ) # FIXME: remove string test at some point return storageElement in resolveSEGroup( seList ) or ( not seList and isinstance( storageElement, basestring ) and 'FAILOVER' in storageElement.upper() ) def isSEForJobs( self, storageElement, checkSE = True ): if checkSE: self.getSiteSEMapping() if storageElement not in self.storageElementSet: return False seList = resolveSEGroup( self.__opsHelper.getValue( 'DataManagement/SEsNotToBeUsedForJobs', [] ) ) return storageElement not in resolveSEGroup( seList ) def isSEArchive( self, storageElement ): seList = resolveSEGroup( self.__opsHelper.getValue( 'DataManagement/SEsUsedForArchive', [] ) ) # FIXME: remove string test at some point return storageElement in resolveSEGroup( seList ) or ( not seList and isinstance( storageElement, basestring ) and 'ARCHIVE' in storageElement.upper() ) def getSitesForSE( self, storageElement, connectionLevel = None ): connectionIndex = _getConnectionIndex( connectionLevel, default = DOWNLOAD ) if connectionIndex == LOCAL: return self._getLocalSitesForSE( storageElement ) if connectionIndex == PROTOCOL: return self.getProtocolSitesForSE( storageElement ) if connectionIndex == DOWNLOAD: return self.getDownloadSitesForSE( storageElement ) return S_ERROR( "Unknown connection level" ) def getLocalSiteForSE( self, se ): sites = self._getLocalSitesForSE( se ) if not sites['OK']: return sites return S_OK( sites['Value'][0] ) def _getLocalSitesForSE( self, se ): mapping = self.getSiteSEMapping() if not mapping['OK']: return mapping if se not in self.storageElementSet: return S_ERROR( 'Non-existing SE' ) mapping = mapping['Value'][LOCAL] sites = [site for site in mapping if se in mapping[site]] if len( sites ) != 1: if self.__opsHelper.getValue( 'DataManagement/ForceSingleSitePerSE', True ): return S_ERROR( 'SE is at more than one site' ) return S_OK( sites ) def getProtocolSitesForSE( self, se ): mapping = self.getSiteSEMapping() if not mapping['OK']: return mapping if se not in self.storageElementSet: return S_ERROR( 'Non-existing SE' ) mapping = mapping['Value'][PROTOCOL] sites = self._getLocalSitesForSE( se ) if not sites['OK']: return sites sites = set( sites['Value'] ) sites.update( [site for site in mapping if se in mapping[site]] ) return S_OK( sorted( sites ) ) def getDownloadSitesForSE( self, se ): mapping = self.getSiteSEMapping() if not mapping['OK']: return mapping if se not in self.storageElementSet: return S_ERROR( 'Non-existing SE' ) mapping = mapping['Value'][DOWNLOAD] sites = self.getProtocolSitesForSE( se ) if not sites['OK']: return sites sites = set( sites['Value'] ) sites.update( [site for site in mapping if se in mapping[site]] ) return S_OK( sorted( sites ) ) def getSEsForSite( self, site, connectionLevel = None ): connectionIndex = _getConnectionIndex( connectionLevel, default = DOWNLOAD ) if connectionIndex is None: return S_ERROR( "Unknown connection level" ) if not self.siteSet: self.getSiteSEMapping() if site not in self.siteSet: siteList = [s for s in self.siteSet if '.%s.' 
% site in s] else: siteList = [site] if not siteList: return S_ERROR( "Unknown site" ) return self._getSEsForSItes( siteList, connectionIndex = connectionIndex ) def _getSEsForSItes( self, siteList, connectionIndex ): mapping = self.getSiteSEMapping() if not mapping['OK']: return mapping ses = [] for index in range( LOCAL, connectionIndex + 1 ): for site in siteList: ses += mapping['Value'][index].get( site, [] ) if not ses: return S_ERROR( 'No SE found' ) return S_OK( sorted( ses ) ) def getSEsAtSite( self, site ): return self.getSEsForSite( site, connectionLevel = LOCAL ) def isSameSiteSE( self, se1, se2 ): res = self.getLocalSiteForSE( se1 ) if not res['OK']: return res site1 = res['Value'] res = self.getLocalSiteForSE( se2 ) if not res['OK']: return res site2 = res['Value'] return S_OK( site1 == site2 ) def getSEsAtCountry( self, country, connectionLevel = None ): connectionIndex = _getConnectionIndex( connectionLevel, default = DOWNLOAD ) if connectionIndex is None: return S_ERROR( "Unknown connection level" ) if not self.siteSet: self.getSiteSEMapping() siteList = [site for site in self.siteSet if siteCountryName( site ) == country.lower()] if not siteList: return S_ERROR( "No SEs found in country" ) return self._getSEsForSItes( siteList, connectionIndex ) def getSEInGroupAtSite( self, seGroup, site ): if isinstance( seGroup, basestring ): seList = gConfig.getValue( '/Resources/StorageElementGroups/%s' % seGroup, [] ) else: seList = list( seGroup ) if not seList: return S_ERROR( 'SEGroup does not exist' ) sesAtSite = self.getSEsAtSite( site ) if not sesAtSite['OK']: return sesAtSite sesAtSite = sesAtSite['Value'] se = set( seList ) & set( sesAtSite ) if not se: gLogger.warn( 'No SE found at that site', 'in group %s at %s' % ( seGroup, site ) ) return S_OK() return S_OK( list( se )[0] )
class DMSHelpers(object): """ This class is used to get information about sites, SEs and their interrelations """ def __init__(self, vo=False): self.siteSEMapping = {} self.storageElementSet = set() self.siteSet = set() self.__opsHelper = Operations(vo=vo) self.failoverSEs = None self.archiveSEs = None self.notForJobSEs = None def getSiteSEMapping(self): """Returns a dictionary of all sites and their localSEs as a list, e.g. {'LCG.CERN.ch':['CERN-RAW','CERN-RDST',...]} """ if self.siteSEMapping: return S_OK(self.siteSEMapping) # Get the list of SEs and keep a mapping of those using an Alias or a # BaseSE storageElements = gConfig.getSections("Resources/StorageElements") if not storageElements["OK"]: gLogger.warn("Problem retrieving storage elements", storageElements["Message"]) return storageElements storageElements = storageElements["Value"] equivalentSEs = {} for se in storageElements: for option in ("BaseSE", "Alias"): originalSE = gConfig.getValue( "Resources/StorageElements/%s/%s" % (se, option)) if originalSE: equivalentSEs.setdefault(originalSE, []).append(se) break siteSEMapping = {} gridTypes = gConfig.getSections("Resources/Sites/") if not gridTypes["OK"]: gLogger.warn("Problem retrieving sections in /Resources/Sites", gridTypes["Message"]) return gridTypes gridTypes = gridTypes["Value"] gLogger.debug("Grid Types are: %s" % (", ".join(gridTypes))) # Get a list of sites and their local SEs siteSet = set() storageElementSet = set() siteSEMapping[LOCAL] = {} for grid in gridTypes: result = gConfig.getSections("/Resources/Sites/%s" % grid) if not result["OK"]: gLogger.warn("Problem retrieving /Resources/Sites/%s section" % grid) return result sites = result["Value"] siteSet.update(sites) for site in sites: candidateSEs = gConfig.getValue( "/Resources/Sites/%s/%s/SE" % (grid, site), []) if candidateSEs: candidateSEs += [ eqSE for se in candidateSEs for eqSE in equivalentSEs.get(se, []) ] siteSEMapping[LOCAL].setdefault(site, set()).update(candidateSEs) storageElementSet.update(candidateSEs) # Add Sites from the SiteSEMappingByProtocol in the CS siteSEMapping[PROTOCOL] = {} cfgLocalSEPath = cfgPath("SiteSEMappingByProtocol") result = self.__opsHelper.getOptionsDict(cfgLocalSEPath) if result["OK"]: sites = result["Value"] for site in sites: candidates = set( self.__opsHelper.getValue(cfgPath(cfgLocalSEPath, site), [])) ses = set(resolveSEGroup(candidates - siteSet)) | (candidates & siteSet) # If a candidate is a site, then all local SEs are eligible for candidate in ses & siteSet: ses.remove(candidate) ses.update(siteSEMapping[LOCAL][candidate]) siteSEMapping[PROTOCOL].setdefault(site, set()).update(ses) # Add Sites from the SiteSEMappingByDownload in the CS, else # SiteLocalSEMapping (old convention) siteSEMapping[DOWNLOAD] = {} cfgLocalSEPath = cfgPath("SiteSEMappingByDownload") result = self.__opsHelper.getOptionsDict(cfgLocalSEPath) if not result["OK"]: cfgLocalSEPath = cfgPath("SiteLocalSEMapping") result = self.__opsHelper.getOptionsDict(cfgLocalSEPath) if result["OK"]: sites = result["Value"] for site in sites: candidates = set( self.__opsHelper.getValue(cfgPath(cfgLocalSEPath, site), [])) ses = set(resolveSEGroup(candidates - siteSet)) | (candidates & siteSet) # If a candidate is a site, then all local SEs are eligible for candidate in ses & siteSet: ses.remove(candidate) ses.update(siteSEMapping[LOCAL][candidate]) siteSEMapping[DOWNLOAD].setdefault(site, set()).update(ses) self.siteSEMapping = siteSEMapping # Add storage elements that may not be associated with a site result = 
gConfig.getSections("/Resources/StorageElements") if not result["OK"]: gLogger.warn( "Problem retrieving /Resources/StorageElements section", result["Message"]) return result self.storageElementSet = storageElementSet | set(result["Value"]) self.siteSet = siteSet return S_OK(siteSEMapping) def getSites(self): """Get the list of known sites""" self.getSiteSEMapping() return sorted(self.siteSet) def getTiers(self, withStorage=False, tier=None): """Get the list of sites for a given (list of) Tier level""" sites = sorted( self.getShortSiteNames(withStorage=withStorage, tier=tier).values()) if sites and isinstance(sites[0], list): # List of lists, flatten it sites = [s for sl in sites for s in sl] return sites def getShortSiteNames(self, withStorage=True, tier=None): """Create a directory of short site names pointing to full site names""" siteDict = {} result = self.getSiteSEMapping() if result["OK"]: for site in self.siteSEMapping[ LOCAL] if withStorage else self.siteSet: grid, shortSite, _country = site.split(".") if isinstance(tier, six.integer_types) and ( grid != "LCG" or gConfig.getValue( "/Resources/Sites/%s/%s/MoUTierLevel" % (grid, site), 999) != tier): continue if isinstance(tier, (list, tuple, dict, set)) and (grid != "LCG" or gConfig.getValue( "/Resources/Sites/%s/%s/MoUTierLevel" % (grid, site), 999) not in tier): continue if withStorage or tier is not None: siteDict[shortSite] = site else: siteDict.setdefault(shortSite, []).append(site) return siteDict def getStorageElements(self): """Get the list of known SEs""" self.getSiteSEMapping() return sorted(self.storageElementSet) def isSEFailover(self, storageElement): """Is this SE a failover SE""" if self.failoverSEs is None: seList = resolveSEGroup( self.__opsHelper.getValue("DataManagement/SEsUsedForFailover", [])) self.failoverSEs = resolveSEGroup(seList) # FIXME: remove string test at some point return storageElement in self.failoverSEs or ( not self.failoverSEs and isinstance(storageElement, six.string_types) and "FAILOVER" in storageElement.upper()) def isSEForJobs(self, storageElement, checkSE=True): """Is this SE suitable for making jobs""" if checkSE: self.getSiteSEMapping() if storageElement not in self.storageElementSet: return False if self.notForJobSEs is None: seList = resolveSEGroup( self.__opsHelper.getValue( "DataManagement/SEsNotToBeUsedForJobs", [])) self.notForJobSEs = resolveSEGroup(seList) return storageElement not in self.notForJobSEs def isSEArchive(self, storageElement): """Is this SE an archive SE""" if self.archiveSEs is None: seList = resolveSEGroup( self.__opsHelper.getValue("DataManagement/SEsUsedForArchive", [])) self.archiveSEs = resolveSEGroup(seList) # FIXME: remove string test at some point return storageElement in self.archiveSEs or ( not self.archiveSEs and isinstance(storageElement, six.string_types) and "ARCHIVE" in storageElement.upper()) def getSitesForSE(self, storageElement, connectionLevel=None): """Get the (list of) sites for a given SE and a given connctivity""" connectionIndex = _getConnectionIndex(connectionLevel, default=DOWNLOAD) if connectionIndex == LOCAL: return self._getLocalSitesForSE(storageElement) if connectionIndex == PROTOCOL: return self.getProtocolSitesForSE(storageElement) if connectionIndex == DOWNLOAD: return self.getDownloadSitesForSE(storageElement) return S_ERROR("Unknown connection level") def getLocalSiteForSE(self, se): """Get the site at which the SE is""" sites = self._getLocalSitesForSE(se) if not sites["OK"]: return sites if not sites["Value"]: return 
    def _getLocalSitesForSE(self, se):
        """Extract the list of sites that declare this SE"""
        mapping = self.getSiteSEMapping()
        if not mapping["OK"]:
            return mapping
        if se not in self.storageElementSet:
            return S_ERROR("Non-existing SE")
        mapping = mapping["Value"][LOCAL]
        sites = [site for site in mapping if se in mapping[site]]
        if len(sites) > 1 and self.__opsHelper.getValue("DataManagement/ForceSingleSitePerSE", True):
            return S_ERROR("SE is at more than one site")
        return S_OK(sites)

    def getProtocolSitesForSE(self, se):
        """Get sites that can access the SE by protocol"""
        mapping = self.getSiteSEMapping()
        if not mapping["OK"]:
            return mapping
        if se not in self.storageElementSet:
            return S_ERROR("Non-existing SE")
        mapping = mapping["Value"][PROTOCOL]
        sites = self._getLocalSitesForSE(se)
        if not sites["OK"]:
            return sites
        sites = set(sites["Value"])
        sites.update([site for site in mapping if se in mapping[site]])
        return S_OK(sorted(sites))

    def getDownloadSitesForSE(self, se):
        """Get the list of sites that are allowed to download files"""
        mapping = self.getSiteSEMapping()
        if not mapping["OK"]:
            return mapping
        if se not in self.storageElementSet:
            return S_ERROR("Non-existing SE")
        mapping = mapping["Value"][DOWNLOAD]
        sites = self.getProtocolSitesForSE(se)
        if not sites["OK"]:
            return sites
        sites = set(sites["Value"])
        sites.update([site for site in mapping if se in mapping[site]])
        return S_OK(sorted(sites))
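    # Illustrative note on connection levels: getSitesForSE() above dispatches on
    # the requested level, and the lookups are cumulative, since
    # getProtocolSitesForSE() includes the LOCAL sites and getDownloadSitesForSE()
    # includes the PROTOCOL ones (see the set.update calls). For instance, with
    # hypothetical names:
    #
    #   dmsHelpers.getSitesForSE('CERN-RAW', connectionLevel=LOCAL)
    #   # -> S_OK(['LCG.CERN.ch']) if only LCG.CERN.ch declares CERN-RAW locally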
    def getSEsForSite(self, site, connectionLevel=None):
        """Get all SEs accessible from a site, given a connectivity"""
        connectionIndex = _getConnectionIndex(connectionLevel, default=DOWNLOAD)
        if connectionIndex is None:
            return S_ERROR("Unknown connection level")
        if not self.siteSet:
            self.getSiteSEMapping()
        if site not in self.siteSet:
            siteList = [s for s in self.siteSet if ".%s." % site in s]
        else:
            siteList = [site]
        if not siteList:
            return S_ERROR("Unknown site")
        return self._getSEsForSItes(siteList, connectionIndex=connectionIndex)

    def _getSEsForSItes(self, siteList, connectionIndex):
        """Extract list of SEs for a connectivity"""
        mapping = self.getSiteSEMapping()
        if not mapping["OK"]:
            return mapping
        ses = []
        for index in range(LOCAL, connectionIndex + 1):
            for site in siteList:
                ses += mapping["Value"][index].get(site, [])
        if not ses:
            return S_ERROR("No SE found")
        return S_OK(sorted(ses))

    def getSEsAtSite(self, site):
        """Get local SEs"""
        return self.getSEsForSite(site, connectionLevel=LOCAL)

    def isSameSiteSE(self, se1, se2):
        """Are these 2 SEs at the same site"""
        res = self.getLocalSiteForSE(se1)
        if not res["OK"]:
            return res
        site1 = res["Value"]
        res = self.getLocalSiteForSE(se2)
        if not res["OK"]:
            return res
        site2 = res["Value"]
        return S_OK(site1 == site2)

    def getSEsAtCountry(self, country, connectionLevel=None):
        """Get all SEs at a given country"""
        connectionIndex = _getConnectionIndex(connectionLevel, default=DOWNLOAD)
        if connectionIndex is None:
            return S_ERROR("Unknown connection level")
        if not self.siteSet:
            self.getSiteSEMapping()
        siteList = [site for site in self.siteSet if siteCountryName(site) == country.lower()]
        if not siteList:
            return S_ERROR("No SEs found in country")
        return self._getSEsForSItes(siteList, connectionIndex)

    def getSEInGroupAtSite(self, seGroup, site):
        """Get the SE in a group or list of SEs that is present at a site"""
        seList = self.getAllSEsInGroupAtSite(seGroup, site)
        if not seList["OK"] or seList["Value"] is None:
            return seList
        return S_OK(seList["Value"][0])

    def getAllSEsInGroupAtSite(self, seGroup, site):
        """Get all SEs in a group or list of SEs that are present at a site"""
        seList = resolveSEGroup(seGroup)
        if not seList:
            return S_ERROR("SEGroup does not exist")
        sesAtSite = self.getSEsAtSite(site)
        if not sesAtSite["OK"]:
            return sesAtSite
        foundSEs = set(seList) & set(sesAtSite["Value"])
        if not foundSEs:
            gLogger.warn("No SE found at that site", "in group %s at %s" % (seGroup, site))
            return S_OK()
        return S_OK(sorted(foundSEs))

    def getRegistrationProtocols(self):
        """Returns the favorite registration protocols defined in the CS, or 'srm' as default"""
        return self.__opsHelper.getValue("DataManagement/RegistrationProtocols", ["srm", "dips"])

    def getThirdPartyProtocols(self):
        """Returns the favorite third-party protocols defined in the CS, or 'srm' as default"""
        return self.__opsHelper.getValue("DataManagement/ThirdPartyProtocols", ["srm"])

    def getAccessProtocols(self):
        """Returns the favorite access protocols defined in the CS, or 'srm' as default"""
        return self.__opsHelper.getValue("DataManagement/AccessProtocols", ["srm", "dips"])

    def getWriteProtocols(self):
        """Returns the favorite write protocols defined in the CS, or 'srm' as default"""
        return self.__opsHelper.getValue("DataManagement/WriteProtocols", ["srm", "dips"])

    def getStageProtocols(self):
        """Returns the favorite staging protocols defined in the CS. There is no default"""
        return self.__opsHelper.getValue("DataManagement/StageProtocols", list())
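    # Hypothetical CS layout read by getMultiHopMatrix() below; the section and
    # SE names are illustrative only and reuse those from its docstring:
    #
    #   DataManagement
    #   {
    #     MultiHopMatrixOfShame
    #     {
    #       Default
    #       {
    #         Default = MultiHopSEUsedForAllTransfer
    #       }
    #       Src1
    #       {
    #         Default = DefaultMultiHopSEFromSrc1
    #         Dst1 = MultiHopSEFromSrc1ToDst1
    #       }
    #     }
    #   }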
    def getMultiHopMatrix(self):
        """
        Returns the multi-hop matrix described in DataManagement/MultiHopMatrixOfShame.

        .. code-block:: python

            {
                'Default': {'Default': 'MultiHopSEUsedForAllTransfer',
                            'Dst3': 'MultiHopFromAnySourceToDst3'},
                'Src1': {'Default': 'DefaultMultiHopSEFromSrc1',
                         'Dst1': 'MultiHopSEFromSrc1ToDst1'},
                'Src2': {'Default': 'DefaultMultiHopSEFromSrc2',
                         'Dst2': 'MultiHopSEFromSrc1ToDst2'},
            }

        :returns: dict of dicts for all the source SE / destination SE pairs defined.
                  We use a defaultdict to allow lookups of non-existing sources/destinations.
        """
        matrixBasePath = "DataManagement/MultiHopMatrixOfShame"
        multiHopMatrix = defaultdict(lambda: defaultdict(lambda: None))
        allSrcSEs = self.__opsHelper.getSections(matrixBasePath).get("Value", [])
        for src in allSrcSEs:
            srcDst = self.__opsHelper.getOptionsDict(cfgPath(matrixBasePath, src)).get("Value")
            if srcDst:
                multiHopMatrix[src].update(srcDst)
        return multiHopMatrix
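# Minimal usage sketch, kept under a __main__ guard so importing this module is
# unaffected. It assumes a configured DIRAC installation whose CS actually
# defines the (hypothetical) site 'LCG.CERN.ch' and SE 'CERN-RAW' used below.
if __name__ == "__main__":
    dmsHelpers = DMSHelpers()
    res = dmsHelpers.getSiteSEMapping()
    if res["OK"]:
        # Local SEs declared by one site, and the sites from which one SE is reachable
        print(dmsHelpers.getSEsAtSite("LCG.CERN.ch"))
        print(dmsHelpers.getSitesForSE("CERN-RAW"))
    # Protocol lists fall back to their defaults if the CS options are absent
    print(dmsHelpers.getRegistrationProtocols())
    print(dmsHelpers.getMultiHopMatrix())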