Example #1
    def export_getSiteSummarySelectors(self):
        """ Get all the distinct selector values for the site summary web portal page
    """

        resultDict = {}
        statusList = ['Good', 'Fair', 'Poor', 'Bad', 'Idle']
        resultDict['Status'] = statusList
        maskStatus = ['Active', 'Banned', 'NoMask', 'Reduced']
        resultDict['MaskStatus'] = maskStatus

        resources = Resources()
        result = resources.getSites()
        if not result['OK']:
            return result
        siteList = result['Value']

        countryList = []
        for site in siteList:
            if site.find('.') != -1:
                country = site.split('.')[1]
                country = country.lower()
                if country not in countryList:
                    countryList.append(country)
        countryList.sort()
        resultDict['Country'] = countryList
        siteList.sort()
        resultDict['Site'] = siteList

        return S_OK(resultDict)
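
Every snippet on this page relies on DIRAC's result-dictionary convention. A minimal, self-contained sketch of that convention (plain Python 2, no DIRAC install needed; the selector values are taken from the example above):

def S_OK(value=None):
    # success: the payload travels under 'Value'
    return {'OK': True, 'Value': value}

def S_ERROR(message=''):
    # failure: the explanation travels under 'Message'
    return {'OK': False, 'Message': message}

result = S_OK({'Status': ['Good', 'Fair', 'Poor', 'Bad', 'Idle']})
if result['OK']:
    selectors = result['Value']
else:
    print result['Message']  # callers propagate the whole dict, as the examples do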
Example #2
    def __init__(self, catalogs=[], vo=None):
        """ Default constructor
    """
        self.valid = True
        self.timeout = 180
        self.readCatalogs = []
        self.writeCatalogs = []
        self.vo = vo
        if not vo:
            result = getVOfromProxyGroup()
            if not result['OK']:
                # __init__ cannot return a result dict; flag the instance invalid instead
                self.valid = False
                return
            self.vo = result['Value']
        self.opHelper = Operations(vo=self.vo)
        self.reHelper = Resources(vo=self.vo)

        if type(catalogs) in types.StringTypes:
            catalogs = [catalogs]
        if catalogs:
            res = self._getSelectedCatalogs(catalogs)
        else:
            res = self._getCatalogs()
        if not res['OK']:
            self.valid = False
        elif (len(self.readCatalogs) == 0) and (len(self.writeCatalogs) == 0):
            self.valid = False
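
A hedged usage sketch for the constructor above, honoring the .valid flag instead of a return value (the import path assumes DIRAC's standard FileCatalog module; the catalog name and VO are illustrative):

from DIRAC.Resources.Catalog.FileCatalog import FileCatalog  # assumed path

fc = FileCatalog(catalogs='FileCatalog', vo='biomed')  # a single string is wrapped in a list
if not fc.valid:
    print 'No usable read/write catalogs for this VO'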
Example #3
  def export_getSiteSummarySelectors(self):
    """ Get all the distinct selector values for the site summary web portal page
    """

    resultDict = {}
    statusList = ['Good','Fair','Poor','Bad','Idle']
    resultDict['Status'] = statusList
    maskStatus = ['Active','Banned','NoMask','Reduced']
    resultDict['MaskStatus'] = maskStatus

    resources = Resources()
    result = resources.getSites()
    if not result['OK']:
      return result
    siteList = result['Value']

    countryList = []
    for site in siteList:
      if site.find('.') != -1:
        country = site.split('.')[1]
        country = country.lower()
        if country not in countryList:
          countryList.append(country)
    countryList.sort()
    resultDict['Country'] = countryList
    siteList.sort()
    resultDict['Site'] = siteList

    return S_OK(resultDict)
Example #4
    def __init__(self, useProxy=False, vo=None):

        self.valid = True
        self.proxy = useProxy
        self.resourceStatus = ResourceStatus()
        self.resourcesHelper = Resources(vo=vo)
Example #5
def getSiteCEMapping():
  """ Returns a dictionary of all sites and their CEs as a list, e.g.
      {'LCG.CERN.ch':['ce101.cern.ch',...]}
  """
  siteCEMapping = {}
  resourceHelper = Resources()
  result = resourceHelper.getEligibleSites()
  if not result['OK']:
    return result
  sites = result['Value']
  
  for site in sites:
    result = resourceHelper.getEligibleResources( 'Computing', {'Site':site} )
    if not result['OK']:
      continue
    ceList = result['Value']
    
    result = getSiteFullNames( site )
    if not result['OK']:
      continue
    for sName in result['Value']:
      siteCEMapping[sName] = ceList   

  return S_OK( siteCEMapping )
Example #6
def getSiteCEMapping():
    """ Returns a dictionary of all sites and their CEs as a list, e.g.
      {'LCG.CERN.ch':['ce101.cern.ch',...]}
    """
    siteCEMapping = {}
    resourceHelper = Resources()
    result = resourceHelper.getEligibleSites()
    if not result['OK']:
        return result
    sites = result['Value']

    for site in sites:
        result = resourceHelper.getEligibleResources('Computing',
                                                     {'Site': site})
        if not result['OK']:
            continue
        ceList = result['Value']

        result = getSiteFullNames(site)
        if not result['OK']:
            continue
        for sName in result['Value']:
            siteCEMapping[sName] = ceList

    return S_OK(siteCEMapping)
Example #7
    def createCatalog(self,
                      catalogName,
                      useProxy=False,
                      vo=None,
                      catalogConfig={}):
        """ Create a file catalog object from its name and CS description
    """
        if useProxy:
            catalog = FileCatalogProxyClient(catalogName)
            return S_OK(catalog)

        # get the CS description first
        catConfig = catalogConfig
        if not catConfig:
            if not vo:
                result = getVOfromProxyGroup()
                if not result['OK']:
                    return result
                vo = result['Value']
            reHelper = Resources(vo=vo)
            result = reHelper.getCatalogOptionsDict(catalogName)
            if not result['OK']:
                return result
            catConfig = result['Value']

        catalogType = catConfig.get('CatalogType', catalogName)
        catalogURL = catConfig.get('CatalogURL', '')

        self.log.verbose('Creating %s client' % catalogName)
        moduleRootPaths = getInstalledExtensions()
        for moduleRootPath in moduleRootPaths:
            gLogger.verbose("Trying to load from root path %s" %
                            moduleRootPath)
            #moduleFile = os.path.join( rootPath, moduleRootPath, "Resources", "Catalog", "%sClient.py" % catalogType )
            #gLogger.verbose( "Looking for file %s" % moduleFile )
            #if not os.path.isfile( moduleFile ):
            #  continue
            try:
                # This enforces the convention that the plug in must be named after the file catalog
                moduleName = "%sClient" % (catalogType)
                catalogModule = __import__(
                    '%s.Resources.Catalog.%s' % (moduleRootPath, moduleName),
                    globals(), locals(), [moduleName])
            except ImportError, x:
                if "No module" in str(x):
                    gLogger.debug('Catalog module %s not found in %s' %
                                  (catalogType, moduleRootPath))
                else:
                    errStr = "Failed attempt to import %s from the path %s: %s" % (
                        catalogType, moduleRootPath, x)
                    gLogger.error(errStr)
                continue
            except Exception, x:
                errStr = "Failed attempt to import %s from the path %s: %s" % (
                    catalogType, moduleRootPath, x)
                gLogger.error(errStr)
                continue
Example #8
    def createCatalog(self,
                      catalogName,
                      useProxy=False,
                      vo=None,
                      catalogConfig={}):
        """ Create a file catalog object from its name and CS description
    """
        if useProxy:
            catalog = FileCatalogProxyClient(catalogName)
            return S_OK(catalog)

        # get the CS description first
        catConfig = catalogConfig
        if not catConfig:
            if not vo:
                result = getVOfromProxyGroup()
                if not result['OK']:
                    return result
                vo = result['Value']
            reHelper = Resources(vo=vo)
            result = reHelper.getCatalogOptionsDict(catalogName)
            if not result['OK']:
                return result
            catConfig = result['Value']

        catalogType = catConfig.get('CatalogType', catalogName)
        catalogURL = catConfig.get('CatalogURL', '')

        self.log.verbose('Creating %s client' % catalogName)

        objectLoader = ObjectLoader()
        result = objectLoader.loadObject(
            'Resources.Catalog.%sClient' % catalogType, catalogType + 'Client')
        if not result['OK']:
            gLogger.error('Failed to load catalog object: %s' %
                          result['Message'])
            return result

        catalogClass = result['Value']

        try:
            if catalogType in ['LcgFileCatalogCombined', 'LcgFileCatalog']:
                # The LFC special case
                infoSys = catConfig.get('LcgGfalInfosys', '')
                host = catConfig.get('MasterHost', '')
                catalog = catalogClass(infoSys, host)
            else:
                if catalogURL:
                    catalog = catalogClass(url=catalogURL)
                else:
                    catalog = catalogClass()
            self.log.debug('Loaded module %sClient' % catalogType)
            return S_OK(catalog)
        except Exception, x:
            errStr = "Failed to instantiate %s()" % (catalogType)
            gLogger.exception(errStr, lException=x)
            return S_ERROR(errStr)
Example #9
def getCEsForSite( siteName ):
  """ Given a DIRAC site name this method returns a list of corresponding CEs.
  """
  resourceHelper = Resources()
  result = resourceHelper.getEligibleResources( 'Computing', {'Site':siteName} )
  if not result['OK']:
    return result
  ceList = result['Value']
  
  return S_OK( ceList )  
Example #10
def getCEsForSite(siteName):
    """ Given a DIRAC site name this method returns a list of corresponding CEs.
  """
    resourceHelper = Resources()
    result = resourceHelper.getEligibleResources('Computing',
                                                 {'Site': siteName})
    if not result['OK']:
        return result
    ceList = result['Value']

    return S_OK(ceList)
Example #11
def getQueueInfo(ceUniqueID):
    """
    Extract information from full CE Name including associate DIRAC Site
  """
    try:
        subClusterUniqueID = ceUniqueID.split('/')[0].split(':')[0]
        queueID = ceUniqueID.split('/')[1]
    except IndexError:
        return S_ERROR('Wrong full queue Name')

    result = getSiteForCE(subClusterUniqueID)
    if not result['OK']:
        return result
    diracSiteName = result['Value']

    if not diracSiteName:
        return S_ERROR('Can not find corresponding Site in CS')

    resourceHelper = Resources()
    result = getSiteName(diracSiteName)
    if not result['OK']:
        return result
    site = result['Value']
    domain = result.get('Domain', 'Unknown')
    country = result.get('Country', 'xx')

    result = resourceHelper.getQueueOptionsDict(site, subClusterUniqueID,
                                                queueID)
    if not result['OK']:
        return result
    queueDict = result['Value']
    maxCPUTime = queueDict.get('maxCPUTime', 0)
    SI00 = queueDict.get('SI00', 0)

    if not maxCPUTime or not SI00:
        result = resourceHelper.getComputingOptionsDict(
            site, subClusterUniqueID)
        if not result['OK']:
            return result
        ceDict = result['Value']
        if not maxCPUTime:
            maxCPUTime = ceDict.get('maxCPUTime', 0)
        if not SI00:
            SI00 = ceDict.get('SI00', 0)

    resultDict = {
        'SubClusterUniqueID': subClusterUniqueID,
        'QueueID': queueID,
        'SiteName': diracSiteName,
        'Domain': domain,
        'Country': country,
        'maxCPUTime': maxCPUTime,
        'SI00': SI00
    }

    return S_OK(resultDict)
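
The try/except at the top assumes a full CE name of the form host:port/queueName; the same parsing in isolation (the sample name is purely illustrative):

ceUniqueID = 'ce101.cern.ch:8443/cream-pbs-grid'             # illustrative full CE name
subClusterUniqueID = ceUniqueID.split('/')[0].split(':')[0]  # -> 'ce101.cern.ch'
queueID = ceUniqueID.split('/')[1]                           # -> 'cream-pbs-grid'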
Example #12
  def __checkSEs( self, seList ):
    resources = Resources()
    res = resources.getEligibleResources( 'Storage' )
    if not res['OK']:
      return self._errorReport( res, 'Failed to get possible StorageElements' )
    missing = []
    for se in seList:
      if se not in res['Value']:
        gLogger.error( "StorageElement %s is not known" % se )
        missing.append( se )
    if missing:
      return S_ERROR( "%d StorageElements not known" % len( missing ) )
    return S_OK()
Example #13
  def createCatalog( self, catalogName, useProxy = False, vo = None, catalogConfig = {} ):
    """ Create a file catalog object from its name and CS description
    """
    if useProxy:
      catalog = FileCatalogProxyClient( catalogName )
      return S_OK( catalog )

    # get the CS description first
    catConfig = catalogConfig
    if not catConfig:
      if not vo:
        result = getVOfromProxyGroup()
        if not result['OK']:
          return result
        vo = result['Value']
      reHelper = Resources( vo = vo )
      result = reHelper.getCatalogOptionsDict( catalogName )
      if not result['OK']:
        return result
      catConfig = result['Value']

    catalogType = catConfig.get( 'CatalogType', catalogName )
    catalogURL = catConfig.get( 'CatalogURL', '' )

    self.log.verbose( 'Creating %s client' % catalogName )

    objectLoader = ObjectLoader()
    result = objectLoader.loadObject( 'Resources.Catalog.%sClient' % catalogType, catalogType + 'Client' )
    if not result['OK']:
      gLogger.error( 'Failed to load catalog object: %s' % result['Message'] )
      return result

    catalogClass = result['Value']

    try:
      if catalogType in ['LcgFileCatalogCombined', 'LcgFileCatalog']:
        # The LFC special case
        infoSys = catConfig.get( 'LcgGfalInfosys', '' )
        host = catConfig.get( 'MasterHost', '' )
        catalog = catalogClass( infoSys, host )
      else:
        if catalogURL:
          catalog = catalogClass( url = catalogURL )
        else:
          catalog = catalogClass()
      self.log.debug( 'Loaded module %sClient' % catalogType )
      return S_OK( catalog )
    except Exception, x:
      errStr = "Failed to instantiate %s()" % ( catalogType )
      gLogger.exception( errStr, lException = x )
      return S_ERROR( errStr )
Example #14
def getQueueInfo( ceUniqueID ):
  """
    Extract information from the full CE name, including the associated DIRAC site
  """
  try:
    subClusterUniqueID = ceUniqueID.split( '/' )[0].split( ':' )[0]
    queueID = ceUniqueID.split( '/' )[1]
  except IndexError:
    return S_ERROR( 'Wrong full queue Name' )

  result = getSiteForCE( subClusterUniqueID )
  if not result['OK']:
    return result
  diracSiteName = result['Value']

  if not diracSiteName:
    return S_ERROR( 'Can not find corresponding Site in CS' )
  
  resourceHelper = Resources()
  result = getSiteName( diracSiteName )
  if not result['OK']:
    return result
  site = result['Value']
  domain = result.get( 'Domain', 'Unknown' )
  country = result.get( 'Country', 'xx' )
  
  result = resourceHelper.getQueueOptionsDict( site, subClusterUniqueID, queueID )
  if not result['OK']:
    return result
  queueDict = result['Value']
  maxCPUTime = queueDict.get( 'maxCPUTime', 0 )
  SI00 = queueDict.get( 'SI00', 0 ) 
  
  if not maxCPUTime or not SI00:
    result = resourceHelper.getComputingOptionsDict( site, subClusterUniqueID )
    if not result['OK']:
      return result
    ceDict = result['Value']
    if not maxCPUTime:
      maxCPUTime = ceDict.get( 'maxCPUTime', 0 )
    if not SI00:
      SI00 = ceDict.get( 'SI00', 0 )   

  resultDict = { 'SubClusterUniqueID': subClusterUniqueID,
                 'QueueID': queueID,
                 'SiteName': diracSiteName,
                 'Domain': domain,
                 'Country': country,
                 'maxCPUTime': maxCPUTime,
                 'SI00': SI00 }

  return S_OK( resultDict )
Example #15
    def initialize(self):
        self.replicaManager = ReplicaManager()
        #self.stagerClient = StorageManagerClient()
        self.dataIntegrityClient = DataIntegrityClient()
        self.storageDB = StorageManagementDB()
        # pin lifetime = 1 day
        self.pinLifetime = self.am_getOption('PinLifetime', THROTTLING_TIME)
        # Resources helper
        self.resources = Resources()

        # This sets the Default Proxy to the one defined under
        # /Operations/Shifter/DataManager
        # the shifterProxy option in the Configuration can be used to change this default.
        self.am_setOption('shifterProxy', 'DataManager')

        return S_OK()
Example #16
    def __init__(self):
        """
    Constructor.
    
    examples:
      >>> s = Synchronizer()
    """

        self.log = gLogger.getSubLogger(self.__class__.__name__)
        self.operations = Operations()
        self.resources = Resources()

        self.rStatus = ResourceStatusClient.ResourceStatusClient()
        self.rssConfig = RssConfiguration()

        self.diracAdmin = DiracAdmin()
Example #17
    def __init__(self, database=None):
        self.db = None
        if database is not None:
            self.setDatabase(database)
        self.lock = threading.Lock()
        self.seUpdatePeriod = 600
        self.resourcesHelper = Resources()
        self._refreshSEs()
Example #18
def getCESiteMapping():
  """ Returns a dictionary of all CEs and their associated site, e.g.
      {'ce101.cern.ch':'LCG.CERN.ch', ...}
  """
  ceSiteMapping = {}
  resourceHelper = Resources()
  result = resourceHelper.getEligibleResources( 'Computing' )
  if not result['OK']:
    return result
  ceList = result['Value']
  for ce in ceList:
    result = getSiteForCE( ce )
    if not result['OK']:
      continue
    site = result['Value']
    ceSiteMapping[ce] = site

  return S_OK( ceSiteMapping )
Example #19
def getCESiteMapping():
    """ Returns a dictionary of all CEs and their associated site, e.g.
      {'ce101.cern.ch':'LCG.CERN.ch', ...}
    """
    ceSiteMapping = {}
    resourceHelper = Resources()
    result = resourceHelper.getEligibleResources('Computing')
    if not result['OK']:
        return result
    ceList = result['Value']
    for ce in ceList:
        result = getSiteForCE(ce)
        if not result['OK']:
            continue
        site = result['Value']
        ceSiteMapping[ce] = site

    return S_OK(ceSiteMapping)
Example #20
def getSESiteMapping( gridName = '' ):
  """ Returns a dictionary of all SEs and their associated site(s), e.g.
      {'CERN-RAW':'LCG.CERN.ch','CERN-RDST':'LCG.CERN.ch',...}
      Although normally one site exists for a given SE, it is possible over all
      Grid types to have multiple entries.
      If gridName is specified, result is restricted to that Grid type.
  """
  seSiteMapping = {}
  resourceHelper = Resources()
  result = resourceHelper.getEligibleResources( 'Storage' )
  if not result['OK']:
    return result
  seList = result['Value']
  for se in seList:
    result = getSitesForSE( se )
    if not result['OK']:
      continue
    site = result['Value']
    seSiteMapping[se] = site

  return S_OK( seSiteMapping )
Example #21
def getSESiteMapping(gridName=''):
    """ Returns a dictionary of all SEs and their associated site(s), e.g.
      {'CERN-RAW':'LCG.CERN.ch','CERN-RDST':'LCG.CERN.ch',...}
      Although normally one site exists for a given SE, it is possible over all
      Grid types to have multiple entries.
      If gridName is specified, result is restricted to that Grid type.
  """
    seSiteMapping = {}
    resourceHelper = Resources()
    result = resourceHelper.getEligibleResources('Storage')
    if not result['OK']:
        return result
    seList = result['Value']
    for se in seList:
        result = getSitesForSE(se)
        if not result['OK']:
            continue
        site = result['Value']
        seSiteMapping[se] = site

    return S_OK(seSiteMapping)
Example #22
def getSiteSEMapping():
    """ Returns a dictionary of all sites and their localSEs as a list, e.g.
      {'LCG.CERN.ch':['CERN-RAW','CERN-RDST',...]}
    """
    siteSEMapping = {}
    resourceHelper = Resources()
    result = resourceHelper.getEligibleSites()
    if not result['OK']:
        return result
    sites = result['Value']

    for site in sites:
        result = resourceHelper.getEligibleResources('Storage', {'Site': site})
        if not result['OK']:
            continue
        seList = result['Value']

        result = getSiteFullNames(site)
        if not result['OK']:
            continue
        for sName in result['Value']:
            siteSEMapping[sName] = seList

    # Add Sites from the SiteToLocalSEMapping in the CS
    opsHelper = Operations()
    result = opsHelper.getSiteMapping('Storage', 'LocalSE')
    if result['OK']:
        mapping = result['Value']
        for site in mapping:
            if site not in siteSEMapping:
                siteSEMapping[site] = mapping[site]
            else:
                for se in mapping[site]:
                    if se not in siteSEMapping[site]:
                        siteSEMapping[site].append(se)

    return S_OK(siteSEMapping)
Example #23
def getSiteSEMapping():
  """ Returns a dictionary of all sites and their localSEs as a list, e.g.
      {'LCG.CERN.ch':['CERN-RAW','CERN-RDST',...]}
  """
  siteSEMapping = {}
  resourceHelper = Resources()
  result = resourceHelper.getEligibleSites()
  if not result['OK']:
    return result
  sites = result['Value']
  
  for site in sites:
    result = resourceHelper.getEligibleResources( 'Storage', {'Site':site} )
    if not result['OK']:
      continue
    seList = result['Value']
    
    result = getSiteFullNames( site )
    if not result['OK']:
      continue
    for sName in result['Value']:
      siteSEMapping[sName] = seList   

  # Add Sites from the SiteToLocalSEMapping in the CS
  opsHelper = Operations()
  result = opsHelper.getSiteMapping( 'Storage', 'LocalSE' )
  if result['OK']:
    mapping = result['Value']
    for site in mapping:
      if site not in siteSEMapping:
        siteSEMapping[site] = mapping[site]
      else:  
        for se in mapping[site]:
          if se not in siteSEMapping[site]:
            siteSEMapping[site].append( se )

  return S_OK( siteSEMapping )
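
The final block in both versions unions the CS-declared LocalSE lists into the discovered mapping without duplicating entries. The same merge logic in isolation (site and SE names are illustrative; CERN-RAW/CERN-RDST come from the docstring above):

siteSEMapping = {'LCG.CERN.ch': ['CERN-RAW']}
mapping = {'LCG.CERN.ch': ['CERN-RDST'], 'LCG.IN2P3.fr': ['IN2P3-disk']}
for site in mapping:
    if site not in siteSEMapping:
        siteSEMapping[site] = mapping[site]
    else:
        for se in mapping[site]:
            if se not in siteSEMapping[site]:
                siteSEMapping[site].append(se)
# -> {'LCG.CERN.ch': ['CERN-RAW', 'CERN-RDST'], 'LCG.IN2P3.fr': ['IN2P3-disk']}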
Example #24
    def __init__(self, args=None, clients=None):

        super(DowntimeCommand, self).__init__(args, clients)

        self.resources = Resources()

        if 'GOCDBClient' in self.apis:
            self.gClient = self.apis['GOCDBClient']
        else:
            self.gClient = GOCDBClient()

        if 'ResourceManagementClient' in self.apis:
            self.rmClient = self.apis['ResourceManagementClient']
        else:
            self.rmClient = ResourceManagementClient()
Example #25
  def __init__( self ):
    """
    Constructor.

    examples:
      >>> s = Synchronizer()
    """

    self.log        = gLogger.getSubLogger( self.__class__.__name__ )
    self.operations = Operations()
    self.resources  = Resources()

    self.rStatus    = ResourceStatusClient.ResourceStatusClient()
    self.rssConfig  = RssConfiguration()

    self.diracAdmin = DiracAdmin()
Example #26
  def initialize( self ):
    self.replicaManager = ReplicaManager()
    #self.stagerClient = StorageManagerClient()
    self.dataIntegrityClient = DataIntegrityClient()
    self.storageDB = StorageManagementDB()
    # pin lifetime = 1 day
    self.pinLifetime = self.am_getOption( 'PinLifetime', THROTTLING_TIME )
    # Resources helper
    self.resources = Resources()

    # This sets the Default Proxy to the one defined under
    # /Operations/Shifter/DataManager
    # the shifterProxy option in the Configuration can be used to change this default.
    self.am_setOption( 'shifterProxy', 'DataManager' )

    return S_OK()
Example #27
    def export_getSiteMaskSummary(self):
        """ Get the mask status for all the configured sites
    """

        # Get all the configured site names
        result = Resources().getSites()
        if not result['OK']:
            return result
        sites = result['Value']

        # Get the current mask status
        result = jobDB.getSiteMaskStatus()
        if not result['OK']:
            return result
        siteDict = result['Value']
        for site in sites:
            if site not in siteDict:
                siteDict[site] = 'Unknown'

        return S_OK(siteDict)
Example #28
class InputDataAgent(OptimizerModule):
    """
      The specific Optimizer must provide the following methods:
      - initializeOptimizer() before each execution cycle
      - checkJob() - the main method called for each job
  """

    #############################################################################
    def initializeOptimizer(self):
        """Initialize specific parameters for JobSanityAgent.
    """
        self.failedMinorStatus = self.am_getOption('/FailedJobStatus',
                                                   'Input Data Not Available')
        #this will ignore failover SE files
        self.checkFileMetadata = self.am_getOption('CheckFileMetadata', True)

        #Define the shifter proxy needed
        # This sets the Default Proxy to the one defined under
        # /Operations/Shifter/ProductionManager
        # the shifterProxy option in the Configuration can be used to change this default.
        self.am_setOption('shifterProxy', 'ProductionManager')

        try:
            self.replicaManager = ReplicaManager()
        except Exception, e:
            msg = 'Failed to create ReplicaManager'
            self.log.exception(msg)
            return S_ERROR(msg + str(e))

        self.resourceStatus = ResourceStatus()
        self.resourcesHelper = Resources()

        self.seToSiteMapping = {}
        self.lastCScheck = 0
        self.cacheLength = 600

        return S_OK()
Example #29
def initSEs():
    '''
    Initializes SEs statuses taking their values from the CS.
    '''

    subLogger.info('Initializing SEs')

    resources = Resources()

    ses = resources.getEligibleStorageElements()
    if not ses['OK']:
        return ses
    ses = ses['Value']

    statuses = StateMachine.RSSMachine(None).getStates()
    statusTypes = RssConfiguration.RssConfiguration().getConfigStatusType(
        'StorageElement')
    reason = 'dirac-rss-sync'

    subLogger.debug(statuses)
    subLogger.debug(statusTypes)

    rssClient = ResourceStatusClient.ResourceStatusClient()

    for se in ses:

        subLogger.debug(se)

        #opts = gConfig.getOptionsDict( '/Resources/StorageElements/%s' % se )
        opts = resources.getStorageElementOptionsDict(se)
        if not opts['OK']:
            subLogger.warn(opts['Message'])
            continue
        opts = opts['Value']

        subLogger.debug(opts)

        # We copy the list into a new object to remove items INSIDE the loop !
        statusTypesList = statusTypes[:]

        for statusType, status in opts.iteritems():

            #Sanity check...
            if not statusType in statusTypesList:
                continue

            #Transforms statuses to RSS terms
            if status in ('NotAllowed', 'InActive'):
                status = 'Banned'

            if not status in statuses:
                subLogger.error('%s not a valid status for %s - %s' %
                                (status, se, statusType))
                continue

            # We remove from the backtracking
            statusTypesList.remove(statusType)

            subLogger.debug([se, statusType, status, reason])
            result = rssClient.modifyStatusElement('Resource',
                                                   'Status',
                                                   name=se,
                                                   statusType=statusType,
                                                   status=status,
                                                   reason=reason)

            if not result['OK']:
                subLogger.error('Failed to modify')
                subLogger.error(result['Message'])
                continue

        #Backtracking: statusTypes not present on CS
        for statusType in statusTypesList:

            result = rssClient.modifyStatusElement('Resource',
                                                   'Status',
                                                   name=se,
                                                   statusType=statusType,
                                                   status=DEFAULT_STATUS,
                                                   reason=reason)
            if not result['OK']:
                subLogger.error('Error in backtracking for %s,%s,%s' %
                                (se, statusType, DEFAULT_STATUS))
                subLogger.error(result['Message'])

    return S_OK()
Example #30
  return DIRAC.S_OK()

ceFlag = False
def setCEFlag( args ):
  global ceFlag
  ceFlag = True
  return DIRAC.S_OK()


Script.registerSwitch( "V:", "vo=", "choose resources eligible for the given VO", setVO )
Script.registerSwitch( "S", "se", "display storage element information", setSEFlag )
Script.registerSwitch( "C", "ce", "display computing element information", setSEFlag )
Script.setUsageMessage('\n'.join( [ __doc__.split( '\n' )[1],
                                    'Usage:',
                                    '  %s [option|cfgfile] ...' % Script.scriptName, ] )   )
Script.parseCommandLine()

from DIRAC.ConfigurationSystem.Client.Helpers.Resources import Resources, getSites

resources = Resources( vo = 'biomed' )

result = resources.getEligibleSites()
if not result['OK']:
  print "ERROR:", result['Message']
  DIRAC.exit( -1 )

siteList = [ resources.getSiteFullName( site )['Value'] for site in result['Value'] ]

print siteList
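
Given the switches registered above, an invocation of this script might look as follows (the filename is hypothetical; -V/--vo takes a value, while -S/--se and -C/--ce are flags):

$ python show-resources.py --vo=biomed --se --ce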


Example #31
class StageRequestAgent(AgentModule):
    def initialize(self):
        self.replicaManager = ReplicaManager()
        #self.stagerClient = StorageManagerClient()
        self.dataIntegrityClient = DataIntegrityClient()
        self.storageDB = StorageManagementDB()
        # pin lifetime = 1 day
        self.pinLifetime = self.am_getOption('PinLifetime', THROTTLING_TIME)
        # Resources helper
        self.resources = Resources()

        # This sets the Default Proxy to the one defined under
        # /Operations/Shifter/DataManager
        # the shifterProxy option in the Configuration can be used to change this default.
        self.am_setOption('shifterProxy', 'DataManager')

        return S_OK()

    def execute(self):

        # Get the current submitted stage space and the amount of pinned space for each storage element
        res = self.getStorageUsage()
        if not res['OK']:
            return res

        return self.submitStageRequests()

    def getStorageUsage(self):
        """ Fill the current Status of the SE Caches from the DB
    """
        self.storageElementCache = {}

        res = self.storageDB.getSubmittedStagePins()
        if not res['OK']:
            gLogger.fatal(
                "StageRequest.getStorageUsage: Failed to obtain submitted requests from StorageManagementDB.",
                res['Message'])
            return res
        self.storageElementUsage = res['Value']
        if self.storageElementUsage:
            gLogger.info(
                "StageRequest.getStorageUsage: Active stage/pin requests found at the following sites:"
            )
            for storageElement in sortList(self.storageElementUsage.keys()):
                seDict = self.storageElementUsage[storageElement]
                # Convert to GB for printout
                seDict['TotalSize'] = seDict['TotalSize'] / (1000 * 1000 *
                                                             1000.0)
                gLogger.info(
                    "StageRequest.getStorageUsage: %s: %s replicas with a size of %.3f GB."
                    % (storageElement.ljust(15), str(
                        seDict['Replicas']).rjust(6), seDict['TotalSize']))
        if not self.storageElementUsage:
            gLogger.info(
                "StageRequest.getStorageUsage: No active stage/pin requests found."
            )

        return S_OK()

    def submitStageRequests(self):
        """ This manages the following transitions of the Replicas
        * Waiting -> Offline (if the file is not found Cached)
        * Waiting -> StageSubmitted (if the file is found Cached)
        * Offline -> StageSubmitted (if there are no more Waiting replicas)
        """
        # Retry Replicas that have not been Staged in a previous attempt
        res = self._getMissingReplicas()
        if not res['OK']:
            gLogger.fatal(
                "StageRequest.submitStageRequests: Failed to get replicas from StorageManagementDB.",
                res['Message'])
            return res
        seReplicas = res['Value']['SEReplicas']
        allReplicaInfo = res['Value']['AllReplicaInfo']

        if seReplicas:
            gLogger.info(
                "StageRequest.submitStageRequests: Completing partially Staged Tasks"
            )
        for storageElement, seReplicaIDs in seReplicas.items():
            gLogger.debug('Staging at %s:' % storageElement, seReplicaIDs)
            self._issuePrestageRequests(storageElement, seReplicaIDs,
                                        allReplicaInfo)

        # Check Waiting Replicas and select those found Online and all other Replicas from the same Tasks
        res = self._getOnlineReplicas()
        if not res['OK']:
            gLogger.fatal(
                "StageRequest.submitStageRequests: Failed to get replicas from StorageManagementDB.",
                res['Message'])
            return res
        seReplicas = res['Value']['SEReplicas']
        allReplicaInfo = res['Value']['AllReplicaInfo']

        # Check Offline Replicas that fit in the Cache and all other Replicas from the same Tasks
        res = self._getOfflineReplicas()

        if not res['OK']:
            gLogger.fatal(
                "StageRequest.submitStageRequests: Failed to get replicas from StorageManagementDB.",
                res['Message'])
            return res

        # Merge info from both results
        for storageElement, seReplicaIDs in res['Value']['SEReplicas'].items():
            if storageElement not in seReplicas:
                seReplicas[storageElement] = seReplicaIDs
            else:
                for replicaID in seReplicaIDs:
                    if replicaID not in seReplicas[storageElement]:
                        seReplicas[storageElement].append(replicaID)
        allReplicaInfo.update(res['Value']['AllReplicaInfo'])

        gLogger.info(
            "StageRequest.submitStageRequests: Obtained %s replicas for staging."
            % len(allReplicaInfo))
        for storageElement, seReplicaIDs in seReplicas.items():
            gLogger.debug('Staging at %s:' % storageElement, seReplicaIDs)
            self._issuePrestageRequests(storageElement, seReplicaIDs,
                                        allReplicaInfo)
        return S_OK()

    def _getMissingReplicas(self):
        """ This recovers Replicas that were not Staged on a previous attempt (the stage request failed or timed out),
        while other Replicas of the same task are already Staged. If left behind they can produce a deadlock.
        All SEs are considered, even if their Cache is full
    """
        # Get Replicas that are in Staged/StageSubmitted
        gLogger.info(
            'StageRequest._getMissingReplicas: Checking Staged Replicas')

        res = self.__getStagedReplicas()
        if not res['OK']:
            gLogger.fatal(
                "StageRequest._getMissingReplicas: Failed to get replicas from StorageManagementDB.",
                res['Message'])
            return res
        seReplicas = {}

        allReplicaInfo = res['Value']['AllReplicaInfo']
        replicasToStage = []
        for storageElement, seReplicaIDs in res['Value']['SEReplicas'].items():
            # Consider all SEs
            replicasToStage.extend(seReplicaIDs)

        # Get Replicas from the same Tasks as those selected
        res = self.__addAssociatedReplicas(replicasToStage, seReplicas,
                                           allReplicaInfo)
        if not res['OK']:
            gLogger.fatal(
                "StageRequest._getMissingReplicas: Failed to get associated Replicas.",
                res['Message'])

        return res

    def _getOnlineReplicas(self):
        """ This manages the transition
        * Waiting -> Offline (if the file is not found Cached)
        and returns the list of Cached Replicas for which the pin time has to be extended
        SEs for which the cache is currently full are not considered
    """
        # Get all Replicas in Waiting Status associated to Staging Tasks
        gLogger.verbose(
            'StageRequest._getOnlineReplicas: Checking Online Replicas to be handled'
        )

        res = self.__getWaitingReplicas()
        if not res['OK']:
            gLogger.fatal(
                "StageRequest._getOnlineReplicas: Failed to get replicas from StorageManagementDB.",
                res['Message'])
            return res
        seReplicas = {}
        allReplicaInfo = res['Value']['AllReplicaInfo']
        if not len(allReplicaInfo):
            gLogger.info(
                "StageRequest._getOnlineReplicas: There were no Waiting replicas found"
            )
            return res
        gLogger.info(
            "StageRequest._getOnlineReplicas: Obtained %s replicas Waiting for staging."
            % len(allReplicaInfo))
        replicasToStage = []
        for storageElement, seReplicaIDs in res['Value']['SEReplicas'].items():
            if not self.__usage(storageElement) < self.__cache(storageElement):
                gLogger.info(
                    'StageRequest._getOnlineReplicas: Skipping %s, current usage above limit ( %s GB )'
                    % (storageElement, self.__cache(storageElement)))
                # Do not consider those SE that have the Cache full
                continue
            # Check if the Replica Metadata is OK and find out if they are Online or Offline
            res = self.__checkIntegrity(storageElement, seReplicaIDs,
                                        allReplicaInfo)
            if not res['OK']:
                gLogger.error(
                    'StageRequest._getOnlineReplicas: Failed to check Replica Metadata',
                    '(%s): %s' % (storageElement, res['Message']))
            else:
                # keep only Online Replicas
                seReplicas[storageElement] = res['Value']['Online']
                replicasToStage.extend(res['Value']['Online'])

        # Get Replicas from the same Tasks as those selected
        res = self.__addAssociatedReplicas(replicasToStage, seReplicas,
                                           allReplicaInfo)
        if not res['OK']:
            gLogger.fatal(
                "StageRequest._getOnlineReplicas: Failed to get associated Replicas.",
                res['Message'])

        return res

    def _getOfflineReplicas(self):
        """ This checks Replicas in Offline status
        and returns the list of Replicas to be Staged
        SEs for which the cache is currently full are not considered
    """
        # Get all Replicas in Waiting Status associated to Staging Tasks
        gLogger.verbose(
            'StageRequest._getOfflineReplicas: Checking Offline Replicas to be handled'
        )

        res = self.__getOfflineReplicas()
        if not res['OK']:
            gLogger.fatal(
                "StageRequest._getOfflineReplicas: Failed to get replicas from StorageManagementDB.",
                res['Message'])
            return res
        seReplicas = {}
        allReplicaInfo = res['Value']['AllReplicaInfo']
        if not len(allReplicaInfo):
            gLogger.info(
                "StageRequest._getOfflineReplicas: There were no Offline replicas found"
            )
            return res
        gLogger.info(
            "StageRequest._getOfflineReplicas: Obtained %s replicas Offline for staging."
            % len(allReplicaInfo))
        replicasToStage = []

        for storageElement, seReplicaIDs in res['Value']['SEReplicas'].items():
            if not self.__usage(storageElement) < self.__cache(storageElement):
                gLogger.info(
                    'StageRequest._getOfflineReplicas: Skipping %s, current usage above limit ( %s GB )'
                    % (storageElement, self.__cache(storageElement)))
                # Do not consider those SE that have the Cache full
                continue
            seReplicas[storageElement] = []
            for replicaID in sorted(seReplicaIDs):
                seReplicas[storageElement].append(replicaID)
                replicasToStage.append(replicaID)
                self.__add(storageElement, allReplicaInfo[replicaID]['Size'])
                if not self.__usage(storageElement) < self.__cache(
                        storageElement):
                    # Stop adding Replicas when the cache is full
                    break

        # Get Replicas from the same Tasks as those selected
        res = self.__addAssociatedReplicas(replicasToStage, seReplicas,
                                           allReplicaInfo)
        if not res['OK']:
            gLogger.fatal(
                "StageRequest._getOfflineReplicas: Failed to get associated Replicas.",
                res['Message'])

        return res

    def __usage(self, storageElement):
        """ Retrieve current usage of SE
    """
        if not storageElement in self.storageElementUsage:
            self.storageElementUsage[storageElement] = {'TotalSize': 0.}
        return self.storageElementUsage[storageElement]['TotalSize']

    def __cache(self, storageElement):
        """ Retrieve cache size for SE
    """
        if not storageElement in self.storageElementCache:
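            # DiskCacheTB is configured in TB: convert to GB (x 1000) and scale
            # down by THROTTLING_STEPS, a module-level constant not shown here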
            diskCache = self.resources.getStorageElementValue(
                storageElement, 'DiskCacheTB', 1.)
            self.storageElementCache[
                storageElement] = diskCache * 1000. / THROTTLING_STEPS
        return self.storageElementCache[storageElement]

    def __add(self, storageElement, size):
        """ Add size (in bytes) to current usage of storageElement (in GB)
    """
        if not storageElement in self.storageElementUsage:
            self.storageElementUsage[storageElement] = {'TotalSize': 0.}
        size = size / (1000 * 1000 * 1000.0)
        self.storageElementUsage[storageElement]['TotalSize'] += size
        return size

    def _issuePrestageRequests(self, storageElement, seReplicaIDs,
                               allReplicaInfo):
        """ Make the request to the SE and update the DB
    """
        pfnRepIDs = {}
        for replicaID in seReplicaIDs:
            pfn = allReplicaInfo[replicaID]['PFN']
            pfnRepIDs[pfn] = replicaID

        # Now issue the prestage requests for the remaining replicas
        stageRequestMetadata = {}
        updatedPfnIDs = []
        if pfnRepIDs:
            gLogger.info(
                "StageRequest._issuePrestageRequests: Submitting %s stage requests for %s."
                % (len(pfnRepIDs), storageElement))
            res = self.replicaManager.prestageStorageFile(
                pfnRepIDs.keys(), storageElement, lifetime=self.pinLifetime)
            gLogger.debug(
                "StageRequest._issuePrestageRequests: replicaManager.prestageStorageFile: res=",
                res)
            #Daniela: fishy result from ReplicaManager!!! Should NOT return OK
            #res= {'OK': True, 'Value': {'Successful': {}, 'Failed': {'srm://srm-lhcb.cern.ch/castor/cern.ch/grid/lhcb/data/2010/RAW/EXPRESS/LHCb/COLLISION10/71476/071476_0000000241.raw': ' SRM2Storage.__gfal_exec: Failed to perform gfal_prestage.[SE][BringOnline][SRM_INVALID_REQUEST] httpg://srm-lhcb.cern.ch:8443/srm/managerv2: User not able to access specified space token\n'}}}
            #res= {'OK': True, 'Value': {'Successful': {'srm://gridka-dCache.fzk.de/pnfs/gridka.de/lhcb/data/2009/RAW/FULL/LHCb/COLLISION09/63495/063495_0000000001.raw': '-2083846379'}, 'Failed': {}}}

            if not res['OK']:
                gLogger.error(
                    "StageRequest._issuePrestageRequests: Completely failed to submit stage requests for replicas.",
                    res['Message'])
            else:
                for pfn, requestID in res['Value']['Successful'].items():
                    if not stageRequestMetadata.has_key(requestID):
                        stageRequestMetadata[requestID] = []
                    stageRequestMetadata[requestID].append(pfnRepIDs[pfn])
                    updatedPfnIDs.append(pfnRepIDs[pfn])
        if stageRequestMetadata:
            gLogger.info(
                "StageRequest._issuePrestageRequests: %s stage request metadata to be updated."
                % len(stageRequestMetadata))
            res = self.storageDB.insertStageRequest(stageRequestMetadata,
                                                    self.pinLifetime)
            if not res['OK']:
                gLogger.error(
                    "StageRequest._issuePrestageRequests: Failed to insert stage request metadata.",
                    res['Message'])
                return res
            res = self.storageDB.updateReplicaStatus(updatedPfnIDs,
                                                     'StageSubmitted')
            if not res['OK']:
                gLogger.error(
                    "StageRequest._issuePrestageRequests: Failed to insert replica status.",
                    res['Message'])
        return

    def __sortBySE(self, replicaDict):

        seReplicas = {}
        replicaIDs = {}
        for replicaID, info in replicaDict.items():
            lfn = info['LFN']
            storageElement = info['SE']
            size = info['Size']
            pfn = info['PFN']
            replicaIDs[replicaID] = {
                'LFN': lfn,
                'PFN': pfn,
                'Size': size,
                'StorageElement': storageElement
            }
            if not seReplicas.has_key(storageElement):
                seReplicas[storageElement] = []
            seReplicas[storageElement].append(replicaID)
        return S_OK({'SEReplicas': seReplicas, 'AllReplicaInfo': replicaIDs})
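
    # A worked example of the reshaping done by __sortBySE (IDs and PFNs illustrative):
    #   {1: {'LFN': '/x/f1', 'SE': 'CERN-RAW', 'Size': 100, 'PFN': 'srm://x/f1'},
    #    2: {'LFN': '/x/f2', 'SE': 'CERN-RAW', 'Size': 200, 'PFN': 'srm://x/f2'}}
    # becomes S_OK({'SEReplicas': {'CERN-RAW': [1, 2]},
    #               'AllReplicaInfo': {1: {...}, 2: {...}}})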

    def __getStagedReplicas(self):
        """ This obtains the Staged replicas from the Replicas table and for each LFN the requested storage element """
        # First obtain the Staged replicas from the Replicas table
        res = self.storageDB.getStagedReplicas()
        if not res['OK']:
            gLogger.error(
                "StageRequest.__getStagedReplicas: Failed to get replicas with Staged status.",
                res['Message'])
            return res
        if not res['Value']:
            gLogger.debug(
                "StageRequest.__getStagedReplicas: No Staged replicas found to process."
            )
        else:
            gLogger.debug(
                "StageRequest.__getStagedReplicas: Obtained %s Staged replica(s) to process."
                % len(res['Value']))

        return self.__sortBySE(res['Value'])

    def __getWaitingReplicas(self):
        """ This obtains the Waiting replicas from the Replicas table and for each LFN the requested storage element """
        # First obtain the Waiting replicas from the Replicas table
        res = self.storageDB.getWaitingReplicas()
        if not res['OK']:
            gLogger.error(
                "StageRequest.__getWaitingReplicas: Failed to get replicas with Waiting status.",
                res['Message'])
            return res
        if not res['Value']:
            gLogger.debug(
                "StageRequest.__getWaitingReplicas: No Waiting replicas found to process."
            )
        else:
            gLogger.debug(
                "StageRequest.__getWaitingReplicas: Obtained %s Waiting replicas(s) to process."
                % len(res['Value']))

        return self.__sortBySE(res['Value'])

    def __getOfflineReplicas(self):
        """ This obtains the Offline replicas from the Replicas table and for each LFN the requested storage element """
        # First obtain the Offline replicas from the Replicas table
        res = self.storageDB.getOfflineReplicas()
        if not res['OK']:
            gLogger.error(
                "StageRequest.__getOfflineReplicas: Failed to get replicas with Offline status.",
                res['Message'])
            return res
        if not res['Value']:
            gLogger.debug(
                "StageRequest.__getOfflineReplicas: No Offline replicas found to process."
            )
        else:
            gLogger.debug(
                "StageRequest.__getOfflineReplicas: Obtained %s Offline replica(s) to process."
                % len(res['Value']))

        return self.__sortBySE(res['Value'])

    def __addAssociatedReplicas(self, replicasToStage, seReplicas,
                                allReplicaInfo):
        """ Retrieve the list of Replicas that belong to the same Tasks as the provided list
    """
        res = self.storageDB.getAssociatedReplicas(replicasToStage)
        if not res['OK']:
            gLogger.fatal(
                "StageRequest.__addAssociatedReplicas: Failed to get associated Replicas.",
                res['Message'])
            return res
        addReplicas = {'Offline': {}, 'Waiting': {}}
        replicaIDs = {}
        for replicaID, info in res['Value'].items():
            lfn = info['LFN']
            storageElement = info['SE']
            size = info['Size']
            pfn = info['PFN']
            status = info['Status']
            if status not in ['Waiting', 'Offline']:
                continue
            if not addReplicas[status].has_key(storageElement):
                addReplicas[status][storageElement] = []
            replicaIDs[replicaID] = {
                'LFN': lfn,
                'PFN': pfn,
                'Size': size,
                'StorageElement': storageElement
            }
            addReplicas[status][storageElement].append(replicaID)

        waitingReplicas = addReplicas['Waiting']
        offlineReplicas = addReplicas['Offline']
        newReplicaInfo = replicaIDs
        allReplicaInfo.update(newReplicaInfo)

        # First handle Waiting Replicas for which metadata is to be checked
        for storageElement, seReplicaIDs in waitingReplicas.items():
            for replicaID in list(seReplicaIDs):
                if replicaID in replicasToStage:
                    seReplicaIDs.remove(replicaID)
            res = self.__checkIntegrity(storageElement, seReplicaIDs,
                                        allReplicaInfo)
            if not res['OK']:
                gLogger.error(
                    'StageRequest.__addAssociatedReplicas: Failed to check Replica Metadata',
                    '(%s): %s' % (storageElement, res['Message']))
            else:
                # keep all Replicas (Online and Offline)
                if not storageElement in seReplicas:
                    seReplicas[storageElement] = []
                seReplicas[storageElement].extend(res['Value']['Online'])
                replicasToStage.extend(res['Value']['Online'])
                seReplicas[storageElement].extend(res['Value']['Offline'])
                replicasToStage.extend(res['Value']['Offline'])

        # Then handle Offline Replicas for which metadata is already checked
        for storageElement, seReplicaIDs in offlineReplicas.items():
            if not storageElement in seReplicas:
                seReplicas[storageElement] = []
            for replicaID in sorted(seReplicaIDs):
                if replicaID in replicasToStage:
                    seReplicaIDs.remove(replicaID)
            seReplicas[storageElement].extend(seReplicaIDs)
            replicasToStage.extend(seReplicaIDs)

        for replicaID in allReplicaInfo.keys():
            if replicaID not in replicasToStage:
                del allReplicaInfo[replicaID]

        totalSize = 0
        for storageElement in sorted(seReplicas.keys()):
            replicaIDs = seReplicas[storageElement]
            size = 0
            for replicaID in replicaIDs:
                size += self.__add(storageElement,
                                   allReplicaInfo[replicaID]['Size'])

            gLogger.info(
                'StageRequest.__addAssociatedReplicas:  Considering %s GB to be staged at %s'
                % (size, storageElement))
            totalSize += size

        gLogger.info(
            "StageRequest.__addAssociatedReplicas: Obtained %s GB for staging."
            % totalSize)

        return S_OK({
            'SEReplicas': seReplicas,
            'AllReplicaInfo': allReplicaInfo
        })

    def __checkIntegrity(self, storageElement, seReplicaIDs, allReplicaInfo):
        """ Check the integrity of the files to ensure they are available
        Updates status of Offline Replicas for a later pass
        Return the list of Online replicas to be Staged
        """
        if not seReplicaIDs:
            return S_OK({'Online': [], 'Offline': []})

        pfnRepIDs = {}
        for replicaID in seReplicaIDs:
            pfn = allReplicaInfo[replicaID]['PFN']
            pfnRepIDs[pfn] = replicaID

        gLogger.info(
            "StageRequest.__checkIntegrity: Checking the integrity of %s replicas at %s."
            % (len(pfnRepIDs), storageElement))
        res = self.replicaManager.getStorageFileMetadata(
            pfnRepIDs.keys(), storageElement)
        if not res['OK']:
            gLogger.error(
                "StageRequest.__checkIntegrity: Completely failed to obtain metadata for replicas.",
                res['Message'])
            return res

        terminalReplicaIDs = {}
        onlineReplicaIDs = []
        offlineReplicaIDs = []
        for pfn, metadata in res['Value']['Successful'].items():

            if metadata['Size'] != allReplicaInfo[pfnRepIDs[pfn]]['Size']:
                gLogger.error(
                    "StageRequest.__checkIntegrity: PFN StorageElement size does not match FileCatalog",
                    pfn)
                terminalReplicaIDs[pfnRepIDs[
                    pfn]] = 'PFN StorageElement size does not match FileCatalog'
                pfnRepIDs.pop(pfn)
            elif metadata['Lost']:
                gLogger.error(
                    "StageRequest.__checkIntegrity: PFN has been Lost by the StorageElement",
                    pfn)
                terminalReplicaIDs[
                    pfnRepIDs[pfn]] = 'PFN has been Lost by the StorageElement'
                pfnRepIDs.pop(pfn)
            elif metadata['Unavailable']:
                gLogger.error(
                    "StageRequest.__checkIntegrity: PFN is declared Unavailable by the StorageElement",
                    pfn)
                terminalReplicaIDs[pfnRepIDs[
                    pfn]] = 'PFN is declared Unavailable by the StorageElement'
                pfnRepIDs.pop(pfn)
            else:
                if metadata['Cached']:
                    gLogger.verbose(
                        "StageRequest.__checkIntegrity: Cache hit for file.")
                    onlineReplicaIDs.append(pfnRepIDs[pfn])
                else:
                    offlineReplicaIDs.append(pfnRepIDs[pfn])

        for pfn, reason in res['Value']['Failed'].items():
            if re.search('File does not exist', reason):
                gLogger.error(
                    "StageRequest.__checkIntegrity: PFN does not exist in the StorageElement",
                    pfn)
                terminalReplicaIDs[pfnRepIDs[
                    pfn]] = 'PFN does not exist in the StorageElement'
            pfnRepIDs.pop(pfn)

        # Update the states of the replicas in the database #TODO Sent status to integrity DB
        if terminalReplicaIDs:
            gLogger.info(
                "StageRequest.__checkIntegrity: %s replicas are terminally failed."
                % len(terminalReplicaIDs))
            res = self.storageDB.updateReplicaFailure(terminalReplicaIDs)
            if not res['OK']:
                gLogger.error(
                    "StageRequest.__checkIntegrity: Failed to update replica failures.",
                    res['Message'])
        if onlineReplicaIDs:
            gLogger.info(
                "StageRequest.__checkIntegrity: %s replicas found Online." %
                len(onlineReplicaIDs))
        if offlineReplicaIDs:
            gLogger.info(
                "StageRequest.__checkIntegrity: %s replicas found Offline." %
                len(offlineReplicaIDs))
            res = self.storageDB.updateReplicaStatus(offlineReplicaIDs,
                                                     'Offline')
        return S_OK({'Online': onlineReplicaIDs, 'Offline': offlineReplicaIDs})

    def __reportProblematicFiles(self, lfns, reason):
        # Reporting is currently disabled: the early return below short-circuits
        # the method, so everything after it is unreachable (kept for reference).
        return S_OK()
        res = self.dataIntegrityClient.setFileProblematic(
            lfns, reason, self.name)
        if not res['OK']:
            gLogger.error(
                "RequestPreparation.__reportProblematicFiles: Failed to report missing files.",
                res['Message'])
            return res
        if res['Value']['Successful']:
            gLogger.info(
                "RequestPreparation.__reportProblematicFiles: Successfully reported %s missing files."
                % len(res['Value']['Successful']))
        if res['Value']['Failed']:
            gLogger.info(
                "RequestPreparation.__reportProblematicFiles: Failed to report %s problematic files."
                % len(res['Value']['Failed']))
        return res
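
A small reading aid, not part of the original agent: a hedged sketch of how the S_OK payload returned by __checkIntegrity above could be consumed (summariseIntegrityResult is a hypothetical helper name).

from DIRAC import S_OK, S_ERROR

def summariseIntegrityResult(res):
    """Hypothetical helper: condense the S_OK({'Online': [...], 'Offline': [...]})
    structure returned by __checkIntegrity into simple counts."""
    if not res['OK']:
        return S_ERROR(res['Message'])
    return S_OK({'Online': len(res['Value']['Online']),
                 'Offline': len(res['Value']['Offline'])})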
Example No. 33
class StorageElement:
  """
  .. class:: StorageElement

  common interface to the grid storage element
  """

  def __init__( self, name, protocols = None, vo = None ):
    """ c'tor

    :param str name: SE name
    :param list protocols: requested protocols
    """

    self.vo = vo
    if not vo:
      result = getVOfromProxyGroup()
      if not result['OK']:
        # A constructor cannot return a result dictionary; flag the object as
        # invalid and bail out instead of triggering a TypeError at call time
        self.valid = False
        self.name = name
        self.errorReason = result['Message']
        return
      self.vo = result['Value']
    self.opHelper = Operations( vo = self.vo )
    self.resources = Resources( vo = self.vo )

    proxiedProtocols = gConfig.getValue( '/LocalSite/StorageElements/ProxyProtocols', "" ).split( ',' )
    useProxy = False
    result = self.resources.getAccessProtocols( name )
    if result['OK'] and result['Value']:
      ap = result['Value'][0]
      useProxy = ( self.resources.getAccessProtocolValue( ap, "Protocol", "UnknownProtocol" )
                   in proxiedProtocols )

    #print "Proxy", name, proxiedProtocols, \
    #gConfig.getValue( "/Resources/StorageElements/%s/AccessProtocol.1/Protocol" % name, "xxx" )

    if not useProxy:
      useProxy = gConfig.getValue( '/LocalSite/StorageElements/%s/UseProxy' % name, False )
    if not useProxy:
      useProxy = self.opHelper.getValue( '/Services/StorageElements/%s/UseProxy' % name, False )

    self.valid = True
    if protocols is None:
      res = StorageFactory( useProxy ).getStorages( name, protocolList = [] )
    else:
      res = StorageFactory( useProxy ).getStorages( name, protocolList = protocols )
    if not res['OK']:
      self.valid = False
      self.name = name
      self.errorReason = res['Message']
    else:
      factoryDict = res['Value']
      self.name = factoryDict['StorageName']
      self.options = factoryDict['StorageOptions']
      self.localProtocols = factoryDict['LocalProtocols']
      self.remoteProtocols = factoryDict['RemoteProtocols']
      self.storages = factoryDict['StorageObjects']
      self.protocolOptions = factoryDict['ProtocolOptions']
      self.turlProtocols = factoryDict['TurlProtocols']

    self.log = gLogger.getSubLogger( "SE[%s]" % self.name )

    self.readMethods = [ 'getFile',
                         'getAccessUrl',
                         'getTransportURL',
                         'prestageFile',
                         'prestageFileStatus',
                         'getDirectory']

    self.writeMethods = [ 'retransferOnlineFile',
                          'putFile',
                          'replicateFile',
                          'pinFile',
                          'releaseFile',
                          'createDirectory',
                          'putDirectory' ]

    self.removeMethods = [ 'removeFile', 'removeDirectory' ]

    self.checkMethods = [ 'exists',
                          'getDirectoryMetadata',
                          'getDirectorySize',
                          'getFileSize',
                          'getFileMetadata',
                          'listDirectory',
                          'isDirectory',
                          'isFile',
                           ]

    self.okMethods = [ 'getLocalProtocols',
                       'getPfnForProtocol',
                       'getPfnForLfn',
                       'getPfnPath',
                       'getProtocols',
                       'getRemoteProtocols',
                       'getStorageElementName',
                       'getStorageElementOption',
                       'getStorageParameters',
                       'isLocalSE' ]

    self.__resourceStatus = ResourceStatus()
    
  def dump( self ):
    """ Dump to the logger a summary of the StorageElement items. """
    self.log.info( "dump: Preparing dump for StorageElement %s." % self.name )
    if not self.valid:
      self.log.error( "dump: Failed to create StorageElement plugins.", self.errorReason )
      return
    i = 1
    outStr = "\n\n============ Options ============\n"
    for key in sorted( self.options ):
      outStr = "%s%s: %s\n" % ( outStr, key.ljust( 15 ), self.options[key] )

    for storage in self.storages:
      outStr = "%s============Protocol %s ============\n" % ( outStr, i )
      res = storage.getParameters()
      storageParameters = res['Value']
      for key in sorted( storageParameters ):
        outStr = "%s%s: %s\n" % ( outStr, key.ljust( 15 ), storageParameters[key] )
      i = i + 1
    self.log.info( outStr )

  #################################################################################################
  #
  # These are the basic get functions for storage configuration
  #

  def getStorageElementName( self ):
    """ SE name getter """
    self.log.verbose( "getStorageElementName: The Storage Element name is %s." % self.name )
    return S_OK( self.name )

  def getChecksumType( self ):
    """ get local /Resources/StorageElements/SEName/ChecksumType option if defined, otherwise
        global /Resources/StorageElements/ChecksumType
    """
    return S_OK( str( gConfig.getValue( "/Resources/StorageElements/ChecksumType", "ADLER32" ) ).upper()
                 if "ChecksumType" not in self.options else str( self.options["ChecksumType"] ).upper() )

  def getStatus( self ):
    """
     Return Status of the SE, a dictionary with:
      - Read: True (is allowed), False (it is not allowed)
      - Write: True (is allowed), False (it is not allowed)
      - Remove: True (is allowed), False (it is not allowed)
      - Check: True (is allowed), False (it is not allowed).
      NB: Check always allowed IF Read is allowed (regardless of what set in the Check option of the configuration)
      - DiskSE: True if TXDY with Y > 0 (defaults to True)
      - TapeSE: True if TXDY with X > 0 (defaults to False)
      - TotalCapacityTB: float (-1 if not defined)
      - DiskCacheTB: float (-1 if not defined)
    """
    retDict = {}
    if not self.valid:
      retDict['Read'] = False
      retDict['Write'] = False
      retDict['Remove'] = False
      retDict['Check'] = False
      retDict['DiskSE'] = False
      retDict['TapeSE'] = False
      retDict['TotalCapacityTB'] = -1
      retDict['DiskCacheTB'] = -1
      return S_OK( retDict )

    # If nothing is defined in the CS Access is allowed
    # If something is defined, then it must be set to Active
    retDict['Read'] = self.__resourceStatus.isUsableStorage( self.name, 'ReadAccess' )
    retDict['Write'] = self.__resourceStatus.isUsableStorage( self.name, 'WriteAccess' )
    retDict['Remove'] = self.__resourceStatus.isUsableStorage( self.name, 'RemoveAccess' )
    if retDict['Read']:
      retDict['Check'] = True
    else:
      retDict['Check'] = self.__resourceStatus.isUsableStorage( self.name, 'CheckAccess' )
    diskSE = True
    tapeSE = False
    if 'SEType' in self.options:
      # Type should follow the convention TXDY
      seType = self.options['SEType']
      diskSE = re.search( 'D[1-9]', seType ) is not None
      tapeSE = re.search( 'T[1-9]', seType ) is not None
    retDict['DiskSE'] = diskSE
    retDict['TapeSE'] = tapeSE
    try:
      retDict['TotalCapacityTB'] = float( self.options['TotalCapacityTB'] )
    except Exception:
      retDict['TotalCapacityTB'] = -1
    try:
      retDict['DiskCacheTB'] = float( self.options['DiskCacheTB'] )
    except Exception:
      retDict['DiskCacheTB'] = -1

    return S_OK( retDict )

  def isValid( self, operation = '' ):
    """ check CS/RSS statuses for :operation:

    :param str operation: operation name
    """
    self.log.debug( "isValid: Determining whether the StorageElement %s is valid for %s" % ( self.name,
                                                                                             operation ) )

    if ( not operation ) or ( operation in self.okMethods ):
      return S_OK()

    if not self.valid:
      self.log.error( "isValid: Failed to create StorageElement plugins.", self.errorReason )
      return S_ERROR( self.errorReason )
    # Determine whether the StorageElement is valid for checking, reading, writing
    res = self.getStatus()
    if not res[ 'OK' ]:
      self.log.error( "Could not call getStatus" )
      return S_ERROR( "StorageElement.isValid could not call the getStatus method" )
    checking = res[ 'Value' ][ 'Check' ]
    reading = res[ 'Value' ][ 'Read' ]
    writing = res[ 'Value' ][ 'Write' ]
    removing = res[ 'Value' ][ 'Remove' ]

    # Determine whether the requested operation can be fulfilled
    # NB: 'not operation' can never be true here (an empty operation already
    # returned S_OK above), so this guard is effectively dead code.
    if ( not operation ) and ( not reading ) and ( not writing ) and ( not checking ):
      self.log.error( "isValid: Read, write and check access not permitted." )
      return S_ERROR( "StorageElement.isValid: Read, write and check access not permitted." )

    # The supplied operation can be 'Read','Write' or any of the possible StorageElement methods.
    if ( operation in self.readMethods ) or ( operation.lower() in ( 'read', 'readaccess' ) ):
      operation = 'ReadAccess'
    elif operation in self.writeMethods or ( operation.lower() in ( 'write', 'writeaccess' ) ):
      operation = 'WriteAccess'
    elif operation in self.removeMethods or ( operation.lower() in ( 'remove', 'removeaccess' ) ):
      operation = 'RemoveAccess'
    elif operation in self.checkMethods or ( operation.lower() in ( 'check', 'checkaccess' ) ):
      operation = 'CheckAccess'
    else:
      self.log.error( "isValid: The supplied operation is not known.", operation )
      return S_ERROR( "StorageElement.isValid: The supplied operation is not known." )
    self.log.debug( "in isValid check the operation: %s " % operation )
    # Check if the operation is valid
    if operation == 'CheckAccess':
      if not reading:
        if not checking:
          self.log.error( "isValid: Check access not currently permitted." )
          return S_ERROR( "StorageElement.isValid: Check access not currently permitted." )
    if operation == 'ReadAccess':
      if not reading:
        self.log.error( "isValid: Read access not currently permitted." )
        return S_ERROR( "StorageElement.isValid: Read access not currently permitted." )
    if operation == 'WriteAccess':
      if not writing:
        self.log.error( "isValid: Write access not currently permitted." )
        return S_ERROR( "StorageElement.isValid: Write access not currently permitted." )
    if operation == 'RemoveAccess':
      if not removing:
        self.log.error( "isValid: Remove access not currently permitted." )
        return S_ERROR( "StorageElement.isValid: Remove access not currently permitted." )
    return S_OK()

  def getProtocols( self ):
    """ Get the list of all the protocols defined for this Storage Element
    """
    if not self.valid:
      return S_ERROR( self.errorReason )
    self.log.verbose( "getProtocols: Obtaining all protocols." )
    allProtocols = self.localProtocols + self.remoteProtocols
    return S_OK( allProtocols )

  def getRemoteProtocols( self ):
    """ Get the list of all the remote access protocols defined for this Storage Element
    """
    if not self.valid:
      return S_ERROR( self.errorReason )
    self.log.verbose( "getRemoteProtocols: Obtaining remote protocols for %s." % self.name )
    return S_OK( self.remoteProtocols )

  def getLocalProtocols( self ):
    """ Get the list of all the local access protocols defined for this Storage Element
    """
    if not self.valid:
      return S_ERROR( self.errorReason )
    self.log.verbose( "getLocalProtocols: Obtaining local protocols for %s." % self.name )
    return S_OK( self.localProtocols )

  def getStorageElementOption( self, option ):
    """ Get the value for the option supplied from self.options
    """
    if not self.valid:
      return S_ERROR( self.errorReason )
    self.log.verbose( "getStorageElementOption: Obtaining %s option for Storage Element %s." % ( option,
                                                                                                 self.name ) )
    if option in self.options:
      optionValue = self.options[option]
      return S_OK( optionValue )
    else:
      errStr = "getStorageElementOption: Option not defined for SE."
      self.log.error( errStr, "%s for %s" % ( option, self.name ) )
      return S_ERROR( errStr )

  def getStorageParameters( self, protocol ):
    """ Get protocol specific options
    """
    self.log.verbose( "getStorageParameters: Obtaining storage parameters for %s protocol %s." % ( self.name,
                                                                                                   protocol ) )
    res = self.getProtocols()
    if not res['OK']:
      return res
    availableProtocols = res['Value']
    if protocol not in availableProtocols:
      errStr = "getStorageParameters: Requested protocol not available for SE."
      self.log.warn( errStr, '%s for %s' % ( protocol, self.name ) )
      return S_ERROR( errStr )
    for storage in self.storages:
      res = storage.getParameters()
      storageParameters = res['Value']
      if storageParameters['ProtocolName'] == protocol:
        return S_OK( storageParameters )
    errStr = "getStorageParameters: Requested protocol supported but no object found."
    self.log.error( errStr, "%s for %s" % ( protocol, self.name ) )
    return S_ERROR( errStr )

  def isLocalSE( self ):
    """ Test if the Storage Element is local in the current context
    """
    import DIRAC
    self.log.verbose( "isLocalSE: Determining whether %s is a local SE." % self.name )
    localSEs = getSEsForSite( DIRAC.siteName() )['Value']
    if self.name in localSEs:
      return S_OK( True )
    else:
      return S_OK( False )

  #################################################################################################
  #
  # These are the basic get functions for pfn manipulation
  #

  def getPfnForProtocol( self, pfn, protocol, withPort = True ):
    """ Transform the input pfn into another with the given protocol for the Storage Element.
    """
    res = self.getProtocols()
    if not res['OK']:
      return res
    if type( protocol ) == StringType:
      protocols = [protocol]
    elif type( protocol ) == ListType:
      protocols = protocol
    else:
      errStr = "getPfnForProtocol: Supplied protocol must be string or list of strings."
      self.log.error( errStr, "%s %s" % ( protocol, self.name ) )
      return S_ERROR( errStr )
    availableProtocols = res['Value']
    protocolsToTry = []
    for protocol in protocols:
      if protocol in availableProtocols:
        protocolsToTry.append( protocol )
      else:
        errStr = "getPfnForProtocol: Requested protocol not available for SE."
        self.log.debug( errStr, '%s for %s' % ( protocol, self.name ) )
    if not protocolsToTry:
      errStr = "getPfnForProtocol: None of the requested protocols were available for SE."
      self.log.error( errStr, '%s for %s' % ( protocol, self.name ) )
      return S_ERROR( errStr )
    # Check all available storages for required protocol then contruct the PFN
    for storage in self.storages:
      res = storage.getParameters()
      if res['Value']['ProtocolName'] in protocolsToTry:
        res = pfnparse( pfn )
        if res['OK']:
          res = storage.getProtocolPfn( res['Value'], withPort )
          if res['OK']:
            return res
    errStr = "getPfnForProtocol: Failed to get PFN for requested protocols."
    self.log.error( errStr, "%s for %s" % ( protocols, self.name ) )
    return S_ERROR( errStr )

  def getPfnPath( self, pfn ):
    """  Get the part of the PFN path below the basic storage path.
         This path must coincide with the LFN of the file in order to be compliant with the LHCb conventions.
    """
    if not self.valid:
      return S_ERROR( self.errorReason )
    res = pfnparse( pfn )
    if not res['OK']:
      return res
    fullPfnPath = '%s/%s' % ( res['Value']['Path'], res['Value']['FileName'] )

    # Check all available storages and check whether the pfn is for that protocol
    pfnPath = ''
    for storage in self.storages:
      res = storage.isPfnForProtocol( pfn )
      if res['OK']:
        if res['Value']:
          res = storage.getParameters()
          saPath = res['Value']['Path']
          if not saPath:
            # If the sa path doesn't exist then the pfn path is the entire string
            pfnPath = fullPfnPath
          else:
            if fullPfnPath.startswith( saPath ):
              # Strip the storage area path prefix; re.search/replace would treat
              # the path as a regular expression and strip all occurrences
              pfnPath = fullPfnPath[len( saPath ):]
      if pfnPath:
        return S_OK( pfnPath )
    # This should never happen. DANGER!!
    errStr = "getPfnPath: Failed to get the pfn path for any of the protocols!!"
    self.log.error( errStr )
    return S_ERROR( errStr )

  def getPfnForLfn( self, lfn ):
    """ Get the full PFN constructed from the LFN.
    """
    if not self.valid:
      return S_ERROR( self.errorReason )
    for storage in self.storages:
      res = storage.getPFNBase()
      if res['OK']:
        fullPath = "%s%s" % ( res['Value'], lfn )
        return S_OK( fullPath )
    # This should never happen. DANGER!!
    errStr = "getPfnForLfn: Failed to get the full pfn for any of the protocols!!"
    self.log.error( errStr )
    return S_ERROR( errStr )

  def getPFNBase( self ):
    """ Get the base to construct a PFN
    """
    if not self.storages:
      return S_ERROR( 'No storages defined' )
    for storage in self.storages:
      result = storage.getPFNBase()
      if result['OK']:
        return result

    return result

  ###########################################################################################
  #
  # This is the generic wrapper for file operations
  #

  def retransferOnlineFile( self, pfn, singleFile = False ):
    """ execcute 'retransferOnlineFile' operation """
    return { True : self.__executeSingleFile,
             False : self.__executeFunction }[bool( singleFile )]( pfn, 'retransferOnlineFile' )

  def exists( self, pfn, singleFile = False ):
    """ execute 'exists' operation  """
    return { True : self.__executeSingleFile,
             False : self.__executeFunction }[bool( singleFile )]( pfn, 'exists' )


  def isFile( self, pfn, singleFile = False ):
    """ execute 'isFile' operation """
    return { True : self.__executeSingleFile,
             False : self.__executeFunction }[bool( singleFile )]( pfn, 'isFile' )

  def getFile( self, pfn, localPath = False, singleFile = False ):
    """ execute 'getFile' operation """
    return { True : self.__executeSingleFile,
             False : self.__executeFunction }[bool( singleFile )]( pfn, 'getFile', { 'localPath': localPath } )

  def putFile( self, pfn, sourceSize = 0, singleFile = False ):
    """ execute 'putFile' operation """
    return { True : self.__executeSingleFile,
             False : self.__executeFunction }[bool( singleFile )]( pfn, 'putFile', { 'sourceSize': sourceSize } )

  def replicateFile( self, pfn, sourceSize = 0, singleFile = False ):
    """ execute 'putFile' as replicate """
    return { True : self.__executeSingleFile,
             False : self.__executeFunction }[bool( singleFile )]( pfn, 'putFile', { 'sourceSize': sourceSize } )

  def getFileMetadata( self, pfn, singleFile = False ):
    """ execute 'getFileMetadata' operation """
    return { True : self.__executeSingleFile,
             False : self.__executeFunction }[bool( singleFile )]( pfn, 'getFileMetadata' )

  def getFileSize( self, pfn, singleFile = False ):
    """ execute 'getFileSize' operation """
    return { True : self.__executeSingleFile,
             False : self.__executeFunction }[bool( singleFile )]( pfn, 'getFileSize' )

  def getAccessUrl( self, pfn, protocol = False, singleFile = False ):
    """ execute 'getTransportURL' operation """
    if not protocol:
      protocols = self.turlProtocols
    else:
      protocols = [protocol]
    return { True : self.__executeSingleFile,
             False : self.__executeFunction }[bool( singleFile )]( pfn, 'getTransportURL', {'protocols': protocols} )

  def removeFile( self, pfn, singleFile = False ):
    """ execute 'removeFile' operation """
    return { True : self.__executeSingleFile,
             False : self.__executeFunction }[bool( singleFile )]( pfn, 'removeFile' )

  def prestageFile( self, pfn, lifetime = 86400, singleFile = False ):
    """ execute 'prestageFile' operation """
    return { True : self.__executeSingleFile,
             False : self.__executeFunction }[bool( singleFile )]( pfn, 'prestageFile', { 'lifetime': lifetime } )

  def prestageFileStatus( self, pfn, singleFile = False ):
    """ execute 'prestageFileStatus' operation """
    return { True : self.__executeSingleFile,
             False : self.__executeFunction }[bool( singleFile )]( pfn, 'prestageFileStatus' )

  def pinFile( self, pfn, lifetime = 60 * 60 * 24, singleFile = False ):
    """ execute 'pinFile' operation """
    return { True : self.__executeSingleFile,
             False : self.__executeFunction }[bool( singleFile )]( pfn, 'pinFile', { 'lifetime': lifetime } )

  def releaseFile( self, pfn, singleFile = False ):
    """ execute 'releaseFile' operation """
    return { True : self.__executeSingleFile,
             False : self.__executeFunction }[bool( singleFile )]( pfn, 'releaseFile' )

  def isDirectory( self, pfn, singleDirectory = False ):
    """ execute 'isDirectory' operation """
    return { True : self.__executeSingleFile,
             False : self.__executeFunction }[bool( singleDirectory )]( pfn, 'isDirectory' )

  def getDirectoryMetadata( self, pfn, singleDirectory = False ):
    """ execute 'getDirectoryMetadata' operation """
    return { True : self.__executeSingleFile,
             False : self.__executeFunction }[bool( singleDirectory )]( pfn, 'getDirectoryMetadata' )

  def getDirectorySize( self, pfn, singleDirectory = False ):
    """ execute 'getDirectorySize' operation """
    return { True : self.__executeSingleFile,
             False : self.__executeFunction }[bool( singleDirectory )]( pfn, 'getDirectorySize' )

  def listDirectory( self, pfn, singleDirectory = False ):
    """ execute 'listDirectory' operation """
    return { True : self.__executeSingleFile,
             False : self.__executeFunction }[bool( singleDirectory )]( pfn, 'listDirectory' )

  def removeDirectory( self, pfn, recursive = False, singleDirectory = False ):
    """ execute 'removeDirectory' operation """
    return { True : self.__executeSingleFile,
             False : self.__executeFunction }[bool( singleDirectory )]( pfn, 'removeDirectory', {'recursive':
                                                                                               recursive} )

  def createDirectory( self, pfn, singleDirectory = False ):
    """ execute 'createDirectory' operation """
    return { True : self.__executeSingleFile,
             False : self.__executeFunction }[bool( singleDirectory )]( pfn, 'createDirectory' )

  def putDirectory( self, pfn, singleDirectory = False ):
    """ execute 'putDirectory' operation """
    return { True : self.__executeSingleFile,
             False : self.__executeFunction }[bool( singleDirectory )]( pfn, 'putDirectory' )

  def getDirectory( self, pfn, localPath = False, singleDirectory = False ):
    """ execute 'getDirectory' operation """
    return { True : self.__executeSingleFile,
             False : self.__executeFunction }[bool( singleDirectory )]( pfn, 'getDirectory', { 'localPath':
                                                                                             localPath } )

  def __executeSingleFile( self, pfn, operation, arguments = None ):
    """ execute for single file """
    if arguments is None:
      res = self.__executeFunction( pfn, operation, {} )
    else:
      res = self.__executeFunction( pfn, operation, arguments )
    if type( pfn ) == ListType:
      pfn = pfn[0]
    elif type( pfn ) == DictType:
      pfn = pfn.keys()[0]
    if not res['OK']:
      return res
    elif pfn in res['Value']['Failed']:
      errorMessage = res['Value']['Failed'][pfn]
      return S_ERROR( errorMessage )
    else:
      return S_OK( res['Value']['Successful'][pfn] )

  def __executeFunction( self, pfn, method, argsDict = None ):
    """
        'pfn' is the physical file name (as registered in the LFC)
        'method' is the functionality to be executed
    """
    ## default args  = no args
    argsDict = argsDict if argsDict else {}
    if type( pfn ) in StringTypes:
      pfns = {pfn:False}
    elif type( pfn ) == ListType:
      pfns = {}
      for url in pfn:
        pfns[url] = False
    elif type( pfn ) == DictType:
      pfns = pfn.copy()
    else:
      errStr = "__executeFunction: Supplied pfns must be string or list of strings or a dictionary."
      self.log.error( errStr )
      return S_ERROR( errStr )

    if not pfns:
      self.log.verbose( "__executeFunction: No pfns supplied." )
      return S_OK( {'Failed':{}, 'Successful':{}} )
    self.log.verbose( "__executeFunction: Attempting to perform '%s' operation with %s pfns." % ( method,
                                                                                                  len( pfns ) ) )

    res = self.isValid( operation = method )
    if not res['OK']:
      return res
    else:
      if not self.valid:
        return S_ERROR( self.errorReason )

    successful = {}
    failed = {}
    localSE = self.isLocalSE()['Value']
    # Try all of the storages one by one
    for storage in self.storages:
      # Determine whether to use this storage object
      res = storage.getParameters()
      useProtocol = True
      if not res['OK']:
        self.log.error( "__executeFunction: Failed to get storage parameters.", "%s %s" % ( self.name,
                                                                                            res['Message'] ) )
        useProtocol = False
      else:
        protocolName = res['Value']['ProtocolName']
        if not pfns:
          useProtocol = False
          self.log.verbose( "__executeFunction: No pfns to be attempted for %s protocol." % protocolName )
        elif not ( protocolName in self.remoteProtocols ) and not localSE:
          # If the SE is not local then we can't use local protocols
          useProtocol = False
          self.log.verbose( "__executeFunction: Protocol not appropriate for use: %s." % protocolName )
      if useProtocol:
        self.log.verbose( "__executeFunction: Generating %s protocol PFNs for %s." % ( len( pfns ),
                                                                                       protocolName ) )
        res = self.__generatePfnDict( pfns, storage )
        pfnDict = res['Value']
        failed.update( res['Failed'] )
        if not pfnDict:
          self.log.verbose( "__executeFunction: No pfns generated for protocol %s." % protocolName )
        else:
          self.log.verbose( "__executeFunction: Attempting to perform '%s' for %s physical files" % ( method,
                                                                                                      len( pfnDict ) ) )
          fcn = None
          if hasattr( storage, method ) and callable( getattr( storage, method ) ):
            fcn = getattr( storage, method )
          if not fcn:
            return S_ERROR( "__executeFunction: unable to invoke %s, it isn't a member function of storage" )

          pfnsToUse = {}
          for pfn in pfnDict:
            pfnsToUse[pfn] = pfns[pfnDict[pfn]]

          res = fcn( pfnsToUse, **argsDict )

          if not res['OK']:
            errStr = "__executeFunction: Completely failed to perform %s." % method
            self.log.error( errStr, '%s for protocol %s: %s' % ( self.name, protocolName, res['Message'] ) )
            for pfn in pfnDict.values():
              if pfn not in failed:
                failed[pfn] = ''
              failed[pfn] = "%s %s" % ( failed[pfn], res['Message'] )
          else:
            for protocolPfn, pfn in pfnDict.items():
              if protocolPfn not in res['Value']['Successful']:
                if pfn not in failed:
                  failed[pfn] = ''
                if protocolPfn in res['Value']['Failed']:
                  failed[pfn] = "%s %s" % ( failed[pfn], res['Value']['Failed'][protocolPfn] )
                else:
                  failed[pfn] = "%s %s" % ( failed[pfn], 'No error returned from plug-in' )
              else:
                successful[pfn] = res['Value']['Successful'][protocolPfn]
                if pfn in failed:
                  failed.pop( pfn )
                pfns.pop( pfn )

    return S_OK( { 'Failed': failed, 'Successful': successful } )

  def __generatePfnDict( self, pfns, storage ):
    """ whatever, it creates PFN dict  """
    pfnDict = {}
    failed = {}
    for pfn in pfns:
      res = pfnparse( pfn )
      if not res['OK']:
        errStr = "__generatePfnDict: Failed to parse supplied PFN."
        self.log.error( errStr, "%s: %s" % ( pfn, res['Message'] ) )
        if pfn not in failed:
          failed[pfn] = ''
        failed[pfn] = "%s %s" % ( failed[pfn], errStr )
      else:
        res = storage.getProtocolPfn( res['Value'], True )
        if not res['OK']:
          errStr = "__generatePfnDict %s." % res['Message']
          self.log.error( errStr, 'for %s' % ( pfn ) )
          if pfn not in failed:
            failed[pfn] = ''
          failed[pfn] = "%s %s" % ( failed[pfn], errStr )
        else:
          pfnDict[res['Value']] = pfn
    res = S_OK( pfnDict )
    res['Failed'] = failed
    return res
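
A hedged usage sketch of the class above; the SE name and SRM URL are placeholders, not real resources, and the flow follows the S_OK/S_ERROR and singleFile conventions shown in the methods:

se = StorageElement( 'SOME-disk' )  # placeholder SE name
res = se.getStatus()
if res['OK'] and res['Value']['Read']:
  # singleFile = True unwraps the Successful/Failed dictionaries for one PFN
  res = se.getFileSize( 'srm://host.example.org/some/path/file', singleFile = True )
  if res['OK']:
    gLogger.notice( 'File size in bytes: %s' % res['Value'] )
  else:
    gLogger.error( 'getFileSize failed:', res['Message'] )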
Example No. 34
class StageRequestAgent( AgentModule ):

  def initialize( self ):
    self.replicaManager = ReplicaManager()
    #self.stagerClient = StorageManagerClient()
    self.dataIntegrityClient = DataIntegrityClient()
    self.storageDB = StorageManagementDB()
    # pin lifetime = 1 day
    self.pinLifetime = self.am_getOption( 'PinLifetime', THROTTLING_TIME )
    # Resources helper
    self.resources = Resources()

    # This sets the Default Proxy to used as that defined under
    # /Operations/Shifter/DataManager
    # the shifterProxy option in the Configuration can be used to change this default.
    self.am_setOption( 'shifterProxy', 'DataManager' )

    return S_OK()

  def execute( self ):

    # Get the current submitted stage space and the amount of pinned space for each storage element
    res = self.getStorageUsage()
    if not res['OK']:
      return res

    return self.submitStageRequests()

  def getStorageUsage( self ):
    """ Fill the current Status of the SE Caches from the DB
    """
    self.storageElementCache = {}

    res = self.storageDB.getSubmittedStagePins()
    if not res['OK']:
      gLogger.fatal( "StageRequest.getStorageUsage: Failed to obtain submitted requests from StorageManagementDB.", res['Message'] )
      return res
    self.storageElementUsage = res['Value']
    if self.storageElementUsage:
      gLogger.info( "StageRequest.getStorageUsage: Active stage/pin requests found at the following sites:" )
      for storageElement in sortList( self.storageElementUsage.keys() ):
        seDict = self.storageElementUsage[storageElement]
        # Convert to GB for printout
        seDict['TotalSize'] = seDict['TotalSize'] / ( 1000 * 1000 * 1000.0 )
        gLogger.info( "StageRequest.getStorageUsage: %s: %s replicas with a size of %.3f GB." %
                      ( storageElement.ljust( 15 ), str( seDict['Replicas'] ).rjust( 6 ), seDict['TotalSize'] ) )
    if not self.storageElementUsage:
      gLogger.info( "StageRequest.getStorageUsage: No active stage/pin requests found." )

    return S_OK()


  def submitStageRequests( self ):
    """ This manages the following transitions of the Replicas
        * Waiting -> Offline (if the file is not found Cached)
        * Waiting -> StageSubmitted (if the file is found Cached)
        * Offline -> StageSubmitted (if there are no more Waiting replicas)
    """
    # Retry Replicas that have not been Staged in a previous attempt 
    res = self._getMissingReplicas()
    if not res['OK']:
      gLogger.fatal( "StageRequest.submitStageRequests: Failed to get replicas from StorageManagementDB.", res['Message'] )
      return res
    seReplicas = res['Value']['SEReplicas']
    allReplicaInfo = res['Value']['AllReplicaInfo']

    if seReplicas:
      gLogger.info( "StageRequest.submitStageRequests: Completing partially Staged Tasks" )
    for storageElement, seReplicaIDs in seReplicas.items():
      gLogger.debug( 'Staging at %s:' % storageElement, seReplicaIDs )
      self._issuePrestageRequests( storageElement, seReplicaIDs, allReplicaInfo )

    # Check Waiting Replicas and select those found Online and all other Replicas from the same Tasks
    res = self._getOnlineReplicas()
    if not res['OK']:
      gLogger.fatal( "StageRequest.submitStageRequests: Failed to get replicas from StorageManagementDB.", res['Message'] )
      return res
    seReplicas = res['Value']['SEReplicas']
    allReplicaInfo = res['Value']['AllReplicaInfo']

    # Check Offline Replicas that fit in the Cache and all other Replicas from the same Tasks
    res = self._getOfflineReplicas()

    if not res['OK']:
      gLogger.fatal( "StageRequest.submitStageRequests: Failed to get replicas from StorageManagementDB.", res['Message'] )
      return res

    # Merge info from both results
    for storageElement, seReplicaIDs in res['Value']['SEReplicas'].items():
      if storageElement not in seReplicas:
        seReplicas[storageElement] = seReplicaIDs
      else:
        for replicaID in seReplicaIDs:
          if replicaID not in seReplicas[storageElement]:
            seReplicas[storageElement].append( replicaID )
    allReplicaInfo.update( res['Value']['AllReplicaInfo'] )

    gLogger.info( "StageRequest.submitStageRequests: Obtained %s replicas for staging." % len( allReplicaInfo ) )
    for storageElement, seReplicaIDs in seReplicas.items():
      gLogger.debug( 'Staging at %s:' % storageElement, seReplicaIDs )
      self._issuePrestageRequests( storageElement, seReplicaIDs, allReplicaInfo )
    return S_OK()

  def _getMissingReplicas( self ):
    """ This recovers Replicas that were not Staged on a previous attempt (the stage request failed or timed out),
        while other Replicas of the same task are already Staged. If left behind they can produce a deadlock.
        All SEs are considered, even if their Cache is full
    """
    # Get Replicas that are in Staged/StageSubmitted 
    gLogger.info( 'StageRequest._getMissingReplicas: Checking Staged Replicas' )

    res = self.__getStagedReplicas()
    if not res['OK']:
      gLogger.fatal( "StageRequest._getMissingReplicas: Failed to get replicas from StorageManagementDB.", res['Message'] )
      return res
    seReplicas = {}

    allReplicaInfo = res['Value']['AllReplicaInfo']
    replicasToStage = []
    for storageElement, seReplicaIDs in res['Value']['SEReplicas'].items():
      # Consider all SEs
      replicasToStage.extend( seReplicaIDs )

    # Get Replicas from the same Tasks as those selected
    res = self.__addAssociatedReplicas( replicasToStage, seReplicas, allReplicaInfo )
    if not res['OK']:
      gLogger.fatal( "StageRequest._getMissingReplicas: Failed to get associated Replicas.", res['Message'] )

    return res

  def _getOnlineReplicas( self ):
    """ This manages the transition
        * Waiting -> Offline (if the file is not found Cached)
        and returns the list of Cached Replicas for which the pin time has to be extended
        SEs for which the cache is currently full are not considered
    """
    # Get all Replicas in Waiting Status associated to Staging Tasks
    gLogger.verbose( 'StageRequest._getOnlineReplicas: Checking Online Replicas to be handled' )

    res = self.__getWaitingReplicas()
    if not res['OK']:
      gLogger.fatal( "StageRequest._getOnlineReplicas: Failed to get replicas from StorageManagementDB.", res['Message'] )
      return res
    seReplicas = {}
    allReplicaInfo = res['Value']['AllReplicaInfo']
    if not allReplicaInfo:
      gLogger.info( "StageRequest._getOnlineReplicas: There were no Waiting replicas found" )
      return res
    gLogger.info( "StageRequest._getOnlineReplicas: Obtained %s replicas Waiting for staging." % len( allReplicaInfo ) )
    replicasToStage = []
    for storageElement, seReplicaIDs in res['Value']['SEReplicas'].items():
      if self.__usage( storageElement ) >= self.__cache( storageElement ):
        gLogger.info( 'StageRequest._getOnlineReplicas: Skipping %s, current usage above limit ( %s GB )' % ( storageElement, self.__cache( storageElement ) ) )
        # Do not consider those SE that have the Cache full
        continue
      # Check if the Replica Metadata is OK and find out if they are Online or Offline
      res = self.__checkIntegrity( storageElement, seReplicaIDs, allReplicaInfo )
      if not res['OK']:
        gLogger.error( 'StageRequest._getOnlineReplicas: Failed to check Replica Metadata', '(%s): %s' % ( storageElement, res['Message'] ) )
      else:
        # keep only Online Replicas
        seReplicas[storageElement] = res['Value']['Online']
        replicasToStage.extend( res['Value']['Online'] )

    # Get Replicas from the same Tasks as those selected
    res = self.__addAssociatedReplicas( replicasToStage, seReplicas, allReplicaInfo )
    if not res['OK']:
      gLogger.fatal( "StageRequest._getOnlineReplicas: Failed to get associated Replicas.", res['Message'] )

    return res

  def _getOfflineReplicas( self ):
    """ This checks Replicas in Offline status
        and returns the list of Replicas to be Staged
        SEs for which the cache is currently full are not considered
    """
    # Get all Replicas in Waiting Status associated to Staging Tasks
    gLogger.verbose( 'StageRequest._getOfflineReplicas: Checking Offline Replicas to be handled' )

    res = self.__getOfflineReplicas()
    if not res['OK']:
      gLogger.fatal( "StageRequest._getOfflineReplicas: Failed to get replicas from StorageManagementDB.", res['Message'] )
      return res
    seReplicas = {}
    allReplicaInfo = res['Value']['AllReplicaInfo']
    if not allReplicaInfo:
      gLogger.info( "StageRequest._getOfflineReplicas: There were no Offline replicas found" )
      return res
    gLogger.info( "StageRequest._getOfflineReplicas: Obtained %s replicas Offline for staging." % len( allReplicaInfo ) )
    replicasToStage = []

    for storageElement, seReplicaIDs in res['Value']['SEReplicas'].items():
      if self.__usage( storageElement ) >= self.__cache( storageElement ):
        gLogger.info( 'StageRequest._getOfflineReplicas: Skipping %s, current usage above limit ( %s GB )' % ( storageElement, self.__cache( storageElement ) ) )
        # Do not consider those SE that have the Cache full
        continue
      seReplicas[storageElement] = []
      for replicaID in sorted( seReplicaIDs ):
        seReplicas[storageElement].append( replicaID )
        replicasToStage.append( replicaID )
        self.__add( storageElement, allReplicaInfo[replicaID]['Size'] )
        if self.__usage( storageElement ) >= self.__cache( storageElement ):
          # Stop adding Replicas when the cache is full
          break

    # Get Replicas from the same Tasks as those selected
    res = self.__addAssociatedReplicas( replicasToStage, seReplicas, allReplicaInfo )
    if not res['OK']:
      gLogger.fatal( "StageRequest._getOfflineReplicas: Failed to get associated Replicas.", res['Message'] )

    return res

  def __usage( self, storageElement ):
    """ Retrieve current usage of SE
    """
    if storageElement not in self.storageElementUsage:
      self.storageElementUsage[storageElement] = {'TotalSize': 0.}
    return self.storageElementUsage[storageElement]['TotalSize']

  def __cache( self, storageElement ):
    """ Retrieve cache size for SE
    """
    if storageElement not in self.storageElementCache:
      diskCache = self.resources.getStorageElementValue( storageElement, 'DiskCacheTB', 1. )
      self.storageElementCache[storageElement] = diskCache * 1000. / THROTTLING_STEPS
    return self.storageElementCache[storageElement]

  def __add( self, storageElement, size ):
    """ Add size (in bytes) to current usage of storageElement (in GB)
    """
    if storageElement not in self.storageElementUsage:
      self.storageElementUsage[storageElement] = {'TotalSize': 0.}
    size = size / ( 1000 * 1000 * 1000.0 )
    self.storageElementUsage[storageElement]['TotalSize'] += size
    return size

  def _issuePrestageRequests( self, storageElement, seReplicaIDs, allReplicaInfo ):
    """ Make the request to the SE and update the DB
    """
    pfnRepIDs = {}
    for replicaID in seReplicaIDs:
      pfn = allReplicaInfo[replicaID]['PFN']
      pfnRepIDs[pfn] = replicaID

    # Now issue the prestage requests for the remaining replicas
    stageRequestMetadata = {}
    updatedPfnIDs = []
    if pfnRepIDs:
      gLogger.info( "StageRequest._issuePrestageRequests: Submitting %s stage requests for %s." % ( len( pfnRepIDs ), storageElement ) )
      res = self.replicaManager.prestageStorageFile( pfnRepIDs.keys(), storageElement, lifetime = self.pinLifetime )
      gLogger.debug( "StageRequest._issuePrestageRequests: replicaManager.prestageStorageFile: res=", res )
      #Daniela: fishy result from ReplicaManager!!! Should NOT return OK
      #res= {'OK': True, 'Value': {'Successful': {}, 'Failed': {'srm://srm-lhcb.cern.ch/castor/cern.ch/grid/lhcb/data/2010/RAW/EXPRESS/LHCb/COLLISION10/71476/071476_0000000241.raw': ' SRM2Storage.__gfal_exec: Failed to perform gfal_prestage.[SE][BringOnline][SRM_INVALID_REQUEST] httpg://srm-lhcb.cern.ch:8443/srm/managerv2: User not able to access specified space token\n'}}}
      #res= {'OK': True, 'Value': {'Successful': {'srm://gridka-dCache.fzk.de/pnfs/gridka.de/lhcb/data/2009/RAW/FULL/LHCb/COLLISION09/63495/063495_0000000001.raw': '-2083846379'}, 'Failed': {}}}

      if not res['OK']:
        gLogger.error( "StageRequest._issuePrestageRequests: Completely failed to submit stage requests for replicas.", res['Message'] )
      else:
        for pfn, requestID in res['Value']['Successful'].items():
          if requestID not in stageRequestMetadata:
            stageRequestMetadata[requestID] = []
          stageRequestMetadata[requestID].append( pfnRepIDs[pfn] )
          updatedPfnIDs.append( pfnRepIDs[pfn] )
    if stageRequestMetadata:
      gLogger.info( "StageRequest._issuePrestageRequests: %s stage request metadata to be updated." % len( stageRequestMetadata ) )
      res = self.storageDB.insertStageRequest( stageRequestMetadata, self.pinLifetime )
      if not res['OK']:
        gLogger.error( "StageRequest._issuePrestageRequests: Failed to insert stage request metadata.", res['Message'] )
        return res
      res = self.storageDB.updateReplicaStatus( updatedPfnIDs, 'StageSubmitted' )
      if not res['OK']:
        gLogger.error( "StageRequest._issuePrestageRequests: Failed to insert replica status.", res['Message'] )
    return S_OK()

  def __sortBySE( self, replicaDict ):

    seReplicas = {}
    replicaIDs = {}
    for replicaID, info in replicaDict.items():
      lfn = info['LFN']
      storageElement = info['SE']
      size = info['Size']
      pfn = info['PFN']
      replicaIDs[replicaID] = {'LFN':lfn, 'PFN':pfn, 'Size':size, 'StorageElement':storageElement}
      if storageElement not in seReplicas:
        seReplicas[storageElement] = []
      seReplicas[storageElement].append( replicaID )
    return S_OK( {'SEReplicas':seReplicas, 'AllReplicaInfo':replicaIDs} )

  def __getStagedReplicas( self ):
    """ This obtains the Staged replicas from the Replicas table and for each LFN the requested storage element """
    # First obtain the Staged replicas from the Replicas table
    res = self.storageDB.getStagedReplicas()
    if not res['OK']:
      gLogger.error( "StageRequest.__getStagedReplicas: Failed to get replicas with Staged status.", res['Message'] )
      return res
    if not res['Value']:
      gLogger.debug( "StageRequest.__getStagedReplicas: No Staged replicas found to process." )
    else:
      gLogger.debug( "StageRequest.__getStagedReplicas: Obtained %s Staged replica(s) to process." % len( res['Value'] ) )

    return self.__sortBySE( res['Value'] )

  def __getWaitingReplicas( self ):
    """ This obtains the Waiting replicas from the Replicas table and for each LFN the requested storage element """
    # First obtain the Waiting replicas from the Replicas table
    res = self.storageDB.getWaitingReplicas()
    if not res['OK']:
      gLogger.error( "StageRequest.__getWaitingReplicas: Failed to get replicas with Waiting status.", res['Message'] )
      return res
    if not res['Value']:
      gLogger.debug( "StageRequest.__getWaitingReplicas: No Waiting replicas found to process." )
    else:
      gLogger.debug( "StageRequest.__getWaitingReplicas: Obtained %s Waiting replicas(s) to process." % len( res['Value'] ) )

    return self.__sortBySE( res['Value'] )

  def __getOfflineReplicas( self ):
    """ This obtains the Offline replicas from the Replicas table and for each LFN the requested storage element """
    # First obtain the Offline replicas from the Replicas table
    res = self.storageDB.getOfflineReplicas()
    if not res['OK']:
      gLogger.error( "StageRequest.__getOfflineReplicas: Failed to get replicas with Offline status.", res['Message'] )
      return res
    if not res['Value']:
      gLogger.debug( "StageRequest.__getOfflineReplicas: No Offline replicas found to process." )
    else:
      gLogger.debug( "StageRequest.__getOfflineReplicas: Obtained %s Offline replica(s) to process." % len( res['Value'] ) )

    return self.__sortBySE( res['Value'] )

  def __addAssociatedReplicas( self, replicasToStage, seReplicas, allReplicaInfo ):
    """ Retrieve the list of Replicas that belong to the same Tasks as the provided list
    """
    res = self.storageDB.getAssociatedReplicas( replicasToStage )
    if not res['OK']:
      gLogger.fatal( "StageRequest.__addAssociatedReplicas: Failed to get associated Replicas.", res['Message'] )
      return res
    addReplicas = {'Offline': {}, 'Waiting': {}}
    replicaIDs = {}
    for replicaID, info in res['Value'].items():
      lfn = info['LFN']
      storageElement = info['SE']
      size = info['Size']
      pfn = info['PFN']
      status = info['Status']
      if status not in ['Waiting', 'Offline']:
        continue
      if storageElement not in addReplicas[status]:
        addReplicas[status][storageElement] = []
      replicaIDs[replicaID] = {'LFN':lfn, 'PFN':pfn, 'Size':size, 'StorageElement':storageElement }
      addReplicas[status][storageElement].append( replicaID )

    waitingReplicas = addReplicas['Waiting']
    offlineReplicas = addReplicas['Offline']
    newReplicaInfo = replicaIDs
    allReplicaInfo.update( newReplicaInfo )

    # First handle Waiting Replicas for which metadata is to be checked
    for storageElement, seReplicaIDs in waitingReplicas.items():
      for replicaID in list( seReplicaIDs ):
        if replicaID in replicasToStage:
          seReplicaIDs.remove( replicaID )
      res = self.__checkIntegrity( storageElement, seReplicaIDs, allReplicaInfo )
      if not res['OK']:
        gLogger.error( 'StageRequest.__addAssociatedReplicas: Failed to check Replica Metadata', '(%s): %s' % ( storageElement, res['Message'] ) )
      else:
        # keep all Replicas (Online and Offline)
        if storageElement not in seReplicas:
          seReplicas[storageElement] = []
        seReplicas[storageElement].extend( res['Value']['Online'] )
        replicasToStage.extend( res['Value']['Online'] )
        seReplicas[storageElement].extend( res['Value']['Offline'] )
        replicasToStage.extend( res['Value']['Offline'] )

    # Then handle Offline Replicas for which metadata is already checked
    for storageElement, seReplicaIDs in offlineReplicas.items():
      if storageElement not in seReplicas:
        seReplicas[storageElement] = []
      for replicaID in sorted( seReplicaIDs ):
        if replicaID in replicasToStage:
          seReplicaIDs.remove( replicaID )
      seReplicas[storageElement].extend( seReplicaIDs )
      replicasToStage.extend( seReplicaIDs )

    for replicaID in allReplicaInfo.keys():
      if replicaID not in replicasToStage:
        del allReplicaInfo[replicaID]

    totalSize = 0
    for storageElement in sorted( seReplicas.keys() ):
      replicaIDs = seReplicas[storageElement]
      size = 0
      for replicaID in replicaIDs:
        size += self.__add( storageElement, allReplicaInfo[replicaID]['Size'] )

      gLogger.info( 'StageRequest.__addAssociatedReplicas:  Considering %s GB to be staged at %s' % ( size, storageElement ) )
      totalSize += size

    gLogger.info( "StageRequest.__addAssociatedReplicas: Obtained %s GB for staging." % totalSize )

    return S_OK( {'SEReplicas':seReplicas, 'AllReplicaInfo':allReplicaInfo} )

  def __checkIntegrity( self, storageElement, seReplicaIDs, allReplicaInfo ):
    """ Check the integrity of the files to ensure they are available
        Updates status of Offline Replicas for a later pass
        Return list of Online replicas to be Stage
    """
    if not seReplicaIDs:
      return S_OK( {'Online': [], 'Offline': []} )

    pfnRepIDs = {}
    for replicaID in seReplicaIDs:
      pfn = allReplicaInfo[replicaID]['PFN']
      pfnRepIDs[pfn] = replicaID

    gLogger.info( "StageRequest.__checkIntegrity: Checking the integrity of %s replicas at %s." % ( len( pfnRepIDs ), storageElement ) )
    res = self.replicaManager.getStorageFileMetadata( pfnRepIDs.keys(), storageElement )
    if not res['OK']:
      gLogger.error( "StageRequest.__checkIntegrity: Completely failed to obtain metadata for replicas.", res['Message'] )
      return res

    terminalReplicaIDs = {}
    onlineReplicaIDs = []
    offlineReplicaIDs = []
    for pfn, metadata in res['Value']['Successful'].items():

      if metadata['Size'] != allReplicaInfo[pfnRepIDs[pfn]]['Size']:
        gLogger.error( "StageRequest.__checkIntegrity: PFN StorageElement size does not match FileCatalog", pfn )
        terminalReplicaIDs[pfnRepIDs[pfn]] = 'PFN StorageElement size does not match FileCatalog'
        pfnRepIDs.pop( pfn )
      elif metadata['Lost']:
        gLogger.error( "StageRequest.__checkIntegrity: PFN has been Lost by the StorageElement", pfn )
        terminalReplicaIDs[pfnRepIDs[pfn]] = 'PFN has been Lost by the StorageElement'
        pfnRepIDs.pop( pfn )
      elif metadata['Unavailable']:
        gLogger.error( "StageRequest.__checkIntegrity: PFN is declared Unavailable by the StorageElement", pfn )
        terminalReplicaIDs[pfnRepIDs[pfn]] = 'PFN is declared Unavailable by the StorageElement'
        pfnRepIDs.pop( pfn )
      else:
        if metadata['Cached']:
          gLogger.verbose( "StageRequest.__checkIntegrity: Cache hit for file." )
          onlineReplicaIDs.append( pfnRepIDs[pfn] )
        else:
          offlineReplicaIDs.append( pfnRepIDs[pfn] )

    for pfn, reason in res['Value']['Failed'].items():
      if re.search( 'File does not exist', reason ):
        gLogger.error( "StageRequest.__checkIntegrity: PFN does not exist in the StorageElement", pfn )
        terminalReplicaIDs[pfnRepIDs[pfn]] = 'PFN does not exist in the StorageElement'
      pfnRepIDs.pop( pfn )

    # Update the states of the replicas in the database  # TODO: send status to the integrity DB
    if terminalReplicaIDs:
      gLogger.info( "StageRequest.__checkIntegrity: %s replicas are terminally failed." % len( terminalReplicaIDs ) )
      res = self.storageDB.updateReplicaFailure( terminalReplicaIDs )
      if not res['OK']:
        gLogger.error( "StageRequest.__checkIntegrity: Failed to update replica failures.", res['Message'] )
    if onlineReplicaIDs:
      gLogger.info( "StageRequest.__checkIntegrity: %s replicas found Online." % len( onlineReplicaIDs ) )
    if offlineReplicaIDs:
      gLogger.info( "StageRequest.__checkIntegrity: %s replicas found Offline." % len( offlineReplicaIDs ) )
      res = self.storageDB.updateReplicaStatus( offlineReplicaIDs, 'Offline' )
    return S_OK( {'Online': onlineReplicaIDs, 'Offline': offlineReplicaIDs} )

  def __reportProblematicFiles( self, lfns, reason ):
    # Reporting is currently disabled: the early return below short-circuits
    # the method, so everything after it is unreachable (kept for reference).
    return S_OK()
    res = self.dataIntegrityClient.setFileProblematic( lfns, reason, self.name )
    if not res['OK']:
      gLogger.error( "RequestPreparation.__reportProblematicFiles: Failed to report missing files.", res['Message'] )
      return res
    if res['Value']['Successful']:
      gLogger.info( "RequestPreparation.__reportProblematicFiles: Successfully reported %s missing files." % len( res['Value']['Successful'] ) )
    if res['Value']['Failed']:
      gLogger.info( "RequestPreparation.__reportProblematicFiles: Failed to report %s problematic files." % len( res['Value']['Failed'] ) )
    return res
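
To make the cache throttling arithmetic in __usage, __cache and __add concrete, a small worked sketch; the THROTTLING_STEPS value is assumed here (the real one comes from the agent module):

THROTTLING_STEPS = 10                                   # assumed value, for illustration only
diskCacheTB = 1.                                        # CS option DiskCacheTB, defaulting to 1 TB
cacheLimitGB = diskCacheTB * 1000. / THROTTLING_STEPS   # -> 100.0 GB allowed per cycle
fileSizeBytes = 2 * 1000 * 1000 * 1000                  # a 2 GB replica
usageGB = fileSizeBytes / ( 1000 * 1000 * 1000.0 )      # __add converts bytes to GB the same way
print usageGB < cacheLimitGB                            # True: this replica still fits the cache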
Example No. 35
    def __getGridJobOutput(self, pilotReference):
        """ Get the pilot job standard output and standard error files for the Grid
        job reference
    """

        result = pilotDB.getPilotInfo(pilotReference)
        if not result['OK'] or not result['Value']:
            return S_ERROR('Failed to get info for pilot ' + pilotReference)

        pilotDict = result['Value'][pilotReference]
        owner = pilotDict['OwnerDN']
        group = pilotDict['OwnerGroup']

        # FIXME: What if the OutputSandBox is not StdOut and StdErr, what do we do with other files?
        result = pilotDB.getPilotOutput(pilotReference)
        if result['OK']:
            stdout = result['Value']['StdOut']
            error = result['Value']['StdErr']
            if stdout or error:
                resultDict = {}
                resultDict['StdOut'] = stdout
                resultDict['StdErr'] = error
                resultDict['OwnerDN'] = owner
                resultDict['OwnerGroup'] = group
                resultDict['FileList'] = []
                return S_OK(resultDict)
            else:
                gLogger.warn('Empty pilot output found for %s' %
                             pilotReference)

        gridType = pilotDict['GridType']
        if gridType in ["LCG", "gLite", "CREAM"]:
            group = getGroupOption(group, 'VOMSRole', group)
            ret = gProxyManager.getPilotProxyFromVOMSGroup(owner, group)
            if not ret['OK']:
                gLogger.error(ret['Message'])
                gLogger.error('Could not get proxy:',
                              'User "%s", Group "%s"' % (owner, group))
                return S_ERROR("Failed to get the pilot's owner proxy")
            proxy = ret['Value']

            pilotStamp = pilotDict['PilotStamp']
            result = getPilotOutput(proxy, gridType, pilotReference,
                                    pilotStamp)
            if not result['OK']:
                return S_ERROR('Failed to get pilot output: ' +
                               result['Message'])
            # FIXME: What if the OutputSandBox is not StdOut and StdErr, what do we do with other files?
            stdout = result['StdOut']
            error = result['StdErr']
            fileList = result['FileList']
            if stdout:
                result = pilotDB.storePilotOutput(pilotReference, stdout,
                                                  error)
                if not result['OK']:
                    gLogger.error('Failed to store pilot output:',
                                  result['Message'])

            resultDict = {}
            resultDict['StdOut'] = stdout
            resultDict['StdErr'] = error
            resultDict['OwnerDN'] = owner
            resultDict['OwnerGroup'] = group
            resultDict['FileList'] = fileList
            return S_OK(resultDict)
        else:
            # Instantiate the appropriate CE
            ceFactory = ComputingElementFactory()
            result = Resources(group=group).getQueueDescription(
                pilotDict['Queue'])
            if not result['OK']:
                return result
            queueDict = result['Value']
            result = ceFactory.getCE(gridType, pilotDict['DestinationSite'],
                                     queueDict)
            if not result['OK']:
                return result
            ce = result['Value']
            pilotStamp = pilotDict['PilotStamp']
            pRef = pilotReference
            if pilotStamp:
                pRef = pRef + ':::' + pilotStamp
            result = ce.getJobOutput(pRef)
            if not result['OK']:
                return result
            stdout, error = result['Value']
            if stdout:
                result = pilotDB.storePilotOutput(pilotReference, stdout,
                                                  error)
                if not result['OK']:
                    gLogger.error('Failed to store pilot output:',
                                  result['Message'])

            resultDict = {}
            resultDict['StdOut'] = stdout
            resultDict['StdErr'] = error
            resultDict['OwnerDN'] = owner
            resultDict['OwnerGroup'] = group
            resultDict['FileList'] = []
            return S_OK(resultDict)
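
A minimal usage sketch (hedged) for the method above, as it might be called from
within its owning service class. The pilot reference is a hypothetical placeholder;
the keys read from the result are exactly the ones built into resultDict:

result = self.__getGridJobOutput('https://ce.example.org:9000/0123456')
if result['OK']:
    output = result['Value']
    print output['StdOut']    # pilot standard output
    print output['StdErr']    # pilot standard error
    print output['FileList']  # extra output sandbox files, if any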
Example No. 36
import sys, os
import DIRAC
from DIRAC import gLogger, gConfig
from DIRAC.Core.Base import Script
from DIRAC.Resources.Catalog.FileCatalogFactory import FileCatalogFactory
from DIRAC.Core.Security.ProxyInfo import getVOfromProxyGroup
from DIRAC.ConfigurationSystem.Client.Helpers.Resources import Resources

if __name__ == "__main__":

    result = getVOfromProxyGroup()
    if not result['OK']:
        gLogger.notice('Error:', result['Message'])
        DIRAC.exit(1)
    vo = result['Value']
    resources = Resources(vo=vo)

    catalogs = []
    result = gConfig.getSections("/LocalSite/Catalogs")
    if result['OK']:
        catalogs = result['Value']

    userCatalogs = []
    for switch in Script.getUnprocessedSwitches():
        if switch[0].lower() == "f" or switch[0].lower() == "file-catalog":
            userCatalogs.append(switch[1])
    if userCatalogs:
        catalogs = userCatalogs

    from DIRAC.Resources.Catalog.FileCatalog import FileCatalog
    from DIRAC.DataManagementSystem.Client.FileCatalogClientCLI import FileCatalogClientCLI
    if catalogs:
Example No. 37
class Synchronizer(object):
    '''
  Every time there is a successful write on the CS, Synchronizer().sync() is 
  executed. It updates the database with the values on the CS.
  '''
    def __init__(self):
        """
    Constructor.
    
    examples:
      >>> s = Synchronizer()
    """

        self.log = gLogger.getSubLogger(self.__class__.__name__)
        self.operations = Operations()
        self.resources = Resources()

        self.rStatus = ResourceStatusClient.ResourceStatusClient()
        self.rssConfig = RssConfiguration()

        self.diracAdmin = DiracAdmin()

    def sync(self, _eventName, _params):
        '''
    Main synchronizer method. It synchronizes the three types of elements: Sites,
    Resources and Nodes. Each _syncX method returns a dictionary with the additions
    and deletions.
    
    examples:
      >>> s.sync( None, None )
          S_OK()
    
    :Parameters:
      **_eventName** - any
        this parameter is ignored, but needed by caller function.
      **_params** - any
        this parameter is ignored, but needed by caller function.
    
    :return: S_OK
    '''

        defSyncResult = {'added': [], 'deleted': []}

        # Sites
        syncSites = self._syncSites()
        if not syncSites['OK']:
            self.log.error(syncSites['Message'])
        syncSites = (syncSites['OK'] and syncSites['Value']) or defSyncResult

        # Resources
        syncResources = self._syncResources()
        if not syncResources['OK']:
            self.log.error(syncResources['Message'])
        syncResources = (syncResources['OK']
                         and syncResources['Value']) or defSyncResult

        # Nodes
        syncNodes = self._syncNodes()
        if not syncNodes['OK']:
            self.log.error(syncNodes['Message'])
        syncNodes = (syncNodes['OK'] and syncNodes['Value']) or defSyncResult

        # Notify via email to :
        self.notify(syncSites, syncResources, syncNodes)

        return S_OK()

    def notify(self, syncSites, syncResources, syncNodes):
        """
    Method that sends an email notification with the result of the synchronization.
    The email is sent to the Operations( EMail/Production ) address.
    
    examples:
      >>> s.notify( {}, {}, {} )
      >>> s.notify( { 'Site' : { 'added' : [], 'deleted' : [ 'RubbishSite' ] } }, {}, {} )
      >>> s.notify( { 'Site' : { 'added' : [], 'deleted' : [ 'RubbishSite' ] } }, 
                    { 'Computing' : { 'added' : [ 'newCE01', 'newCE02' ], 'deleted' : [] } }, {} )
    
    :Parameters:
      **syncSites** - dict() ( keys: added, deleted )
        dictionary with the sites added and deleted from the DB
      **syncResources** - dict() ( keys: added, deleted )
        dictionary with the resources added and deleted from the DB
      **syncNodes** - dict() ( keys: added, deleted )
        dictionary with the nodes added and deleted from the DB
      
    :return: S_OK
    """

        # Human readable summary
        msgBody = self.getBody(syncSites, syncResources, syncNodes)
        self.log.info(msgBody)

        # Email addresses
        toAddress = self.operations.getValue('EMail/Production', '')
        fromAddress = self.rssConfig.getConfigFromAddress('')

        if toAddress and fromAddress and msgBody:

            # Subject of the email
            setup = gConfig.getValue('DIRAC/Setup')
            subject = '[RSS](%s) CS Synchronization' % setup

            self.diracAdmin.sendMail(toAddress,
                                     subject,
                                     msgBody,
                                     fromAddress=fromAddress)

    def getBody(self, syncSites, syncResources, syncNodes):
        """
    Method that, given the outputs of the three synchronization methods, builds a
    human-readable string.
    
    examples:
      >>> s.getBody( {}, {}, {} )
          ''
      >>> s.getBody( { 'Site' : { 'added' : [], 'deleted' : [ 'RubbishSite' ] } }, {}, {} )
          '''
          SITES:
          Site:
            deleted:1
              RubbishSite
          '''
      >>> s.getBody( { 'Site' : { 'added' : [], 'deleted' : [ 'RubbishSite' ] } }, 
                     { 'Computing' : { 'added' : [ 'newCE01', 'newCE02' ], 'deleted' : [] } }, {} )
          '''
          SITES:
          Site:
            deleted:1
              RubbishSite
          RESOURCES:
          Computing:
            added:2
              newCE01
              newCE02    
          '''
          
    :Parameters:
      **syncSites** - dict() ( keys: added, deleted )
        dictionary with the sites added and deleted from the DB
      **syncResources** - dict() ( keys: added, deleted )
        dictionary with the resources added and deleted from the DB
      **syncNodes** - dict() ( keys: added, deleted )
        dictionary with the nodes added and deleted from the DB
      
    :return: str    
    """

        syncMsg = ''

        for element, syncResult in [('SITES', syncSites),
                                    ('RESOURCES', syncResources),
                                    ('NODES', syncNodes)]:

            elementsMsg = ''

            for elementType, elements in syncResult.items():

                elementMsg = ''
                if elements['added']:
                    elementMsg += '\n  %s added: %d \n' % (
                        elementType, len(elements['added']))
                    elementMsg += '    ' + '\n    '.join(elements['added'])
                if elements['deleted']:
                    elementMsg += '\n  %s deleted: %d \n' % (
                        elementType, len(elements['deleted']))
                    elementMsg += '    ' + '\n    '.join(elements['deleted'])

                if elementMsg:
                    elementsMsg += '\n\n%s:\n' % elementType
                    elementsMsg += elementMsg

            if elementsMsg:
                syncMsg += '\n\n%s:' % element + elementsMsg

        return syncMsg

    #.............................................................................
    # Sync methods: Site, Resource & Node

    def _syncSites(self):
        """
    Method that synchronizes sites ( using their canonical name: CERN.ch ) with
    elementType = 'Site'. It gets from the CS the eligible site names and then
    synchronizes them with the DB. If not in the DB, they are added; if in the DB
    but not in the CS, they are deleted.
    
    examples:
      >> s._syncSites()
         S_OK( { 'Site' : { 'added' : [], 'deleted' : [ 'RubbishSite' ] } } )
    
    :return: S_OK( { 'Site' : { 'added' : [], 'deleted' : [] }} ) | S_ERROR
    """

        # Get site names from the CS
        foundSites = self.resources.getEligibleSites()
        if not foundSites['OK']:
            return foundSites

        sites = {}

        # Synchronize with the DB
        resSync = self.__dbSync('Site', 'Site', foundSites['Value'])
        if not resSync['OK']:
            self.log.error('Error synchronizing Sites')
            self.log.error(resSync['Message'])
        else:
            sites = resSync['Value']

        return S_OK({'Site': sites})

    def _syncResources(self):
        """
    Method that synchronizes resources as defined by the RESOURCE_NODE_MAPPING dictionary
    keys. It makes one sync round per key ( elementType ). Gets from the CS the 
    eligible Resource/<elementType> names and then synchronizes them with the DB. 
    If not in the DB, they are added; if in the DB but not in the CS, they are deleted.
    
    examples:
      >>> s._syncResources() 
          S_OK( { 'Computing' : { 'added' : [ 'newCE01', 'newCE02' ], 'deleted' : [] },
                  'Storage'   : { 'added' : [], 'deleted' : [] },
                  ... } ) 
    
    :return: S_OK( { 'RESOURCE_NODE_MAPPINGKey1' : { 'added' : [], 'deleted' : [] }, ...} )
    """

        resources = {}

        # Iterate over the different elementTypes for Resource ( Computing, Storage... )
        for elementType in RESOURCE_NODE_MAPPING.keys():

            # Get Resource / <elementType> names from CS
            foundResources = self.resources.getEligibleResources(elementType)
            if not foundResources['OK']:
                self.log.error(foundResources['Message'])
                continue

            # Translate CS result into a list
            foundResources = foundResources['Value']

            # Synchronize with the DB
            resSync = self.__dbSync('Resource', elementType, foundResources)
            if not resSync['OK']:
                self.log.error('Error synchronizing %s %s' %
                               ('Resource', elementType))
                self.log.error(resSync['Message'])
            else:
                resources[elementType] = resSync['Value']

        return S_OK(resources)

    def _syncNodes(self):
        """
    Method that synchronizes nodes as defined by the RESOURCE_NODE_MAPPING dictionary
    values. It makes one sync round per value ( elementType ). Gets from the CS the 
    eligible Node/<elementType> names and then synchronizes them with the DB. 
    If not in the DB, they are added; if in the DB but not in the CS, they are deleted.
    
    examples:
      >>> s._syncNodes() 
          S_OK( { 'Queue' : { 'added' : [], 'deleted' : [] },
                  ... } ) 
    
    :return: S_OK( { 'RESOURCE_NODE_MAPPINGValue1' : { 'added' : [], 'deleted' : [] }, ...} )
    """

        nodes = {}

        # Iterate over the different elementTypes for Node ( Queue, AccessProtocol... )
        for elementType in RESOURCE_NODE_MAPPING.values():

            # Get Node / <elementType> names from CS
            foundNodes = self.resources.getEligibleNodes(elementType)
            if not foundNodes['OK']:
                self.log.error(foundNodes['Message'])
                continue

            # Translate CS result into a list : maps NodeName to SiteName<>NodeName to
            # avoid duplicates
            # Looong list comprehension, sorry !
            foundNodes = [
                '%s<>%s' % (key, item)
                for key, subDict in foundNodes['Value'].items()
                for subList in subDict.values() for item in subList
            ]

            # Synchronize with the DB
            resSync = self.__dbSync('Node', elementType, foundNodes)
            if not resSync['OK']:
                self.log.error('Error synchronizing %s %s' %
                               ('Node', elementType))
                self.log.error(resSync['Message'])
            else:
                nodes[elementType] = resSync['Value']

        return S_OK(nodes)

    #.............................................................................
    # DB sync actions

    def __dbSync(self, elementFamily, elementType, elementsCS):
        """
    Method synchronizing CS and DB. Compares <elementsCS> with <elementsDB>
    given the elementFamily and elementType ( e.g. Resource / Computing ).
    Elements missing from the DB are inserted; elements no longer in the CS are
    deleted from the DB. Note that the logs in the RSS DB are kept ( just in case ).
    
    :Parameters:
      **elementFamily** - str
        any of the valid element families : Site, Resource, Node
      **elementType** - str
        any of the valid element types for <elementFamily>
      **elementsCS** - list
        list with the elements for <elementFamily>/<elementType> found in the CS  
    
    :return: S_OK( { 'added' : [], 'deleted' : [] } ) | S_ERROR
    """

        # deleted, added default response
        syncRes = {
            'deleted': [],
            'added': [],
        }

        # Gets <elementFamily>/<elementType> elements from DB
        elementsDB = self.rStatus.selectStatusElement(
            elementFamily,
            'Status',
            elementType=elementType,
            meta={'columns': ['name']})
        if not elementsDB['OK']:
            return elementsDB
        elementsDB = [elementDB[0] for elementDB in elementsDB['Value']]

        # Elements in DB but not in CS -> to be deleted
        toBeDeleted = list(set(elementsDB).difference(set(elementsCS)))
        if toBeDeleted:
            resDelete = self.__dbDelete(elementFamily, elementType,
                                        toBeDeleted)
            if not resDelete['OK']:
                return resDelete
            else:
                syncRes['deleted'] = toBeDeleted

        # Elements in CS but not in DB -> to be added
        toBeAdded = list(set(elementsCS).difference(set(elementsDB)))
        if toBeAdded:
            resInsert = self.__dbInsert(elementFamily, elementType, toBeAdded)
            if not resInsert['OK']:
                return resInsert
            else:
                syncRes['added'] = toBeAdded

        return S_OK(syncRes)

    def __dbDelete(self, elementFamily, elementType, toBeDeleted):
        """
    Method that given the elementFamily and elementType, deletes all entries
    in the History and Status tables for the given elements in toBeDeleted ( all
    their status Types ).

    :Parameters:
      **elementFamily** - str
        any of the valid element families : Site, Resource, Node
      **elementType** - str
        any of the valid element types for <elementFamily>, just used for logging
        purposes.
      **toBeDeleted** - list
        list with the elements to be deleted  
    
    :return: S_OK | S_ERROR    
    """

        self.log.info('Deleting %s %s:' % (elementFamily, elementType))
        self.log.info(toBeDeleted)

        return self.rStatus._extermineStatusElement(elementFamily, toBeDeleted)

    def __dbInsert(self, elementFamily, elementType, toBeAdded):
        """
    Method that given the elementFamily and elementType, adds all elements in
    toBeAdded with their respective statusTypes, obtained from the CS. They 
    are synchronized with status 'Unknown' and reason 'Synchronized'.

    :Parameters:
      **elementFamily** - str
        any of the valid element families : Site, Resource, Node
      **elementType** - str
        any of the valid element types for <elementFamily>
      **toBeAdded** - list
        list with the elements to be added  
    
    :return: S_OK | S_ERROR    
    """

        self.log.info('Adding %s %s:' % (elementFamily, elementType))
        self.log.info(toBeAdded)

        statusTypes = self.rssConfig.getConfigStatusType(elementType)

        for element in toBeAdded:

            for statusType in statusTypes:

                resInsert = self.rStatus.addIfNotThereStatusElement(
                    elementFamily,
                    'Status',
                    name=element,
                    statusType=statusType,
                    status='Unknown',
                    elementType=elementType,
                    reason='Synchronized')

                if not resInsert['OK']:
                    return resInsert

        return S_OK()
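
A hedged, self-contained illustration of the set arithmetic applied by __dbSync
above, using toy element lists instead of real CS/DB queries (all names invented):

elementsCS = ['CERN.ch', 'CPPM.fr', 'NewSite.org']                # from the CS
elementsDB = ['CERN.ch', 'CPPM.fr', 'RubbishSite.xx']             # from the DB
toBeDeleted = list(set(elementsDB).difference(set(elementsCS)))   # ['RubbishSite.xx']
toBeAdded = list(set(elementsCS).difference(set(elementsDB)))     # ['NewSite.org']

# _syncNodes encodes each node as 'SiteName<>NodeName' before the same comparison,
# so identical node names at different sites do not collide:
nodeKey = '%s<>%s' % ('LCG.CERN.ch', 'ce130.cern.ch')             # 'LCG.CERN.ch<>ce130.cern.ch'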


#...............................................................................

#
#  def _syncUsers( self ):
#    '''
#      Sync Users: compares CS with DB and does the necessary modifications.
#    '''
#
#    gLogger.verbose( '-- Synchronizing users --')
#
#    usersCS = CSHelpers.getRegistryUsers()
#    if not usersCS[ 'OK' ]:
#      return usersCS
#    usersCS = usersCS[ 'Value' ]
#
#    gLogger.verbose( '%s users found in CS' % len( usersCS ) )
#
#    usersDB = self.rManagement.selectUserRegistryCache( meta = { 'columns' : [ 'login' ] } )
#    if not usersDB[ 'OK' ]:
#      return usersDB
#    usersDB = [ userDB[0] for userDB in usersDB[ 'Value' ] ]
#
#    # Users that are in DB but not in CS
#    toBeDeleted = list( set( usersDB ).difference( set( usersCS.keys() ) ) )
#    gLogger.verbose( '%s users to be deleted' % len( toBeDeleted ) )
#
#    # Delete users
#    # FIXME: probably it is not needed since there is a DatabaseCleanerAgent
#    for userLogin in toBeDeleted:
#
#      deleteQuery = self.rManagement.deleteUserRegistryCache( login = userLogin )
#
#      gLogger.verbose( '... %s' % userLogin )
#      if not deleteQuery[ 'OK' ]:
#        return deleteQuery
#
#    # AddOrModify Users
#    for userLogin, userDict in usersCS.items():
#
#      _name  = userDict[ 'DN' ].split( '=' )[ -1 ]
#      _email = userDict[ 'Email' ]
#
#      query = self.rManagement.addOrModifyUserRegistryCache( userLogin, _name, _email )
#      gLogger.verbose( '-> %s' % userLogin )
#      if not query[ 'OK' ]:
#        return query
#
#    return S_OK()

################################################################################
#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF
Example No. 38
    ceFlag = True
    return DIRAC.S_OK()


Script.registerSwitch("V:", "vo=",
                      "choose resources eligible for the given VO", setVO)
Script.registerSwitch("S", "se", "display storage element information",
                      setSEFlag)
Script.registerSwitch("C", "ce", "display computing element information",
                      setSEFlag)
Script.setUsageMessage('\n'.join([
    __doc__.split('\n')[1],
    'Usage:',
    '  %s [option|cfgfile] ...' % Script.scriptName,
]))
Script.parseCommandLine()

from DIRAC.ConfigurationSystem.Client.Helpers.Resources import Resources, getSites

resources = Resources(vo='biomed')

result = resources.getEligibleSites()
if not result['OK']:
    print "ERROR:", result['Message']
    DIRAC.exit(1)

siteList = [
    resources.getSiteFullName(site)['Value'] for site in result['Value']
]

print siteList
Example No. 39
class StorageFactory:
    def __init__(self, useProxy=False, vo=None):

        self.valid = True
        self.proxy = False
        self.proxy = useProxy
        self.resourceStatus = ResourceStatus()
        self.resourcesHelper = Resources(vo=vo)

    ###########################################################################################
    #
    # Below are public methods for obtaining storage objects
    #

    def getStorageName(self, initialName):
        return self._getConfigStorageName(initialName)

    def getStorage(self, parameterDict):
        """ This instantiates a single storage for the details provided and doesn't check the CS.
    """
        # The storage name must be supplied.
        if 'StorageName' in parameterDict:
            storageName = parameterDict['StorageName']
        else:
            errStr = "StorageFactory.getStorage: StorageName must be supplied"
            gLogger.error(errStr)
            return S_ERROR(errStr)

        # ProtocolName must be supplied otherwise nothing will work.
        if 'ProtocolName' in parameterDict:
            protocolName = parameterDict['ProtocolName']
        else:
            errStr = "StorageFactory.getStorage: ProtocolName must be supplied"
            gLogger.error(errStr)
            return S_ERROR(errStr)

        # The other options need not always be specified
        protocol = parameterDict.get('Protocol', '')
        port = parameterDict.get('Port', '')
        host = parameterDict.get('Host', '')
        path = parameterDict.get('Path', '')
        spaceToken = parameterDict.get('SpaceToken', '')
        wsPath = parameterDict.get('WSUrl', '')

        return self.__generateStorageObject(storageName, protocolName,
                                            protocol, path, host, port,
                                            spaceToken, wsPath)

    def getStorages(self, storageName, protocolList=[]):
        """ Get an instance of a Storage based on the DIRAC SE name based on the CS entries CS

        'storageName' is the DIRAC SE name i.e. 'CERN-RAW'
        'protocolList' is an optional list of protocols if a sub-set is desired i.e ['SRM2','SRM1']
    """
        self.remoteProtocols = []
        self.localProtocols = []
        self.name = ''
        self.options = {}
        self.protocolDetails = []
        self.storages = []

        # Get the name of the storage provided
        res = self._getConfigStorageName(storageName)
        if not res['OK']:
            self.valid = False
            return res
        storageName = res['Value']
        self.name = storageName

        # Get the options defined in the CS for this storage
        res = self._getConfigStorageOptions(storageName)
        if not res['OK']:
            self.valid = False
            return res
        self.options = res['Value']

        # Get the protocol specific details
        res = self._getConfigStorageProtocols(storageName)
        if not res['OK']:
            self.valid = False
            return res
        self.protocolDetails = res['Value']

        requestedLocalProtocols = []
        requestedRemoteProtocols = []
        requestedProtocolDetails = []
        turlProtocols = []
        # Generate the protocol specific plug-ins
        self.storages = []
        for protocolDict in self.protocolDetails:
            protocolName = protocolDict['ProtocolName']
            protocolRequested = True
            if protocolList:
                if protocolName not in protocolList:
                    protocolRequested = False
            if protocolRequested:
                protocol = protocolDict['Protocol']
                host = protocolDict['Host']
                path = protocolDict['Path']
                port = protocolDict['Port']
                spaceToken = protocolDict['SpaceToken']
                wsUrl = protocolDict['WSUrl']
                res = self.__generateStorageObject(storageName,
                                                   protocolName,
                                                   protocol,
                                                   path=path,
                                                   host=host,
                                                   port=port,
                                                   spaceToken=spaceToken,
                                                   wsUrl=wsUrl)
                if res['OK']:
                    self.storages.append(res['Value'])
                    if protocolName in self.localProtocols:
                        turlProtocols.append(protocol)
                        requestedLocalProtocols.append(protocolName)
                    if protocolName in self.remoteProtocols:
                        requestedRemoteProtocols.append(protocolName)
                    requestedProtocolDetails.append(protocolDict)
                else:
                    gLogger.info(res['Message'])

        if len(self.storages) > 0:
            resDict = {}
            resDict['StorageName'] = self.name
            resDict['StorageOptions'] = self.options
            resDict['StorageObjects'] = self.storages
            resDict['LocalProtocols'] = requestedLocalProtocols
            resDict['RemoteProtocols'] = requestedRemoteProtocols
            resDict['ProtocolOptions'] = requestedProtocolDetails
            resDict['TurlProtocols'] = turlProtocols
            return S_OK(resDict)
        else:
            errStr = "StorageFactory.getStorages: Failed to instantiate any storage protocols."
            gLogger.error(errStr, self.name)
            return S_ERROR(errStr)

    ###########################################################################################
    #
    # Below are internal methods for obtaining section/option/value configuration
    #

    def _getConfigStorageName(self, storageName):
        """
      This gets the name of the storage from the configuration service.
      If the storage is an alias for another, the resolution is performed.

      'storageName' is the storage section to check in the CS
    """
        result = self.resourcesHelper.getStorageElementOptionsDict(storageName)
        if not result['OK']:
            errStr = "StorageFactory._getConfigStorageName: Failed to get storage options"
            gLogger.error(errStr, result['Message'])
            return S_ERROR(errStr)
        if not result['Value']:
            errStr = "StorageFactory._getConfigStorageName: Supplied storage doesn't exist."
            gLogger.error(errStr, storageName)
            return S_ERROR(errStr)

        seConfig = result['Value']
        resolvedName = seConfig.get('Alias', storageName)
        return S_OK(resolvedName)
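
    # Hedged example of the alias resolution above: if the CS section for a
    # hypothetical SE 'CERN-USER' contains  Alias = CERN-DST , then
    # _getConfigStorageName('CERN-USER') returns S_OK('CERN-DST'); without an
    # Alias option, the supplied name is returned unchanged.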

    def _getConfigStorageOptions(self, storageName):
        """ Get the options associated to the StorageElement as defined in the CS
    """

        result = self.resourcesHelper.getStorageElementOptionsDict(storageName)
        if not result['OK']:
            errStr = "StorageFactory._getStorageOptions: Failed to get storage options."
            gLogger.error(errStr, "%s: %s" % (storageName, result['Message']))
            return S_ERROR(errStr)
        optionsDict = result['Value']

        result = self.resourceStatus.getStorageElementStatus(storageName)
        if not result['OK']:
            errStr = "StorageFactory._getStorageOptions: Failed to get storage status"
            gLogger.error(errStr, "%s: %s" % (storageName, result['Message']))
            return S_ERROR(errStr)
        optionsDict.update(result['Value'][storageName])

        return S_OK(optionsDict)

    def _getConfigStorageProtocols(self, storageName):
        """ Protocol specific information is present as sections in the Storage configuration
    """
        result = getSiteForResource('Storage', storageName)
        if not result['OK']:
            return result
        site = result['Value']
        result = self.resourcesHelper.getEligibleNodes('AccessProtocol', {
            'Site': site,
            'Resource': storageName
        })
        if not result['OK']:
            return result
        nodesDict = result['Value']
        protocols = []
        for site in nodesDict:
            for se in nodesDict[site]:
                protocols.extend(nodesDict[site][se])
        sortedProtocols = sortList(protocols)
        protocolDetails = []
        for protocol in sortedProtocols:
            result = self._getConfigStorageProtocolDetails(
                storageName, protocol)
            if not result['OK']:
                return result
            protocolDetails.append(result['Value'])
        self.protocols = self.localProtocols + self.remoteProtocols
        return S_OK(protocolDetails)

    def _getConfigStorageProtocolDetails(self, storageName, protocol):
        """
      Parse the contents of the protocol block
    """
        # First obtain the options that are available
        result = getSiteForResource('Storage', storageName)
        if not result['OK']:
            return result
        site = result['Value']
        result = self.resourcesHelper.getNodeOptionsDict(
            site, 'Storage', storageName, protocol)
        if not result['OK']:
            return result
        optionsDict = result['Value']

        # We must have certain values internally even if not supplied in CS
        protocolDict = {
            'Access': '',
            'Host': '',
            'Path': '',
            'Port': '',
            'Protocol': '',
            'ProtocolName': '',
            'SpaceToken': '',
            'WSUrl': ''
        }
        for option in optionsDict:
            protocolDict[option] = optionsDict[option]

        # Now update the local and remote protocol lists.
        # A warning will be given if the Access option is not set.
        if protocolDict['Access'] == 'remote':
            self.remoteProtocols.append(protocolDict['ProtocolName'])
        elif protocolDict['Access'] == 'local':
            self.localProtocols.append(protocolDict['ProtocolName'])
        else:
            errStr = "StorageFactory.__getProtocolDetails: The 'Access' option for %s:%s is neither 'local' or 'remote'." % (
                storageName, protocol)
            gLogger.warn(errStr)

        # The ProtocolName option must be defined
        if not protocolDict['ProtocolName']:
            errStr = "StorageFactory.__getProtocolDetails: 'ProtocolName' option is not defined."
            gLogger.error(errStr, "%s: %s" % (storageName, protocol))
            return S_ERROR(errStr)
        return S_OK(protocolDict)

    ###########################################################################################
    #
    # Below is the method for obtaining the object instantiated for a provided storage configuration
    #

    def __generateStorageObject(self,
                                storageName,
                                protocolName,
                                protocol,
                                path=None,
                                host=None,
                                port=None,
                                spaceToken=None,
                                wsUrl=None):

        storageType = protocolName
        if self.proxy:
            storageType = 'Proxy'

        moduleRootPaths = getInstalledExtensions()
        moduleLoaded = False
        path = (path or '').rstrip('/')
        if not path:
            path = '/'
        for moduleRootPath in moduleRootPaths:
            if moduleLoaded:
                break
            gLogger.verbose("Trying to load from root path %s" %
                            moduleRootPath)
            moduleFile = os.path.join(rootPath, moduleRootPath, "Resources",
                                      "Storage", "%sStorage.py" % storageType)
            gLogger.verbose("Looking for file %s" % moduleFile)
            if not os.path.isfile(moduleFile):
                continue
            try:
                # This enforces the convention that the plug-in must be named after the protocol
                moduleName = "%sStorage" % (storageType)
                storageModule = __import__(
                    '%s.Resources.Storage.%s' % (moduleRootPath, moduleName),
                    globals(), locals(), [moduleName])
            except Exception, x:
                errStr = "StorageFactory._generateStorageObject: Failed to import %s: %s" % (
                    storageName, x)
                gLogger.exception(errStr)
                return S_ERROR(errStr)

            try:
                evalString = "storageModule.%s(storageName,protocol,path,host,port,spaceToken,wsUrl)" % moduleName
                storage = eval(evalString)
                if not storage.isOK():
                    errStr = "StorageFactory._generateStorageObject: Failed to instantiate storage plug in."
                    gLogger.error(errStr, "%s" % (moduleName))
                    return S_ERROR(errStr)
            except Exception, x:
                errStr = "StorageFactory._generateStorageObject: Failed to instantiate %s(): %s" % (
                    moduleName, x)
                gLogger.exception(errStr)
                return S_ERROR(errStr)

            # If use proxy, keep the original protocol name
            if self.proxy:
                storage.protocolName = protocolName
            return S_OK(storage)
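
A minimal usage sketch for the factory above; the SE name, VO and protocol values
are illustrative, and the result keys are the ones assembled in getStorages:

factory = StorageFactory(useProxy=False, vo='lhcb')
res = factory.getStorages('CERN-RAW', protocolList=['SRM2'])
if res['OK']:
    storages = res['Value']['StorageObjects']   # instantiated storage plug-ins
    turls = res['Value']['TurlProtocols']       # protocols usable for TURL access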
Example No. 40
    def __lookForCE(self):

        knownces = self.am_getOption('BannedCEs', [])

        resources = Resources(vo=self.voName)
        result = resources.getEligibleResources('Computing',
                                                {'CEType': ['LCG', 'CREAM']})
        if not result['OK']:
            return result

        siteDict = result['Value']
        for site in siteDict:
            knownces += siteDict[site]


#    result = gConfig.getSections( '/Resources/Sites' )
#    if not result['OK']:
#      return
#    grids = result['Value']
#
#    for grid in grids:
#
#      result = gConfig.getSections( '/Resources/Sites/%s' % grid )
#      if not result['OK']:
#        return
#      sites = result['Value']
#
#      for site in sites:
#        opt = gConfig.getOptionsDict( '/Resources/Sites/%s/%s' % ( grid, site ) )['Value']
#        ces = List.fromChar( opt.get( 'CE', '' ) )
#        knownces += ces

        response = ldapCEState('', vo=self.voName)
        if not response['OK']:
            self.log.error("Error during BDII request", response['Message'])
            response = self.__checkAlternativeBDIISite(ldapCEState, '',
                                                       self.voName)
            return response

        newces = {}
        for queue in response['Value']:
            try:
                queuename = queue['GlueCEUniqueID']
            except KeyError:
                continue

            cename = queuename.split(":")[0]
            if cename not in knownces:
                newces[cename] = None
                self.log.debug("newce", cename)

        body = ""
        possibleNewSites = []
        for ce in newces.iterkeys():
            response = ldapCluster(ce)
            if not response['OK']:
                self.log.warn("Error during BDII request", response['Message'])
                response = self.__checkAlternativeBDIISite(ldapCluster, ce)
                continue
            clusters = response['Value']
            if len(clusters) != 1:
                self.log.warn("Error in cluster length",
                              " CE %s Length %d" % (ce, len(clusters)))
            if len(clusters) == 0:
                continue
            cluster = clusters[0]
            fkey = cluster.get('GlueForeignKey', [])
            if type(fkey) == type(''):
                fkey = [fkey]
            nameBDII = None
            for entry in fkey:
                if entry.count('GlueSiteUniqueID'):
                    nameBDII = entry.split('=')[1]
                    break
            if not nameBDII:
                continue

            cestring = "CE: %s, GOCDB Name: %s" % (ce, nameBDII)
            self.log.info(cestring)

            response = ldapCE(ce)
            if not response['OK']:
                self.log.warn("Error during BDII request", response['Message'])
                response = self.__checkAlternativeBDIISite(ldapCE, ce)
                continue

            ceinfos = response['Value']
            if len(ceinfos):
                ceinfo = ceinfos[0]
                systemName = ceinfo.get('GlueHostOperatingSystemName',
                                        'Unknown')
                systemVersion = ceinfo.get('GlueHostOperatingSystemVersion',
                                           'Unknown')
                systemRelease = ceinfo.get('GlueHostOperatingSystemRelease',
                                           'Unknown')
            else:
                systemName = "Unknown"
                systemVersion = "Unknown"
                systemRelease = "Unknown"

            osstring = "SystemName: %s, SystemVersion: %s, SystemRelease: %s" % (
                systemName, systemVersion, systemRelease)
            self.log.info(osstring)

            response = ldapCEState(ce, vo=self.voName)
            if not response['OK']:
                self.log.warn("Error during BDII request", response['Message'])
                response = self.__checkAlternativeBDIISite(
                    ldapCEState, ce, self.voName)
                continue

            newcestring = "\n\n%s\n%s" % (cestring, osstring)
            useful = False
            cestates = response['Value']
            for cestate in cestates:
                queuename = cestate.get('GlueCEUniqueID', 'UnknownName')
                queuestatus = cestate.get('GlueCEStateStatus', 'UnknownStatus')

                queuestring = "%s %s" % (queuename, queuestatus)
                self.log.info(queuestring)
                newcestring += "\n%s" % queuestring
                if queuestatus.count('Production'):
                    useful = True
            if useful:
                body += newcestring
                possibleNewSites.append(
                    'dirac-admin-add-site DIRACSiteName %s %s' %
                    (nameBDII, ce))
        if body:
            body = "We are glade to inform You about new CE(s) possibly suitable for %s:\n" % self.voName + body
            body += "\n\nTo suppress information about CE add its name to BannedCEs list."
            for possibleNewSite in possibleNewSites:
                body = "%s\n%s" % (body, possibleNewSite)
            self.log.info(body)
            if self.addressTo and self.addressFrom:
                notification = NotificationClient()
                result = notification.sendMail(self.addressTo,
                                               self.subject,
                                               body,
                                               self.addressFrom,
                                               localAttempt=False)

        return S_OK()
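
A toy illustration (hedged) of the CE-name extraction performed above on the BDII
records; the GlueCEUniqueID value is a made-up example of the usual
host:port/queue form:

queuename = 'ce.example.org:8443/cream-pbs-grid'
cename = queuename.split(":")[0]   # 'ce.example.org'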
Example No. 41
import DIRAC
from DIRAC                                              import gConfig,gLogger
from DIRAC.ResourceStatusSystem.Client.ResourceStatus   import ResourceStatus
from DIRAC.Core.Utilities.List                          import sortList
from DIRAC.ConfigurationSystem.Client.Helpers.Resources import Resources
from DIRAC.Core.Security.ProxyInfo                      import getVOfromProxyGroup

if __name__ == "__main__":
  
  result = getVOfromProxyGroup()
  if not result['OK']:
    gLogger.notice( 'Error:', result['Message'] )
    DIRAC.exit( 1 )
  vo = result['Value']  
  resources = Resources( vo = vo )
  result = resources.getEligibleStorageElements()
  if not result['OK']:
    gLogger.notice( 'Error:', result['Message'] )
    DIRAC.exit( 2 )
  seList = sortList( result[ 'Value' ] )

  resourceStatus = ResourceStatus()
 
  result = resourceStatus.getStorageStatus( seList )
  if not result['OK']:
    gLogger.notice( 'Error:', result['Message'] )
    DIRAC.exit( 3 )

  for k,v in result[ 'Value' ].items():
    
Example No. 42
  print 'ERROR: Could not contact Configuration Service'
  exitCode = 2
  DIRAC.exit( exitCode )

res = getProxyInfo()
if not res[ 'OK' ]:
  gLogger.error( 'Failed to get proxy information', res[ 'Message' ] )
  DIRAC.exit( 2 )

userName = res['Value'].get( 'username' )
if not userName:
  gLogger.error( 'Failed to get username for proxy' )
  DIRAC.exit( 2 )

if site:
  res = Resources().getStorageElements( site )
  if not res[ 'OK' ]:
    gLogger.error( 'The provided site (%s) is not known.' % site )
    DIRAC.exit( -1 )
  ses.extend( res[ 'Value' ][ 'SE' ].replace( ' ', '' ).split( ',' ) )

if not ses:
  gLogger.error( 'There were no SEs provided' )
  DIRAC.exit( -1 )

readBanned = []
writeBanned = []
checkBanned = []

resourceStatus = ResourceStatus()
Example No. 43
class ResourcesTestCase( unittest.TestCase ):
  
  def setUp( self ):
  
    Script.disableCS( )
    Script.parseCommandLine()
    self.resources = Resources()
  
  def test_getSites( self ):
  
    print
    result = self.resources.getSites( {'Name':['CERN','CPPM','PNPI']} )
    self.assertTrue( result['OK'], 'getSites' )
    sites = result['Value']
    print sites
    result = self.resources.getEligibleSites( {'Name':['CERN','CPPM','PNPI']} )    
    self.assertTrue( result['OK'], 'getEligibleSites' )
    eligibleSites = result['Value']
    self.assertEqual(sites, eligibleSites, 'sites and eligible sites are the same')

  def test_getResources( self ):
    
    print
    result = self.resources.getResources( 'CERN', 'Storage' )
    self.assertTrue( result['OK'], 'getResources' )
    ses = result['Value']
    print ses
    
  def test_getNodes( self ):
    
    print
    result = self.resources.getNodes( 'CERN::ce130', 'Queue'  )
    self.assertTrue( result['OK'], 'getNodes' )
    nodes = result['Value']
    print nodes  
    
  def test_getEligibleResources( self ):
    
    print 
    result = self.resources.getEligibleResources( 'Computing', { 'Site':['CERN','CPPM','Zurich'],'SubmissionMode':'Direct' }  )
    self.assertTrue( result['OK'], 'getEligibleResources' )
    ces = result['Value']
    print ces    
    
  def test_getEligibleNodes( self ):
    
    print
    result = self.resources.getEligibleNodes( 'AccessProtocol', 
                                              { 'Site':['CERN','CPPM','Zurich'] },
                                              { 'Protocol':'srm' }  )
    self.assertTrue( result['OK'], 'getEligibleNodes' )
    aps = result['Value']
    print aps   
    
  def test_getEligibleComputingElements( self ):
    
    siteMask = ['LCG.CERN.ch','LCG.CPPM.fr']
    
    result = self.resources.getEligibleResources( 'Computing', {'Site':siteMask,
                                                                'SubmissionMode':'gLite',
                                                                'CEType':['LCG','CREAM']} )  
    self.assertTrue( result['OK'], 'getEligibleResources' )
    print
    for ce in result['Value']:
      ceHost = self.resources.getComputingElementValue( ce, 'Host', 'unknown' )
      print ce, ceHost 
Example No. 44
def initSEs():
  '''
    Initializes SEs statuses taking their values from the CS.
  '''

  subLogger.info( 'Initializing SEs' )
  
  resources = Resources()
  
  ses = resources.getEligibleStorageElements()
  if not ses[ 'OK' ]:
    return ses
  ses = ses[ 'Value' ]  

  statuses    = StateMachine.RSSMachine( None ).getStates()
  statusTypes = RssConfiguration.RssConfiguration().getConfigStatusType( 'StorageElement' )
  reason      = 'dirac-rss-sync'
  
  subLogger.debug( statuses )
  subLogger.debug( statusTypes )
  
  rssClient = ResourceStatusClient.ResourceStatusClient()
  
  for se in ses:

    subLogger.debug( se )

    #opts = gConfig.getOptionsDict( '/Resources/StorageElements/%s' % se )
    opts = resources.getStorageElementOptionsDict( se )
    if not opts[ 'OK' ]:
      subLogger.warn( opts[ 'Message' ] )
      continue
    opts = opts[ 'Value' ]
    
    subLogger.debug( opts )
    
    # We copy the list into a new object to remove items INSIDE the loop !
    statusTypesList = statusTypes[:]
          
    for statusType, status in opts.iteritems():    
    
      #Sanity check...
      if statusType not in statusTypesList:
        continue

      #Transforms statuses to RSS terms
      if status in ( 'NotAllowed', 'InActive' ):
        status = 'Banned'
        
      if status not in statuses:
        subLogger.error( '%s not a valid status for %s - %s' % ( status, se, statusType ) )
        continue

      # We remove from the backtracking
      statusTypesList.remove( statusType )

      subLogger.debug( [ se,statusType,status,reason ] )
      result = rssClient.modifyStatusElement( 'Resource', 'Status', name = se,
                                              statusType = statusType, status = status,
                                              reason = reason )
      
      if not result[ 'OK' ]:
        subLogger.error( 'Failed to modify' )
        subLogger.error( result[ 'Message' ] )
        continue
      
    #Backtracking: statusTypes not present on CS
    for statusType in statusTypesList:

      result = rssClient.modifyStatusElement( 'Resource', 'Status', name = se,
                                              statusType = statusType, status = DEFAULT_STATUS,
                                              reason = reason )
      if not result[ 'OK' ]:
        subLogger.error( 'Error in backtracking for %s,%s,%s' % ( se, statusType, DEFAULT_STATUS ) )
        subLogger.error( result[ 'Message' ] )
        
  return S_OK()
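
A hedged toy walk-through of the CS-to-RSS status translation used above; the
option names and values are invented:

opts = {'ReadAccess': 'Active', 'WriteAccess': 'InActive'}
for statusType, status in opts.iteritems():
    if status in ('NotAllowed', 'InActive'):
        status = 'Banned'   # 'WriteAccess' ends up 'Banned', 'ReadAccess' stays 'Active'
    # each pair would then be pushed with rssClient.modifyStatusElement(...)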
Example No. 45
  def setUp( self ):
  
    Script.disableCS( )
    Script.parseCommandLine()
    self.resources = Resources()
Example No. 46
import DIRAC
from DIRAC import gConfig, gLogger
from DIRAC.ResourceStatusSystem.Client.ResourceStatus import ResourceStatus
from DIRAC.Core.Utilities.List import sortList
from DIRAC.ConfigurationSystem.Client.Helpers.Resources import Resources
from DIRAC.Core.Security.ProxyInfo import getVOfromProxyGroup

if __name__ == "__main__":

    result = getVOfromProxyGroup()
    if not result['OK']:
        gLogger.notice('Error:', result['Message'])
        DIRAC.exit(1)
    vo = result['Value']
    resources = Resources(vo=vo)
    result = resources.getEligibleStorageElements()
    if not result['OK']:
        gLogger.notice('Error:', result['Message'])
        DIRAC.exit(2)
    seList = sortList(result['Value'])

    resourceStatus = ResourceStatus()

    result = resourceStatus.getStorageStatus(seList)
    if not result['OK']:
        gLogger.notice('Error:', result['Message'])
        DIRAC.exit(3)

    for k, v in result['Value'].items():
Example No. 47
  def _resolveCECandidates( self, taskQueueDict ):
    """
      Return a list of CEs for this TaskQueue
    """
    # assume user knows what they're doing and avoid site mask e.g. sam jobs
    if 'GridCEs' in taskQueueDict and taskQueueDict['GridCEs']:
      self.log.info( 'CEs requested by TaskQueue %s:' % taskQueueDict['TaskQueueID'],
                     ', '.join( taskQueueDict['GridCEs'] ) )
      return taskQueueDict['GridCEs']

    # Get the mask
    siteStatus = SiteStatus()
    ret = siteStatus.getUsableSites( 'ComputingAccess' )
    if not ret['OK']:
      self.log.error( 'Can not retrieve site Mask from DB:', ret['Message'] )
      return []

    usableSites = ret['Value']
    if not usableSites:
      self.log.error( 'Site mask is empty' )
      return []

    self.log.verbose( 'Site Mask: %s' % ', '.join( usableSites ) )

    # remove banned sites from siteMask
    if 'BannedSites' in taskQueueDict:
      for site in taskQueueDict['BannedSites']:
        if site in usableSites:
          usableSites.remove( site )
          self.log.verbose( 'Removing banned site %s from site Mask' % site )

    # remove from the mask if a Site is given
    siteMask = [ site for site in usableSites if 'Sites' not in taskQueueDict or site in taskQueueDict['Sites'] ]

    if not siteMask:
      # pilot can not be submitted
      self.log.info( 'No Valid Site Candidate in Mask for TaskQueue %s' % taskQueueDict['TaskQueueID'] )
      return []

    self.log.info( 'Site Candidates for TaskQueue %s:' % taskQueueDict['TaskQueueID'], ', '.join( siteMask ) )

    # Get the CEs associated with the given site names
    ceMask = []

    resources = Resources( vo = self.virtualOrganization )
    result = resources.getEligibleResources( 'Computing', {'Site':siteMask,
                                                           'SubmissionMode':'gLite',
                                                           'CEType':['LCG','CREAM']} )
    if not result['OK']:
      self.log.error( "Failed to get eligible ce's:", result['Message'] )
      return []
    ces = result['Value']

    for ce in ces:
      ceHost = resources.getComputingElementValue( ce, 'Host', 'unknown' )
      if ceHost != 'unknown':
        ceMask.append( ceHost )

    if not ceMask:
      self.log.info( 'No CE Candidate found for TaskQueue %s:' % taskQueueDict['TaskQueueID'], ', '.join( siteMask ) )

    self.log.verbose( 'CE Candidates for TaskQueue %s:' % taskQueueDict['TaskQueueID'], ', '.join( ceMask ) )

    return ceMask
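
A hedged sketch of a task-queue dictionary as consumed by _resolveCECandidates
above; all values are invented and the call assumes the enclosing director class:

taskQueueDict = {'TaskQueueID': 123,
                 'GridCEs': [],                       # empty: fall through to the site mask
                 'Sites': ['LCG.CERN.ch'],
                 'BannedSites': ['LCG.SomeSite.xx']}
# ceMask = self._resolveCECandidates(taskQueueDict)   # would return a list of CE hosts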
Example No. 48
  def __init__( self, name, protocols = None, vo = None ):
    """ c'tor

    :param str name: SE name
    :param list protocols: requested protocols
    """

    self.vo = vo
    if not vo:
      result = getVOfromProxyGroup()
      if not result['OK']:
        # a constructor cannot return S_ERROR; flag the object invalid instead
        self.valid = False
        self.name = name
        self.errorReason = result['Message']
        return
      self.vo = result['Value']
    self.opHelper = Operations( vo = self.vo )
    self.resources = Resources( vo = self.vo )

    proxiedProtocols = gConfig.getValue( '/LocalSite/StorageElements/ProxyProtocols', "" ).split( ',' )
    useProxy = False
    result = self.resources.getAccessProtocols( name )
    if result['OK'] and result['Value']:
      ap = result['Value'][0]
      useProxy = ( self.resources.getAccessProtocolValue( ap, "Protocol", "UnknownProtocol" )
                   in proxiedProtocols )

    #print "Proxy", name, proxiedProtocols, \
    #gConfig.getValue( "/Resources/StorageElements/%s/AccessProtocol.1/Protocol" % name, "xxx" )

    if not useProxy:
      useProxy = gConfig.getValue( '/LocalSite/StorageElements/%s/UseProxy' % name, False )
    if not useProxy:
      useProxy = self.opHelper.getValue( '/Services/StorageElements/%s/UseProxy' % name, False )

    self.valid = True
    if protocols is None:
      res = StorageFactory( useProxy ).getStorages( name, protocolList = [] )
    else:
      res = StorageFactory( useProxy ).getStorages( name, protocolList = protocols )
    if not res['OK']:
      self.valid = False
      self.name = name
      self.errorReason = res['Message']
    else:
      factoryDict = res['Value']
      self.name = factoryDict['StorageName']
      self.options = factoryDict['StorageOptions']
      self.localProtocols = factoryDict['LocalProtocols']
      self.remoteProtocols = factoryDict['RemoteProtocols']
      self.storages = factoryDict['StorageObjects']
      self.protocolOptions = factoryDict['ProtocolOptions']
      self.turlProtocols = factoryDict['TurlProtocols']

    self.log = gLogger.getSubLogger( "SE[%s]" % self.name )

    self.readMethods = [ 'getFile',
                         'getAccessUrl',
                         'getTransportURL',
                         'prestageFile',
                         'prestageFileStatus',
                         'getDirectory']

    self.writeMethods = [ 'retransferOnlineFile',
                          'putFile',
                          'replicateFile',
                          'pinFile',
                          'releaseFile',
                          'createDirectory',
                          'putDirectory' ]

    self.removeMethods = [ 'removeFile', 'removeDirectory' ]

    self.checkMethods = [ 'exists',
                          'getDirectoryMetadata',
                          'getDirectorySize',
                          'getFileSize',
                          'getFileMetadata',
                          'listDirectory',
                          'isDirectory',
                          'isFile',
                           ]

    self.okMethods = [ 'getLocalProtocols',
                       'getPfnForProtocol',
                       'getPfnForLfn',
                       'getPfnPath',
                       'getProtocols',
                       'getRemoteProtocols',
                       'getStorageElementName',
                       'getStorageElementOption',
                       'getStorageParameters',
                       'isLocalSE' ]

    self.__resourceStatus = ResourceStatus()
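
A minimal usage sketch, assuming the constructor above belongs to the
StorageElement client class and that 'CERN-USER' is a hypothetical SE name; the
attributes read below are the ones set in the constructor:

se = StorageElement('CERN-USER')
if not se.valid:
  gLogger.error('Cannot use SE:', se.errorReason)
else:
  print se.name, se.localProtocols, se.remoteProtocols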
Example No. 49
class StorageFactory:
    def __init__(self, useProxy=False, vo=None):

        self.valid = True
        self.proxy = False
        self.proxy = useProxy
        self.resourceStatus = ResourceStatus()
        self.resourcesHelper = Resources(vo=vo)

    ###########################################################################################
    #
    # Below are public methods for obtaining storage objects
    #

    def getStorageName(self, initialName):
        return self._getConfigStorageName(initialName)

    def getStorage(self, parameterDict):
        """ This instantiates a single storage for the details provided and doesn't check the CS.
    """
        # The storage name must be supplied.
        if "StorageName" in parameterDict:
            storageName = parameterDict["StorageName"]
        else:
            errStr = "StorageFactory.getStorage: StorageName must be supplied"
            gLogger.error(errStr)
            return S_ERROR(errStr)

        # ProtocolName must be supplied otherwise nothing will work.
        if "ProtocolName" in parameterDict:
            protocolName = parameterDict["ProtocolName"]
        else:
            errStr = "StorageFactory.getStorage: ProtocolName must be supplied"
            gLogger.error(errStr)
            return S_ERROR(errStr)

        # The other options need not always be specified
        protocol = parameterDict.get("Protocol", "")
        port = parameterDict.get("Port", "")
        host = parameterDict.get("Host", "")
        path = parameterDict.get("Path", "")
        spaceToken = parameterDict.get("SpaceToken", "")
        wsPath = parameterDict.get("WSUrl", "")

        return self.__generateStorageObject(
            storageName, protocolName, protocol, path, host, port, spaceToken, wsPath, parameterDict
        )

    def getStorages(self, storageName, protocolList=[]):
        """ Get an instance of a Storage based on the DIRAC SE name based on the CS entries CS

        'storageName' is the DIRAC SE name i.e. 'CERN-RAW'
        'protocolList' is an optional list of protocols if a sub-set is desired i.e ['SRM2','SRM1']
    """
        self.remoteProtocols = []
        self.localProtocols = []
        self.name = ""
        self.options = {}
        self.protocolDetails = []
        self.storages = []

        # Get the name of the storage provided
        res = self._getConfigStorageName(storageName)
        if not res["OK"]:
            self.valid = False
            return res
        storageName = res["Value"]
        self.name = storageName

        # Get the options defined in the CS for this storage
        res = self._getConfigStorageOptions(storageName)
        if not res["OK"]:
            self.valid = False
            return res
        self.options = res["Value"]

        # Get the protocol specific details
        res = self._getConfigStorageProtocols(storageName)
        if not res["OK"]:
            self.valid = False
            return res
        self.protocolDetails = res["Value"]

        requestedLocalProtocols = []
        requestedRemoteProtocols = []
        requestedProtocolDetails = []
        turlProtocols = []
        # Generate the protocol specific plug-ins
        self.storages = []
        for protocolDict in self.protocolDetails:
            protocolName = protocolDict["ProtocolName"]
            # Skip protocols outside the requested sub-set, if one was given
            if protocolList and protocolName not in protocolList:
                continue
            protocol = protocolDict["Protocol"]
            host = protocolDict["Host"]
            path = protocolDict["Path"]
            port = protocolDict["Port"]
            spaceToken = protocolDict["SpaceToken"]
            wsUrl = protocolDict["WSUrl"]
            res = self.__generateStorageObject(
                storageName,
                protocolName,
                protocol,
                path=path,
                host=host,
                port=port,
                spaceToken=spaceToken,
                wsUrl=wsUrl,
                parameters=protocolDict,
            )
            if res["OK"]:
                self.storages.append(res["Value"])
                if protocolName in self.localProtocols:
                    turlProtocols.append(protocol)
                    requestedLocalProtocols.append(protocolName)
                if protocolName in self.remoteProtocols:
                    requestedRemoteProtocols.append(protocolName)
                requestedProtocolDetails.append(protocolDict)
            else:
                gLogger.info(res["Message"])

        if self.storages:
            resDict = {
                "StorageName": self.name,
                "StorageOptions": self.options,
                "StorageObjects": self.storages,
                "LocalProtocols": requestedLocalProtocols,
                "RemoteProtocols": requestedRemoteProtocols,
                "ProtocolOptions": requestedProtocolDetails,
                "TurlProtocols": turlProtocols,
            }
            return S_OK(resDict)
        else:
            errStr = "StorageFactory.getStorages: Failed to instantiate any storage protocols."
            gLogger.error(errStr, self.name)
            return S_ERROR(errStr)
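
    # Illustrative usage (not from the original source; the SE name is hypothetical):
    # instantiate all CS-defined protocol plug-ins for a DIRAC SE, optionally
    # restricted to a protocol sub-set.
    #
    #   >>> res = StorageFactory().getStorages( 'CERN-RAW', protocolList = ['SRM2'] )
    #   >>> if res['OK']:
    #   ...     storages = res['Value']['StorageObjects']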

    ###########################################################################################
    #
    # Below are internal methods for obtaining section/option/value configuration
    #

    def _getConfigStorageName(self, storageName):
        """
      This gets the name of the storage the configuration service.
      If the storage is an alias for another the resolution is performed.

      'storageName' is the storage section to check in the CS
    """
        result = self.resourcesHelper.getStorageElementOptionsDict(storageName)
        if not result["OK"]:
            errStr = "StorageFactory._getConfigStorageName: Failed to get storage options"
            gLogger.error(errStr, result["Message"])
            return S_ERROR(errStr)
        if not result["Value"]:
            errStr = "StorageFactory._getConfigStorageName: Supplied storage doesn't exist."
            gLogger.error(errStr, storageName)
            return S_ERROR(errStr)
        if "Alias" in res["Value"]:
            configPath = "%s/%s/Alias" % (self.rootConfigPath, storageName)
            aliasName = gConfig.getValue(configPath)
            result = self._getConfigStorageName(aliasName)
            if not result["OK"]:
                errStr = "StorageFactory._getConfigStorageName: Supplied storage doesn't exist."
                gLogger.error(errStr, configPath)
                return S_ERROR(errStr)
            resolvedName = result["Value"]
        else:
            resolvedName = storageName
        return S_OK(resolvedName)
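
    # For reference (hypothetical CS layout, not from the original source): an SE
    # that is a pure alias would carry only an 'Alias' option, e.g.
    #
    #   <rootConfigPath>/CERN-DST/Alias = CERN-DISK
    #
    # in which case _getConfigStorageName() resolves and returns 'CERN-DISK'.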

    def _getConfigStorageOptions(self, storageName):
        """ Get the options associated to the StorageElement as defined in the CS
    """

        result = self.resourcesHelper.getStorageElementOptionsDict(storageName)
        if not result["OK"]:
            errStr = "StorageFactory._getStorageOptions: Failed to get storage options."
            gLogger.error(errStr, "%s: %s" % (storageName, result["Message"]))
            return S_ERROR(errStr)
        optionsDict = result["Value"]

        result = self.resourceStatus.getStorageStatus(storageName, "ReadAccess")
        if not result["OK"]:
            errStr = "StorageFactory._getStorageOptions: Failed to get storage status"
            gLogger.error(errStr, "%s: %s" % (storageName, result["Message"]))
            return S_ERROR(errStr)
        # optionsDict.update( result[ 'Value' ][ storageName ] )

        return S_OK(optionsDict)

    def _getConfigStorageProtocols(self, storageName):
        """ Protocol specific information is present as sections in the Storage configuration
    """
        result = getSiteForResource(storageName)
        if not result["OK"]:
            return result
        site = result["Value"]
        result = self.resourcesHelper.getEligibleNodes("AccessProtocol", {"Site": site, "Resource": storageName})
        if not result["OK"]:
            return result
        nodesList = result["Value"]
        protocols = list(nodesList)
        protocolDetails = []
        for protocol in protocols:
            result = self._getConfigStorageProtocolDetails(protocol)
            if not result["OK"]:
                return result
            protocolDetails.append(result["Value"])
        self.protocols = self.localProtocols + self.remoteProtocols
        return S_OK(protocolDetails)

    def _getConfigStorageProtocolDetails(self, protocol):
        """
      Parse the contents of the protocol block
    """

        result = self.resourcesHelper.getAccessProtocolOptionsDict(protocol)
        if not result["OK"]:
            return result
        optionsDict = result["Value"]

        # We must have certain values internally even if not supplied in CS
        protocolDict = {
            "Access": "",
            "Host": "",
            "Path": "",
            "Port": "",
            "Protocol": "",
            "ProtocolName": "",
            "SpaceToken": "",
            "WSUrl": "",
        }
        protocolDict.update(optionsDict)

        # Now update the local and remote protocol lists.
        # A warning will be given if the Access option is not set.
        if protocolDict["Access"] == "remote":
            self.remoteProtocols.append(protocolDict["ProtocolName"])
        elif protocolDict["Access"] == "local":
            self.localProtocols.append(protocolDict["ProtocolName"])
        else:
            errStr = (
                "StorageFactory._getConfigStorageProtocolDetails: The 'Access' option for %s is neither 'local' nor 'remote'."
                % protocol
            )
            gLogger.warn(errStr)

        # The ProtocolName option must be defined
        if not protocolDict["ProtocolName"]:
            errStr = "StorageFactory.__getProtocolDetails: 'ProtocolName' option is not defined."
            gLogger.error(errStr, "%s" % protocol)
            return S_ERROR(errStr)
        return S_OK(protocolDict)
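
    # For reference (illustrative values, not from the original source): a fully
    # populated protocol dictionary returned above might look like
    #
    #   { 'Access'       : 'remote',
    #     'Host'         : 'srm.example.cern.ch',
    #     'Path'         : '/castor/cern.ch/grid',
    #     'Port'         : '8443',
    #     'Protocol'     : 'srm',
    #     'ProtocolName' : 'SRM2',
    #     'SpaceToken'   : 'MYVO_RAW',
    #     'WSUrl'        : '/srm/managerv2?SFN=' }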

    ###########################################################################################
    #
    # Below is the method for obtaining the object instantiated for a provided storage configuration
    #

    def __generateStorageObject(
        self,
        storageName,
        protocolName,
        protocol,
        path=None,
        host=None,
        port=None,
        spaceToken=None,
        wsUrl=None,
        parameters={},
    ):

        storageType = protocolName
        if self.proxy:
            storageType = "Proxy"

        moduleRootPaths = getInstalledExtensions()
        moduleLoaded = False
        path = path.rstrip("/")
        if not path:
            path = "/"
        for moduleRootPath in moduleRootPaths:
            if moduleLoaded:
                break
            gLogger.verbose("Trying to load from root path %s" % moduleRootPath)
            moduleFile = os.path.join(rootPath, moduleRootPath, "Resources", "Storage", "%sStorage.py" % storageType)
            gLogger.verbose("Looking for file %s" % moduleFile)
            if not os.path.isfile(moduleFile):
                continue
            try:
                # This enforces the convention that the plug-in must be named after the protocol
                moduleName = "%sStorage" % (storageType)
                storageModule = __import__(
                    "%s.Resources.Storage.%s" % (moduleRootPath, moduleName), globals(), locals(), [moduleName]
                )
            except Exception, x:
                errStr = "StorageFactory._generateStorageObject: Failed to import %s: %s" % (storageName, x)
                gLogger.exception(errStr)
                return S_ERROR(errStr)

            try:
                storageClass = getattr(storageModule, moduleName)
                storage = storageClass(storageName, protocol, path, host, port, spaceToken, wsUrl)
                if not storage.isOK():
                    errStr = "StorageFactory._generateStorageObject: Failed to instantiate storage plug in."
                    gLogger.error(errStr, "%s" % (moduleName))
                    return S_ERROR(errStr)
            except Exception, x:
                errStr = "StorageFactory._generateStorageObject: Failed to instantiate %s(): %s" % (moduleName, x)
                gLogger.exception(errStr)
                return S_ERROR(errStr)

            # Set extra parameters if any
            if parameters:
                result = storage.setParameters(parameters)
                if not result["OK"]:
                    return result

            # If a proxy is used, keep the original protocol name
            if self.proxy:
                storage.protocolName = protocolName
            return S_OK(storage)
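
# A minimal standalone sketch of the dynamic plug-in loading technique used by
# __generateStorageObject() above: import the module named after the protocol and
# fetch the class of the same name with getattr() instead of eval(). The module
# path convention mirrors the code above; everything else is hypothetical.
import importlib


def loadStoragePlugin(moduleRootPath, storageType):
    """ Return the plug-in class <storageType>Storage from the conventional location """
    # By convention the plug-in module and class are both called <Type>Storage
    moduleName = "%sStorage" % storageType
    module = importlib.import_module("%s.Resources.Storage.%s" % (moduleRootPath, moduleName))
    return getattr(module, moduleName)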
Ejemplo n.º 50
0
  def __lookForCE( self ):

    knownces = self.am_getOption( 'BannedCEs', [] )

    resources = Resources( self.voName )
    result    = resources.getEligibleResources( 'Computing', {'CEType':['LCG','CREAM'] } ) 
    if not result['OK']:
      return result
    
    # Keep the banned CEs in the known list so they are skipped below
    knownces += [ resources.getComputingElementValue( x, 'Host' ) for x in result['Value'] ]

#    result = gConfig.getSections( '/Resources/Sites' )
#    if not result['OK']:
#      return
#    grids = result['Value']
#
#    for grid in grids:
#
#      result = gConfig.getSections( '/Resources/Sites/%s' % grid )
#      if not result['OK']:
#        return
#      sites = result['Value']
#
#      for site in sites:
#        opt = gConfig.getOptionsDict( '/Resources/Sites/%s/%s' % ( grid, site ) )['Value']
#        ces = List.fromChar( opt.get( 'CE', '' ) )
#        knownces += ces

    response = ldapCEState( '', vo = self.voName )
    if not response['OK']:
      self.log.error( "Error during BDII request", response['Message'] )
      response = self.__checkAlternativeBDIISite( ldapCEState, '', self.voName )
      return response

    newces = {}
    for queue in response['Value']:
      try:
        queuename = queue['GlueCEUniqueID']
      except KeyError:
        continue

      cename = queuename.split( ":" )[0]
      if cename not in knownces:
        newces[cename] = None
        self.log.debug( "newce", cename )

    body = ""
    possibleNewSites = []
    for ce in newces.iterkeys():
      response = ldapCluster( ce )
      if not response['OK']:
        self.log.warn( "Error during BDII request", response['Message'] )
        response = self.__checkAlternativeBDIISite( ldapCluster, ce )
        continue
      clusters = response['Value']
      if len( clusters ) != 1:
        self.log.warn( "Error in cluster length", " CE %s Length %d" % ( ce, len( clusters ) ) )
      if len( clusters ) == 0:
        continue
      cluster = clusters[0]
      fkey = cluster.get( 'GlueForeignKey', [] )
      if isinstance( fkey, basestring ):
        fkey = [fkey]
      nameBDII = None
      for entry in fkey:
        if entry.count( 'GlueSiteUniqueID' ):
          nameBDII = entry.split( '=' )[1]
          break
      if not nameBDII:
        continue

      cestring = "CE: %s, GOCDB Name: %s" % ( ce, nameBDII )
      self.log.info( cestring )

      response = ldapCE( ce )
      if not response['OK']:
        self.log.warn( "Error during BDII request", response['Message'] )
        response = self.__checkAlternativeBDIISite( ldapCE, ce )
        continue

      ceinfos = response['Value']
      if len( ceinfos ):
        ceinfo = ceinfos[0]
        systemName = ceinfo.get( 'GlueHostOperatingSystemName', 'Unknown' )
        systemVersion = ceinfo.get( 'GlueHostOperatingSystemVersion', 'Unknown' )
        systemRelease = ceinfo.get( 'GlueHostOperatingSystemRelease', 'Unknown' )
      else:
        systemName = "Unknown"
        systemVersion = "Unknown"
        systemRelease = "Unknown"

      osstring = "SystemName: %s, SystemVersion: %s, SystemRelease: %s" % ( systemName, systemVersion, systemRelease )
      self.log.info( osstring )

      response = ldapCEState( ce, vo = self.voName )
      if not response['OK']:
        self.log.warn( "Error during BDII request", response['Message'] )
        response = self.__checkAlternativeBDIISite( ldapCEState, ce, self.voName )
        continue

      newcestring = "\n\n%s\n%s" % ( cestring, osstring )
      useful = False
      cestates = response['Value']
      for cestate in cestates:
        queuename = cestate.get( 'GlueCEUniqueID', 'UnknownName' )
        queuestatus = cestate.get( 'GlueCEStateStatus', 'UnknownStatus' )

        queuestring = "%s %s" % ( queuename, queuestatus )
        self.log.info( queuestring )
        newcestring += "\n%s" % queuestring
        if queuestatus.count( 'Production' ):
          useful = True
      if useful:
        body += newcestring
        possibleNewSites.append( 'dirac-admin-add-site DIRACSiteName %s %s' % ( nameBDII, ce ) )
    if body:
      body = "We are glad to inform You about new CE(s) possibly suitable for %s:\n" % self.voName + body
      body += "\n\nTo suppress information about CE add its name to BannedCEs list."
      for  possibleNewSite in  possibleNewSites:
        body = "%s\n%s" % ( body, possibleNewSite )
      self.log.info( body )
      if self.addressTo and self.addressFrom:
        notification = NotificationClient()
        result = notification.sendMail( self.addressTo, self.subject, body, self.addressFrom, localAttempt = False )

    return S_OK()
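
# A minimal standalone sketch of the BDII query flow used by __lookForCE() above:
# list the CE queues advertised for a VO and collect the distinct CE host names.
# It assumes ldapCEState is importable from DIRAC.Core.Utilities.Grid, as in the
# DIRAC release this snippet targets; error handling is deliberately simplified.
from DIRAC.Core.Utilities.Grid import ldapCEState

def listAdvertisedCEs( vo ):
  """ Return the sorted CE host names publishing queues for <vo> in the BDII """
  response = ldapCEState( '', vo = vo )
  if not response['OK']:
    return response
  ces = set()
  for queue in response['Value']:
    # GlueCEUniqueID looks like 'ce.example.org:8443/cream-pbs-queue'
    queuename = queue.get( 'GlueCEUniqueID', '' )
    if queuename:
      ces.add( queuename.split( ':' )[0] )
  return sorted( ces )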
Ejemplo n.º 51
0
class Synchronizer( object ):
  '''
  Every time there is a successful write on the CS, Synchronizer().sync() is 
  executed. It updates the database with the values on the CS.
  '''
  
  def __init__( self ):
    """
    Constructor.
    
    examples:
      >>> s = Synchronizer()
    """
    
    self.log        = gLogger.getSubLogger( self.__class__.__name__ )
    self.operations = Operations()
    self.resources  = Resources()
    
    self.rStatus    = ResourceStatusClient.ResourceStatusClient()  
    self.rssConfig  = RssConfiguration()
  
    self.diracAdmin = DiracAdmin()
  
  def sync( self, _eventName, _params ):
    '''
    Main synchronizer method. It synchronizes the three types of elements: Sites,
    Resources and Nodes. Each _syncX method returns a dictionary with the additions
    and deletions.
    
    examples:
      >>> s.sync( None, None )
          S_OK()
    
    :Parameters:
      **_eventName** - any
        this parameter is ignored, but needed by caller function.
      **_params** - any
        this parameter is ignored, but needed by caller function.
    
    :return: S_OK
    '''
    
    defSyncResult = { 'added' : [], 'deleted' : [] }
    
    # Sites
    syncSites = self._syncSites()
    if not syncSites[ 'OK' ]:
      self.log.error( syncSites[ 'Message' ] )
    syncSites = ( syncSites[ 'OK' ] and syncSites[ 'Value' ] ) or defSyncResult
    
    # Resources
    syncResources = self._syncResources()
    if not syncResources[ 'OK' ]:
      self.log.error( syncResources[ 'Message' ] )
    syncResources = ( syncResources[ 'OK' ] and syncResources[ 'Value' ] ) or defSyncResult 
    
    # Nodes
    syncNodes = self._syncNodes()
    if not syncNodes[ 'OK' ]:
      self.log.error( syncNodes[ 'Message' ] )
    syncNodes = ( syncNodes[ 'OK' ] and syncNodes[ 'Value' ] ) or defSyncResult
      
    # Notify the synchronization results via email
    self.notify( syncSites, syncResources, syncNodes )
    
    return S_OK()

  def notify( self, syncSites, syncResources, syncNodes ):
    """
    Method sending email notification with the result of the synchronization. Email
    is sent to Operations( EMail/Production ) email address.
    
    examples:
      >>> s.notify( {}, {}, {} )
      >>> s.notify( { 'Site' : { 'added' : [], 'deleted' : [ 'RubbishSite' ] } }, {}, {} )
      >>> s.notify( { 'Site' : { 'added' : [], 'deleted' : [ 'RubbishSite' ] } },
                    { 'Computing' : { 'added' : [ 'newCE01', 'newCE02' ], 'deleted' : [] } }, {} )
    
    :Parameters:
      **syncSites** - dict() ( keys: added, deleted )
        dictionary with the sites added and deleted from the DB
      **syncResources** - dict() ( keys: added, deleted )
        dictionary with the resources added and deleted from the DB
      **syncNodes** - dict() ( keys: added, deleted )
        dictionary with the nodes added and deleted from the DB
      
    :return: S_OK
    """
    
    # Human readable summary
    msgBody = self.getBody( syncSites, syncResources, syncNodes ) 
    self.log.info( msgBody )
    
    # Email addresses
    toAddress   = self.operations.getValue( 'EMail/Production', '' )
    fromAddress = self.rssConfig.getConfigFromAddress( '' )
    
    if toAddress and fromAddress and msgBody:
      
      # Subject of the email
      setup   = gConfig.getValue( 'DIRAC/Setup' )
      subject = '[RSS](%s) CS Synchronization' % setup
      
      self.diracAdmin.sendMail( toAddress, subject, msgBody, fromAddress = fromAddress )
     
  def getBody( self, syncSites, syncResources, syncNodes ):
    """
    Method that given the outputs of the three synchronization methods builds a
    human readable string.
    
    examples:
      >>> s.getBody( {}, {}, {} )
          ''
      >>> s.getBody( { 'Site' : { 'added' : [], 'deleted' : [ 'RubbishSite' ] } }, {}, {} )
          '''
          SITES:
          Site:
            deleted:1
              RubbishSite
          '''
      >>> s.getBody( { 'Site' : { 'added' : [], 'deleted' : [ 'RubbishSite' ] } },
                     { 'Computing' : { 'added' : [ 'newCE01', 'newCE02' ], 'deleted' : [] } }, {} )
          '''
          SITES:
          Site:
            deleted:1
              RubbishSite
          RESOURCES:
          Computing:
            added:2
              newCE01
              newCE02    
          '''
          
    :Parameters:
      **syncSites** - dict() ( keys: added, deleted )
        dictionary with the sites added and deleted from the DB
      **syncResources** - dict() ( keys: added, deleted )
        dictionary with the resources added and deleted from the DB
      **syncNodes** - dict() ( keys: added, deleted )
        dictionary with the nodes added and deleted from the DB
      
    :return: str    
    """
        
    syncMsg = ''
       
    for element, syncResult in [ ( 'SITES', syncSites ), ( 'RESOURCES', syncResources ), 
                                 ( 'NODES', syncNodes ) ]:
    
      elementsMsg = ''
    
      for elementType, elements in syncResult.items():
    
        elementMsg = ''
        if elements[ 'added' ]:
          elementMsg += '\n  %s added: %d \n' % ( elementType, len( elements[ 'added' ] ) )
          elementMsg += '    ' + '\n    '.join( elements[ 'added' ] ) 
        if elements[ 'deleted' ]:
          elementMsg += '\n  %s deleted: %d \n' % ( elementType, len( elements[ 'deleted' ] ) )
          elementMsg += '    ' + '\n    '.join( elements[ 'deleted' ] )    
          
        if elementMsg:
          elementsMsg += '\n\n%s:\n' % elementType
          elementsMsg += elementMsg
        
      if elementsMsg:
        syncMsg += '\n\n%s:' % element + elementsMsg

    return syncMsg 

  #.............................................................................
  # Sync methods: Site, Resource & Node

  def _syncSites( self ):
    """
    Method that synchronizes sites ( using their canonical name: CERN.ch ) with
    elementType = 'Site'. It gets from the CS the eligible site names and then
    synchronizes them with the DB. If not on the DB, they are added. If in the DB
    but not on the CS, they are deleted.
    
    examples:
      >> s._syncSites()
         S_OK( { 'Site' : { 'added' : [], 'deleted' : [ 'RubbishSite' ] } } )
    
    :return: S_OK( { 'Site' : { 'added' : [], 'deleted' : [] }} ) | S_ERROR
    """
    
    # Get site names from the CS
    foundSites = self.resources.getEligibleSites()
    if not foundSites[ 'OK' ]:
      return foundSites
       
    sites = {}
    
    # Synchronize with the DB
    resSync = self.__dbSync( 'Site', 'Site', foundSites[ 'Value' ] )
    if not resSync[ 'OK' ]:
      self.log.error( 'Error synchronizing Sites' )
      self.log.error( resSync[ 'Message' ] )
    else:
      sites = resSync[ 'Value' ]  
  
    return S_OK( { 'Site' : sites } )
    
  def _syncResources( self ):
    """
    Method that synchronizes resources as defined on RESOURCE_NODE_MAPPING dictionary
    keys. It makes one sync round per key ( elementType ). Gets from the CS the 
    eligible Resource/<elementType> names and then synchronizes them with the DB. 
    If not on the DB, they are added. If in the DB but not on the CS, they are deleted.
    
    examples:
      >>> s._syncResources() 
          S_OK( { 'Computing' : { 'added' : [ 'newCE01', 'newCE02' ], 'deleted' : [] },
                  'Storage'   : { 'added' : [], 'deleted' : [] },
                  ... } ) 
    
    :return: S_OK( { 'RESOURCE_NODE_MAPPINGKey1' : { 'added' : [], 'deleted' : [] }, ...} )
    """
    
    resources = {}
    
    # Iterate over the different elementTypes for Resource ( Computing, Storage... )
    for elementType in RESOURCE_NODE_MAPPING.keys():
      
      # Get Resource / <elementType> names from CS
      foundResources = self.resources.getEligibleResources( elementType )
      if not foundResources[ 'OK' ]:
        self.log.error( foundResources[ 'Message' ] )
        continue
      
      # Translate CS result into a list
      foundResources = foundResources[ 'Value' ]
      
      # Synchronize with the DB
      resSync = self.__dbSync( 'Resource', elementType, foundResources )
      if not resSync[ 'OK' ]:
        self.log.error( 'Error synchronizing %s %s' % ( 'Resource', elementType ) )
        self.log.error( resSync[ 'Message' ] )
      else: 
        resources[ elementType ] = resSync[ 'Value' ] 
  
    return S_OK( resources )

  def _syncNodes( self ):
    """
    Method that synchronizes resources as defined on RESOURCE_NODE_MAPPING dictionary
    values. It makes one sync round per key ( elementType ). Gets from the CS the 
    eligible Node/<elementType> names and then synchronizes them with the DB. 
    If not on the DB, they are added. If in the DB but not on the CS, they are deleted.
    
    examples:
      >>> s._syncNodes() 
          S_OK( { 'Queue' : { 'added' : [], 'deleted' : [] },
                  ... } ) 
    
    :return: S_OK( { 'RESOURCE_NODE_MAPPINGValue1' : { 'added' : [], 'deleted' : [] }, ...} )
    """
    
    nodes = {}
    
    # Iterate over the different elementTypes for Node ( Queue, AccessProtocol... )
    for elementType in RESOURCE_NODE_MAPPING.values():
      
      # Get Node / <elementType> names from CS
      foundNodes = self.resources.getEligibleNodes( elementType )
      if not foundNodes[ 'OK' ]:
        self.log.error( foundNodes[ 'Message' ] )
        continue
      
      # Translate CS result into a list : maps NodeName to SiteName<>NodeName to 
      # avoid duplicates
      # Looong list comprehension, sorry !
      foundNodes = [ '%s<>%s' % ( key, item ) for key, subDict in foundNodes[ 'Value' ].items() 
                     for subList in subDict.values() for item in subList ]
             
      # Synchronize with the DB       
      resSync = self.__dbSync( 'Node', elementType, foundNodes )
      if not resSync[ 'OK' ]:
        self.log.error( 'Error synchronizing %s %s' % ( 'Node', elementType ) )
        self.log.error( resSync[ 'Message' ] )
      else: 
        nodes[ elementType ] = resSync[ 'Value' ] 
  
    return S_OK( nodes )

  #.............................................................................
  # DB sync actions
  
  def __dbSync( self, elementFamily, elementType, elementsCS ):
    """
    Method synchronizing CS and DB. Compares <elementsCS> with <elementsDB>
    given the elementFamily and elementType ( e.g. Resource / Computing ).
    Elements missing from the DB are inserted; elements in the DB but missing
    from the CS are deleted from the DB. Note that the logs in the RSS DB
    are kept, just in case.
    
    :Parameters:
      **elementFamily** - str
        any of the valid element families : Site, Resource, Node
      **elementType** - str
        any of the valid element types for <elementFamily>
      **elementsCS** - list
        list with the elements for <elementFamily>/<elementType> found in the CS  
    
    :return: S_OK( { 'added' : [], 'deleted' : [] } ) | S_ERROR
    """ 
    
    # deleted, added default response
    syncRes = { 
                'deleted' : [],
                'added'   : [],
              }
    
    # Gets <elementFamily>/<elementType> elements from DB
    elementsDB = self.rStatus.selectStatusElement( elementFamily, 'Status', 
                                                   elementType = elementType,
                                                   meta = { 'columns' : [ 'name' ] } )
    if not elementsDB[ 'OK' ]:
      return elementsDB
    elementsDB = [ elementDB[ 0 ] for elementDB in elementsDB[ 'Value' ] ]      
    
    # Elements in DB but not in CS -> to be deleted
    toBeDeleted = list( set( elementsDB ).difference( set( elementsCS ) ) )
    if toBeDeleted:
      resDelete = self.__dbDelete( elementFamily, elementType, toBeDeleted )
      if not resDelete[ 'OK' ]:
        return resDelete  
      else:
        syncRes[ 'deleted' ] = toBeDeleted
    
    # Elements in CS but not in DB -> to be added
    toBeAdded = list( set( elementsCS ).difference( set( elementsDB ) ) )
    if toBeAdded:
      resInsert = self.__dbInsert( elementFamily, elementType, toBeAdded )
      if not resInsert[ 'OK' ]:
        return resInsert
      else:
        syncRes[ 'added' ] = toBeAdded
           
    return S_OK( syncRes )
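
  # Illustrative note (not from the original source): the comparison above boils
  # down to two set differences, e.g.
  #
  #   >>> elementsDB = set( [ 'SiteA', 'SiteB' ] )
  #   >>> elementsCS = set( [ 'SiteB', 'SiteC' ] )
  #   >>> sorted( elementsDB - elementsCS )   # in DB only -> to be deleted
  #   ['SiteA']
  #   >>> sorted( elementsCS - elementsDB )   # in CS only -> to be added
  #   ['SiteC']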
  
  def __dbDelete( self, elementFamily, elementType, toBeDeleted ):
    """
    Method that given the elementFamily and elementType, deletes all entries
    in the History and Status tables for the given elements in toBeDeleted ( all
    their status Types ).

    :Parameters:
      **elementFamily** - str
        any of the valid element families : Site, Resource, Node
      **elementType** - str
        any of the valid element types for <elementFamily>, just used for logging
        purposes.
      **toBeDeleted** - list
        list with the elements to be deleted  
    
    :return: S_OK | S_ERROR    
    """
    
    self.log.info( 'Deleting %s %s:' % ( elementFamily, elementType ) )
    self.log.info( toBeDeleted )
    
    return self.rStatus._extermineStatusElement( elementFamily, toBeDeleted )
  
  def __dbInsert( self, elementFamily, elementType, toBeAdded ):  
    """
    Method that given the elementFamily and elementType, adds all elements in
    toBeAdded with their respective statusTypes, obtained from the CS. They 
    are synchronized with status 'Unknown' and reason 'Synchronized'.

    :Parameters:
      **elementFamily** - str
        any of the valid element families : Site, Resource, Node
      **elementType** - str
        any of the valid element types for <elementFamily>
      **toBeAdded** - list
        list with the elements to be added  
    
    :return: S_OK | S_ERROR    
    """
    
    self.log.info( 'Adding %s %s:' % ( elementFamily, elementType ) )
    self.log.info( toBeAdded )
    
    statusTypes = self.rssConfig.getConfigStatusType( elementType )

    for element in toBeAdded:
      
      for statusType in statusTypes:
  
        resInsert = self.rStatus.addIfNotThereStatusElement( elementFamily, 'Status', 
                                                             name        = element, 
                                                             statusType  = statusType, 
                                                             status      = 'Unknown', 
                                                             elementType = elementType, 
                                                             reason      = 'Synchronized')

        if not resInsert[ 'OK' ]:
          return resInsert
    
    return S_OK()
    
#...............................................................................    
 
#  
#  def _syncUsers( self ):
#    '''
#      Sync Users: compares CS with DB and does the necessary modifications.
#    '''    
#    
#    gLogger.verbose( '-- Synchronizing users --')
#    
#    usersCS = CSHelpers.getRegistryUsers()
#    if not usersCS[ 'OK' ]:
#      return usersCS
#    usersCS = usersCS[ 'Value' ]
#    
#    gLogger.verbose( '%s users found in CS' % len( usersCS ) )
#    
#    usersDB = self.rManagement.selectUserRegistryCache( meta = { 'columns' : [ 'login' ] } ) 
#    if not usersDB[ 'OK' ]:
#      return usersDB    
#    usersDB = [ userDB[0] for userDB in usersDB[ 'Value' ] ]
#    
#    # Users that are in DB but not in CS
#    toBeDeleted = list( set( usersDB ).difference( set( usersCS.keys() ) ) )
#    gLogger.verbose( '%s users to be deleted' % len( toBeDeleted ) )
#    
#    # Delete users
#    # FIXME: probably it is not needed since there is a DatabaseCleanerAgent
#    for userLogin in toBeDeleted:
#      
#      deleteQuery = self.rManagement.deleteUserRegistryCache( login = userLogin )
#      
#      gLogger.verbose( '... %s' % userLogin )
#      if not deleteQuery[ 'OK' ]:
#        return deleteQuery      
#     
#    # AddOrModify Users 
#    for userLogin, userDict in usersCS.items():
#      
#      _name  = userDict[ 'DN' ].split( '=' )[ -1 ]
#      _email = userDict[ 'Email' ]
#      
#      query = self.rManagement.addOrModifyUserRegistryCache( userLogin, _name, _email )
#      gLogger.verbose( '-> %s' % userLogin )
#      if not query[ 'OK' ]:
#        return query     
#  
#    return S_OK()
    
################################################################################
#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF  
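
# A standalone note on the fallback idiom used in sync() above, e.g.
# ``syncSites = ( syncSites[ 'OK' ] and syncSites[ 'Value' ] ) or defSyncResult``:
# it substitutes the default both when the call failed and when 'Value' is falsy,
# which is the intended behaviour here. A minimal sketch:

def valueOrDefault( result, default ):
  """ Return result[ 'Value' ] on success, otherwise <default> """
  return ( result[ 'OK' ] and result[ 'Value' ] ) or default

# valueOrDefault( { 'OK' : True, 'Value' : { 'added' : [ 'x' ], 'deleted' : [] } }, {} )
#   -> { 'added' : [ 'x' ], 'deleted' : [] }
# valueOrDefault( { 'OK' : False, 'Message' : 'error' }, { 'added' : [], 'deleted' : [] } )
#   -> { 'added' : [], 'deleted' : [] }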
Ejemplo n.º 52
0
    def export_killPilot(self, pilotRefList):
        """ Kill the specified pilots
    """
        # Make a list if it is not one yet
        if type(pilotRefList) in StringTypes:
            pilotRefs = [pilotRefList]
        else:
            pilotRefs = list(pilotRefList)

        # Regroup pilots per site and per owner
        pilotRefDict = {}
        for pilotReference in pilotRefs:
            result = pilotDB.getPilotInfo(pilotReference)
            if not result['OK'] or not result['Value']:
                return S_ERROR('Failed to get info for pilot ' +
                               pilotReference)

            pilotDict = result['Value'][pilotReference]
            owner = pilotDict['OwnerDN']
            group = pilotDict['OwnerGroup']
            queue = '@@@'.join([
                owner, group, pilotDict['GridSite'],
                pilotDict['DestinationSite'], pilotDict['Queue']
            ])
            gridType = pilotDict['GridType']
            pilotRefDict.setdefault(queue, {})
            pilotRefDict[queue].setdefault('PilotList', [])
            pilotRefDict[queue]['PilotList'].append(pilotReference)
            pilotRefDict[queue]['GridType'] = gridType

        # Do the work now queue by queue
        ceFactory = ComputingElementFactory()
        failed = []
        for key, pilotDict in pilotRefDict.items():

            owner, group, site, ce, queue = key.split('@@@')
            result = Resources(group=group).getQueueDescription(queue)
            if not result['OK']:
                return result
            queueDict = result['Value']
            gridType = pilotDict['GridType']
            result = ceFactory.getCE(gridType, ce, queueDict)
            if not result['OK']:
                return result
            ce = result['Value']

            if gridType in ["LCG", "gLite", "CREAM"]:
                group = getGroupOption(group, 'VOMSRole', group)
                ret = gProxyManager.getPilotProxyFromVOMSGroup(owner, group)
                if not ret['OK']:
                    gLogger.error(ret['Message'])
                    gLogger.error('Could not get proxy:',
                                  'User "%s", Group "%s"' % (owner, group))
                    return S_ERROR("Failed to get the pilot's owner proxy")
                proxy = ret['Value']
                ce.setProxy(proxy)

            pilotList = pilotDict['PilotList']
            result = ce.killJob(pilotList)
            if not result['OK']:
                failed.extend(pilotList)

        if failed:
            return S_ERROR('Failed to kill some of the pilots')

        return S_OK()
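
# A minimal client-side sketch (hedged: the import and service path follow the
# usual DIRAC conventions for this release; adapt them to your installation) of
# invoking the exported method above over DISET.
from DIRAC.Core.DISET.RPCClient import RPCClient

def killPilots(pilotRefs):
    """ Ask the WMSAdministrator service to kill the given pilot references """
    wmsAdmin = RPCClient('WorkloadManagement/WMSAdministrator')
    return wmsAdmin.killPilot(pilotRefs)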
Ejemplo n.º 53
0
class FileCatalog:

    ro_methods = [
        'exists', 'isLink', 'readLink', 'isFile', 'getFileMetadata',
        'getReplicas', 'getReplicaStatus', 'getFileSize', 'isDirectory',
        'getDirectoryReplicas', 'listDirectory', 'getDirectoryMetadata',
        'getDirectorySize', 'getDirectoryContents', 'resolveDataset',
        'getPathPermissions', 'getLFNForPFN', 'getUsers', 'getGroups',
        'getFileUserMetadata'
    ]

    write_methods = [
        'createLink', 'removeLink', 'addFile', 'setFileStatus', 'addReplica',
        'removeReplica', 'removeFile', 'setReplicaStatus', 'setReplicaHost',
        'createDirectory', 'setDirectoryStatus', 'removeDirectory',
        'removeDataset', 'removeFileFromDataset', 'createDataset'
    ]

    def __init__(self, catalogs=[], vo=None):
        """ Default constructor
    """
        self.valid = True
        self.timeout = 180
        self.readCatalogs = []
        self.writeCatalogs = []
        self.vo = vo
        if not vo:
            result = getVOfromProxyGroup()
            if not result['OK']:
                # __init__ cannot return a result structure; just flag the object invalid
                self.valid = False
                return
            self.vo = result['Value']
        self.opHelper = Operations(vo=self.vo)
        self.reHelper = Resources(vo=self.vo)

        if type(catalogs) in types.StringTypes:
            catalogs = [catalogs]
        if catalogs:
            res = self._getSelectedCatalogs(catalogs)
        else:
            res = self._getCatalogs()
        if not res['OK']:
            self.valid = False
        elif (len(self.readCatalogs) == 0) and (len(self.writeCatalogs) == 0):
            self.valid = False

    def isOK(self):
        return self.valid

    def getReadCatalogs(self):
        return self.readCatalogs

    def getWriteCatalogs(self):
        return self.writeCatalogs

    def __getattr__(self, name):
        self.call = name
        if name in FileCatalog.write_methods:
            return self.w_execute
        elif name in FileCatalog.ro_methods:
            return self.r_execute
        else:
            raise AttributeError(name)

    def __checkArgumentFormat(self, path):
        if type(path) in types.StringTypes:
            urls = {path: False}
        elif type(path) == types.ListType:
            urls = {}
            for url in path:
                urls[url] = False
        elif type(path) == types.DictType:
            urls = path
        else:
            return S_ERROR(
                "FileCatalog.__checkArgumentFormat: Supplied path is not of the correct format."
            )
        return S_OK(urls)
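
    # Illustrative note (not from the original source): the three accepted input
    # forms all normalize to one dictionary shape, e.g.
    #
    #   '/lfn/a'                  -> { '/lfn/a' : False }
    #   [ '/lfn/a', '/lfn/b' ]    -> { '/lfn/a' : False, '/lfn/b' : False }
    #   { '/lfn/a' : metadata }   -> passed through unchanged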

    def w_execute(self, *parms, **kws):
        """ Write method executor.
    """
        successful = {}
        failed = {}
        failedCatalogs = []
        fileInfo = parms[0]
        res = self.__checkArgumentFormat(fileInfo)
        if not res['OK']:
            return res
        fileInfo = res['Value']
        allLfns = fileInfo.keys()
        for catalogName, oCatalog, master in self.writeCatalogs:
            method = getattr(oCatalog, self.call)
            res = method(fileInfo, **kws)
            if not res['OK']:
                if master:
                    # If this is the master catalog and it fails we don't want to continue with the other catalogs
                    gLogger.error(
                        "FileCatalog.w_execute: Failed to execute %s on master catalog %s."
                        % (self.call, catalogName), res['Message'])
                    return res
                else:
                    # Otherwise we keep the failed catalogs so we can update their state later
                    failedCatalogs.append((catalogName, res['Message']))
            else:
                for lfn, message in res['Value']['Failed'].items():
                    # Save the error message for the failed operations
                    failed.setdefault(lfn, {})[catalogName] = message
                    if master:
                        # If this is the master catalog then we should not attempt the operation on other catalogs
                        fileInfo.pop(lfn)
                for lfn, result in res['Value']['Successful'].items():
                    # Save the result returned for each file for the successful operations
                    successful.setdefault(lfn, {})[catalogName] = result
        # This recovers the states of the files that completely failed i.e. when S_ERROR is returned by a catalog
        for catalogName, errorMessage in failedCatalogs:
            for lfn in allLfns:
                failed.setdefault(lfn, {})[catalogName] = errorMessage
        resDict = {'Failed': failed, 'Successful': successful}
        return S_OK(resDict)

    def r_execute(self, *parms, **kws):
        """ Read method executor.
    """
        successful = {}
        failed = {}
        for catalogTuple in self.readCatalogs:
            oCatalog = catalogTuple[1]
            method = getattr(oCatalog, self.call)
            res = method(*parms, **kws)
            if res['OK']:
                if 'Successful' in res['Value']:
                    for key, item in res['Value']['Successful'].items():
                        if key not in successful:
                            successful[key] = item
                            failed.pop(key, None)
                    for key, item in res['Value']['Failed'].items():
                        if key not in successful:
                            failed[key] = item
                    if not failed:
                        resDict = {'Failed': failed, 'Successful': successful}
                        return S_OK(resDict)
                else:
                    return res
        if not successful and not failed:
            return S_ERROR('Failed to perform %s from any catalog' % self.call)
        resDict = {'Failed': failed, 'Successful': successful}
        return S_OK(resDict)
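
    # Illustrative usage (not from the original source; LFNs are hypothetical):
    # read and write methods are dispatched through __getattr__ to the executors
    # above, so the catalogue is used as if the methods were defined directly.
    #
    #   >>> fc = FileCatalog()
    #   >>> res = fc.getReplicas( [ '/myvo/user/a/alice/file.dat' ] )   # r_execute
    #   >>> res = fc.removeFile( [ '/myvo/user/a/alice/file.dat' ] )    # w_execute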

    ###########################################################################################
    #
    # Below is the method for obtaining the objects instantiated for a provided catalogue configuration
    #

    def addCatalog(self, catalogName, mode="Write", master=False):
        """ Add a new catalog with catalogName to the pool of catalogs in mode:
        "Read","Write" or "ReadWrite"
    """

        result = self._generateCatalogObject(catalogName)
        if not result['OK']:
            return result

        oCatalog = result['Value']
        if mode.lower().find("read") != -1:
            self.readCatalogs.append((catalogName, oCatalog, master))
        if mode.lower().find("write") != -1:
            self.writeCatalogs.append((catalogName, oCatalog, master))

        return S_OK()

    def removeCatalog(self, catalogName):
        """ Remove the specified catalog from the internal pool
    """

        catalog_removed = False

        for i in range(len(self.readCatalogs)):
            catalog = self.readCatalogs[i][0]
            if catalog == catalogName:
                del self.readCatalogs[i]
                catalog_removed = True
                break
        for i in range(len(self.writeCatalogs)):
            catalog = self.writeCatalogs[i][0]
            if catalog == catalogName:
                del self.writeCatalogs[i]
                catalog_removed = True
                break

        if catalog_removed:
            return S_OK()
        else:
            return S_OK('Catalog does not exist')

    def _getSelectedCatalogs(self, desiredCatalogs):
        for catalogName in desiredCatalogs:
            res = self._generateCatalogObject(catalogName)
            if not res['OK']:
                return res
            oCatalog = res['Value']
            self.readCatalogs.append((catalogName, oCatalog, True))
            self.writeCatalogs.append((catalogName, oCatalog, True))
        return S_OK()

    def _getCatalogs(self):

        # Get the eligible catalogs first
        # First, look in the Operations, if nothing defined look in /Resources
        result = self.opHelper.getSections('/Services/Catalogs')
        fileCatalogs = []
        operationsFlag = False
        if result['OK']:
            fileCatalogs = result['Value']
            operationsFlag = True
        else:
            res = self.reHelper.getEligibleResources('Catalog')
            if not res['OK']:
                errStr = "FileCatalog._getCatalogs: Failed to get file catalog configuration."
                gLogger.error(errStr, res['Message'])
                return S_ERROR(errStr)
            fileCatalogs = res['Value']

        # Get the catalogs now
        for catalogName in fileCatalogs:
            res = self._getCatalogConfigDetails(catalogName)
            if not res['OK']:
                return res
            catalogConfig = res['Value']
            if operationsFlag:
                result = self.opHelper.getOptionsDict('/Services/Catalogs/%s' %
                                                      catalogName)
                if not result['OK']:
                    return result
                catalogConfig.update(result['Value'])
            if catalogConfig['Status'] == 'Active':
                res = self._generateCatalogObject(catalogName)
                if not res['OK']:
                    return res
                oCatalog = res['Value']
                master = catalogConfig['Master']
                # If the catalog is read type
                if re.search('Read', catalogConfig['AccessType']):
                    if master:
                        self.readCatalogs.insert(
                            0, (catalogName, oCatalog, master))
                    else:
                        self.readCatalogs.append(
                            (catalogName, oCatalog, master))
                # If the catalog is write type
                if re.search('Write', catalogConfig['AccessType']):
                    if master:
                        self.writeCatalogs.insert(
                            0, (catalogName, oCatalog, master))
                    else:
                        self.writeCatalogs.append(
                            (catalogName, oCatalog, master))
        return S_OK()

    def _getCatalogConfigDetails(self, catalogName):
        # First obtain the options that are available

        result = self.reHelper.getCatalogOptionsDict(catalogName)
        if not result['OK']:
            errStr = "FileCatalog._getCatalogConfigDetails: Failed to get catalog options"
            gLogger.error(errStr, catalogName)
            return S_ERROR(errStr)
        catalogConfig = result['Value']
        # The 'Status' option should be defined (default = 'Active')
        if 'Status' not in catalogConfig:
            warnStr = "FileCatalog._getCatalogConfigDetails: 'Status' option not defined"
            gLogger.warn(warnStr, catalogName)
            catalogConfig['Status'] = 'Active'
        # The 'AccessType' option must be defined
        if 'AccessType' not in catalogConfig:
            errStr = "FileCatalog._getCatalogConfigDetails: Required option 'AccessType' not defined"
            gLogger.error(errStr, catalogName)
            return S_ERROR(errStr)
        # Anything other than 'True' in the 'Master' option means the catalog is not the master
        catalogConfig['Master'] = catalogConfig.get('Master') == 'True'
        return S_OK(catalogConfig)
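
    # For reference (illustrative values, not from the original source): a typical
    # catalogue configuration returned above might look like
    #
    #   { 'Status'     : 'Active',
    #     'AccessType' : 'Read-Write',
    #     'Master'     : True }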

    def _generateCatalogObject(self, catalogName):
        """ Create a file catalog object from its name and CS description
    """
        useProxy = gConfig.getValue(
            '/LocalSite/Catalogs/%s/UseProxy' % catalogName, False)
        if not useProxy:
            useProxy = self.opHelper.getValue(
                '/Services/Catalogs/%s/UseProxy' % catalogName, False)
        return FileCatalogFactory().createCatalog(catalogName, useProxy)
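
# A minimal end-to-end sketch (hedged: the catalogue and VO names are hypothetical)
# of constructing the FileCatalog above against a selected catalogue set.
def getUserCatalog(vo='myvo'):
    """ Build a FileCatalog restricted to a single named catalogue """
    fc = FileCatalog(catalogs=['FileCatalog'], vo=vo)
    if not fc.isOK():
        return S_ERROR('FileCatalog could not be initialized')
    return S_OK(fc)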