Example No. 1
    def __init__(self, useProxy=False, vo=None):

        self.valid = True
        self.proxy = useProxy
        self.resourceStatus = ResourceStatus()
        self.resourcesHelper = Resources(vo=vo)
Example No. 2
  def __init__( self, useProxy=False ):

    self.rootConfigPath = '/Resources/StorageElements'
    self.valid = True
    self.proxy = useProxy
    self.resourceStatus = ResourceStatus()
Example No. 3
  def __init__( self, configSection, channels=None, bandwidths=None, failedFiles=None ):
    """c'tor

    :param self: self reference
    :param str configSection: path on CS to ReplicationScheduler agent
    :param dict bandwidths: observed throughput on active channels
    :param channels: active channels
    :param dict failedFiles: distinct failed files per channel
    """
    ## save config section
    self.configSection = configSection + "/" + self.__class__.__name__
    ## sublogger
    self.log = gLogger.getSubLogger( "StrategyHandler", child=True )
    self.log.setLevel( gConfig.getValue( self.configSection + "/LogLevel", "DEBUG"  ) )
  
    self.supportedStrategies = [ 'Simple', 'DynamicThroughput', 'Swarm', 'MinimiseTotalWait' ]
    self.log.info( "Supported strategies = %s" % ", ".join( self.supportedStrategies ) )
  
    self.sigma = gConfig.getValue( self.configSection + '/HopSigma', 0.0 )
    self.log.info( "HopSigma = %s" % self.sigma )
    self.schedulingType = gConfig.getValue( self.configSection + '/SchedulingType', 'File' )
    self.log.info( "SchedulingType = %s" % self.schedulingType )
    self.activeStrategies = gConfig.getValue( self.configSection + '/ActiveStrategies', ['MinimiseTotalWait'] )
    self.log.info( "ActiveStrategies = %s" % ", ".join( self.activeStrategies ) )
    self.numberOfStrategies = len( self.activeStrategies )
    self.log.info( "Number of active strategies = %s" % self.numberOfStrategies )
    self.acceptableFailureRate = gConfig.getValue( self.configSection + '/AcceptableFailureRate', 75 )
    self.log.info( "AcceptableFailureRate = %s" % self.acceptableFailureRate )
    self.acceptableFailedFiles = gConfig.getValue( self.configSection + "/AcceptableFailedFiles", 5 )
    self.log.info( "AcceptableFailedFiles = %s" % self.acceptableFailedFiles )
    self.rwUpdatePeriod = gConfig.getValue( self.configSection + "/RssRWUpdatePeriod", 300 )
    self.log.info( "RSSUpdatePeriod = %s s" % self.rwUpdatePeriod )
    self.rwUpdatePeriod = datetime.timedelta( seconds=self.rwUpdatePeriod )
    ## bandwidths
    self.bandwidths = bandwidths if bandwidths else {}
    ## channels
    self.channels = channels if channels else {}
    ## distinct failed files per channel 
    self.failedFiles = failedFiles if failedFiles else {}
    ## chosen strategy
    self.chosenStrategy = 0
    ## fts graph
    self.ftsGraph = None
    ## timestamp for last update
    self.lastRssUpdate = datetime.datetime.now()    
    # dispatcher
    self.strategyDispatcher = { "MinimiseTotalWait" : self.minimiseTotalWait, 
                                "DynamicThroughput" : self.dynamicThroughput,
                                "Simple" : self.simple, 
                                "Swarm" : self.swarm }
    ## own RSS client
    self.resourceStatus = ResourceStatus()
    ## create fts graph
    ftsGraph = self.setup( self.channels, self.bandwidths, self.failedFiles )    
    if not ftsGraph["OK"]:
      raise SHGraphCreationError( ftsGraph["Message"] )
    self.log.info("%s has been constructed" % self.__class__.__name__ )
Example No. 4
  def __init__( self ):

    self.rootConfigPath = '/Resources/StorageElements'
    self.valid = True
    self.proxy = False
    res = gConfig.getOption( "%s/UseProxy" % self.rootConfigPath )
    if res['OK'] and ( res['Value'] == 'True' ):
      self.proxy = True
    self.resourceStatus = ResourceStatus()
Example No. 5
  def __init__( self, configSection, bandwidths=None, channels=None, failedFiles=None ):
    """c'tor

    :param self: self reference
    :param str configSection: path on CS to ReplicationScheduler agent
    :param dict bandwidths: observed throughput on active channels
    :param channels: active channels
    :param dict failedFiles: distinct failed files per channel
    """
    ## save config section
    self.configSection = configSection + "/" + self.__class__.__name__
    ## sublogger
    self.log = gLogger.getSubLogger( "StrategyHandler", child=True )
    self.log.setLevel( gConfig.getValue( self.configSection + "/LogLevel", "DEBUG"  ) )
  
    self.supportedStrategies = [ 'Simple', 'DynamicThroughput', 'Swarm', 'MinimiseTotalWait' ]
    self.log.debug( "Supported strategies = %s" % ", ".join( self.supportedStrategies ) )
  
    self.sigma = gConfig.getValue( self.configSection + '/HopSigma', 0.0 )
    self.log.debug( "HopSigma = %s" % self.sigma )
    self.schedulingType = gConfig.getValue( self.configSection + '/SchedulingType', 'File' )
    self.log.debug( "SchedulingType = %s" % self.schedulingType )
    self.activeStrategies = gConfig.getValue( self.configSection + '/ActiveStrategies', ['MinimiseTotalWait'] )
    self.log.debug( "ActiveStrategies = %s" % ", ".join( self.activeStrategies ) )
    self.numberOfStrategies = len( self.activeStrategies )
    self.log.debug( "Number of active strategies = %s" % self.numberOfStrategies )
    self.acceptableFailureRate = gConfig.getValue( self.configSection + '/AcceptableFailureRate', 75 )
    self.log.debug( "AcceptableFailureRate = %s" % self.acceptableFailureRate )
    self.acceptableFailedFiles = gConfig.getValue( self.configSection + "/AcceptableFailedFiles", 5 )
    self.log.debug( "AcceptableFailedFiles = %s" % self.acceptableFailedFiles )

    self.bandwidths = bandwidths if bandwidths else {}
    self.channels = channels if channels else {}
    self.failedFiles = failedFiles if failedFiles else {}
    self.chosenStrategy = 0

    # dispatcher
    self.strategyDispatcher = { re.compile("MinimiseTotalWait") : self.__minimiseTotalWait, 
                                re.compile("DynamicThroughput") : self.__dynamicThroughput,
                                re.compile("Simple") : self.__simple, 
                                re.compile("Swarm") : self.__swarm }

    self.resourceStatus = ResourceStatus()

    self.log.debug( "strategyDispatcher entries:" )
    for key, value in self.strategyDispatcher.items():
      self.log.debug( "%s : %s" % ( key.pattern, value.__name__ ) )

    self.log.debug("%s has been constructed" % self.__class__.__name__ )
Example No. 6
  def __init__( self, csPath = None, ftsHistoryViews = None ):
    """
        Call the init of the parent, and initialize the list of FTS3 servers
    """

    self.log = gLogger.getSubLogger( "FTS3Placement" )
    super( FTS3Placement, self ).__init__( csPath = csPath, ftsHistoryViews = ftsHistoryViews )
    srvList = getFTS3Servers()
    if not srvList['OK']:
      self.log.error( srvList['Message'] )

    self.__serverList = srvList.get( 'Value', [] )
    self.maxAttempts = len( self.__serverList )

    self.rssClient = ResourceStatus()
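
The snippet only builds the server list and the derived `maxAttempts`; how they are consumed is not shown here. One plausible (purely hypothetical) consumer is a failover loop that tries each server at most once, sketched below with a dummy `submit` callable:

def submitWithFailover(serverList, maxAttempts, submit):
  """ try submit(server) on each server in turn, give up after maxAttempts failures (hypothetical helper) """
  for attempt in range(maxAttempts):
    server = serverList[attempt % len(serverList)]
    if submit(server):
      return server
  return None

servers = ["https://fts3-a.example.org:8446", "https://fts3-b.example.org:8446"]
# dummy submit function that only accepts the second server
print(submitWithFailover(servers, len(servers), lambda srv: "fts3-b" in srv))
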
Example No. 7
  def initializeOptimizer( self ):
    """Initialize specific parameters for JobSanityAgent.
    """
    self.failedMinorStatus = self.am_getOption( '/FailedJobStatus', 'Input Data Not Available' )
    # this will ignore failover SE files
    self.checkFileMetadata = self.am_getOption( 'CheckFileMetadata', True )

    self.dataManager = DataManager()
    self.resourceStatus = ResourceStatus()
    self.fc = FileCatalog()

    self.seToSiteMapping = {}
    self.lastCScheck = 0
    self.cacheLength = 600

    return S_OK()
Example No. 8
 def __init__( self, useProxy = False, vo = None ):
   self.rootConfigPath = '/Resources/StorageElements'
   self.proxy = useProxy
   self.resourceStatus = ResourceStatus()
   self.vo = vo
   if self.vo is None:
     result = getVOfromProxyGroup()
     if result['OK']:
       self.vo = result['Value']
     else:
        raise RuntimeError( "Cannot get the current VO context" )
   self.remotePlugins = []
   self.localPlugins = []
   self.name = ''
   self.options = {}
   self.protocolDetails = []
   self.storages = []
Example No. 9
  def __init__(self, serverDict, serverPolicy="Random"):
    """
        Initialize the list of FTS3 servers and the server selection policy
    """

    self.log = gLogger.getSubLogger("FTS3ServerPolicy")

    self._serverDict = serverDict
    self._serverList = serverDict.keys()
    self._maxAttempts = len(self._serverList)
    self._nextServerID = 0
    self._resourceStatus = ResourceStatus()

    methName = "_%sServerPolicy" % serverPolicy.lower()
    if not hasattr(self, methName):
      self.log.error('Unknown server policy %s. Using Random instead' % serverPolicy)
      methName = "_randomServerPolicy"

    self._policyMethod = getattr(self, methName)
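
The policy method is selected purely by naming convention: a `serverPolicy` of `'Sequential'` resolves to `_sequentialServerPolicy` through `getattr`, with `_randomServerPolicy` as the fallback for unknown names. A self-contained sketch of that convention-based dispatch (placeholder policies, not the DIRAC class):

import random

class MiniServerPolicy(object):

  def __init__(self, servers, serverPolicy="Random"):
    self._serverList = list(servers)
    methName = "_%sServerPolicy" % serverPolicy.lower()
    if not hasattr(self, methName):
      # unknown policy name: fall back to random, as in the snippet above
      methName = "_randomServerPolicy"
    self._policyMethod = getattr(self, methName)

  def _randomServerPolicy(self):
    return random.choice(self._serverList)

  def _sequentialServerPolicy(self):
    return self._serverList[0]

  def chooseServer(self):
    return self._policyMethod()

print(MiniServerPolicy(["srvA", "srvB"], "Sequential").chooseServer())
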
Example No. 10
class StrategyHandler( object ):
  """
  .. class:: StrategyHandler

  StrategyHandler is a helper class for determining optimal replication tree for given
  source files, their replicas and target storage elements.
  """

  def __init__( self, configSection, channels=None, bandwidths=None, failedFiles=None ):
    """c'tor

    :param self: self reference
    :param str configSection: path on CS to ReplicationScheduler agent
    :param dict bandwidths: observed throughput on active channels
    :param channels: active channels
    :param dict failedFiles: distinct failed files per channel
    """
    ## save config section
    self.configSection = configSection + "/" + self.__class__.__name__
    ## sublogger
    self.log = gLogger.getSubLogger( "StrategyHandler", child=True )
    self.log.setLevel( gConfig.getValue( self.configSection + "/LogLevel", "DEBUG"  ) )
  
    self.supportedStrategies = [ 'Simple', 'DynamicThroughput', 'Swarm', 'MinimiseTotalWait' ]
    self.log.info( "Supported strategies = %s" % ", ".join( self.supportedStrategies ) )
  
    self.sigma = gConfig.getValue( self.configSection + '/HopSigma', 0.0 )
    self.log.info( "HopSigma = %s" % self.sigma )
    self.schedulingType = gConfig.getValue( self.configSection + '/SchedulingType', 'File' )
    self.log.info( "SchedulingType = %s" % self.schedulingType )
    self.activeStrategies = gConfig.getValue( self.configSection + '/ActiveStrategies', ['MinimiseTotalWait'] )
    self.log.info( "ActiveStrategies = %s" % ", ".join( self.activeStrategies ) )
    self.numberOfStrategies = len( self.activeStrategies )
    self.log.info( "Number of active strategies = %s" % self.numberOfStrategies )
    self.acceptableFailureRate = gConfig.getValue( self.configSection + '/AcceptableFailureRate', 75 )
    self.log.info( "AcceptableFailureRate = %s" % self.acceptableFailureRate )
    self.acceptableFailedFiles = gConfig.getValue( self.configSection + "/AcceptableFailedFiles", 5 )
    self.log.info( "AcceptableFailedFiles = %s" % self.acceptableFailedFiles )
    self.rwUpdatePeriod = gConfig.getValue( self.configSection + "/RssRWUpdatePeriod", 300 )
    self.log.info( "RSSUpdatePeriod = %s s" % self.rwUpdatePeriod )
    self.rwUpdatePeriod = datetime.timedelta( seconds=self.rwUpdatePeriod )
    ## bandwidths
    self.bandwidths = bandwidths if bandwidths else {}
    ## channels
    self.channels = channels if channels else {}
    ## distinct failed files per channel 
    self.failedFiles = failedFiles if failedFiles else {}
    ## chosen strategy
    self.chosenStrategy = 0
    ## fts graph
    self.ftsGraph = None
    ## timestamp for last update
    self.lastRssUpdate = datetime.datetime.now()    
    # dispatcher
    self.strategyDispatcher = { "MinimiseTotalWait" : self.minimiseTotalWait, 
                                "DynamicThroughput" : self.dynamicThroughput,
                                "Simple" : self.simple, 
                                "Swarm" : self.swarm }
    ## own RSS client
    self.resourceStatus = ResourceStatus()
    ## create fts graph
    ftsGraph = self.setup( self.channels, self.bandwidths, self.failedFiles )    
    if not ftsGraph["OK"]:
      raise SHGraphCreationError( ftsGraph["Message"] )
    self.log.info("%s has been constructed" % self.__class__.__name__ )

  def setup( self, channels, bandwidths, failedFiles ):
    """ prepare fts graph 

    :param dict channels: { channelID : { "Files" : long, "Size" : long, "ChannelName" : str,
                                          "Source" : str, "Destination" : str, "Status" : str } }
    :param dict bandwidths: { channelID : { "Throughput" : float, "Fileput" : float, "SuccessfulFiles" : long, "FailedFiles" : long } }
    :param dict failedFiles: { channelID : int }

    channelInfo { channelName : { "ChannelID" : int, "TimeToStart" : float} }  
    """
    graph = FTSGraph( "sites" )
   
    result = getStorageElementSiteMapping()
    if not result['OK']:
      return result
    sitesDict = result['Value']

    ## create nodes 
    for site, ses in sitesDict.items():
      rwDict = self.__getRWAccessForSE( ses )
      if not rwDict["OK"]:
        return rwDict
      siteName = site
      if '.' in site:
        siteName = site.split('.')[1]  
      graph.addNode( LCGSite( siteName, { "SEs" : rwDict["Value"] } ) )
    ## channels { channelID : { "Files" : long, "Size" : long, "ChannelName" : str,
    ##                          "Source" : str, "Destination" : str, "Status" : str } }
    ## bandwidths { channelID : { "Throughput" : float, "Fileput" : float,
    ##                            "SuccessfulFiles" : long, "FailedFiles" : long } }
    ## channelInfo { channelName : { "ChannelID" : int, "TimeToStart" : float } }
    ## channelInfo { channelName : { "ChannelID" : int, "TimeToStart" : float} }
    for channelID, channelDict in channels.items():
      sourceName = channelDict["Source"]
      destName = channelDict["Destination"]
      fromNode = graph.getNode( sourceName )
      toNode = graph.getNode( destName )
      if fromNode and toNode:  
        rwAttrs = { "status" : channels[channelID]["Status"], 
                    "files" : channelDict["Files"],
                    "size" : channelDict["Size"],
                    "successfulAttempts" : bandwithds[channelID]["SuccessfulFiles"], 
                    "failedAttempts" : bandwithds[channelID]["FailedFiles"], 
                    "distinctFailedFiles" : failedFiles.get( channelID, 0 ),
                    "fileput" : bandwithds[channelID]["Fileput"], 
                    "throughput" : bandwithds[channelID]["Throughput"] }
        roAttrs = { "channelID" : channelID,
                    "channelName" : channelDict["ChannelName"],
                    "acceptableFailureRate" : self.acceptableFailureRate,
                    "acceptableFailedFiles" : self.acceptableFailedFiles,
                    "schedulingType" : self.schedulingType }
        ftsChannel = FTSChannel( fromNode, toNode, rwAttrs, roAttrs )
        graph.addEdge( ftsChannel ) 
    self.ftsGraph = graph
    self.lastRssUpdate = datetime.datetime.now()
    return S_OK()

  def updateGraph( self, rwAccess=False, replicationTree=None, size=0.0 ):
    """ update rw access for nodes (sites) and size anf files for edges (channels) """
    replicationTree = replicationTree if replicationTree else {}
    size = size if size else 0.0
    ## update nodes rw access for SEs
    if rwAccess:
      for lcgSite in self.ftsGraph.nodes():
        rwDict = self.__getRWAccessForSE( lcgSite.SEs.keys() )
        if not rwDict["OK"]:
          return rwDict
        lcgSite.SEs = rwDict["Value"]
    ## update channels size and files
    if replicationTree:
      for channel in self.ftsGraph.edges():
        if channel.channelID in replicationTree:
          channel.size += size 
          channel.files += 1
    return S_OK()
          
  def simple( self, sourceSEs, targetSEs ):
    """ simple strategy - one source, many targets

    :param list sourceSEs: list with only one sourceSE name
    :param list targetSEs: list with target SE names
    """
    ## sanity check: exactly one source SE allowed
    if len(sourceSEs) != 1:
      return S_ERROR( "simple: wrong argument supplied for sourceSEs, only one sourceSE allowed" )
    sourceSE = sourceSEs[0]
    tree = {}
    for targetSE in targetSEs:
      channel = self.ftsGraph.findChannel( sourceSE, targetSE )
      if not channel["OK"]:
        return S_ERROR( channel["Message"] )
      channel = channel["Value"]
      if not channel.fromNode.SEs[sourceSE]["read"]:
        return S_ERROR( "simple: sourceSE '%s' in banned for reading rigth now" % sourceSE )
      if not channel.toNode.SEs[targetSE]["write"]:
        return S_ERROR( "simple: targetSE '%s' is banned for writing rigth now" % targetSE )
      if channel.channelID in tree:
        return S_ERROR( "simple: unable to create replication tree, channel '%s' cannot be used twice" %\
                          channel.channelName )      
      tree[channel.channelID] = { "Ancestor" : False, "SourceSE" : sourceSE, 
                                  "DestSE" : targetSE, "Strategy" : "Simple" } 

    return S_OK(tree)
    
  def swarm( self, sourceSEs, targetSEs ):
    """ swarm strategy - one target, many sources, pick up the fastest 
    
    :param list sourceSEs: list of source SE names
    :param list targetSEs: one-element list with the name of the target SE
    """
    tree = {}
    channels = []
    if len(targetSEs) > 1:
      return S_ERROR("swarm: wrong argument supplied for targetSEs, only one targetSE allowed")
    targetSE = targetSEs[0]
    ## find channels
    for sourceSE in sourceSEs:
      channel = self.ftsGraph.findChannel( sourceSE, targetSE )
      if not channel["OK"]:
        self.log.warn( "swarm: %s" % channel["Message"] )
        continue
      channels.append( ( sourceSE, channel["Value"] ) )      
    ## exit - no channels 
    if not channels:
      return S_ERROR("swarm: unable to find FTS channels between '%s' and '%s'" % ( ",".join(sourceSEs), targetSE ) )
    ## filter out non active channels 
    channels = [ ( sourceSE, channel ) for sourceSE, channel in channels 
                 if channel.fromNode.SEs[sourceSE]["read"] and channel.toNode.SEs[targetSE]["write"] and 
                 channel.status == "Active" and channel.timeToStart < float("inf") ]
    ## exit - no active channels 
    if not channels:
      return S_ERROR( "swarm: no active channels found between %s and %s" % ( sourceSEs, targetSE ) )
    
    ## find min timeToStart
    minTimeToStart = float("inf")
    selSourceSE = selChannel = None
    for sourceSE, ftsChannel in channels:
      if ftsChannel.timeToStart < minTimeToStart:
        minTimeToStart = ftsChannel.timeToStart
        selSourceSE = sourceSE
        selChannel = ftsChannel
    
    if not selSourceSE:
      return S_ERROR( "swarm: no active channels found between %s and %s" % ( sourceSEs, targetSE ) )

    tree[selChannel.channelID] = { "Ancestor" : False, "SourceSE" : selSourceSE,
                                   "DestSE" : targetSE, "Strategy" : "Swarm" } 
    return S_OK( tree )
          
  def minimiseTotalWait( self, sourceSEs, targetSEs ):
    """ find dag that minimises start time 
    
    :param list sourceSEs: list of avialable source SEs
    :param list targetSEs: list of target SEs
    :param str lfn: logical file name
    :param dict metadata: file metadata read from catalogue
    """
    tree = {}
    primarySources = sourceSEs[:]  # copy, so targets appended to sourceSEs below do not count as primary
    while targetSEs:
      minTimeToStart = float("inf")
      channels = []
      for targetSE in targetSEs:
        for sourceSE in sourceSEs:
          ftsChannel = self.ftsGraph.findChannel( sourceSE, targetSE )
          if not ftsChannel["OK"]:
            self.log.warn( "minimiseTotalWait: %s" % ftsChannel["Message"] )
            continue 
          ftsChannel = ftsChannel["Value"]
          channels.append( ( ftsChannel, sourceSE, targetSE ) )
      if not channels:
        msg = "minimiseTotalWait: FTS channels between %s and %s not defined" % ( ",".join(sourceSEs), 
                                                                                  ",".join(targetSEs) )
        self.log.error( msg )
        return S_ERROR( msg )
      ## filter out already used channels 
      channels = [ (channel, sourceSE, targetSE) for channel, sourceSE, targetSE in channels 
                   if channel.channelID not in tree ]
      if not channels:
        msg = "minimiseTotalWait: all FTS channels between %s and %s are already used in tree" % ( ",".join(sourceSEs),
                                                                                                   ",".join(targetSEs) )
        self.log.error( msg )
        return S_ERROR( msg )
      
      self.log.debug("minimiseTotalWait: found %s candiate channels, checking activity" % len( channels) )
      channels = [ ( channel, sourceSE, targetSE ) for channel, sourceSE, targetSE in channels
                   if channel.fromNode.SEs[sourceSE]["read"] and channel.toNode.SEs[targetSE]["write"] 
                   and channel.status == "Active" and channel.timeToStart < float("inf") ]
      
      if not channels:
        self.log.error("minimiseTotalWait: no active FTS channels found" )
        return S_ERROR("minimiseTotalWait: no active FTS channels found" )
      
      candidates = []
      for channel, sourceSE, targetSE in channels:
        timeToStart = channel.timeToStart
        if sourceSE not in primarySources:
          timeToStart += self.sigma        
        ## local found 
        if channel.fromNode == channel.toNode:
          self.log.debug("minimiseTotalWait: found local channel '%s'" % channel.channelName )
          candidates = [ ( channel, sourceSE, targetSE ) ]
          break
        if timeToStart < minTimeToStart:
          minTimeToStart = timeToStart
          candidates = [ ( channel, sourceSE, targetSE ) ]
        elif timeToStart == minTimeToStart:
          candidates.append( (channel, sourceSE, targetSE ) )

      if not candidates:
        return S_ERROR("minimiseTotalWait: unable to find candidate FTS channels minimising total wait time")

      random.shuffle( candidates )
      selChannel, selSourceSE, selTargetSE = candidates[0]
      ancestor = False
      for channelID, treeItem in tree.items():
        if selSourceSE == treeItem["DestSE"]:
          ancestor = channelID
      tree[selChannel.channelID] = { "Ancestor" : ancestor,
                                     "SourceSE" : selSourceSE,
                                     "DestSE" : selTargetSE,
                                     "Strategy" : "MinimiseTotalWait" }
      sourceSEs.append( selTargetSE )
      targetSEs.remove( selTargetSE )

    return S_OK(tree)        

  def dynamicThroughput( self, sourceSEs, targetSEs ):
    """ dynamic throughput - many sources, many targets - find dag that minimises overall throughput 

    :param list sourceSEs: list of available source SE names
    :param list targetSE: list of target SE names
    :param str lfn: logical file name
    :param dict metadata: file metadata read from catalogue
    """
    tree = {}
    primarySources = sourceSEs[:]  # copy, so targets appended to sourceSEs below do not count as primary
    timeToSite = {}
    while targetSEs:
      minTimeToStart = float("inf")
      channels = []
      for targetSE in targetSEs:
        for sourceSE in sourceSEs:
          ftsChannel = self.ftsGraph.findChannel( sourceSE, targetSE )
          if not ftsChannel["OK"]:
            self.log.warn( "dynamicThroughput: %s" % ftsChannel["Message"] )
            continue 
          ftsChannel = ftsChannel["Value"]
          channels.append( ( ftsChannel, sourceSE, targetSE ) )
      ## no candidate channels found
      if not channels:
        msg = "dynamicThroughput: FTS channels between %s and %s are not defined" % ( ",".join(sourceSEs), 
                                                                                      ",".join(targetSEs) )
        self.log.error( msg )
        return S_ERROR( msg )
      ## filter out already used channels
      channels = [ (channel, sourceSE, targetSE) for channel, sourceSE, targetSE in channels 
                   if channel.channelID not in tree ]
      if not channels:
        msg = "dynamicThroughput: all FTS channels between %s and %s are already used in tree" % ( ",".join(sourceSEs), 
                                                                                                   ",".join(targetSEs) )
        self.log.error( msg )
        return S_ERROR( msg )
      ## filter out non-active channels
      self.log.debug("dynamicThroughput: found %s candidate channels, checking activity" % len(channels) )
      channels = [ ( channel, sourceSE, targetSE ) for channel, sourceSE, targetSE in channels
                   if channel.fromNode.SEs[sourceSE]["read"] and channel.toNode.SEs[targetSE]["write"] 
                   and channel.status == "Active" and channel.timeToStart < float("inf") ]
      if not channels:
        self.log.info("dynamicThroughput: active candidate channels not found")
        return S_ERROR("dynamicThroughput: no active candidate FTS channels")
      
      candidates = []
      selTimeToStart = None
      for channel, sourceSE, targetSE in channels:
        timeToStart = channel.timeToStart
        if sourceSE not in primarySources:
          timeToStart += self.sigma        
        if sourceSE in timeToSite:
          timeToStart += timeToSite[sourceSE]
        ## local found 
        if channel.fromNode == channel.toNode:
          self.log.debug("dynamicThroughput: found local channel '%s'" % channel.channelName )
          candidates = [ ( channel, sourceSE, targetSE ) ]
          selTimeToStart = timeToStart
          break
        if timeToStart < minTimeToStart:
          selTimeToStart = timeToStart
          minTimeToStart = timeToStart
          candidates = [ ( channel, sourceSE, targetSE ) ]
        elif timeToStart == minTimeToStart:
          candidates.append( (channel, sourceSE, targetSE ) )

      if not candidates:
        return S_ERROR("dynamicThroughput: unable to find candidate FTS channels")

      random.shuffle( candidates )
      selChannel, selSourceSE, selTargetSE = candidates[0]
      ancestor = False
      for channelID, treeItem in tree.items():
        if selSourceSE == treeItem["DestSE"]:
          ancestor = channelID
      tree[selChannel.channelID] = { "Ancestor" : ancestor,
                                     "SourceSE" : selSourceSE,
                                     "DestSE" : selTargetSE,
                                     "Strategy" : "DynamicThroughput" }
      timeToSite[selTargetSE] = selTimeToStart 
      sourceSEs.append( selTargetSE )
      targetSEs.remove( selTargetSE )
  
    return S_OK( tree )

  def reset( self ):
    """ reset :chosenStrategy: 

    :param self: self reference
    """
    self.chosenStrategy = 0

  def getSupportedStrategies( self ):
    """ Get supported strategies.

    :param self: self reference
    """    
    return self.supportedStrategies

  def replicationTree( self, sourceSEs, targetSEs, size, strategy=None ):
    """ get replication tree

    :param list sourceSEs: list of source SE names to use
    :param list targetSEs: list of target SE names to use
    :param long size: file size
    :param str strategy: strategy name
    """
    ## update SEs rwAccess every rwUpdatePeriod timedelta (default 300 s)
    now = datetime.datetime.now()
    if now - self.lastRssUpdate > self.rwUpdatePeriod:
      update = self.updateGraph( rwAccess=True )
      if not update["OK"]:
        self.log.warn("replicationTree: unable to update FTS graph: %s" % update["Message"] )
      else:
        self.lastRssUpdate = now
    ## get strategy
    strategy = strategy if strategy else self.__selectStrategy()
    if strategy not in self.getSupportedStrategies():
      return S_ERROR("replicationTree: unsupported strategy '%s'" % strategy )

    self.log.info( "replicationTree: strategy=%s sourceSEs=%s targetSEs=%s size=%s" %\
                     ( strategy, sourceSEs, targetSEs, size ) )
    ## fire action from dispatcher
    tree = self.strategyDispatcher[strategy]( sourceSEs, targetSEs )
    if not tree["OK"]:
      self.log.error( "replicationTree: %s" % tree["Message"] )
      return tree
    ## update graph edges
    update = self.updateGraph( replicationTree=tree["Value"], size=size )
    if not update["OK"]:
      self.log.error( "replicationTree: unable to update FTS graph: %s" % update["Message"] )
      return update
    return tree
    
  def __selectStrategy( self ):
    """ If more than one active strategy use one after the other.

    :param self: self reference
    """
    chosenStrategy = self.activeStrategies[self.chosenStrategy]
    self.chosenStrategy += 1
    if self.chosenStrategy == self.numberOfStrategies:
      self.chosenStrategy = 0
    return chosenStrategy

  def __getRWAccessForSE( self, seList ):
    """ get RSS R/W for :seList: 

    :param list seList: SE list
    """
    rwDict = dict.fromkeys( seList )
    for se in rwDict:
      rwDict[se] = { "read" : False, "write" : False  }
    rAccess = self.resourceStatus.getStorageElementStatus( seList, statusType = "ReadAccess", default = 'Unknown' )
    if not rAccess["OK"]:
      return rAccess["Message"]
    rAccess = [ k for k, v in rAccess["Value"].items() if "ReadAccess" in v and v["ReadAccess"] in ( "Active", 
                                                                                                     "Degraded" ) ]
    wAccess = self.resourceStatus.getStorageElementStatus( seList, statusType = "WriteAccess", default = 'Unknown' )
    if not wAccess["OK"]:
      return wAccess["Message"]
    wAccess = [ k for k, v in wAccess["Value"].items() if "WriteAccess" in v and v["WriteAccess"] in ( "Active", 
                                                                                                       "Degraded" ) ]
    for se in rwDict:
      rwDict[se]["read"] = se in rAccess
      rwDict[se]["write"] = se in wAccess
    return S_OK( rwDict )
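
Putting the class together: the constructor builds the FTS graph from channel and bandwidth snapshots, and `replicationTree` then returns a `{ channelID : {...} }` tree for a given set of source and target SEs. A heavily hedged usage sketch, assuming a configured DIRAC installation; the config path, channel contents and SE names below are hypothetical, and the snapshot structure follows the `setup()` docstring above:

# hypothetical channel/bandwidth snapshots keyed by channelID (structure taken from the setup() docstring)
channels = {1: {"Files": 10, "Size": 10 ** 9, "ChannelName": "SiteA-SiteB",
                "Source": "SiteA", "Destination": "SiteB", "Status": "Active"}}
bandwidths = {1: {"Throughput": 50.0, "Fileput": 2.0, "SuccessfulFiles": 100, "FailedFiles": 2}}
failedFiles = {1: 0}

try:
  sh = StrategyHandler("/Systems/DataManagement/Production/Agents/ReplicationScheduler",
                       channels, bandwidths, failedFiles)
  tree = sh.replicationTree(sourceSEs=["SiteA-USER"], targetSEs=["SiteB-USER"],
                            size=10 ** 8, strategy="MinimiseTotalWait")
  if tree["OK"]:
    # e.g. { channelID : { "Ancestor" : ..., "SourceSE" : ..., "DestSE" : ..., "Strategy" : ... } }
    print(tree["Value"])
except SHGraphCreationError as error:
  print("could not build the FTS graph: %s" % error)
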
Example No. 11
class StorageElement:
  """
  .. class:: StorageElement

  common interface to the grid storage element
  """

  def __init__( self, name, protocols = None, vo = None ):
    """ c'tor

    :param str name: SE name
    :param list protocols: requested protocols
    """

    self.vo = vo
    if not vo:
      result = getVOfromProxyGroup()
      if not result['OK']:
        raise RuntimeError( result['Message'] )
      self.vo = result['Value']
    self.opHelper = Operations( vo = self.vo )
    self.resources = Resources( vo = self.vo )

    proxiedProtocols = gConfig.getValue( '/LocalSite/StorageElements/ProxyProtocols', "" ).split( ',' )
    useProxy = False
    result = self.resources.getAccessProtocols( name )
    if result['OK']:
      ap = result['Value'][0]
      useProxy = ( self.resources.getAccessProtocolValue( ap, "Protocol", "UnknownProtocol" )
                   in proxiedProtocols )

    #print "Proxy", name, proxiedProtocols, \
    #gConfig.getValue( "/Resources/StorageElements/%s/AccessProtocol.1/Protocol" % name, "xxx" )

    if not useProxy:
      useProxy = gConfig.getValue( '/LocalSite/StorageElements/%s/UseProxy' % name, False )
    if not useProxy:
      useProxy = self.opHelper.getValue( '/Services/StorageElements/%s/UseProxy' % name, False )

    self.valid = True
    if protocols is None:
      res = StorageFactory( useProxy ).getStorages( name, protocolList = [] )
    else:
      res = StorageFactory( useProxy ).getStorages( name, protocolList = protocols )
    if not res['OK']:
      self.valid = False
      self.name = name
      self.errorReason = res['Message']
    else:
      factoryDict = res['Value']
      self.name = factoryDict['StorageName']
      self.options = factoryDict['StorageOptions']
      self.localProtocols = factoryDict['LocalProtocols']
      self.remoteProtocols = factoryDict['RemoteProtocols']
      self.storages = factoryDict['StorageObjects']
      self.protocolOptions = factoryDict['ProtocolOptions']
      self.turlProtocols = factoryDict['TurlProtocols']

    self.log = gLogger.getSubLogger( "SE[%s]" % self.name )

    self.readMethods = [ 'getFile',
                         'getAccessUrl',
                         'getTransportURL',
                         'prestageFile',
                         'prestageFileStatus',
                         'getDirectory']

    self.writeMethods = [ 'retransferOnlineFile',
                          'putFile',
                          'replicateFile',
                          'pinFile',
                          'releaseFile',
                          'createDirectory',
                          'putDirectory' ]

    self.removeMethods = [ 'removeFile', 'removeDirectory' ]

    self.checkMethods = [ 'exists',
                          'getDirectoryMetadata',
                          'getDirectorySize',
                          'getFileSize',
                          'getFileMetadata',
                          'listDirectory',
                          'isDirectory',
                          'isFile',
                           ]

    self.okMethods = [ 'getLocalProtocols',
                       'getPfnForProtocol',
                       'getPfnForLfn',
                       'getPfnPath',
                       'getProtocols',
                       'getRemoteProtocols',
                       'getStorageElementName',
                       'getStorageElementOption',
                       'getStorageParameters',
                       'isLocalSE' ]

    self.__resourceStatus = ResourceStatus()
    
  def dump( self ):
    """ Dump to the logger a summary of the StorageElement items. """
    self.log.info( "dump: Preparing dump for StorageElement %s." % self.name )
    if not self.valid:
      self.log.error( "dump: Failed to create StorageElement plugins.", self.errorReason )
      return
    i = 1
    outStr = "\n\n============ Options ============\n"
    for key in sorted( self.options ):
      outStr = "%s%s: %s\n" % ( outStr, key.ljust( 15 ), self.options[key] )

    for storage in self.storages:
      outStr = "%s============Protocol %s ============\n" % ( outStr, i )
      res = storage.getParameters()
      storageParameters = res['Value']
      for key in sorted( storageParameters ):
        outStr = "%s%s: %s\n" % ( outStr, key.ljust( 15 ), storageParameters[key] )
      i = i + 1
    self.log.info( outStr )

  #################################################################################################
  #
  # These are the basic get functions for storage configuration
  #

  def getStorageElementName( self ):
    """ SE name getter """
    self.log.verbose( "getStorageElementName: The Storage Element name is %s." % self.name )
    return S_OK( self.name )

  def getChecksumType( self ):
    """ get local /Resources/StorageElements/SEName/ChecksumType option if defined, otherwise
        global /Resources/StorageElements/ChecksumType
    """
    return S_OK( str( gConfig.getValue( "/Resources/StorageElements/ChecksumType", "ADLER32" ) ).upper()
                 if "ChecksumType" not in self.options else str( self.options["ChecksumType"] ).upper() )

  def getStatus( self ):
    """
     Return Status of the SE, a dictionary with:
      - Read: True (is allowed), False (it is not allowed)
      - Write: True (is allowed), False (it is not allowed)
      - Remove: True (is allowed), False (it is not allowed)
      - Check: True (is allowed), False (it is not allowed).
      NB: Check always allowed IF Read is allowed (regardless of what is set in the Check option of the configuration)
      - DiskSE: True if TXDY with Y > 0 (defaults to True)
      - TapeSE: True if TXDY with X > 0 (defaults to False)
      - TotalCapacityTB: float (-1 if not defined)
      - DiskCacheTB: float (-1 if not defined)
    """
    retDict = {}
    if not self.valid:
      retDict['Read'] = False
      retDict['Write'] = False
      retDict['Remove'] = False
      retDict['Check'] = False
      retDict['DiskSE'] = False
      retDict['TapeSE'] = False
      retDict['TotalCapacityTB'] = -1
      retDict['DiskCacheTB'] = -1
      return S_OK( retDict )

    # If nothing is defined in the CS Access is allowed
    # If something is defined, then it must be set to Active
    retDict['Read'] = self.__resourceStatus.isUsableStorage( self.name, 'ReadAccess' )
    retDict['Write'] = self.__resourceStatus.isUsableStorage( self.name, 'WriteAccess' )
    retDict['Remove'] = self.__resourceStatus.isUsableStorage( self.name, 'RemoveAccess' )
    if retDict['Read']:
      retDict['Check'] = True
    else:
      retDict['Check'] = self.__resourceStatus.isUsableStorage( self.name, 'CheckAccess' )
    diskSE = True
    tapeSE = False
    if 'SEType' in self.options:
      # Type should follow the convention TXDY
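      # e.g. (illustrative) 'T0D1' is a pure disk SE, 'T1D0' a pure tape SE,
      # and 'T1D1' counts as both disk and tape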
      seType = self.options['SEType']
      diskSE = re.search( 'D[1-9]', seType ) != None
      tapeSE = re.search( 'T[1-9]', seType ) != None
    retDict['DiskSE'] = diskSE
    retDict['TapeSE'] = tapeSE
    try:
      retDict['TotalCapacityTB'] = float( self.options['TotalCapacityTB'] )
    except Exception:
      retDict['TotalCapacityTB'] = -1
    try:
      retDict['DiskCacheTB'] = float( self.options['DiskCacheTB'] )
    except Exception:
      retDict['DiskCacheTB'] = -1

    return S_OK( retDict )

  def isValid( self, operation = '' ):
    """ check CS/RSS statuses for :operation:

    :param str operation: operation name
    """
    self.log.debug( "isValid: Determining whether the StorageElement %s is valid for %s" % ( self.name,
                                                                                             operation ) )

    if ( not operation ) or ( operation in self.okMethods ):
      return S_OK()

    if not self.valid:
      self.log.error( "isValid: Failed to create StorageElement plugins.", self.errorReason )
      return S_ERROR( self.errorReason )
    # Determine whether the StorageElement is valid for checking, reading, writing
    res = self.getStatus()
    if not res[ 'OK' ]:
      self.log.error( "Could not call getStatus" )
      return S_ERROR( "StorageElement.isValid could not call the getStatus method" )
    checking = res[ 'Value' ][ 'Check' ]
    reading = res[ 'Value' ][ 'Read' ]
    writing = res[ 'Value' ][ 'Write' ]
    removing = res[ 'Value' ][ 'Remove' ]

    # Determine whether the requested operation can be fulfilled
    if ( not operation ) and ( not reading ) and ( not writing ) and ( not checking ):
      self.log.error( "isValid: Read, write and check access not permitted." )
      return S_ERROR( "StorageElement.isValid: Read, write and check access not permitted." )

    # The supplied operation can be 'Read','Write' or any of the possible StorageElement methods.
    if ( operation in self.readMethods ) or ( operation.lower() in ( 'read', 'readaccess' ) ):
      operation = 'ReadAccess'
    elif operation in self.writeMethods or ( operation.lower() in ( 'write', 'writeaccess' ) ):
      operation = 'WriteAccess'
    elif operation in self.removeMethods or ( operation.lower() in ( 'remove', 'removeaccess' ) ):
      operation = 'RemoveAccess'
    elif operation in self.checkMethods or ( operation.lower() in ( 'check', 'checkaccess' ) ):
      operation = 'CheckAccess'
    else:
      self.log.error( "isValid: The supplied operation is not known.", operation )
      return S_ERROR( "StorageElement.isValid: The supplied operation is not known." )
    self.log.debug( "in isValid check the operation: %s " % operation )
    # Check if the operation is valid
    if operation == 'CheckAccess':
      if not reading:
        if not checking:
          self.log.error( "isValid: Check access not currently permitted." )
          return S_ERROR( "StorageElement.isValid: Check access not currently permitted." )
    if operation == 'ReadAccess':
      if not reading:
        self.log.error( "isValid: Read access not currently permitted." )
        return S_ERROR( "StorageElement.isValid: Read access not currently permitted." )
    if operation == 'WriteAccess':
      if not writing:
        self.log.error( "isValid: Write access not currently permitted." )
        return S_ERROR( "StorageElement.isValid: Write access not currently permitted." )
    if operation == 'RemoveAccess':
      if not removing:
        self.log.error( "isValid: Remove access not currently permitted." )
        return S_ERROR( "StorageElement.isValid: Remove access not currently permitted." )
    return S_OK()

  def getProtocols( self ):
    """ Get the list of all the protocols defined for this Storage Element
    """
    if not self.valid:
      return S_ERROR( self.errorReason )
    self.log.verbose( "getProtocols: Obtaining all protocols." )
    allProtocols = self.localProtocols + self.remoteProtocols
    return S_OK( allProtocols )

  def getRemoteProtocols( self ):
    """ Get the list of all the remote access protocols defined for this Storage Element
    """
    if not self.valid:
      return S_ERROR( self.errorReason )
    self.log.verbose( "getRemoteProtocols: Obtaining remote protocols for %s." % self.name )
    return S_OK( self.remoteProtocols )

  def getLocalProtocols( self ):
    """ Get the list of all the local access protocols defined for this Storage Element
    """
    if not self.valid:
      return S_ERROR( self.errorReason )
    self.log.verbose( "getLocalProtocols: Obtaining local protocols for %s." % self.name )
    return S_OK( self.localProtocols )

  def getStorageElementOption( self, option ):
    """ Get the value for the option supplied from self.options
    """
    if not self.valid:
      return S_ERROR( self.errorReason )
    self.log.verbose( "getStorageElementOption: Obtaining %s option for Storage Element %s." % ( option,
                                                                                                 self.name ) )
    if option in self.options:
      optionValue = self.options[option]
      return S_OK( optionValue )
    else:
      errStr = "getStorageElementOption: Option not defined for SE."
      self.log.error( errStr, "%s for %s" % ( option, self.name ) )
      return S_ERROR( errStr )

  def getStorageParameters( self, protocol ):
    """ Get protocol specific options
    """
    self.log.verbose( "getStorageParameters: Obtaining storage parameters for %s protocol %s." % ( self.name,
                                                                                                   protocol ) )
    res = self.getProtocols()
    if not res['OK']:
      return res
    availableProtocols = res['Value']
    if not protocol in availableProtocols:
      errStr = "getStorageParameters: Requested protocol not available for SE."
      self.log.warn( errStr, '%s for %s' % ( protocol, self.name ) )
      return S_ERROR( errStr )
    for storage in self.storages:
      res = storage.getParameters()
      storageParameters = res['Value']
      if storageParameters['ProtocolName'] == protocol:
        return S_OK( storageParameters )
    errStr = "getStorageParameters: Requested protocol supported but no object found."
    self.log.error( errStr, "%s for %s" % ( protocol, self.name ) )
    return S_ERROR( errStr )

  def isLocalSE( self ):
    """ Test if the Storage Element is local in the current context
    """
    import DIRAC
    self.log.verbose( "isLocalSE: Determining whether %s is a local SE." % self.name )
    localSEs = getSEsForSite( DIRAC.siteName() )['Value']
    if self.name in localSEs:
      return S_OK( True )
    else:
      return S_OK( False )

  #################################################################################################
  #
  # These are the basic get functions for pfn manipulation
  #

  def getPfnForProtocol( self, pfn, protocol, withPort = True ):
    """ Transform the input pfn into another with the given protocol for the Storage Element.
    """
    res = self.getProtocols()
    if not res['OK']:
      return res
    if type( protocol ) == StringType:
      protocols = [protocol]
    elif type( protocol ) == ListType:
      protocols = protocol
    else:
      errStr = "getPfnForProtocol: Supplied protocol must be string or list of strings."
      self.log.error( errStr, "%s %s" % ( protocol, self.name ) )
      return S_ERROR( errStr )
    availableProtocols = res['Value']
    protocolsToTry = []
    for protocol in protocols:
      if protocol in availableProtocols:
        protocolsToTry.append( protocol )
      else:
        errStr = "getPfnForProtocol: Requested protocol not available for SE."
        self.log.debug( errStr, '%s for %s' % ( protocol, self.name ) )
    if not protocolsToTry:
      errStr = "getPfnForProtocol: None of the requested protocols were available for SE."
      self.log.error( errStr, '%s for %s' % ( protocol, self.name ) )
      return S_ERROR( errStr )
    # Check all available storages for required protocol then construct the PFN
    for storage in self.storages:
      res = storage.getParameters()
      if res['Value']['ProtocolName'] in protocolsToTry:
        res = pfnparse( pfn )
        if res['OK']:
          res = storage.getProtocolPfn( res['Value'], withPort )
          if res['OK']:
            return res
    errStr = "getPfnForProtocol: Failed to get PFN for requested protocols."
    self.log.error( errStr, "%s for %s" % ( protocols, self.name ) )
    return S_ERROR( errStr )

  def getPfnPath( self, pfn ):
    """  Get the part of the PFN path below the basic storage path.
         This path must coincide with the LFN of the file in order to be compliant with the LHCb conventions.
    """
    if not self.valid:
      return S_ERROR( self.errorReason )
    res = pfnparse( pfn )
    if not res['OK']:
      return res
    fullPfnPath = '%s/%s' % ( res['Value']['Path'], res['Value']['FileName'] )

    # Check all available storages and check whether the pfn is for that protocol
    pfnPath = ''
    for storage in self.storages:
      res = storage.isPfnForProtocol( pfn )
      if res['OK']:
        if res['Value']:
          res = storage.getParameters()
          saPath = res['Value']['Path']
          if not saPath:
            # If the sa path doesn't exist then the pfn path is the entire string
            pfnPath = fullPfnPath
          else:
            if re.search( saPath, fullPfnPath ):
              # Remove the sa path from the fullPfnPath
              pfnPath = fullPfnPath.replace( saPath, '' )
      if pfnPath:
        return S_OK( pfnPath )
    # This should never happen. DANGER!!
    errStr = "getPfnPath: Failed to get the pfn path for any of the protocols!!"
    self.log.error( errStr )
    return S_ERROR( errStr )

  def getPfnForLfn( self, lfn ):
    """ Get the full PFN constructed from the LFN.
    """
    if not self.valid:
      return S_ERROR( self.errorReason )
    for storage in self.storages:
      res = storage.getPFNBase()
      if res['OK']:
        fullPath = "%s%s" % ( res['Value'], lfn )
        return S_OK( fullPath )
    # This should never happen. DANGER!!
    errStr = "getPfnForLfn: Failed to get the full pfn for any of the protocols!!"
    self.log.error( errStr )
    return S_ERROR( errStr )

  def getPFNBase( self ):
    """ Get the base to construct a PFN
    """
    if not self.storages:
      return S_ERROR( 'No storages defined' )
    for storage in self.storages:
      result = storage.getPFNBase()
      if result['OK']:
        return result

    return result

  ###########################################################################################
  #
  # This is the generic wrapper for file operations
  #

  def retransferOnlineFile( self, pfn, singleFile = False ):
    """ execcute 'retransferOnlineFile' operation """
    return { True : self.__executeSingleFile,
             False : self.__executeFunction }[bool( singleFile )]( pfn, 'retransferOnlineFile' )

  def exists( self, pfn, singleFile = False ):
    """ execute 'exists' operation  """
    return { True : self.__executeSingleFile,
             False : self.__executeFunction }[bool( singleFile )]( pfn, 'exists' )


  def isFile( self, pfn, singleFile = False ):
    """ execute 'isFile' operation """
    return { True : self.__executeSingleFile,
             False : self.__executeFunction }[bool( singleFile )]( pfn, 'isFile' )

  def getFile( self, pfn, localPath = False, singleFile = False ):
    """ execute 'getFile' operation """
    return { True : self.__executeSingleFile,
             False : self.__executeFunction }[bool( singleFile )]( pfn, 'getFile', { 'localPath': localPath } )

  def putFile( self, pfn, sourceSize = 0, singleFile = False ):
    """ execute 'putFile' operation """
    return { True : self.__executeSingleFile,
             False : self.__executeFunction }[bool( singleFile )]( pfn, 'putFile', { 'sourceSize': sourceSize } )

  def replicateFile( self, pfn, sourceSize = 0, singleFile = False ):
    """ execute 'putFile' as replicate """
    return { True : self.__executeSingleFile,
             False : self.__executeFunction }[bool( singleFile )]( pfn, 'putFile', { 'sourceSize': sourceSize } )

  def getFileMetadata( self, pfn, singleFile = False ):
    """ execute 'getFileMetadata' operation """
    return { True : self.__executeSingleFile,
             False : self.__executeFunction }[bool( singleFile )]( pfn, 'getFileMetadata' )

  def getFileSize( self, pfn, singleFile = False ):
    """ execute 'getFileSize' operation """
    return { True : self.__executeSingleFile,
             False : self.__executeFunction }[bool( singleFile )]( pfn, 'getFileSize' )

  def getAccessUrl( self, pfn, protocol = False, singleFile = False ):
    """ execute 'getTransportURL' operation """
    if not protocol:
      protocols = self.turlProtocols
    else:
      protocols = [protocol]
    return { True : self.__executeSingleFile,
             False : self.__executeFunction }[bool( singleFile )]( pfn, 'getTransportURL', {'protocols': protocols} )

  def removeFile( self, pfn, singleFile = False ):
    """ execute 'removeFile' operation """
    return { True : self.__executeSingleFile,
             False : self.__executeFunction }[bool( singleFile )]( pfn, 'removeFile' )

  def prestageFile( self, pfn, lifetime = 86400, singleFile = False ):
    """ execute 'prestageFile' operation """
    return { True : self.__executeSingleFile,
             False : self.__executeFunction }[bool( singleFile )]( pfn, 'prestageFile', { 'lifetime': lifetime } )

  def prestageFileStatus( self, pfn, singleFile = False ):
    """ execute 'prestageFileStatus' operation """
    return { True : self.__executeSingleFile,
             False : self.__executeFunction }[bool( singleFile )]( pfn, 'prestageFileStatus' )

  def pinFile( self, pfn, lifetime = 60 * 60 * 24, singleFile = False ):
    """ execute 'pinFile' operation """
    return { True : self.__executeSingleFile,
             False : self.__executeFunction }[bool( singleFile )]( pfn, 'pinFile', { 'lifetime': lifetime } )

  def releaseFile( self, pfn, singleFile = False ):
    """ execute 'releaseFile' operation """
    return { True : self.__executeSingleFile,
             False : self.__executeFunction }[bool( singleFile )]( pfn, 'releaseFile' )

  def isDirectory( self, pfn, singleDirectory = False ):
    """ execute 'isDirectory' operation """
    return { True : self.__executeSingleFile,
             False : self.__executeFunction }[bool( singleDirectory )]( pfn, 'isDirectory' )

  def getDirectoryMetadata( self, pfn, singleDirectory = False ):
    """ execute 'getDirectoryMetadata' operation """
    return { True : self.__executeSingleFile,
             False : self.__executeFunction }[bool( singleDirectory )]( pfn, 'getDirectoryMetadata' )

  def getDirectorySize( self, pfn, singleDirectory = False ):
    """ execute 'getDirectorySize' operation """
    return { True : self.__executeSingleFile,
             False : self.__executeFunction }[bool( singleDirectory )]( pfn, 'getDirectorySize' )

  def listDirectory( self, pfn, singleDirectory = False ):
    """ execute 'listDirectory' operation """
    return { True : self.__executeSingleFile,
             False : self.__executeFunction }[bool( singleDirectory )]( pfn, 'listDirectory' )

  def removeDirectory( self, pfn, recursive = False, singleDirectory = False ):
    """ execute 'removeDirectory' operation """
    return { True : self.__executeSingleFile,
             False : self.__executeFunction }[bool( singleDirectory )]( pfn, 'removeDirectory', {'recursive':
                                                                                               recursive} )

  def createDirectory( self, pfn, singleDirectory = False ):
    """ execute 'createDirectory' operation """
    return { True : self.__executeSingleFile,
             False : self.__executeFunction }[bool( singleDirectory )]( pfn, 'createDirectory' )

  def putDirectory( self, pfn, singleDirectory = False ):
    """ execute 'putDirectory' operation """
    return { True : self.__executeSingleFile,
             False : self.__executeFunction }[bool( singleDirectory )]( pfn, 'putDirectory' )

  def getDirectory( self, pfn, localPath = False, singleDirectory = False ):
    """ execute 'getDirectory' operation """
    return { True : self.__executeSingleFile,
             False : self.__executeFunction }[bool( singleDirectory )]( pfn, 'getDirectory', { 'localPath':
                                                                                             localPath } )

  def __executeSingleFile( self, pfn, operation, arguments = None ):
    """ execute for single file """
    if arguments is None:
      res = self.__executeFunction( pfn, operation, {} )
    else:
      res = self.__executeFunction( pfn, operation, arguments )
    if type( pfn ) == ListType:
      pfn = pfn[0]
    elif type( pfn ) == DictType:
      pfn = pfn.keys()[0]
    if not res['OK']:
      return res
    elif pfn in res['Value']['Failed']:
      errorMessage = res['Value']['Failed'][pfn]
      return S_ERROR( errorMessage )
    else:
      return S_OK( res['Value']['Successful'][pfn] )

  def __executeFunction( self, pfn, method, argsDict = None ):
    """
        'pfn' is the physical file name (as registered in the LFC)
        'method' is the functionality to be executed
    """
    ## default args  = no args
    argsDict = argsDict if argsDict else {}
    if type( pfn ) in StringTypes:
      pfns = {pfn:False}
    elif type( pfn ) == ListType:
      pfns = {}
      for url in pfn:
        pfns[url] = False
    elif type( pfn ) == DictType:
      pfns = pfn.copy()
    else:
      errStr = "__executeFunction: Supplied pfns must be string or list of strings or a dictionary."
      self.log.error( errStr )
      return S_ERROR( errStr )

    if not pfns:
      self.log.verbose( "__executeFunction: No pfns supplied." )
      return S_OK( {'Failed':{}, 'Successful':{}} )
    self.log.verbose( "__executeFunction: Attempting to perform '%s' operation with %s pfns." % ( method,
                                                                                                  len( pfns ) ) )

    res = self.isValid( operation = method )
    if not res['OK']:
      return res
    else:
      if not self.valid:
        return S_ERROR( self.errorReason )

    successful = {}
    failed = {}
    localSE = self.isLocalSE()['Value']
    # Try all of the storages one by one
    for storage in self.storages:
      # Determine whether to use this storage object
      res = storage.getParameters()
      useProtocol = True
      if not res['OK']:
        self.log.error( "__executeFunction: Failed to get storage parameters.", "%s %s" % ( self.name,
                                                                                            res['Message'] ) )
        useProtocol = False
      else:
        protocolName = res['Value']['ProtocolName']
        if not pfns:
          useProtocol = False
          self.log.verbose( "__executeFunction: No pfns to be attempted for %s protocol." % protocolName )
        elif not ( protocolName in self.remoteProtocols ) and not localSE:
          # If the SE is not local then we can't use local protocols
          useProtocol = False
          self.log.verbose( "__executeFunction: Protocol not appropriate for use: %s." % protocolName )
      if useProtocol:
        self.log.verbose( "__executeFunction: Generating %s protocol PFNs for %s." % ( len( pfns ),
                                                                                       protocolName ) )
        res = self.__generatePfnDict( pfns, storage )
        pfnDict = res['Value']
        failed.update( res['Failed'] )
        if not pfnDict:
          self.log.verbose( "__executeFunction: No pfns generated for protocol %s." % protocolName )
        else:
          self.log.verbose( "__executeFunction: Attempting to perform '%s' for %s physical files" % ( method,
                                                                                                      len( pfnDict ) ) )
          fcn = None
          if hasattr( storage, method ) and callable( getattr( storage, method ) ):
            fcn = getattr( storage, method )
          if not fcn:
            return S_ERROR( "__executeFunction: unable to invoke %s, it isn't a member function of storage" )

          pfnsToUse = {}
          for pfn in pfnDict:
            pfnsToUse[pfn] = pfns[pfnDict[pfn]]

          res = fcn( pfnsToUse, **argsDict )

          if not res['OK']:
            errStr = "__executeFunction: Completely failed to perform %s." % method
            self.log.error( errStr, '%s for protocol %s: %s' % ( self.name, protocolName, res['Message'] ) )
            for pfn in pfnDict.values():
              if pfn not in failed:
                failed[pfn] = ''
              failed[pfn] = "%s %s" % ( failed[pfn], res['Message'] )
          else:
            for protocolPfn, pfn in pfnDict.items():
              if protocolPfn not in res['Value']['Successful']:
                if pfn not in failed:
                  failed[pfn] = ''
                if protocolPfn in res['Value']['Failed']:
                  failed[pfn] = "%s %s" % ( failed[pfn], res['Value']['Failed'][protocolPfn] )
                else:
                  failed[pfn] = "%s %s" % ( failed[pfn], 'No error returned from plug-in' )
              else:
                successful[pfn] = res['Value']['Successful'][protocolPfn]
                if pfn in failed:
                  failed.pop( pfn )
                pfns.pop( pfn )

    return S_OK( { 'Failed': failed, 'Successful': successful } )

  def __generatePfnDict( self, pfns, storage ):
    """ whatever, it creates PFN dict  """
    pfnDict = {}
    failed = {}
    for pfn in pfns:
      res = pfnparse( pfn )
      if not res['OK']:
        errStr = "__generatePfnDict: Failed to parse supplied PFN."
        self.log.error( errStr, "%s: %s" % ( pfn, res['Message'] ) )
        if pfn not in failed:
          failed[pfn] = ''
        failed[pfn] = "%s %s" % ( failed[pfn], errStr )
      else:
        res = storage.getProtocolPfn( res['Value'], True )
        if not res['OK']:
          errStr = "__generatePfnDict %s." % res['Message']
          self.log.error( errStr, 'for %s' % ( pfn ) )
          if pfn not in failed:
            failed[pfn] = ''
          failed[pfn] = "%s %s" % ( failed[pfn], errStr )
        else:
          pfnDict[res['Value']] = pfn
    res = S_OK( pfnDict )
    res['Failed'] = failed
    return res
Exemplo n.º 12
0
class FTS3ServerPolicy(object):
    """
    This class manages the policy for choosing a server
    """
    def __init__(self, serverDict, serverPolicy="Random"):
        """
        Call the init of the parent, and initialize the list of FTS3 servers
        """

        self.log = gLogger.getSubLogger(self.__class__.__name__)

        self._serverDict = serverDict
        self._serverList = list(serverDict)
        self._maxAttempts = len(self._serverList)
        self._nextServerID = 0
        self._resourceStatus = ResourceStatus()

        methName = "_%sServerPolicy" % serverPolicy.lower()
        if not hasattr(self, methName):
            self.log.error("Unknown server policy %s. Using Random instead" %
                           serverPolicy)
            methName = "_randomServerPolicy"

        self._policyMethod = getattr(self, methName)

    def _failoverServerPolicy(self, _attempt):
        """
        Always returns the server at a given position (normally the first one)

        :param _attempt: position of the server in the list
        """
        if _attempt >= len(self._serverList):
            raise Exception(
                "FTS3ServerPolicy.__failoverServerPolicy: attempt to reach non existing server index"
            )
        return self._serverList[_attempt]

    def _sequenceServerPolicy(self, _attempt):
        """
        Every time this policy is called, return the next server in the list
        """

        fts3server = self._serverList[self._nextServerID]
        self._nextServerID = (self._nextServerID + 1) % len(self._serverList)
        return fts3server

    def _randomServerPolicy(self, _attempt):
        """
        Return a server from the thread-local shuffled server list
        """

        if getattr(threadLocal, "shuffledServerList", None) is None:
            threadLocal.shuffledServerList = self._serverList[:]
            random.shuffle(threadLocal.shuffledServerList)

        fts3Server = threadLocal.shuffledServerList[_attempt]

        if _attempt == self._maxAttempts - 1:
            random.shuffle(threadLocal.shuffledServerList)

        return fts3Server

    def _getFTSServerStatus(self, ftsServer):
        """Fetch the status of the FTS server from RSS"""

        res = self._resourceStatus.getElementStatus(ftsServer, "FTS")
        if not res["OK"]:
            return res

        result = res["Value"]
        if ftsServer not in result:
            return S_ERROR("No FTS Server %s known to RSS" % ftsServer)

        if result[ftsServer]["all"] == "Active":
            return S_OK(True)

        return S_OK(False)

    def chooseFTS3Server(self):
        """
        Choose the appropriate FTS3 server depending on the policy
        """

        fts3Server = None
        attempt = 0

        while not fts3Server and attempt < self._maxAttempts:

            fts3Server = self._policyMethod(attempt)
            res = self._getFTSServerStatus(fts3Server)

            if not res["OK"]:
                self.log.warn("Error getting the RSS status for %s: %s" %
                              (fts3Server, res))
                fts3Server = None
                attempt += 1
                continue

            ftsServerStatus = res["Value"]

            if not ftsServerStatus:
                self.log.warn(
                    "FTS server %s is not in good shape. Choose another one" %
                    fts3Server)
                fts3Server = None
                attempt += 1

        if fts3Server:
            return S_OK(self._serverDict[fts3Server])

        return S_ERROR("Could not find an FTS3 server (max attempt reached)")
Exemplo n.º 13
0
def main():
    global fullMatch
    global sites
    Script.registerSwitch("F", "full-match", "Check all the matching criteria",
                          setFullMatch)
    Script.registerSwitch(
        "S:", "site=", "Check matching for these sites (comma separated list)",
        setSites)
    Script.registerArgument("job_JDL: file with job JDL description")
    _, args = Script.parseCommandLine(ignoreErrors=True)

    from DIRAC.Core.Security.ProxyInfo import getVOfromProxyGroup
    from DIRAC.ConfigurationSystem.Client.Helpers import Resources
    from DIRAC.Core.Utilities.PrettyPrint import printTable
    from DIRAC.ResourceStatusSystem.Client.ResourceStatus import ResourceStatus
    from DIRAC.ResourceStatusSystem.Client.SiteStatus import SiteStatus
    from DIRAC.WorkloadManagementSystem.Utilities.QueueUtilities import getQueuesResolved, matchQueue

    with open(args[0]) as f:
        jdl = f.read()

    # Get the current VO
    result = getVOfromProxyGroup()
    if not result["OK"]:
        gLogger.error("No proxy found, please login")
        DIRACExit(-1)
    voName = result["Value"]

    resultQueues = Resources.getQueues(siteList=sites, community=voName)
    if not resultQueues["OK"]:
        gLogger.error("Failed to get CE information")
        DIRACExit(-1)
    siteDict = resultQueues["Value"]
    result = getQueuesResolved(siteDict, {}, checkPlatform=True)
    if not resultQueues["OK"]:
        gLogger.error("Failed to get CE information")
        DIRACExit(-1)
    queueDict = result["Value"]

    # get list of usable sites within this cycle
    resultMask = SiteStatus().getUsableSites()
    if not resultMask["OK"]:
        gLogger.error("Failed to get Site mask information")
        DIRACExit(-1)
    siteMaskList = resultMask.get("Value", [])

    rssClient = ResourceStatus()

    fields = ("Site", "CE", "Queue", "Status", "Match", "Reason")
    records = []

    for queue, queueInfo in queueDict.items():
        site = queueInfo["Site"]
        ce = queueInfo["CEName"]
        siteStatus = "Active" if site in siteMaskList else "InActive"
        ceStatus = siteStatus
        if rssClient.rssFlag:
            result = rssClient.getElementStatus(ce, "ComputingElement")
            if result["OK"]:
                ceStatus = result["Value"][ce]["all"]

        result = matchQueue(jdl,
                            queueInfo["ParametersDict"],
                            fullMatch=fullMatch)
        if not result["OK"]:
            gLogger.error("Failed in getting match data", result["Message"])
            DIRACExit(-1)
        status = "Active" if siteStatus == "Active" and ceStatus == "Active" else "Inactive"
        if result["Value"]["Match"]:
            records.append(
                (site, ce, queueInfo["QueueName"], status, "Yes", ""))
        else:
            records.append((site, ce, queueInfo["QueueName"], status, "No",
                            result["Value"]["Reason"]))

    gLogger.notice(
        printTable(fields,
                   records,
                   sortField="Site",
                   columnSeparator="  ",
                   printOut=False))
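The resulting table has one row per resolved queue; an illustrative fragment (all values, including the reason, are invented) could look like:

  Site           CE                 Queue    Status    Match  Reason
  LCG.SiteA.ch   ce01.sitea.ch      long     Active    Yes
  LCG.SiteB.it   ce02.siteb.it      short    Inactive  No     Requirements not satisfied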
Exemplo n.º 14
0
class StorageFactory:

  def __init__( self, useProxy = False, vo = None ):

    self.rootConfigPath = '/Resources/StorageElements'
    self.valid = True
    self.proxy = False
    self.proxy = useProxy
    self.resourceStatus = ResourceStatus()
    self.vo = vo

  ###########################################################################################
  #
  # Below are public methods for obtaining storage objects
  #

  def getStorageName( self, initialName ):
    return self._getConfigStorageName( initialName )

  def getStorage( self, parameterDict ):
    """ This instantiates a single storage for the details provided and doesn't check the CS.
    """
    # The storage name must be supplied.
    if parameterDict.has_key( 'StorageName' ):
      storageName = parameterDict['StorageName']
    else:
      errStr = "StorageFactory.getStorage: StorageName must be supplied"
      gLogger.error( errStr )
      return S_ERROR( errStr )

    # ProtocolName must be supplied otherwise nothing will work.
    if parameterDict.has_key( 'ProtocolName' ):
      protocolName = parameterDict['ProtocolName']
    else:
      errStr = "StorageFactory.getStorage: ProtocolName must be supplied"
      gLogger.error( errStr )
      return S_ERROR( errStr )

    # The other options need not always be specified
    if parameterDict.has_key( 'Protocol' ):
      protocol = parameterDict['Protocol']
    else:
      protocol = ''

    if parameterDict.has_key( 'Port' ):
      port = parameterDict['Port']
    else:
      port = ''

    if parameterDict.has_key( 'Host' ):
      host = parameterDict['Host']
    else:
      host = ''

    if parameterDict.has_key( 'Path' ):
      path = parameterDict['Path']
    else:
      path = ''

    if parameterDict.has_key( 'SpaceToken' ):
      spaceToken = parameterDict['SpaceToken']
    else:
      spaceToken = ''

    if parameterDict.has_key( 'WSUrl' ):
      wsPath = parameterDict['WSUrl']
    else:
      wsPath = ''

    return self.__generateStorageObject( storageName, protocolName, protocol, path, host, port, spaceToken, wsPath, parameterDict )


  def getStorages( self, storageName, protocolList = [] ):
    """ Get an instance of a Storage based on the DIRAC SE name based on the CS entries CS

        'storageName' is the DIRAC SE name i.e. 'CERN-RAW'
        'protocolList' is an optional list of protocols if a sub-set is desired i.e ['SRM2','SRM1']
    """
    self.remoteProtocols = []
    self.localProtocols = []
    self.name = ''
    self.options = {}
    self.protocolDetails = []
    self.storages = []

    # Get the name of the storage provided
    res = self._getConfigStorageName( storageName )
    if not res['OK']:
      self.valid = False
      return res
    storageName = res['Value']
    self.name = storageName

    # Get the options defined in the CS for this storage
    res = self._getConfigStorageOptions( storageName )
    if not res['OK']:
      self.valid = False
      return res
    self.options = res['Value']

    # Get the protocol specific details
    res = self._getConfigStorageProtocols( storageName )
    if not res['OK']:
      self.valid = False
      return res
    self.protocolDetails = res['Value']

    requestedLocalProtocols = []
    requestedRemoteProtocols = []
    requestedProtocolDetails = []
    turlProtocols = []
    # Generate the protocol specific plug-ins
    self.storages = []
    for protocolDict in self.protocolDetails:
      protocolName = protocolDict['ProtocolName']
      protocolRequested = True
      if protocolList:
        if protocolName not in protocolList:
          protocolRequested = False
      if protocolRequested:
        protocol = protocolDict['Protocol']
        host = protocolDict['Host']
        path = protocolDict['Path']
        port = protocolDict['Port']
        spaceToken = protocolDict['SpaceToken']
        wsUrl = protocolDict['WSUrl']
        res = self.__generateStorageObject( storageName, protocolName, protocol,
                                            path = path, host = host, port = port,
                                            spaceToken = spaceToken, wsUrl = wsUrl,
                                            parameters = protocolDict )
        if res['OK']:
          self.storages.append( res['Value'] )
          if protocolName in self.localProtocols:
            turlProtocols.append( protocol )
            requestedLocalProtocols.append( protocolName )
          if protocolName in self.remoteProtocols:
            requestedRemoteProtocols.append( protocolName )
          requestedProtocolDetails.append( protocolDict )
        else:
          gLogger.info( res['Message'] )

    if len( self.storages ) > 0:
      resDict = {}
      resDict['StorageName'] = self.name
      resDict['StorageOptions'] = self.options
      resDict['StorageObjects'] = self.storages
      resDict['LocalProtocols'] = requestedLocalProtocols
      resDict['RemoteProtocols'] = requestedRemoteProtocols
      resDict['ProtocolOptions'] = requestedProtocolDetails
      resDict['TurlProtocols'] = turlProtocols
      return S_OK( resDict )
    else:
      errStr = "StorageFactory.getStorages: Failed to instantiate any storage protocols."
      gLogger.error( errStr, self.name )
      return S_ERROR( errStr )
  ###########################################################################################
  #
  # Below are internal methods for obtaining section/option/value configuration
  #

  def _getConfigStorageName( self, storageName ):
    """
      This gets the name of the storage from the configuration service.
      If the storage is an alias for another one, the resolution is performed.

      'storageName' is the storage section to check in the CS
    """
    configPath = '%s/%s' % ( self.rootConfigPath, storageName )
    res = gConfig.getOptions( configPath )
    if not res['OK']:
      errStr = "StorageFactory._getConfigStorageName: Failed to get storage options"
      gLogger.error( errStr, res['Message'] )
      return S_ERROR( errStr )
    if not res['Value']:
      errStr = "StorageFactory._getConfigStorageName: Supplied storage doesn't exist."
      gLogger.error( errStr, configPath )
      return S_ERROR( errStr )
    if 'Alias' in res['Value']:
      configPath = '%s/%s/Alias' % ( self.rootConfigPath, storageName )
      aliasName = gConfig.getValue( configPath )
      result = self._getConfigStorageName( aliasName )
      if not result['OK']:
        errStr = "StorageFactory._getConfigStorageName: Supplied storage doesn't exist."
        gLogger.error( errStr, configPath )
        return S_ERROR( errStr )
      resolvedName = result['Value']
    else:
      resolvedName = storageName
    return S_OK( resolvedName )

  def _getConfigStorageOptions( self, storageName ):
    """ Get the options associated to the StorageElement as defined in the CS
    """
    storageConfigPath = '%s/%s' % ( self.rootConfigPath, storageName )
    res = gConfig.getOptions( storageConfigPath )
    if not res['OK']:
      errStr = "StorageFactory._getStorageOptions: Failed to get storage options."
      gLogger.error( errStr, "%s: %s" % ( storageName, res['Message'] ) )
      return S_ERROR( errStr )
    options = res['Value']
    optionsDict = {}
    for option in options:

      if option in [ 'ReadAccess', 'WriteAccess', 'CheckAccess', 'RemoveAccess']:
        continue
      optionConfigPath = '%s/%s' % ( storageConfigPath, option )
      optionsDict[option] = gConfig.getValue( optionConfigPath, '' )

    res = self.resourceStatus.getStorageElementStatus( storageName )
    if not res[ 'OK' ]:
      errStr = "StorageFactory._getStorageOptions: Failed to get storage status"
      gLogger.error( errStr, "%s: %s" % ( storageName, res['Message'] ) )
      return S_ERROR( errStr )

    # For safety, we did not add the ${statusType}Access keys
    # this requires modifications in the StorageElement class

    # We add the dictionary with the statusTypes and values
    # { 'statusType1' : 'status1', 'statusType2' : 'status2' ... }
    optionsDict.update( res[ 'Value' ][ storageName ] )

    return S_OK( optionsDict )

  def _getConfigStorageProtocols( self, storageName ):
    """ Protocol specific information is present as sections in the Storage configuration
    """
    storageConfigPath = '%s/%s' % ( self.rootConfigPath, storageName )
    res = gConfig.getSections( storageConfigPath )
    if not res['OK']:
      errStr = "StorageFactory._getConfigStorageProtocols: Failed to get storage sections"
      gLogger.error( errStr, "%s: %s" % ( storageName, res['Message'] ) )
      return S_ERROR( errStr )
    protocolSections = res['Value']
    sortedProtocols = sortList( protocolSections )
    protocolDetails = []
    for protocol in sortedProtocols:
      res = self._getConfigStorageProtocolDetails( storageName, protocol )
      if not res['OK']:
        return res
      protocolDetails.append( res['Value'] )
    self.protocols = self.localProtocols + self.remoteProtocols
    return S_OK( protocolDetails )

  def _getConfigStorageProtocolDetails( self, storageName, protocol ):
    """
      Parse the contents of the protocol block
    """
    # First obtain the options that are available
    protocolConfigPath = '%s/%s/%s' % ( self.rootConfigPath, storageName, protocol )
    res = gConfig.getOptions( protocolConfigPath )
    if not res['OK']:
      errStr = "StorageFactory.__getProtocolDetails: Failed to get protocol options."
      gLogger.error( errStr, "%s: %s" % ( storageName, protocol ) )
      return S_ERROR( errStr )
    options = res['Value']

    # We must have certain values internally even if not supplied in CS
    protocolDict = {'Access':'', 'Host':'', 'Path':'', 'Port':'', 'Protocol':'', 'ProtocolName':'', 'SpaceToken':'', 'WSUrl':''}
    for option in options:
      configPath = '%s/%s' % ( protocolConfigPath, option )
      optionValue = gConfig.getValue( configPath, '' )
      protocolDict[option] = optionValue
      
    # Evaluate the base path taking into account possible VO specific setting 
    if self.vo:
      result = gConfig.getOptionsDict( cfgPath( protocolConfigPath, 'VOPath' ) )
      voPath = ''
      if result['OK']:
        voPath = result['Value'].get( self.vo, '' )
      if voPath:
        protocolDict['Path'] = voPath  

    # Now update the local and remote protocol lists.
    # A warning will be given if the Access option is not set.
    if protocolDict['Access'] == 'remote':
      self.remoteProtocols.append( protocolDict['ProtocolName'] )
    elif protocolDict['Access'] == 'local':
      self.localProtocols.append( protocolDict['ProtocolName'] )
    else:
      errStr = "StorageFactory.__getProtocolDetails: The 'Access' option for %s:%s is neither 'local' or 'remote'." % ( storageName, protocol )
      gLogger.warn( errStr )

    # The ProtocolName option must be defined
    if not protocolDict['ProtocolName']:
      errStr = "StorageFactory.__getProtocolDetails: 'ProtocolName' option is not defined."
      gLogger.error( errStr, "%s: %s" % ( storageName, protocol ) )
      return S_ERROR( errStr )
    return S_OK( protocolDict )
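  # Illustrative CS layout assumed by the parser above; every concrete name
  # (SE, host, port, path, space token, VO) below is a placeholder, not a
  # value taken from this code.
  #
  #   /Resources/StorageElements/
  #     SOME-SE
  #     {
  #       ReadAccess = Active
  #       SRM2                       # one section per protocol plug-in
  #       {
  #         Access = remote
  #         ProtocolName = SRM2
  #         Protocol = srm
  #         Host = srm.example.org
  #         Port = 8443
  #         Path = /grid/some-vo
  #         SpaceToken = SOME_TOKEN
  #         WSUrl = /srm/managerv2?SFN=
  #         VOPath
  #         {
  #           some-vo = /grid/some-vo/custom   # optional per-VO override of Path
  #         }
  #       }
  #     }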

  ###########################################################################################
  #
  # Below is the method for obtaining the object instantiated for a provided storage configuration
  #

  def __generateStorageObject( self, storageName, protocolName, protocol, path = None,
                              host = None, port = None, spaceToken = None, wsUrl = None, parameters = {} ):

    storageType = protocolName
    if self.proxy:
      storageType = 'Proxy'

    moduleRootPaths = getInstalledExtensions()
    moduleLoaded = False
    path = path.rstrip( '/' )
    if not path:
      path = '/'
    for moduleRootPath in moduleRootPaths:
      if moduleLoaded:
        break
      gLogger.debug( "Trying to load from root path %s" % moduleRootPath )
      moduleFile = os.path.join( rootPath, moduleRootPath, "Resources", "Storage", "%sStorage.py" % storageType )
      gLogger.debug( "Looking for file %s" % moduleFile )
      if not os.path.isfile( moduleFile ):
        continue
      try:
        # This enforces the convention that the plug-in must be named after the protocol
        moduleName = "%sStorage" % ( storageType )
        storageModule = __import__( '%s.Resources.Storage.%s' % ( moduleRootPath, moduleName ),
                                    globals(), locals(), [moduleName] )
      except Exception, x:
        errStr = "StorageFactory._generateStorageObject: Failed to import %s: %s" % ( storageName, x )
        gLogger.exception( errStr )
        return S_ERROR( errStr )

      try:
        evalString = "storageModule.%s(storageName,protocol,path,host,port,spaceToken,wsUrl)" % moduleName
        storage = eval( evalString )
        if not storage.isOK():
          errStr = "StorageFactory._generateStorageObject: Failed to instantiate storage plug in."
          gLogger.error( errStr, "%s" % ( moduleName ) )
          return S_ERROR( errStr )
      except Exception, x:
        errStr = "StorageFactory._generateStorageObject: Failed to instantiate %s(): %s" % ( moduleName, x )
        gLogger.exception( errStr )
        return S_ERROR( errStr )

      # Set extra parameters if any
      if parameters:
        result = storage.setParameters( parameters )
        if not result['OK']:
          return result

      # If using a proxy, keep the original protocol name
      if self.proxy:
        storage.protocolName = protocolName
      return S_OK( storage )
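A minimal sketch of how this factory is typically driven; the SE name and VO are placeholders and the consuming loop is illustrative only:

# Hedged sketch: 'SOME-SE' and 'some-vo' are placeholders.
factory = StorageFactory(vo='some-vo')
res = factory.getStorages('SOME-SE', protocolList=['SRM2'])
if not res['OK']:
    gLogger.error(res['Message'])
else:
    seDetails = res['Value']
    for storage in seDetails['StorageObjects']:
        # each entry is an instantiated protocol plug-in for the storage element
        gLogger.notice("Loaded plug-in %s" % storage.__class__.__name__)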
Exemplo n.º 15
0
        DIRACExit(-1)
    siteDict = resultQueues['Value']
    result = getQueuesResolved(siteDict)
    if not result['OK']:
        gLogger.error('Failed to resolve queue descriptions')
        DIRACExit(-1)
    queueDict = result['Value']

    # get list of usable sites within this cycle
    resultMask = SiteStatus().getUsableSites()
    if not resultMask['OK']:
        gLogger.error('Failed to get Site mask information')
        DIRACExit(-1)
    siteMaskList = resultMask.get('Value', [])

    rssClient = ResourceStatus()

    fields = ('Site', 'CE', 'Queue', 'Status', 'Match', 'Reason')
    records = []

    for queue, queueInfo in queueDict.iteritems():
        site = queueInfo['Site']
        ce = queueInfo['CEName']
        siteStatus = "Active" if site in siteMaskList else "InActive"
        ceStatus = siteStatus
        if rssClient.rssFlag:
            result = rssClient.getElementStatus(ce, "ComputingElement")
            if result['OK']:
                ceStatus = result['Value'][ce]['all']

        result = matchQueue(jdl, queueInfo, fullMatch=fullMatch)
Exemplo n.º 16
0
 def rssClient(self):
     """ RSS client getter """
     if not self.__rssClient:
         self.__rssClient = ResourceStatus()
     return self.__rssClient
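This getter is the usual lazy-initialisation idiom: the client is built on first access and cached on the instance. A self-contained sketch of the same pattern (the class name and surrounding attributes are mine, not from the source):

# Hedged sketch of the cached-client pattern shown above.
class SomeAgent(object):

    def __init__(self):
        self.__rssClient = None      # created lazily on first use

    def rssClient(self):
        """ RSS client getter """
        if not self.__rssClient:
            self.__rssClient = ResourceStatus()
        return self.__rssClient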
Exemplo n.º 17
0
  res = gConfig.getOptionsDict( '/Resources/Sites/LCG/%s' % site )
  if not res[ 'OK' ]:
    gLogger.error( 'The provided site (%s) is not known.' % site )
    DIRAC.exit( -1 )
  ses.extend( res[ 'Value' ][ 'SE' ].replace( ' ', '' ).split( ',' ) )

if not ses:
  gLogger.error( 'There were no SEs provided' )
  DIRAC.exit( -1 )

readBanned = []
writeBanned = []
checkBanned = []
removeBanned = []

resourceStatus = ResourceStatus()

res = resourceStatus.getElementStatus( ses, "StorageElement" )
if not res['OK']:
  gLogger.error( "Storage Element %s does not exist" % ses )
  DIRAC.exit( -1 )

reason = 'Forced with dirac-admin-ban-se by %s' % userName

for se, seOptions in res[ 'Value' ].items():

  resW = resC = resR = { 'OK' : False }

  # Eventually, we will get rid of the notion of InActive, as we always write Banned.
  if read and seOptions.has_key( 'ReadAccess' ):
Exemplo n.º 18
0
class StorageFactory:
    def __init__(self, useProxy=False, vo=None):

        self.rootConfigPath = '/Resources/StorageElements'
        self.valid = True
        self.proxy = False
        self.proxy = useProxy
        self.resourceStatus = ResourceStatus()
        self.vo = vo

    ###########################################################################################
    #
    # Below are public methods for obtaining storage objects
    #

    def getStorageName(self, initialName):
        return self._getConfigStorageName(initialName)

    def getStorage(self, parameterDict):
        """ This instantiates a single storage for the details provided and doesn't check the CS.
    """
        # The storage name must be supplied.
        if parameterDict.has_key('StorageName'):
            storageName = parameterDict['StorageName']
        else:
            errStr = "StorageFactory.getStorage: StorageName must be supplied"
            gLogger.error(errStr)
            return S_ERROR(errStr)

        # ProtocolName must be supplied otherwise nothing will work.
        if parameterDict.has_key('ProtocolName'):
            protocolName = parameterDict['ProtocolName']
        else:
            errStr = "StorageFactory.getStorage: ProtocolName must be supplied"
            gLogger.error(errStr)
            return S_ERROR(errStr)

        # The other options need not always be specified
        if parameterDict.has_key('Protocol'):
            protocol = parameterDict['Protocol']
        else:
            protocol = ''

        if parameterDict.has_key('Port'):
            port = parameterDict['Port']
        else:
            port = ''

        if parameterDict.has_key('Host'):
            host = parameterDict['Host']
        else:
            host = ''

        if parameterDict.has_key('Path'):
            path = parameterDict['Path']
        else:
            path = ''

        if parameterDict.has_key('SpaceToken'):
            spaceToken = parameterDict['SpaceToken']
        else:
            spaceToken = ''

        if parameterDict.has_key('WSUrl'):
            wsPath = parameterDict['WSUrl']
        else:
            wsPath = ''

        return self.__generateStorageObject(storageName, protocolName,
                                            protocol, path, host, port,
                                            spaceToken, wsPath, parameterDict)

    def getStorages(self, storageName, protocolList=[]):
        """ Get an instance of a Storage based on the DIRAC SE name based on the CS entries CS

        'storageName' is the DIRAC SE name i.e. 'CERN-RAW'
        'protocolList' is an optional list of protocols if a sub-set is desired i.e ['SRM2','SRM1']
    """
        self.remoteProtocols = []
        self.localProtocols = []
        self.name = ''
        self.options = {}
        self.protocolDetails = []
        self.storages = []

        # Get the name of the storage provided
        res = self._getConfigStorageName(storageName)
        if not res['OK']:
            self.valid = False
            return res
        storageName = res['Value']
        self.name = storageName

        # Get the options defined in the CS for this storage
        res = self._getConfigStorageOptions(storageName)
        if not res['OK']:
            self.valid = False
            return res
        self.options = res['Value']

        # Get the protocol specific details
        res = self._getConfigStorageProtocols(storageName)
        if not res['OK']:
            self.valid = False
            return res
        self.protocolDetails = res['Value']

        requestedLocalProtocols = []
        requestedRemoteProtocols = []
        requestedProtocolDetails = []
        turlProtocols = []
        # Generate the protocol specific plug-ins
        self.storages = []
        for protocolDict in self.protocolDetails:
            protocolName = protocolDict['ProtocolName']
            protocolRequested = True
            if protocolList:
                if protocolName not in protocolList:
                    protocolRequested = False
            if protocolRequested:
                protocol = protocolDict['Protocol']
                host = protocolDict['Host']
                path = protocolDict['Path']
                port = protocolDict['Port']
                spaceToken = protocolDict['SpaceToken']
                wsUrl = protocolDict['WSUrl']
                res = self.__generateStorageObject(storageName,
                                                   protocolName,
                                                   protocol,
                                                   path=path,
                                                   host=host,
                                                   port=port,
                                                   spaceToken=spaceToken,
                                                   wsUrl=wsUrl,
                                                   parameters=protocolDict)
                if res['OK']:
                    self.storages.append(res['Value'])
                    if protocolName in self.localProtocols:
                        turlProtocols.append(protocol)
                        requestedLocalProtocols.append(protocolName)
                    if protocolName in self.remoteProtocols:
                        requestedRemoteProtocols.append(protocolName)
                    requestedProtocolDetails.append(protocolDict)
                else:
                    gLogger.info(res['Message'])

        if len(self.storages) > 0:
            resDict = {}
            resDict['StorageName'] = self.name
            resDict['StorageOptions'] = self.options
            resDict['StorageObjects'] = self.storages
            resDict['LocalProtocols'] = requestedLocalProtocols
            resDict['RemoteProtocols'] = requestedRemoteProtocols
            resDict['ProtocolOptions'] = requestedProtocolDetails
            resDict['TurlProtocols'] = turlProtocols
            return S_OK(resDict)
        else:
            errStr = "StorageFactory.getStorages: Failed to instantiate any storage protocols."
            gLogger.error(errStr, self.name)
            return S_ERROR(errStr)

    ###########################################################################################
    #
    # Below are internal methods for obtaining section/option/value configuration
    #

    def _getConfigStorageName(self, storageName):
        """
      This gets the name of the storage from the configuration service.
      If the storage is an alias for another one, the resolution is performed.

      'storageName' is the storage section to check in the CS
    """
        configPath = '%s/%s' % (self.rootConfigPath, storageName)
        res = gConfig.getOptions(configPath)
        if not res['OK']:
            errStr = "StorageFactory._getConfigStorageName: Failed to get storage options"
            gLogger.error(errStr, res['Message'])
            return S_ERROR(errStr)
        if not res['Value']:
            errStr = "StorageFactory._getConfigStorageName: Supplied storage doesn't exist."
            gLogger.error(errStr, configPath)
            return S_ERROR(errStr)
        if 'Alias' in res['Value']:
            configPath = '%s/%s/Alias' % (self.rootConfigPath, storageName)
            aliasName = gConfig.getValue(configPath)
            result = self._getConfigStorageName(aliasName)
            if not result['OK']:
                errStr = "StorageFactory._getConfigStorageName: Supplied storage doesn't exist."
                gLogger.error(errStr, configPath)
                return S_ERROR(errStr)
            resolvedName = result['Value']
        else:
            resolvedName = storageName
        return S_OK(resolvedName)

    def _getConfigStorageOptions(self, storageName):
        """ Get the options associated to the StorageElement as defined in the CS
    """
        storageConfigPath = '%s/%s' % (self.rootConfigPath, storageName)
        res = gConfig.getOptions(storageConfigPath)
        if not res['OK']:
            errStr = "StorageFactory._getStorageOptions: Failed to get storage options."
            gLogger.error(errStr, "%s: %s" % (storageName, res['Message']))
            return S_ERROR(errStr)
        options = res['Value']
        optionsDict = {}
        for option in options:

            if option in [
                    'ReadAccess', 'WriteAccess', 'CheckAccess', 'RemoveAccess'
            ]:
                continue
            optionConfigPath = '%s/%s' % (storageConfigPath, option)
            optionsDict[option] = gConfig.getValue(optionConfigPath, '')

        res = self.resourceStatus.getStorageElementStatus(storageName)
        if not res['OK']:
            errStr = "StorageFactory._getStorageOptions: Failed to get storage status"
            gLogger.error(errStr, "%s: %s" % (storageName, res['Message']))
            return S_ERROR(errStr)

        # For safety, we did not add the ${statusType}Access keys
        # this requires modifications in the StorageElement class

        # We add the dictionary with the statusTypes and values
        # { 'statusType1' : 'status1', 'statusType2' : 'status2' ... }
        optionsDict.update(res['Value'][storageName])

        return S_OK(optionsDict)

    def _getConfigStorageProtocols(self, storageName):
        """ Protocol specific information is present as sections in the Storage configuration
    """
        storageConfigPath = '%s/%s' % (self.rootConfigPath, storageName)
        res = gConfig.getSections(storageConfigPath)
        if not res['OK']:
            errStr = "StorageFactory._getConfigStorageProtocols: Failed to get storage sections"
            gLogger.error(errStr, "%s: %s" % (storageName, res['Message']))
            return S_ERROR(errStr)
        protocolSections = res['Value']
        sortedProtocols = sortList(protocolSections)
        protocolDetails = []
        for protocol in sortedProtocols:
            res = self._getConfigStorageProtocolDetails(storageName, protocol)
            if not res['OK']:
                return res
            protocolDetails.append(res['Value'])
        self.protocols = self.localProtocols + self.remoteProtocols
        return S_OK(protocolDetails)

    def _getConfigStorageProtocolDetails(self, storageName, protocol):
        """
      Parse the contents of the protocol block
    """
        # First obtain the options that are available
        protocolConfigPath = '%s/%s/%s' % (self.rootConfigPath, storageName,
                                           protocol)
        res = gConfig.getOptions(protocolConfigPath)
        if not res['OK']:
            errStr = "StorageFactory.__getProtocolDetails: Failed to get protocol options."
            gLogger.error(errStr, "%s: %s" % (storageName, protocol))
            return S_ERROR(errStr)
        options = res['Value']

        # We must have certain values internally even if not supplied in CS
        protocolDict = {
            'Access': '',
            'Host': '',
            'Path': '',
            'Port': '',
            'Protocol': '',
            'ProtocolName': '',
            'SpaceToken': '',
            'WSUrl': ''
        }
        for option in options:
            configPath = '%s/%s' % (protocolConfigPath, option)
            optionValue = gConfig.getValue(configPath, '')
            protocolDict[option] = optionValue

        # Evaluate the base path taking into account possible VO specific setting
        if self.vo:
            result = gConfig.getOptionsDict(
                cfgPath(protocolConfigPath, 'VOPath'))
            voPath = ''
            if result['OK']:
                voPath = result['Value'].get(self.vo, '')
            if voPath:
                protocolDict['Path'] = voPath

        # Now update the local and remote protocol lists.
        # A warning will be given if the Access option is not set.
        if protocolDict['Access'] == 'remote':
            self.remoteProtocols.append(protocolDict['ProtocolName'])
        elif protocolDict['Access'] == 'local':
            self.localProtocols.append(protocolDict['ProtocolName'])
        else:
            errStr = "StorageFactory.__getProtocolDetails: The 'Access' option for %s:%s is neither 'local' or 'remote'." % (
                storageName, protocol)
            gLogger.warn(errStr)

        # The ProtocolName option must be defined
        if not protocolDict['ProtocolName']:
            errStr = "StorageFactory.__getProtocolDetails: 'ProtocolName' option is not defined."
            gLogger.error(errStr, "%s: %s" % (storageName, protocol))
            return S_ERROR(errStr)
        return S_OK(protocolDict)

    ###########################################################################################
    #
    # Below is the method for obtaining the object instantiated for a provided storage configuration
    #

    def __generateStorageObject(self,
                                storageName,
                                protocolName,
                                protocol,
                                path=None,
                                host=None,
                                port=None,
                                spaceToken=None,
                                wsUrl=None,
                                parameters={}):

        storageType = protocolName
        if self.proxy:
            storageType = 'Proxy'

        moduleRootPaths = getInstalledExtensions()
        moduleLoaded = False
        path = path.rstrip('/')
        if not path:
            path = '/'
        for moduleRootPath in moduleRootPaths:
            if moduleLoaded:
                break
            gLogger.debug("Trying to load from root path %s" % moduleRootPath)
            moduleFile = os.path.join(rootPath, moduleRootPath, "Resources",
                                      "Storage", "%sStorage.py" % storageType)
            gLogger.debug("Looking for file %s" % moduleFile)
            if not os.path.isfile(moduleFile):
                continue
            try:
                # This enforces the convention that the plug-in must be named after the protocol
                moduleName = "%sStorage" % (storageType)
                storageModule = __import__(
                    '%s.Resources.Storage.%s' % (moduleRootPath, moduleName),
                    globals(), locals(), [moduleName])
            except Exception, x:
                errStr = "StorageFactory._generateStorageObject: Failed to import %s: %s" % (
                    storageName, x)
                gLogger.exception(errStr)
                return S_ERROR(errStr)

            try:
                evalString = "storageModule.%s(storageName,protocol,path,host,port,spaceToken,wsUrl)" % moduleName
                storage = eval(evalString)
                if not storage.isOK():
                    errStr = "StorageFactory._generateStorageObject: Failed to instantiate storage plug in."
                    gLogger.error(errStr, "%s" % (moduleName))
                    return S_ERROR(errStr)
            except Exception, x:
                errStr = "StorageFactory._generateStorageObject: Failed to instantiate %s(): %s" % (
                    moduleName, x)
                gLogger.exception(errStr)
                return S_ERROR(errStr)

            # Set extra parameters if any
            if parameters:
                result = storage.setParameters(parameters)
                if not result['OK']:
                    return result

            # If using a proxy, keep the original protocol name
            if self.proxy:
                storage.protocolName = protocolName
            return S_OK(storage)
Exemplo n.º 19
0
 def rssClient(cls):
     """ ResourceStatusClient getter """
     if not cls.__rssClient:
         from DIRAC.ResourceStatusSystem.Client.ResourceStatus import ResourceStatus
         cls.__rssClient = ResourceStatus()
     return cls.__rssClient
Exemplo n.º 20
0
class StorageFactory(object):
    def __init__(self, useProxy=False, vo=None):
        self.proxy = False
        self.proxy = useProxy
        self.resourceStatus = ResourceStatus()
        self.vo = vo
        if self.vo is None:
            result = getVOfromProxyGroup()
            if result['OK']:
                self.vo = result['Value']
            else:
                RuntimeError("Can not get the current VO context")
        self.remotePlugins = []
        self.localPlugins = []
        self.name = ''
        self.options = {}
        self.protocols = {}
        self.storages = []

    ###########################################################################################
    #
    # Below are public methods for obtaining storage objects
    #

    def getStorageName(self, initialName):
        return self._getConfigStorageName(initialName, 'Alias')

    def getStorage(self, parameterDict, hideExceptions=False):
        """ This instantiates a single storage for the details provided and doesn't check the CS.
    """
        # The storage name must be supplied.
        if parameterDict.has_key('StorageName'):
            storageName = parameterDict['StorageName']
        else:
            errStr = "StorageFactory.getStorage: StorageName must be supplied"
            gLogger.error(errStr)
            return S_ERROR(errStr)

        # PluginName must be supplied otherwise nothing will work.
        pluginName = parameterDict.get('PluginName')
        if not pluginName:
            errStr = "StorageFactory.getStorage: PluginName must be supplied"
            gLogger.error(errStr)
            return S_ERROR(errStr)

        return self.__generateStorageObject(storageName,
                                            pluginName,
                                            parameterDict,
                                            hideExceptions=hideExceptions)

    def getStorages(self, storageName, pluginList=None, hideExceptions=False):
        """ Get an instance of a Storage based on the DIRAC SE name based on the CS entries CS

        :params storageName: is the DIRAC SE name i.e. 'CERN-RAW'
        :params pluginList: is an optional list of protocols if a sub-set is desired i.e ['SRM2','SRM1']

        :return: dictionary containing storage elements and information about them
    """
        self.remotePlugins = []
        self.localPlugins = []
        self.name = ''
        self.options = {}
        self.protocols = {}
        self.storages = []
        if pluginList is None:
            pluginList = []
        elif isinstance(pluginList, basestring):
            pluginList = [pluginList]
        if not self.vo:
            gLogger.warn('No VO information available')

        # Get the name of the storage provided
        res = self._getConfigStorageName(storageName, 'Alias')
        if not res['OK']:
            return res
        storageName = res['Value']
        self.name = storageName

        # In case the storage is made from a base SE, get this information
        res = self._getConfigStorageName(storageName, 'BaseSE')
        if not res['OK']:
            return res
        # If the storage is derived from another one, keep the information
        # We initialize the seConfigPath to SE_BASE_CONFIG_PATH if there is a derivedSE, SE_CONFIG_PATH if not
        if res['Value'] != storageName:
            derivedStorageName = storageName
            storageName = res['Value']
            seConfigPath = SE_BASE_CONFIG_PATH
        else:
            derivedStorageName = None
            seConfigPath = SE_CONFIG_PATH

        # Get the options defined in the CS for this storage
        res = self._getConfigStorageOptions(
            storageName,
            derivedStorageName=derivedStorageName,
            seConfigPath=seConfigPath)
        if not res['OK']:
            # This is for the backward compatibility and to invite developer to move their BaseSE in the correct section
            gLogger.warn("Deprecated configuration, you can ignore the error message above."\
                           " Please move the baseSE in the correct section: ", SE_BASE_CONFIG_PATH)
            # We change the value of seConfigPath to avoid other errors due to the bad SE_BASE_CONFIG_PATH
            seConfigPath = SE_CONFIG_PATH
            res = self._getConfigStorageOptions(
                storageName,
                derivedStorageName=derivedStorageName,
                seConfigPath=seConfigPath)
            if not res['OK']:
                return res
        self.options = res['Value']

        # Get the protocol specific details
        res = self._getConfigStorageProtocols(
            storageName,
            derivedStorageName=derivedStorageName,
            seConfigPath=seConfigPath)
        if not res['OK']:
            return res
        self.protocols = res['Value']

        requestedLocalPlugins = []
        requestedRemotePlugins = []
        requestedProtocolDetails = []
        turlProtocols = []
        # Generate the protocol specific plug-ins
        for protocolSection, protocolDetails in self.protocols.iteritems():
            pluginName = protocolDetails.get('PluginName', protocolSection)
            if pluginList and pluginName not in pluginList:
                continue
            protocol = protocolDetails['Protocol']
            result = self.__generateStorageObject(
                storageName,
                pluginName,
                protocolDetails,
                hideExceptions=hideExceptions)
            if result['OK']:
                self.storages.append(result['Value'])
                if pluginName in self.localPlugins:
                    turlProtocols.append(protocol)
                    requestedLocalPlugins.append(pluginName)
                if pluginName in self.remotePlugins:
                    requestedRemotePlugins.append(pluginName)
                requestedProtocolDetails.append(protocolDetails)
            else:
                gLogger.info(result['Message'])

        if self.storages:
            resDict = {}
            resDict['StorageName'] = self.name
            resDict['StorageOptions'] = self.options
            resDict['StorageObjects'] = self.storages
            resDict['LocalPlugins'] = requestedLocalPlugins
            resDict['RemotePlugins'] = requestedRemotePlugins
            resDict['ProtocolOptions'] = requestedProtocolDetails
            resDict['TurlProtocols'] = turlProtocols
            return S_OK(resDict)
        else:
            errStr = "StorageFactory.getStorages: Failed to instantiate any storage protocols."
            gLogger.error(errStr, self.name)
            return S_ERROR(errStr)

    ###########################################################################################
    #
    # Below are internal methods for obtaining section/option/value configuration
    #

    def _getConfigStorageName(self,
                              storageName,
                              referenceType,
                              seConfigPath=SE_CONFIG_PATH):
        """
      This gets the name of the storage from the configuration service.
      If the storage is a reference to another SE, the resolution is performed.

      :params storageName: is the storage section to check in the CS
      :params referenceType: corresponds to an option inside the storage section
      :params seConfigPath: the path of the storage section. 
                              It can be /Resources/StorageElements or StorageElementBases

      :return: the name of the storage
    """
        configPath = '%s/%s' % (seConfigPath, storageName)
        res = gConfig.getOptions(configPath)
        if not res['OK']:
            errStr = "StorageFactory._getConfigStorageName: Failed to get storage options"
            gLogger.error(errStr, res['Message'])
            return S_ERROR(errStr)
        if not res['Value']:
            errStr = "StorageFactory._getConfigStorageName: Supplied storage doesn't exist."
            gLogger.error(errStr, configPath)
            return S_ERROR(errStr)
        if referenceType in res['Value']:
            configPath = cfgPath(seConfigPath, storageName, referenceType)
            referenceName = gConfig.getValue(configPath)
            result = self._getConfigStorageName(
                referenceName, 'Alias', seConfigPath=SE_BASE_CONFIG_PATH)
            if not result['OK']:
                # This is for the backward compatibility and to invite developer to move their BaseSE in the correct section
                gLogger.warn("Deprecated configuration, you can ignore the error message above."\
                             " Please move the baseSE in the correct section: ", SE_BASE_CONFIG_PATH)
                result = self._getConfigStorageName(
                    referenceName, 'Alias', seConfigPath=SE_CONFIG_PATH)
                if not result['OK']:
                    return result
            resolvedName = result['Value']
        else:
            resolvedName = storageName
        return S_OK(resolvedName)

    def _getConfigStorageOptions(self,
                                 storageName,
                                 derivedStorageName=None,
                                 seConfigPath=SE_CONFIG_PATH):
        """ 
      Get the options associated to the StorageElement as defined in the CS

      :params storageName: is the storage section to check in the CS
      :params seConfigPath: the path of the storage section. 
                              It can be /Resources/StorageElements or StorageElementBases
      :params derivedStorageName: is the storage section of a derived storage if it inherits from a base

      :return: options associated to the StorageElement as defined in the CS
    """
        optionsDict = {}

        # We first get the options of the baseSE, and then overwrite with the derivedSE
        for seName in (storageName,
                       derivedStorageName) if derivedStorageName else (
                           storageName, ):
            storageConfigPath = cfgPath(seConfigPath, seName)
            res = gConfig.getOptions(storageConfigPath)
            if not res['OK']:
                errStr = "StorageFactory._getStorageOptions: Failed to get storage options."
                gLogger.error(errStr, "%s: %s" % (seName, res['Message']))
                return S_ERROR(errStr)
            for option in set(res['Value']) - set(
                ('ReadAccess', 'WriteAccess', 'CheckAccess', 'RemoveAccess')):
                optionConfigPath = cfgPath(storageConfigPath, option)
                default = [] if option in [
                    'VO', 'AccessProtocols', 'WriteProtocols'
                ] else ''
                optionsDict[option] = gConfig.getValue(optionConfigPath,
                                                       default)
            # We update the seConfigPath in order to find option in derivedSE now
            seConfigPath = SE_CONFIG_PATH

        # The status is that of the derived SE only
        seName = derivedStorageName if derivedStorageName else storageName
        res = self.resourceStatus.getElementStatus(seName, "StorageElement")
        if not res['OK']:
            errStr = "StorageFactory._getStorageOptions: Failed to get storage status"
            gLogger.error(errStr, "%s: %s" % (seName, res['Message']))
            return S_ERROR(errStr)

        # For safety, we did not add the ${statusType}Access keys
        # this requires modifications in the StorageElement class

        # We add the dictionary with the statusTypes and values
        # { 'statusType1' : 'status1', 'statusType2' : 'status2' ... }
        optionsDict.update(res['Value'][seName])

        return S_OK(optionsDict)

    def __getProtocolsSections(self, storageName, seConfigPath=SE_CONFIG_PATH):
        """
      Get the protocols of a specific storage section

      :params storageName: is the storage section to check in the CS
      :params seConfigPath: the path of the storage section. 
                              It can be /Resources/StorageElements or StorageElementBases

      :return: list of protocol section names
    """
        storageConfigPath = cfgPath(seConfigPath, storageName)
        res = gConfig.getSections(storageConfigPath)
        if not res['OK']:
            errStr = "StorageFactory._getConfigStorageProtocols: Failed to get storage sections"
            gLogger.error(errStr, "%s: %s" % (storageName, res['Message']))
            return S_ERROR(errStr)
        protocolSections = res['Value']
        return S_OK(protocolSections)

    def _getConfigStorageProtocols(self,
                                   storageName,
                                   derivedStorageName=None,
                                   seConfigPath=SE_CONFIG_PATH):
        """ 
      Make a dictionary of protocols with the information associated. Merge with a base SE if it exists
      
      :params storageName: is the storage section to check in the CS
      :params seConfigPath: the path of the storage section. 
                              It can be /Resources/StorageElements or StorageElementBases
      :params derivedStorageName: is the storage section of a derived storage if it inherits from a base

      :return: dictionary of protocols like {protocolSection: {protocolOptions}}
    """
        # Get the sections
        res = self.__getProtocolsSections(storageName,
                                          seConfigPath=seConfigPath)
        if not res['OK']:
            return res
        protocolSections = res['Value']
        sortedProtocolSections = sorted(protocolSections)

        # Get the details for each section in a dictionary
        for protocolSection in sortedProtocolSections:
            res = self._getConfigStorageProtocolDetails(
                storageName, protocolSection, seConfigPath=seConfigPath)
            if not res['OK']:
                return res
            self.protocols[protocolSection] = res['Value']
        if derivedStorageName:
            # We may have parameters overwriting the baseSE protocols
            res = self.__getProtocolsSections(derivedStorageName,
                                              seConfigPath=SE_CONFIG_PATH)
            if not res['OK']:
                return res
            for protocolSection in res['Value']:
                res = self._getConfigStorageProtocolDetails(
                    derivedStorageName,
                    protocolSection,
                    seConfigPath=SE_CONFIG_PATH)
                if not res['OK']:
                    return res
                detail = res['Value']
                # If we found the plugin section from which we inherit
                inheritanceMatched = False
                for baseStorageProtocolSection in protocolSections:
                    if protocolSection == baseStorageProtocolSection:
                        inheritanceMatched = True
                        for key, val in detail.iteritems():
                            if val:
                                self.protocols[protocolSection][key] = val
                        break
                # If not matched, consider it a new protocol
                if not inheritanceMatched:
                    self.protocols[protocolSection] = detail
        return S_OK(self.protocols)

    def _getConfigStorageProtocolDetails(self,
                                         storageName,
                                         protocolSection,
                                         seConfigPath=SE_CONFIG_PATH):
        """
      Parse the contents of the protocol block

      :param storageName: is the storage section to check in the CS
      :param protocolSection: name of the protocol section to find information
      :param seConfigPath: the path of the storage section.
                              It can be /Resources/StorageElements or StorageElementBases
      :return: dictionary of the protocol options
    """
        # First obtain the options that are available
        protocolConfigPath = cfgPath(seConfigPath, storageName,
                                     protocolSection)
        res = gConfig.getOptions(protocolConfigPath)
        if not res['OK']:
            errStr = "StorageFactory.__getProtocolDetails: Failed to get protocol options."
            gLogger.error(errStr, "%s: %s" % (storageName, protocolSection))
            return S_ERROR(errStr)
        options = res['Value']

        # We must have certain values internally even if not supplied in CS
        protocolDict = {
            'Access': '',
            'Host': '',
            'Path': '',
            'Port': '',
            'Protocol': '',
            'SpaceToken': '',
            'WSUrl': ''
        }
        for option in options:
            configPath = cfgPath(protocolConfigPath, option)
            optionValue = gConfig.getValue(configPath, '')
            protocolDict[option] = optionValue

        # Evaluate the base path taking into account possible VO specific setting
        if self.vo:
            result = gConfig.getOptionsDict(
                cfgPath(protocolConfigPath, 'VOPath'))
            voPath = ''
            if result['OK']:
                voPath = result['Value'].get(self.vo, '')
            if voPath:
                protocolDict['Path'] = voPath

        # Now update the local and remote protocol lists.
        # A warning will be given if the Access option is not set and the plugin is not already in remote or local.
        plugin = protocolDict.get('PluginName', protocolSection)
        if protocolDict['Access'].lower() == 'remote':
            self.remotePlugins.append(plugin)

        elif protocolDict['Access'].lower() == 'local':
            self.localPlugins.append(plugin)
        elif protocolSection not in self.protocols:
            errStr = "StorageFactory.__getProtocolDetails: The 'Access' option \
      for %s:%s is neither 'local' or 'remote'." % (storageName,
                                                    protocolSection)
            gLogger.warn(errStr)

        return S_OK(protocolDict)

    ###########################################################################################
    #
    # Below is the method for obtaining the object instantiated for a provided storage configuration
    #

    def __generateStorageObject(self,
                                storageName,
                                pluginName,
                                parameters,
                                hideExceptions=False):
        """
      Generate a Storage Element from parameters collected

      :param storageName: is the storage section to check in the CS
      :param pluginName: name of the plugin used. Example: GFAL2_XROOT, GFAL2_SRM2...
      :param parameters: dictionary of protocol details.
    """

        storageType = pluginName
        if self.proxy:
            storageType = 'Proxy'

        objectLoader = ObjectLoader()
        result = objectLoader.loadObject('Resources.Storage.%sStorage' %
                                         storageType,
                                         storageType + 'Storage',
                                         hideExceptions=hideExceptions)
        if not result['OK']:
            gLogger.error('Failed to load storage object: %s' %
                          result['Message'])
            return result

        storageClass = result['Value']
        try:
            storage = storageClass(storageName, parameters)
        except Exception as x:
            errStr = "StorageFactory._generateStorageObject: Failed to instantiate %s: %s" % (
                storageName, x)
            gLogger.exception(errStr)
            return S_ERROR(errStr)

        return S_OK(storage)
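# --- Illustration (not DIRAC code) -------------------------------------------
# A minimal sketch of the base/derived merge rule implemented in
# _getConfigStorageProtocols above: non-empty options defined on the derived SE
# overwrite the matching base-SE protocol section, and sections that only exist
# on the derived SE are added as new protocols. The section names and option
# values below are hypothetical.
baseProtocols = {
    'AccessProtocol.1': {'Host': 'srm.example.org', 'Access': 'remote', 'Path': '/base'},
}
derivedProtocols = {
    'AccessProtocol.1': {'Path': '/derived', 'Host': ''},  # empty values do not overwrite
    'AccessProtocol.2': {'Host': 'xroot.example.org', 'Access': 'local'},
}

mergedProtocols = {section: dict(options) for section, options in baseProtocols.items()}
for section, detail in derivedProtocols.items():
    if section in mergedProtocols:
        # overwrite only with non-empty derived values
        for key, val in detail.items():
            if val:
                mergedProtocols[section][key] = val
    else:
        # protocol defined only on the derived SE
        mergedProtocols[section] = detail

# mergedProtocols['AccessProtocol.1']['Path'] == '/derived'
# mergedProtocols['AccessProtocol.1']['Host'] == 'srm.example.org'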
Exemplo n.º 21
0
class StorageFactory( object ):

  def __init__( self, useProxy = False, vo = None ):
    self.rootConfigPath = '/Resources/StorageElements'
    self.proxy = False
    self.proxy = useProxy
    self.resourceStatus = ResourceStatus()
    self.vo = vo
    if self.vo is None:
      result = getVOfromProxyGroup()
      if result['OK']:
        self.vo = result['Value']
      else:
        RuntimeError( "Can not get the current VO context" )
    self.remotePlugins = []
    self.localPlugins = []
    self.name = ''
    self.options = {}
    self.protocolDetails = []
    self.storages = []

  ###########################################################################################
  #
  # Below are public methods for obtaining storage objects
  #

  def getStorageName( self, initialName ):
    return self._getConfigStorageName( initialName, 'Alias' )

  def getStorage( self, parameterDict, hideExceptions = False ):
    """ This instantiates a single storage for the details provided and doesn't check the CS.
    """
    # The storage name must be supplied.
    if parameterDict.has_key( 'StorageName' ):
      storageName = parameterDict['StorageName']
    else:
      errStr = "StorageFactory.getStorage: StorageName must be supplied"
      gLogger.error( errStr )
      return S_ERROR( errStr )

    # PluginName must be supplied otherwise nothing will work.
    if parameterDict.has_key( 'PluginName' ):
      pluginName = parameterDict['PluginName']
    # Temporary fix for backward compatibility
    elif parameterDict.has_key( 'ProtocolName' ):
      pluginName = parameterDict['ProtocolName']
    else:
      errStr = "StorageFactory.getStorage: PluginName must be supplied"
      gLogger.error( errStr )
      return S_ERROR( errStr )

    return self.__generateStorageObject( storageName, pluginName, parameterDict, hideExceptions = hideExceptions )

  def getStorages( self, storageName, pluginList = None, hideExceptions = False ):
    """ Get an instance of a Storage based on the DIRAC SE name based on the CS entries CS

        'storageName' is the DIRAC SE name i.e. 'CERN-RAW'
        'pluginList' is an optional list of plugins if a sub-set is desired e.g. ['SRM2','SRM1']
    """
    self.remotePlugins = []
    self.localPlugins = []
    self.name = ''
    self.options = {}
    self.protocolDetails = []
    self.storages = []
    if pluginList is None:
      pluginList = []
    elif isinstance( pluginList, basestring ):
      pluginList = [pluginList]
    if not self.vo:
      gLogger.warn( 'No VO information available' )

    # Get the name of the storage provided
    res = self._getConfigStorageName( storageName, 'Alias' )
    if not res['OK']:
      return res
    storageName = res['Value']
    self.name = storageName

    # In case the storage is made from a base SE, get this information
    res = self._getConfigStorageName( storageName, 'BaseSE' )
    if not res['OK']:
      return res
    # If the storage is derived from another one, keep the information
    if res['Value'] != storageName:
      derivedStorageName = storageName
      storageName = res['Value']
    else:
      derivedStorageName = None

    # Get the options defined in the CS for this storage
    res = self._getConfigStorageOptions( storageName, derivedStorageName = derivedStorageName )
    if not res['OK']:
      return res
    self.options = res['Value']

    # Get the protocol specific details
    res = self._getConfigStorageProtocols( storageName, derivedStorageName = derivedStorageName )
    if not res['OK']:
      return res
    self.protocolDetails = res['Value']

    requestedLocalPlugins = []
    requestedRemotePlugins = []
    requestedProtocolDetails = []
    turlProtocols = []
    # Generate the protocol specific plug-ins
    for protocolDict in self.protocolDetails:
      pluginName = protocolDict.get( 'PluginName' )
      if pluginList and pluginName not in pluginList:
        continue
      protocol = protocolDict['Protocol']
      result = self.__generateStorageObject( storageName, pluginName, protocolDict, hideExceptions = hideExceptions )
      if result['OK']:
        self.storages.append( result['Value'] )
        if pluginName in self.localPlugins:
          turlProtocols.append( protocol )
          requestedLocalPlugins.append( pluginName )
        if pluginName in self.remotePlugins:
          requestedRemotePlugins.append( pluginName )
        requestedProtocolDetails.append( protocolDict )
      else:
        gLogger.info( result['Message'] )

    if len( self.storages ) > 0:
      resDict = {}
      resDict['StorageName'] = self.name
      resDict['StorageOptions'] = self.options
      resDict['StorageObjects'] = self.storages
      resDict['LocalPlugins'] = requestedLocalPlugins
      resDict['RemotePlugins'] = requestedRemotePlugins
      resDict['ProtocolOptions'] = requestedProtocolDetails
      resDict['TurlProtocols'] = turlProtocols
      return S_OK( resDict )
    else:
      errStr = "StorageFactory.getStorages: Failed to instantiate any storage protocols."
      gLogger.error( errStr, self.name )
      return S_ERROR( errStr )
  ###########################################################################################
  #
  # Below are internal methods for obtaining section/option/value configuration
  #

  def _getConfigStorageName( self, storageName, referenceType ):
    """
      This gets the name of the storage from the configuration service.
      If the storage is a reference to another SE the resolution is performed.

      'storageName' is the storage section to check in the CS
    """
    configPath = '%s/%s' % ( self.rootConfigPath, storageName )
    res = gConfig.getOptions( configPath )
    if not res['OK']:
      errStr = "StorageFactory._getConfigStorageName: Failed to get storage options"
      gLogger.error( errStr, res['Message'] )
      return S_ERROR( errStr )
    if not res['Value']:
      errStr = "StorageFactory._getConfigStorageName: Supplied storage doesn't exist."
      gLogger.error( errStr, configPath )
      return S_ERROR( errStr )
    if referenceType in res['Value']:
      configPath = cfgPath( self.rootConfigPath, storageName, referenceType )
      referenceName = gConfig.getValue( configPath )
      result = self._getConfigStorageName( referenceName, 'Alias' )
      if not result['OK']:
        return result
      resolvedName = result['Value']
    else:
      resolvedName = storageName
    return S_OK( resolvedName )

  def _getConfigStorageOptions( self, storageName, derivedStorageName = None ):
    """ Get the options associated to the StorageElement as defined in the CS
    """
    optionsDict = {}
    # We first get the options of the baseSE, and then overwrite with the derivedSE
    for seName in ( storageName, derivedStorageName ) if derivedStorageName else ( storageName, ):
      storageConfigPath = cfgPath( self.rootConfigPath, seName )
      res = gConfig.getOptions( storageConfigPath )
      if not res['OK']:
        errStr = "StorageFactory._getStorageOptions: Failed to get storage options."
        gLogger.error( errStr, "%s: %s" % ( seName, res['Message'] ) )
        return S_ERROR( errStr )
      for option in set( res['Value'] ) - set( ( 'ReadAccess', 'WriteAccess', 'CheckAccess', 'RemoveAccess' ) ):
        optionConfigPath = cfgPath( storageConfigPath, option )
        default = [] if option in [ 'VO' ] else ''
        optionsDict[option] = gConfig.getValue( optionConfigPath, default )

    # The status is that of the derived SE only
    seName = derivedStorageName if derivedStorageName else storageName
    res = self.resourceStatus.getStorageElementStatus( seName )
    if not res[ 'OK' ]:
      errStr = "StorageFactory._getStorageOptions: Failed to get storage status"
      gLogger.error( errStr, "%s: %s" % ( seName, res['Message'] ) )
      return S_ERROR( errStr )

    # For safety, we did not add the ${statusType}Access keys
    # this requires modifications in the StorageElement class

    # We add the dictionary with the statusTypes and values
    # { 'statusType1' : 'status1', 'statusType2' : 'status2' ... }
    optionsDict.update( res[ 'Value' ][ seName ] )

    return S_OK( optionsDict )

  def __getProtocolsSections( self, storageName ):
    storageConfigPath = cfgPath( self.rootConfigPath, storageName )
    res = gConfig.getSections( storageConfigPath )
    if not res['OK']:
      errStr = "StorageFactory._getConfigStorageProtocols: Failed to get storage sections"
      gLogger.error( errStr, "%s: %s" % ( storageName, res['Message'] ) )
      return S_ERROR( errStr )
    protocolSections = res['Value']
    return S_OK( protocolSections )

  def _getConfigStorageProtocols( self, storageName, derivedStorageName = None ):
    """ Protocol specific information is present as sections in the Storage configuration
    """
    res = self.__getProtocolsSections( storageName )
    if not res['OK']:
      return res
    protocolSections = res['Value']
    sortedProtocolSections = sorted( protocolSections )
    protocolDetails = []
    for protocolSection in sortedProtocolSections:
      res = self._getConfigStorageProtocolDetails( storageName, protocolSection )
      if not res['OK']:
        return res
      protocolDetails.append( res['Value'] )
    if derivedStorageName:
      # We may have parameters overwriting the baseSE protocols
      res = self.__getProtocolsSections( derivedStorageName )
      if not res['OK']:
        return res
      for protocolSection in res['Value']:
        res = self._getConfigStorageProtocolDetails( derivedStorageName, protocolSection, checkAccess = False )
        if not res['OK']:
          return res
        detail = res['Value']
        pluginName = detail.get( 'PluginName' )
        if pluginName:
          for protocolDetail in protocolDetails:
            if protocolDetail.get( 'PluginName' ) == pluginName:
              protocolDetail.update( detail )
              break

    return S_OK( protocolDetails )

  def _getConfigStorageProtocolDetails( self, storageName, protocolSection, checkAccess = True ):
    """
      Parse the contents of the protocol block
    """
    # First obtain the options that are available
    protocolConfigPath = cfgPath( self.rootConfigPath, storageName, protocolSection )
    res = gConfig.getOptions( protocolConfigPath )
    if not res['OK']:
      errStr = "StorageFactory.__getProtocolDetails: Failed to get protocol options."
      gLogger.error( errStr, "%s: %s" % ( storageName, protocolSection ) )
      return S_ERROR( errStr )
    options = res['Value']

    # We must have certain values internally even if not supplied in CS
    protocolDict = {'Access':'', 'Host':'', 'Path':'', 'Port':'', 'Protocol':'', 'SpaceToken':'', 'WSUrl':''}
    for option in options:
      configPath = cfgPath( protocolConfigPath, option )
      optionValue = gConfig.getValue( configPath, '' )
      protocolDict[option] = optionValue

    # This is a temporary fix for backward compatibility: move ProtocolName to PluginName
    protocolDict.setdefault( 'PluginName', protocolDict.pop( 'ProtocolName', None ) )

    # Evaluate the base path taking into account possible VO specific setting
    if self.vo:
      result = gConfig.getOptionsDict( cfgPath( protocolConfigPath, 'VOPath' ) )
      voPath = ''
      if result['OK']:
        voPath = result['Value'].get( self.vo, '' )
      if voPath:
        protocolDict['Path'] = voPath

    # Now update the local and remote protocol lists.
    # A warning will be given if the Access option is not set.
    if checkAccess:
      if protocolDict['Access'].lower() == 'remote':
        self.remotePlugins.append( protocolDict['PluginName'] )
      elif protocolDict['Access'].lower() == 'local':
        self.localPlugins.append( protocolDict['PluginName'] )
      else:
        errStr = "StorageFactory.__getProtocolDetails: The 'Access' option for %s:%s is neither 'local' or 'remote'." % ( storageName, protocolSection )
        gLogger.warn( errStr )

    # The PluginName option must be defined
    if not protocolDict['PluginName']:
      errStr = "StorageFactory.__getProtocolDetails: 'PluginName' option is not defined."
      gLogger.error( errStr, "%s: %s" % ( storageName, protocolSection ) )
      return S_ERROR( errStr )

    return S_OK( protocolDict )

  ###########################################################################################
  #
  # Below is the method for obtaining the object instantiated for a provided storage configuration
  #

  def __generateStorageObject( self, storageName, pluginName, parameters, hideExceptions = False ):

    storageType = pluginName
    if self.proxy:
      storageType = 'Proxy'

    objectLoader = ObjectLoader()
    result = objectLoader.loadObject( 'Resources.Storage.%sStorage' % storageType, storageType + 'Storage',
                                      hideExceptions = hideExceptions )
    if not result['OK']:
      gLogger.error( 'Failed to load storage object: %s' % result['Message'] )
      return result

    storageClass = result['Value']
    try:
      storage = storageClass( storageName, parameters )
    except Exception, x:
      errStr = "StorageFactory._generateStorageObject: Failed to instantiate %s: %s" % ( storageName, x )
      gLogger.exception( errStr )
      return S_ERROR( errStr )

    return S_OK( storage )
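# --- Usage sketch (assumes a configured DIRAC installation) ------------------
# The SE name 'CERN-RAW' is the docstring example and the 'lhcb' VO and
# 'GFAL2_SRM2' plugin are hypothetical values; they may not exist in a given
# setup.
factory = StorageFactory( vo = 'lhcb' )
res = factory.getStorages( 'CERN-RAW', pluginList = [ 'GFAL2_SRM2' ] )
if not res['OK']:
  gLogger.error( res['Message'] )
else:
  seInfo = res['Value']
  # keys returned by getStorages: StorageName, StorageOptions, StorageObjects,
  # LocalPlugins, RemotePlugins, ProtocolOptions, TurlProtocols
  for storage in seInfo['StorageObjects']:
    gLogger.info( storage.getParameters() )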
Exemplo n.º 22
0
    def __init__(self, name, protocols=None, vo=None):
        """ c'tor

    :param str name: SE name
    :param list protocols: requested protocols
    """

        self.vo = vo
        if not vo:
            result = getVOfromProxyGroup()
            if not result['OK']:
                return result
            self.vo = result['Value']
        self.opHelper = Operations(vo=self.vo)
        self.resources = Resources(vo=self.vo)

        proxiedProtocols = gConfig.getValue(
            '/LocalSite/StorageElements/ProxyProtocols', "").split(',')
        # default to no proxy if the SE access protocols cannot be resolved
        useProxy = False
        result = self.resources.getAccessProtocols(name)
        if result['OK'] and result['Value']:
            ap = result['Value'][0]
            useProxy = (self.resources.getAccessProtocolValue(
                ap, "Protocol", "UnknownProtocol") in proxiedProtocols)

        #print "Proxy", name, proxiedProtocols, \
        #gConfig.getValue( "/Resources/StorageElements/%s/AccessProtocol.1/Protocol" % name, "xxx" )

        if not useProxy:
            useProxy = gConfig.getValue(
                '/LocalSite/StorageElements/%s/UseProxy' % name, False)
        if not useProxy:
            useProxy = self.opHelper.getValue(
                '/Services/StorageElements/%s/UseProxy' % name, False)

        self.valid = True
        if protocols is None:
            res = StorageFactory(useProxy).getStorages(name, protocolList=[])
        else:
            res = StorageFactory(useProxy).getStorages(name,
                                                       protocolList=protocols)
        if not res['OK']:
            self.valid = False
            self.name = name
            self.errorReason = res['Message']
        else:
            factoryDict = res['Value']
            self.name = factoryDict['StorageName']
            self.options = factoryDict['StorageOptions']
            self.localProtocols = factoryDict['LocalProtocols']
            self.remoteProtocols = factoryDict['RemoteProtocols']
            self.storages = factoryDict['StorageObjects']
            self.protocolOptions = factoryDict['ProtocolOptions']
            self.turlProtocols = factoryDict['TurlProtocols']

        self.log = gLogger.getSubLogger("SE[%s]" % self.name)

        self.readMethods = [
            'getFile', 'getAccessUrl', 'getTransportURL', 'prestageFile',
            'prestageFileStatus', 'getDirectory'
        ]

        self.writeMethods = [
            'retransferOnlineFile', 'putFile', 'replicateFile', 'pinFile',
            'releaseFile', 'createDirectory', 'putDirectory'
        ]

        self.removeMethods = ['removeFile', 'removeDirectory']

        self.checkMethods = [
            'exists',
            'getDirectoryMetadata',
            'getDirectorySize',
            'getFileSize',
            'getFileMetadata',
            'listDirectory',
            'isDirectory',
            'isFile',
        ]

        self.okMethods = [
            'getLocalProtocols', 'getPfnForProtocol', 'getPfnForLfn',
            'getPfnPath', 'getProtocols', 'getRemoteProtocols',
            'getStorageElementName', 'getStorageElementOption',
            'getStorageParameters', 'isLocalSE'
        ]

        self.__resourceStatus = ResourceStatus()
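# --- Usage sketch (assumes a configured DIRAC client) ------------------------
# 'CERN-RAW' is the SE name used as an example elsewhere on this page and
# 'SRM2' comes from the factory docstrings; both are illustrative only.
se = StorageElement('CERN-RAW', protocols=['SRM2'])
if not se.valid:
    gLogger.error("Could not build StorageElement: %s" % se.errorReason)
else:
    gLogger.info("Local protocols: %s" % se.localProtocols)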
Exemplo n.º 23
0
if site:
  res = gConfig.getOptionsDict( '/Resources/Sites/LCG/%s' % site )
  if not res[ 'OK' ]:
    gLogger.error( 'The provided site (%s) is not known.' % site )
    DIRAC.exit( -1 )
  ses.extend( res[ 'Value' ][ 'SE' ].replace( ' ', '' ).split( ',' ) )
if not ses:
  gLogger.error( 'There were no SEs provided' )
  DIRAC.exit()

readAllowed = []
writeAllowed = []
checkAllowed = []
removeAllowed = []

resourceStatus = ResourceStatus()

res = resourceStatus.getStorageElementStatus( ses )
if not res[ 'OK' ]:
  gLogger.error( 'Storage Element %s does not exist' % ses )
  DIRAC.exit( -1 )

reason = 'Forced with dirac-admin-allow-se by %s' % userName

for se, seOptions in res[ 'Value' ].items():

  resW = resC = resR = { 'OK' : False }


  # InActive is used on the CS model, Banned is the equivalent in RSS
  if read and seOptions.has_key( 'ReadAccess' ):
Exemplo n.º 24
0
    DIRAC.exit()

STATUS_TYPES = ["ReadAccess", "WriteAccess", "CheckAccess", "RemoveAccess"]
ALLOWED_STATUSES = ["Unknown", "InActive", "Banned", "Probing", "Degraded"]

statusAllowedDict = {}
for statusType in STATUS_TYPES:
    statusAllowedDict[statusType] = []

statusFlagDict = {}
statusFlagDict['ReadAccess'] = read
statusFlagDict['WriteAccess'] = write
statusFlagDict['CheckAccess'] = check
statusFlagDict['RemoveAccess'] = remove

resourceStatus = ResourceStatus()

res = resourceStatus.getStorageElementStatus(ses)
if not res['OK']:
    gLogger.error('Storage Element %s does not exist' % ses)
    DIRAC.exit(-1)

reason = 'Forced with dirac-admin-allow-se by %s' % userName

for se, seOptions in res['Value'].items():

    resW = resC = resR = {'OK': False}

    # InActive is used on the CS model, Banned is the equivalent in RSS
    for statusType in STATUS_TYPES:
        if statusFlagDict[statusType]:
Exemplo n.º 25
0
class StorageElement:
    """
  .. class:: StorageElement

  common interface to the grid storage element
  """
    def __init__(self, name, protocols=None, vo=None):
        """ c'tor

    :param str name: SE name
    :param list protocols: requested protocols
    """

        self.vo = vo
        if not vo:
            result = getVOfromProxyGroup()
            if not result['OK']:
                return result
            self.vo = result['Value']
        self.opHelper = Operations(vo=self.vo)
        self.resources = Resources(vo=self.vo)

        proxiedProtocols = gConfig.getValue(
            '/LocalSite/StorageElements/ProxyProtocols', "").split(',')
        # default to no proxy if the SE access protocols cannot be resolved
        useProxy = False
        result = self.resources.getAccessProtocols(name)
        if result['OK'] and result['Value']:
            ap = result['Value'][0]
            useProxy = (self.resources.getAccessProtocolValue(
                ap, "Protocol", "UnknownProtocol") in proxiedProtocols)

        #print "Proxy", name, proxiedProtocols, \
        #gConfig.getValue( "/Resources/StorageElements/%s/AccessProtocol.1/Protocol" % name, "xxx" )

        if not useProxy:
            useProxy = gConfig.getValue(
                '/LocalSite/StorageElements/%s/UseProxy' % name, False)
        if not useProxy:
            useProxy = self.opHelper.getValue(
                '/Services/StorageElements/%s/UseProxy' % name, False)

        self.valid = True
        if protocols is None:
            res = StorageFactory(useProxy).getStorages(name, protocolList=[])
        else:
            res = StorageFactory(useProxy).getStorages(name,
                                                       protocolList=protocols)
        if not res['OK']:
            self.valid = False
            self.name = name
            self.errorReason = res['Message']
        else:
            factoryDict = res['Value']
            self.name = factoryDict['StorageName']
            self.options = factoryDict['StorageOptions']
            self.localProtocols = factoryDict['LocalProtocols']
            self.remoteProtocols = factoryDict['RemoteProtocols']
            self.storages = factoryDict['StorageObjects']
            self.protocolOptions = factoryDict['ProtocolOptions']
            self.turlProtocols = factoryDict['TurlProtocols']

        self.log = gLogger.getSubLogger("SE[%s]" % self.name)

        self.readMethods = [
            'getFile', 'getAccessUrl', 'getTransportURL', 'prestageFile',
            'prestageFileStatus', 'getDirectory'
        ]

        self.writeMethods = [
            'retransferOnlineFile', 'putFile', 'replicateFile', 'pinFile',
            'releaseFile', 'createDirectory', 'putDirectory'
        ]

        self.removeMethods = ['removeFile', 'removeDirectory']

        self.checkMethods = [
            'exists',
            'getDirectoryMetadata',
            'getDirectorySize',
            'getFileSize',
            'getFileMetadata',
            'listDirectory',
            'isDirectory',
            'isFile',
        ]

        self.okMethods = [
            'getLocalProtocols', 'getPfnForProtocol', 'getPfnForLfn',
            'getPfnPath', 'getProtocols', 'getRemoteProtocols',
            'getStorageElementName', 'getStorageElementOption',
            'getStorageParameters', 'isLocalSE'
        ]

        self.__resourceStatus = ResourceStatus()

    def dump(self):
        """ Dump to the logger a summary of the StorageElement items. """
        self.log.info("dump: Preparing dump for StorageElement %s." %
                      self.name)
        if not self.valid:
            self.log.error("dump: Failed to create StorageElement plugins.",
                           self.errorReason)
            return
        i = 1
        outStr = "\n\n============ Options ============\n"
        for key in sorted(self.options):
            outStr = "%s%s: %s\n" % (outStr, key.ljust(15), self.options[key])

        for storage in self.storages:
            outStr = "%s============Protocol %s ============\n" % (outStr, i)
            res = storage.getParameters()
            storageParameters = res['Value']
            for key in sorted(storageParameters):
                outStr = "%s%s: %s\n" % (outStr, key.ljust(15),
                                         storageParameters[key])
            i = i + 1
        self.log.info(outStr)

    #################################################################################################
    #
    # These are the basic get functions for storage configuration
    #

    def getStorageElementName(self):
        """ SE name getter """
        self.log.verbose(
            "getStorageElementName: The Storage Element name is %s." %
            self.name)
        return S_OK(self.name)

    def getChecksumType(self):
        """ get local /Resources/StorageElements/SEName/ChecksumType option if defined, otherwise
        global /Resources/StorageElements/ChecksumType
    """
        return S_OK(
            str(
                gConfig.getValue("/Resources/StorageElements/ChecksumType",
                                 "ADLER32")).upper() if "ChecksumType" not in
            self.options else str(self.options["ChecksumType"]).upper())

    def getStatus(self):
        """
     Return Status of the SE, a dictionary with:
      - Read: True (is allowed), False (it is not allowed)
      - Write: True (is allowed), False (it is not allowed)
      - Remove: True (is allowed), False (it is not allowed)
      - Check: True (is allowed), False (it is not allowed).
      NB: Check is always allowed if Read is allowed (regardless of what is set in the Check option of the configuration)
      - DiskSE: True if TXDY with Y > 0 (defaults to True)
      - TapeSE: True if TXDY with X > 0 (defaults to False)
      - TotalCapacityTB: float (-1 if not defined)
      - DiskCacheTB: float (-1 if not defined)
    """
        retDict = {}
        if not self.valid:
            retDict['Read'] = False
            retDict['Write'] = False
            retDict['Remove'] = False
            retDict['Check'] = False
            retDict['DiskSE'] = False
            retDict['TapeSE'] = False
            retDict['TotalCapacityTB'] = -1
            retDict['DiskCacheTB'] = -1
            return S_OK(retDict)

        # If nothing is defined in the CS Access is allowed
        # If something is defined, then it must be set to Active
        retDict['Read'] = self.__resourceStatus.isUsableStorage(
            self.name, 'ReadAccess')
        retDict['Write'] = self.__resourceStatus.isUsableStorage(
            self.name, 'WriteAccess')
        retDict['Remove'] = self.__resourceStatus.isUsableStorage(
            self.name, 'RemoveAccess')
        if retDict['Read']:
            retDict['Check'] = True
        else:
            retDict['Check'] = self.__resourceStatus.isUsableStorage(
                self.name, 'CheckAccess')
        diskSE = True
        tapeSE = False
        if 'SEType' in self.options:
            # Type should follow the convention TXDY
            seType = self.options['SEType']
            diskSE = re.search('D[1-9]', seType) is not None
            tapeSE = re.search('T[1-9]', seType) is not None
        retDict['DiskSE'] = diskSE
        retDict['TapeSE'] = tapeSE
        try:
            retDict['TotalCapacityTB'] = float(self.options['TotalCapacityTB'])
        except Exception:
            retDict['TotalCapacityTB'] = -1
        try:
            retDict['DiskCacheTB'] = float(self.options['DiskCacheTB'])
        except Exception:
            retDict['DiskCacheTB'] = -1

        return S_OK(retDict)

    def isValid(self, operation=''):
        """ check CS/RSS statuses for :operation:

    :param str operation: operation name
    """
        self.log.debug(
            "isValid: Determining whether the StorageElement %s is valid for %s"
            % (self.name, operation))

        if (not operation) or (operation in self.okMethods):
            return S_OK()

        if not self.valid:
            self.log.error("isValid: Failed to create StorageElement plugins.",
                           self.errorReason)
            return S_ERROR(self.errorReason)
        # Determine whether the StorageElement is valid for checking, reading, writing
        res = self.getStatus()
        if not res['OK']:
            self.log.error("Could not call getStatus")
            return S_ERROR(
                "StorageElement.isValid could not call the getStatus method")
        checking = res['Value']['Check']
        reading = res['Value']['Read']
        writing = res['Value']['Write']
        removing = res['Value']['Remove']

        # Determine whether the requested operation can be fulfilled
        if (not operation) and (not reading) and (not writing) and (
                not checking):
            self.log.error(
                "isValid: Read, write and check access not permitted.")
            return S_ERROR(
                "StorageElement.isValid: Read, write and check access not permitted."
            )

        # The supplied operation can be 'Read','Write' or any of the possible StorageElement methods.
        if (operation in self.readMethods) or (operation.lower()
                                               in ('read', 'readaccess')):
            operation = 'ReadAccess'
        elif operation in self.writeMethods or (operation.lower()
                                                in ('write', 'writeaccess')):
            operation = 'WriteAccess'
        elif operation in self.removeMethods or (operation.lower()
                                                 in ('remove',
                                                     'removeaccess')):
            operation = 'RemoveAccess'
        elif operation in self.checkMethods or (operation.lower()
                                                in ('check', 'checkaccess')):
            operation = 'CheckAccess'
        else:
            self.log.error("isValid: The supplied operation is not known.",
                           operation)
            return S_ERROR(
                "StorageElement.isValid: The supplied operation is not known.")
        self.log.debug("in isValid check the operation: %s " % operation)
        # Check if the operation is valid
        if operation == 'CheckAccess':
            if not reading:
                if not checking:
                    self.log.error(
                        "isValid: Check access not currently permitted.")
                    return S_ERROR(
                        "StorageElement.isValid: Check access not currently permitted."
                    )
        if operation == 'ReadAccess':
            if not reading:
                self.log.error("isValid: Read access not currently permitted.")
                return S_ERROR(
                    "StorageElement.isValid: Read access not currently permitted."
                )
        if operation == 'WriteAccess':
            if not writing:
                self.log.error(
                    "isValid: Write access not currently permitted.")
                return S_ERROR(
                    "StorageElement.isValid: Write access not currently permitted."
                )
        if operation == 'RemoveAccess':
            if not removing:
                self.log.error(
                    "isValid: Remove access not currently permitted.")
                return S_ERROR(
                    "StorageElement.isValid: Remove access not currently permitted."
                )
        return S_OK()

    def getProtocols(self):
        """ Get the list of all the protocols defined for this Storage Element
    """
        if not self.valid:
            return S_ERROR(self.errorReason)
        self.log.verbose("getProtocols: Obtaining all protocols.")
        allProtocols = self.localProtocols + self.remoteProtocols
        return S_OK(allProtocols)

    def getRemoteProtocols(self):
        """ Get the list of all the remote access protocols defined for this Storage Element
    """
        if not self.valid:
            return S_ERROR(self.errorReason)
        self.log.verbose(
            "getRemoteProtocols: Obtaining remote protocols for %s." %
            self.name)
        return S_OK(self.remoteProtocols)

    def getLocalProtocols(self):
        """ Get the list of all the local access protocols defined for this Storage Element
    """
        if not self.valid:
            return S_ERROR(self.errorReason)
        self.log.verbose(
            "getLocalProtocols: Obtaining local protocols for %s." % self.name)
        return S_OK(self.localProtocols)

    def getStorageElementOption(self, option):
        """ Get the value for the option supplied from self.options
    """
        if not self.valid:
            return S_ERROR(self.errorReason)
        self.log.verbose(
            "getStorageElementOption: Obtaining %s option for Storage Element %s."
            % (option, self.name))
        if option in self.options:
            optionValue = self.options[option]
            return S_OK(optionValue)
        else:
            errStr = "getStorageElementOption: Option not defined for SE."
            self.log.error(errStr, "%s for %s" % (option, self.name))
            return S_ERROR(errStr)

    def getStorageParameters(self, protocol):
        """ Get protocol specific options
    """
        self.log.verbose(
            "getStorageParameters: Obtaining storage parameters for %s protocol %s."
            % (self.name, protocol))
        res = self.getProtocols()
        if not res['OK']:
            return res
        availableProtocols = res['Value']
        if not protocol in availableProtocols:
            errStr = "getStorageParameters: Requested protocol not available for SE."
            self.log.warn(errStr, '%s for %s' % (protocol, self.name))
            return S_ERROR(errStr)
        for storage in self.storages:
            res = storage.getParameters()
            storageParameters = res['Value']
            if storageParameters['ProtocolName'] == protocol:
                return S_OK(storageParameters)
        errStr = "getStorageParameters: Requested protocol supported but no object found."
        self.log.error(errStr, "%s for %s" % (protocol, self.name))
        return S_ERROR(errStr)

    def isLocalSE(self):
        """ Test if the Storage Element is local in the current context
    """
        import DIRAC
        self.log.verbose("isLocalSE: Determining whether %s is a local SE." %
                         self.name)
        localSEs = getSEsForSite(DIRAC.siteName())['Value']
        if self.name in localSEs:
            return S_OK(True)
        else:
            return S_OK(False)

    #################################################################################################
    #
    # These are the basic get functions for pfn manipulation
    #

    def getPfnForProtocol(self, pfn, protocol, withPort=True):
        """ Transform the input pfn into another with the given protocol for the Storage Element.
    """
        res = self.getProtocols()
        if not res['OK']:
            return res
        if type(protocol) == StringType:
            protocols = [protocol]
        elif type(protocol) == ListType:
            protocols = protocol
        else:
            errStr = "getPfnForProtocol: Supplied protocol must be string or list of strings."
            self.log.error(errStr, "%s %s" % (protocol, self.name))
            return S_ERROR(errStr)
        availableProtocols = res['Value']
        protocolsToTry = []
        for protocol in protocols:
            if protocol in availableProtocols:
                protocolsToTry.append(protocol)
            else:
                errStr = "getPfnForProtocol: Requested protocol not available for SE."
                self.log.debug(errStr, '%s for %s' % (protocol, self.name))
        if not protocolsToTry:
            errStr = "getPfnForProtocol: None of the requested protocols were available for SE."
            self.log.error(errStr, '%s for %s' % (protocol, self.name))
            return S_ERROR(errStr)
        # Check all available storages for the required protocol then construct the PFN
        for storage in self.storages:
            res = storage.getParameters()
            if res['Value']['ProtocolName'] in protocolsToTry:
                res = pfnparse(pfn)
                if res['OK']:
                    res = storage.getProtocolPfn(res['Value'], withPort)
                    if res['OK']:
                        return res
        errStr = "getPfnForProtocol: Failed to get PFN for requested protocols."
        self.log.error(errStr, "%s for %s" % (protocols, self.name))
        return S_ERROR(errStr)

    def getPfnPath(self, pfn):
        """  Get the part of the PFN path below the basic storage path.
         This path must coincide with the LFN of the file in order to be compliant with the LHCb conventions.
    """
        if not self.valid:
            return S_ERROR(self.errorReason)
        res = pfnparse(pfn)
        if not res['OK']:
            return res
        fullPfnPath = '%s/%s' % (res['Value']['Path'],
                                 res['Value']['FileName'])

        # Check all available storages and check whether the pfn is for that protocol
        pfnPath = ''
        for storage in self.storages:
            res = storage.isPfnForProtocol(pfn)
            if res['OK']:
                if res['Value']:
                    res = storage.getParameters()
                    saPath = res['Value']['Path']
                    if not saPath:
                        # If the sa path doesn't exist then the pfn path is the entire string
                        pfnPath = fullPfnPath
                    else:
                        if re.search(saPath, fullPfnPath):
                            # Remove the sa path from the fullPfnPath
                            pfnPath = fullPfnPath.replace(saPath, '')
            if pfnPath:
                return S_OK(pfnPath)
        # This should never happen. DANGER!!
        errStr = "getPfnPath: Failed to get the pfn path for any of the protocols!!"
        self.log.error(errStr)
        return S_ERROR(errStr)

    def getPfnForLfn(self, lfn):
        """ Get the full PFN constructed from the LFN.
    """
        if not self.valid:
            return S_ERROR(self.errorReason)
        for storage in self.storages:
            res = storage.getPFNBase()
            if res['OK']:
                fullPath = "%s%s" % (res['Value'], lfn)
                return S_OK(fullPath)
        # This should never happen. DANGER!!
        errStr = "getPfnForLfn: Failed to get the full pfn for any of the protocols!!"
        self.log.error(errStr)
        return S_ERROR(errStr)

    def getPFNBase(self):
        """ Get the base to construct a PFN
    """
        if not self.storages:
            return S_ERROR('No storages defined')
        for storage in self.storages:
            result = storage.getPFNBase()
            if result['OK']:
                return result

        return result

    ###########################################################################################
    #
    # This is the generic wrapper for file operations
    #

    def retransferOnlineFile(self, pfn, singleFile=False):
        """ execcute 'retransferOnlineFile' operation """
        return {
            True: self.__executeSingleFile,
            False: self.__executeFunction
        }[bool(singleFile)](pfn, 'retransferOnlineFile')

    def exists(self, pfn, singleFile=False):
        """ execute 'exists' operation  """
        return {
            True: self.__executeSingleFile,
            False: self.__executeFunction
        }[bool(singleFile)](pfn, 'exists')

    def isFile(self, pfn, singleFile=False):
        """ execute 'isFile' operation """
        return {
            True: self.__executeSingleFile,
            False: self.__executeFunction
        }[bool(singleFile)](pfn, 'isFile')

    def getFile(self, pfn, localPath=False, singleFile=False):
        """ execute 'getFile' operation """
        return {
            True: self.__executeSingleFile,
            False: self.__executeFunction
        }[bool(singleFile)](pfn, 'getFile', {
            'localPath': localPath
        })

    def putFile(self, pfn, sourceSize=0, singleFile=False):
        """ execute 'putFile' operation """
        return {
            True: self.__executeSingleFile,
            False: self.__executeFunction
        }[bool(singleFile)](pfn, 'putFile', {
            'sourceSize': sourceSize
        })

    def replicateFile(self, pfn, sourceSize=0, singleFile=False):
        """ execute 'putFile' as replicate """
        return {
            True: self.__executeSingleFile,
            False: self.__executeFunction
        }[bool(singleFile)](pfn, 'putFile', {
            'sourceSize': sourceSize
        })

    def getFileMetadata(self, pfn, singleFile=False):
        """ execute 'getFileMetadata' operation """
        return {
            True: self.__executeSingleFile,
            False: self.__executeFunction
        }[bool(singleFile)](pfn, 'getFileMetadata')

    def getFileSize(self, pfn, singleFile=False):
        """ execute 'getFileSize' operation """
        return {
            True: self.__executeSingleFile,
            False: self.__executeFunction
        }[bool(singleFile)](pfn, 'getFileSize')

    def getAccessUrl(self, pfn, protocol=False, singleFile=False):
        """ execute 'getTransportURL' operation """
        if not protocol:
            protocols = self.turlProtocols
        else:
            protocols = [protocol]
        return {
            True: self.__executeSingleFile,
            False: self.__executeFunction
        }[bool(singleFile)](pfn, 'getTransportURL', {
            'protocols': protocols
        })

    def removeFile(self, pfn, singleFile=False):
        """ execute 'removeFile' operation """
        return {
            True: self.__executeSingleFile,
            False: self.__executeFunction
        }[bool(singleFile)](pfn, 'removeFile')

    def prestageFile(self, pfn, lifetime=86400, singleFile=False):
        """ execute 'prestageFile' operation """
        return {
            True: self.__executeSingleFile,
            False: self.__executeFunction
        }[bool(singleFile)](pfn, 'prestageFile', {
            'lifetime': lifetime
        })

    def prestageFileStatus(self, pfn, singleFile=False):
        """ execute 'prestageFileStatus' operation """
        return {
            True: self.__executeSingleFile,
            False: self.__executeFunction
        }[bool(singleFile)](pfn, 'prestageFileStatus')

    def pinFile(self, pfn, lifetime=60 * 60 * 24, singleFile=False):
        """ execute 'pinFile' operation """
        return {
            True: self.__executeSingleFile,
            False: self.__executeFunction
        }[bool(singleFile)](pfn, 'pinFile', {
            'lifetime': lifetime
        })

    def releaseFile(self, pfn, singleFile=False):
        """ execute 'releaseFile' operation """
        return {
            True: self.__executeSingleFile,
            False: self.__executeFunction
        }[bool(singleFile)](pfn, 'releaseFile')

    def isDirectory(self, pfn, singleDirectory=False):
        """ execute 'isDirectory' operation """
        return {
            True: self.__executeSingleFile,
            False: self.__executeFunction
        }[bool(singleDirectory)](pfn, 'isDirectory')

    def getDirectoryMetadata(self, pfn, singleDirectory=False):
        """ execute 'getDirectoryMetadata' operation """
        return {
            True: self.__executeSingleFile,
            False: self.__executeFunction
        }[bool(singleDirectory)](pfn, 'getDirectoryMetadata')

    def getDirectorySize(self, pfn, singleDirectory=False):
        """ execute 'getDirectorySize' operation """
        return {
            True: self.__executeSingleFile,
            False: self.__executeFunction
        }[bool(singleDirectory)](pfn, 'getDirectorySize')

    def listDirectory(self, pfn, singleDirectory=False):
        """ execute 'listDirectory' operation """
        return {
            True: self.__executeSingleFile,
            False: self.__executeFunction
        }[bool(singleDirectory)](pfn, 'listDirectory')

    def removeDirectory(self, pfn, recursive=False, singleDirectory=False):
        """ execute 'removeDirectory' operation """
        return {
            True: self.__executeSingleFile,
            False: self.__executeFunction
        }[bool(singleDirectory)](pfn, 'removeDirectory', {
            'recursive': recursive
        })

    def createDirectory(self, pfn, singleDirectory=False):
        """ execute 'createDirectory' operation """
        return {
            True: self.__executeSingleFile,
            False: self.__executeFunction
        }[bool(singleDirectory)](pfn, 'createDirectory')

    def putDirectory(self, pfn, singleDirectory=False):
        """ execute 'putDirectory' operation """
        return {
            True: self.__executeSingleFile,
            False: self.__executeFunction
        }[bool(singleDirectory)](pfn, 'putDirectory')

    def getDirectory(self, pfn, localPath=False, singleDirectory=False):
        """ execute 'getDirectory' operation """
        return {
            True: self.__executeSingleFile,
            False: self.__executeFunction
        }[bool(singleDirectory)](pfn, 'getDirectory', {
            'localPath': localPath
        })

    def __executeSingleFile(self, pfn, operation, arguments=None):
        """ execute for single file """
        if arguments is None:
            res = self.__executeFunction(pfn, operation, {})
        else:
            res = self.__executeFunction(pfn, operation, arguments)
        if type(pfn) == ListType:
            pfn = pfn[0]
        elif type(pfn) == DictType:
            pfn = pfn.keys()[0]
        if not res['OK']:
            return res
        elif pfn in res['Value']['Failed']:
            errorMessage = res['Value']['Failed'][pfn]
            return S_ERROR(errorMessage)
        else:
            return S_OK(res['Value']['Successful'][pfn])

    def __executeFunction(self, pfn, method, argsDict=None):
        """
        'pfn' is the physical file name (as registered in the LFC)
        'method' is the functionality to be executed
    """
        ## default args  = no args
        argsDict = argsDict if argsDict else {}
        if type(pfn) in StringTypes:
            pfns = {pfn: False}
        elif type(pfn) == ListType:
            pfns = {}
            for url in pfn:
                pfns[url] = False
        elif type(pfn) == DictType:
            pfns = pfn.copy()
        else:
            errStr = "__executeFunction: Supplied pfns must be string or list of strings or a dictionary."
            self.log.error(errStr)
            return S_ERROR(errStr)

        if not pfns:
            self.log.verbose("__executeFunction: No pfns supplied.")
            return S_OK({'Failed': {}, 'Successful': {}})
        self.log.verbose(
            "__executeFunction: Attempting to perform '%s' operation with %s pfns."
            % (method, len(pfns)))

        res = self.isValid(operation=method)
        if not res['OK']:
            return res
        else:
            if not self.valid:
                return S_ERROR(self.errorReason)

        successful = {}
        failed = {}
        localSE = self.isLocalSE()['Value']
        # Try all of the storages one by one
        for storage in self.storages:
            # Determine whether to use this storage object
            res = storage.getParameters()
            useProtocol = True
            if not res['OK']:
                self.log.error(
                    "__executeFunction: Failed to get storage parameters.",
                    "%s %s" % (self.name, res['Message']))
                useProtocol = False
            else:
                protocolName = res['Value']['ProtocolName']
                if not pfns:
                    useProtocol = False
                    self.log.verbose(
                        "__executeFunction: No pfns to be attempted for %s protocol."
                        % protocolName)
                elif not (protocolName
                          in self.remoteProtocols) and not localSE:
                    # If the SE is not local then we can't use local protocols
                    useProtocol = False
                    self.log.verbose(
                        "__executeFunction: Protocol not appropriate for use: %s."
                        % protocolName)
            if useProtocol:
                self.log.verbose(
                    "__executeFunction: Generating %s protocol PFNs for %s." %
                    (len(pfns), protocolName))
                res = self.__generatePfnDict(pfns, storage)
                pfnDict = res['Value']
                failed.update(res['Failed'])
                if not len(pfnDict) > 0:
                    self.log.verbose(
                        "__executeFunction No pfns generated for protocol %s."
                        % protocolName)
                else:
                    self.log.verbose(
                        "__executeFunction: Attempting to perform '%s' for %s physical files"
                        % (method, len(pfnDict)))
                    fcn = None
                    if hasattr(storage, method) and callable(
                            getattr(storage, method)):
                        fcn = getattr(storage, method)
                    if not fcn:
                        return S_ERROR(
                            "__executeFunction: unable to invoke %s, it isn't a member function of storage"
                            % method)

                    pfnsToUse = {}
                    for pfn in pfnDict:
                        pfnsToUse[pfn] = pfns[pfnDict[pfn]]

                    res = fcn(pfnsToUse, **argsDict)

                    if not res['OK']:
                        errStr = "__executeFunction: Completely failed to perform %s." % method
                        self.log.error(
                            errStr, '%s for protocol %s: %s' %
                            (self.name, protocolName, res['Message']))
                        for pfn in pfnDict.values():
                            if pfn not in failed:
                                failed[pfn] = ''
                            failed[pfn] = "%s %s" % (failed[pfn],
                                                     res['Message'])
                    else:
                        for protocolPfn, pfn in pfnDict.items():
                            if protocolPfn not in res['Value']['Successful']:
                                if pfn not in failed:
                                    failed[pfn] = ''
                                if protocolPfn in res['Value']['Failed']:
                                    failed[pfn] = "%s %s" % (
                                        failed[pfn],
                                        res['Value']['Failed'][protocolPfn])
                                else:
                                    failed[pfn] = "%s %s" % (
                                        failed[pfn],
                                        'No error returned from plug-in')
                            else:
                                successful[pfn] = res['Value']['Successful'][
                                    protocolPfn]
                                if pfn in failed:
                                    failed.pop(pfn)
                                pfns.pop(pfn)

        return S_OK({'Failed': failed, 'Successful': successful})

    def __generatePfnDict(self, pfns, storage):
        """ whatever, it creates PFN dict  """
        pfnDict = {}
        failed = {}
        for pfn in pfns:
            res = pfnparse(pfn)
            if not res['OK']:
                errStr = "__generatePfnDict: Failed to parse supplied PFN."
                self.log.error(errStr, "%s: %s" % (pfn, res['Message']))
                if pfn not in failed:
                    failed[pfn] = ''
                failed[pfn] = "%s %s" % (failed[pfn], errStr)
            else:
                res = storage.getProtocolPfn(res['Value'], True)
                if not res['OK']:
                    errStr = "__generatePfnDict %s." % res['Message']
                    self.log.error(errStr, 'for %s' % (pfn))
                    if pfn not in failed:
                        failed[pfn] = ''
                    failed[pfn] = "%s %s" % (failed[pfn], errStr)
                else:
                    pfnDict[res['Value']] = pfn
        res = S_OK(pfnDict)
        res['Failed'] = failed
        return res
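# --- Usage sketch (assumes a configured DIRAC client) ------------------------
# 'CERN-RAW' and the srm:// PFN below are hypothetical; singleFile=True makes
# the wrapper unwrap the bulk Successful/Failed dictionaries for a single PFN.
se = StorageElement('CERN-RAW')
res = se.getStatus()
if res['OK'] and res['Value']['Read']:
    res = se.getFileSize('srm://srm.example.org/lhcb/data/file.dst', singleFile=True)
    if res['OK']:
        gLogger.info("File size: %s bytes" % res['Value'])
    else:
        gLogger.error(res['Message'])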
Exemplo n.º 26
0
class StorageFactory:
    def __init__(self, useProxy=False, vo=None):

        self.valid = True
        self.proxy = False
        self.proxy = useProxy
        self.resourceStatus = ResourceStatus()
        self.resourcesHelper = Resources(vo=vo)

    ###########################################################################################
    #
    # Below are public methods for obtaining storage objects
    #

    def getStorageName(self, initialName):
        return self._getConfigStorageName(initialName)

    def getStorage(self, parameterDict):
        """ This instantiates a single storage for the details provided and doesn't check the CS.
    """
        # The storage name must be supplied.
        if parameterDict.has_key("StorageName"):
            storageName = parameterDict["StorageName"]
        else:
            errStr = "StorageFactory.getStorage: StorageName must be supplied"
            gLogger.error(errStr)
            return S_ERROR(errStr)

        # ProtocolName must be supplied otherwise nothing will work.
        if parameterDict.has_key("ProtocolName"):
            protocolName = parameterDict["ProtocolName"]
        else:
            errStr = "StorageFactory.getStorage: ProtocolName must be supplied"
            gLogger.error(errStr)
            return S_ERROR(errStr)

        # The other options need not always be specified
        if parameterDict.has_key("Protocol"):
            protocol = parameterDict["Protocol"]
        else:
            protocol = ""

        if parameterDict.has_key("Port"):
            port = parameterDict["Port"]
        else:
            port = ""

        if parameterDict.has_key("Host"):
            host = parameterDict["Host"]
        else:
            host = ""

        if parameterDict.has_key("Path"):
            path = parameterDict["Path"]
        else:
            path = ""

        if parameterDict.has_key("SpaceToken"):
            spaceToken = parameterDict["SpaceToken"]
        else:
            spaceToken = ""

        if parameterDict.has_key("WSUrl"):
            wsPath = parameterDict["WSUrl"]
        else:
            wsPath = ""

        return self.__generateStorageObject(
            storageName, protocolName, protocol, path, host, port, spaceToken, wsPath, parameterDict
        )
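
    # A minimal usage sketch (the parameter values below are hypothetical):
    # getStorage() builds a single plug-in from an explicit parameter dictionary
    # and never consults the CS.
    #
    #   factory = StorageFactory()
    #   res = factory.getStorage({'StorageName': 'CERN-USER',        # hypothetical SE name
    #                             'ProtocolName': 'SRM2',            # selects the SRM2Storage plug-in
    #                             'Protocol': 'srm',
    #                             'Host': 'srm.example.org',         # hypothetical endpoint
    #                             'Port': '8443',
    #                             'Path': '/dpm/example.org/home'})  # hypothetical path
    #   if res['OK']:
    #       storage = res['Value']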

    def getStorages(self, storageName, protocolList=[]):
        """ Get an instance of a Storage based on the DIRAC SE name based on the CS entries CS

        'storageName' is the DIRAC SE name i.e. 'CERN-RAW'
        'protocolList' is an optional list of protocols if a sub-set is desired i.e ['SRM2','SRM1']
    """
        self.remoteProtocols = []
        self.localProtocols = []
        self.name = ""
        self.options = {}
        self.protocolDetails = []
        self.storages = []

        # Get the name of the storage provided
        res = self._getConfigStorageName(storageName)
        if not res["OK"]:
            self.valid = False
            return res
        storageName = res["Value"]
        self.name = storageName

        # Get the options defined in the CS for this storage
        res = self._getConfigStorageOptions(storageName)
        if not res["OK"]:
            self.valid = False
            return res
        self.options = res["Value"]

        # Get the protocol specific details
        res = self._getConfigStorageProtocols(storageName)
        if not res["OK"]:
            self.valid = False
            return res
        self.protocolDetails = res["Value"]

        requestedLocalProtocols = []
        requestedRemoteProtocols = []
        requestedProtocolDetails = []
        turlProtocols = []
        # Generate the protocol specific plug-ins
        self.storages = []
        for protocolDict in self.protocolDetails:
            protocolName = protocolDict["ProtocolName"]
            protocolRequested = True
            if protocolList:
                if protocolName not in protocolList:
                    protocolRequested = False
            if protocolRequested:
                protocol = protocolDict["Protocol"]
                host = protocolDict["Host"]
                path = protocolDict["Path"]
                port = protocolDict["Port"]
                spaceToken = protocolDict["SpaceToken"]
                wsUrl = protocolDict["WSUrl"]
                res = self.__generateStorageObject(
                    storageName,
                    protocolName,
                    protocol,
                    path=path,
                    host=host,
                    port=port,
                    spaceToken=spaceToken,
                    wsUrl=wsUrl,
                    parameters=protocolDict,
                )
                if res["OK"]:
                    self.storages.append(res["Value"])
                    if protocolName in self.localProtocols:
                        turlProtocols.append(protocol)
                        requestedLocalProtocols.append(protocolName)
                    if protocolName in self.remoteProtocols:
                        requestedRemoteProtocols.append(protocolName)
                    requestedProtocolDetails.append(protocolDict)
                else:
                    gLogger.info(res["Message"])

        if len(self.storages) > 0:
            resDict = {}
            resDict["StorageName"] = self.name
            resDict["StorageOptions"] = self.options
            resDict["StorageObjects"] = self.storages
            resDict["LocalProtocols"] = requestedLocalProtocols
            resDict["RemoteProtocols"] = requestedRemoteProtocols
            resDict["ProtocolOptions"] = requestedProtocolDetails
            resDict["TurlProtocols"] = turlProtocols
            return S_OK(resDict)
        else:
            errStr = "StorageFactory.getStorages: Failed to instantiate any storage protocols."
            gLogger.error(errStr, self.name)
            return S_ERROR(errStr)
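
    # A minimal usage sketch (SE, VO and protocol names are assumptions): getStorages()
    # resolves the SE definition from the CS and returns one plug-in per configured protocol.
    #
    #   res = StorageFactory(vo='lhcb').getStorages('CERN-RAW', protocolList=['SRM2'])
    #   if res['OK']:
    #       storageObjects = res['Value']['StorageObjects']
    #       localProtocols = res['Value']['LocalProtocols']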

    ###########################################################################################
    #
    # Below are internal methods for obtaining section/option/value configuration
    #

    def _getConfigStorageName(self, storageName):
        """
      This gets the name of the storage from the configuration service.
      If the storage is an alias for another one, the resolution is performed.

      'storageName' is the storage section to check in the CS
    """
        result = self.resourcesHelper.getStorageElementOptionsDict(storageName)
        if not result["OK"]:
            errStr = "StorageFactory._getConfigStorageName: Failed to get storage options"
            gLogger.error(errStr, result["Message"])
            return S_ERROR(errStr)
        if not result["Value"]:
            errStr = "StorageFactory._getConfigStorageName: Supplied storage doesn't exist."
            gLogger.error(errStr, storageName)
            return S_ERROR(errStr)
        if "Alias" in res["Value"]:
            configPath = "%s/%s/Alias" % (self.rootConfigPath, storageName)
            aliasName = gConfig.getValue(configPath)
            result = self._getConfigStorageName(aliasName)
            if not result["OK"]:
                errStr = "StorageFactory._getConfigStorageName: Supplied storage doesn't exist."
                gLogger.error(errStr, configPath)
                return S_ERROR(errStr)
            resolvedName = result["Value"]
        else:
            resolvedName = storageName
        return S_OK(resolvedName)
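
    # Alias resolution sketch (hypothetical CS values): a storage section may simply
    # point at another one, e.g.
    #   /Resources/StorageElements/CERN-USER/Alias = CERN-disk
    # in which case _getConfigStorageName() recurses until a concrete definition is found.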

    def _getConfigStorageOptions(self, storageName):
        """ Get the options associated to the StorageElement as defined in the CS
    """

        result = self.resourcesHelper.getStorageElementOptionsDict(storageName)
        if not result["OK"]:
            errStr = "StorageFactory._getStorageOptions: Failed to get storage options."
            gLogger.error(errStr, "%s: %s" % (storageName, result["Message"]))
            return S_ERROR(errStr)
        optionsDict = result["Value"]

        result = self.resourceStatus.getStorageStatus(storageName, "ReadAccess")
        if not result["OK"]:
            errStr = "StorageFactory._getStorageOptions: Failed to get storage status"
            gLogger.error(errStr, "%s: %s" % (storageName, result["Message"]))
            return S_ERROR(errStr)
        # optionsDict.update( result[ 'Value' ][ storageName ] )

        return S_OK(optionsDict)

    def _getConfigStorageProtocols(self, storageName):
        """ Protocol specific information is present as sections in the Storage configuration
    """
        result = getSiteForResource(storageName)
        if not result["OK"]:
            return result
        site = result["Value"]
        result = self.resourcesHelper.getEligibleNodes("AccessProtocol", {"Site": site, "Resource": storageName})
        if not result["OK"]:
            return result
        nodesList = result["Value"]
        protocols = list(nodesList)
        protocolDetails = []
        for protocol in protocols:
            result = self._getConfigStorageProtocolDetails(protocol)
            if not result["OK"]:
                return result
            protocolDetails.append(result["Value"])
        self.protocols = self.localProtocols + self.remoteProtocols
        return S_OK(protocolDetails)

    def _getConfigStorageProtocolDetails(self, protocol):
        """
      Parse the contents of the protocol block
    """

        result = self.resourcesHelper.getAccessProtocolOptionsDict(protocol)
        if not result["OK"]:
            return result
        optionsDict = result["Value"]

        # We must have certain values internally even if not supplied in CS
        protocolDict = {
            "Access": "",
            "Host": "",
            "Path": "",
            "Port": "",
            "Protocol": "",
            "ProtocolName": "",
            "SpaceToken": "",
            "WSUrl": "",
        }
        for option in optionsDict:
            protocolDict[option] = optionsDict[option]

        # Now update the local and remote protocol lists.
        # A warning will be given if the Access option is not set.
        if protocolDict["Access"] == "remote":
            self.remoteProtocols.append(protocolDict["ProtocolName"])
        elif protocolDict["Access"] == "local":
            self.localProtocols.append(protocolDict["ProtocolName"])
        else:
            errStr = (
                "StorageFactory._getConfigStorageProtocolDetails: The 'Access' option for %s is neither 'local' nor 'remote'."
                % protocol
            )
            gLogger.warn(errStr)

        # The ProtocolName option must be defined
        if not protocolDict["ProtocolName"]:
            errStr = "StorageFactory.__getProtocolDetails: 'ProtocolName' option is not defined."
            gLogger.error(errStr, "%s" % protocol)
            return S_ERROR(errStr)
        return S_OK(protocolDict)

    ###########################################################################################
    #
    # Below is the method for obtaining the object instantiated for a provided storage configuration
    #

    def __generateStorageObject(
        self,
        storageName,
        protocolName,
        protocol,
        path=None,
        host=None,
        port=None,
        spaceToken=None,
        wsUrl=None,
        parameters=None,
    ):

        # Avoid sharing a mutable default argument between calls
        parameters = parameters if parameters else {}

        storageType = protocolName
        if self.proxy:
            storageType = "Proxy"

        moduleRootPaths = getInstalledExtensions()
        moduleLoaded = False
        # Guard against a None path before normalising trailing slashes
        path = path.rstrip("/") if path else ""
        if not path:
            path = "/"
        for moduleRootPath in moduleRootPaths:
            if moduleLoaded:
                break
            gLogger.verbose("Trying to load from root path %s" % moduleRootPath)
            moduleFile = os.path.join(rootPath, moduleRootPath, "Resources", "Storage", "%sStorage.py" % storageType)
            gLogger.verbose("Looking for file %s" % moduleFile)
            if not os.path.isfile(moduleFile):
                continue
            try:
                # This enforces the convention that the plug-in must be named after the protocol
                moduleName = "%sStorage" % (storageType)
                storageModule = __import__(
                    "%s.Resources.Storage.%s" % (moduleRootPath, moduleName), globals(), locals(), [moduleName]
                )
            except Exception, x:
                errStr = "StorageFactory._generateStorageObject: Failed to import %s: %s" % (storageName, x)
                gLogger.exception(errStr)
                return S_ERROR(errStr)

            try:
                # Look up the plug-in class by name (equivalent to the original eval-based construction)
                storageClass = getattr(storageModule, moduleName)
                storage = storageClass(storageName, protocol, path, host, port, spaceToken, wsUrl)
                if not storage.isOK():
                    errStr = "StorageFactory._generateStorageObject: Failed to instantiate storage plug in."
                    gLogger.error(errStr, "%s" % (moduleName))
                    return S_ERROR(errStr)
            except Exception, x:
                errStr = "StorageFactory._generateStorageObject: Failed to instantiate %s(): %s" % (moduleName, x)
                gLogger.exception(errStr)
                return S_ERROR(errStr)

            # Set extra parameters if any
            if parameters:
                result = storage.setParameters(parameters)
                if not result["OK"]:
                    return result

            # If use proxy, keep the original protocol name
            if self.proxy:
                storage.protocolName = protocolName
            return S_OK(storage)
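
A quick note on the plug-in lookup above: __generateStorageObject relies on a naming convention where a storage type 'XYZ' is served by a module XYZStorage.py under Resources/Storage that defines a class of the same name. The following is a minimal standalone sketch of that convention using importlib instead of the __import__/getattr pair; the package and plug-in names in the commented usage are placeholders, not a statement about the actual DIRAC layout.

import importlib


def loadStoragePlugin(packageName, storageType):
    """ Load <storageType>Storage from <packageName>.Resources.Storage by naming convention. """
    moduleName = "%sStorage" % storageType
    module = importlib.import_module("%s.Resources.Storage.%s" % (packageName, moduleName))
    # The plug-in class must carry the same name as its module
    return getattr(module, moduleName)

# Hypothetical usage, mirroring the constructor call built above:
#   pluginClass = loadStoragePlugin("DIRAC", "SRM2")
#   storage = pluginClass(storageName, protocol, path, host, port, spaceToken, wsUrl)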
Example No. 27
0
  DIRAC.exit()

STATUS_TYPES = [ "ReadAccess", "WriteAccess", "CheckAccess", "RemoveAccess" ]
ALLOWED_STATUSES = [ "Unknown", "InActive", "Banned", "Probing", "Degraded" ]

statusAllowedDict = {}
for statusType in STATUS_TYPES:
  statusAllowedDict[statusType] = []

statusFlagDict = {}
statusFlagDict['ReadAccess'] = read
statusFlagDict['WriteAccess'] = write
statusFlagDict['CheckAccess'] = check
statusFlagDict['RemoveAccess'] = remove

resourceStatus = ResourceStatus()

res = resourceStatus.getElementStatus( ses, "StorageElement" )
if not res[ 'OK' ]:
  gLogger.error( 'Storage Element %s does not exist' % ses )
  DIRAC.exit( -1 )

reason = 'Forced with dirac-admin-allow-se by %s' % userName

for se, seOptions in res[ 'Value' ].iteritems():

  # InActive is used on the CS model, Banned is the equivalent in RSS
  for statusType in STATUS_TYPES:
    if statusFlagDict[statusType]:
      if seOptions.get( statusType ) == "Active":
        gLogger.notice( '%s status of %s is already Active' % ( statusType, se ) )
Example No. 28
0
    res = gConfig.getOptionsDict('/Resources/Sites/LCG/%s' % site)
    if not res['OK']:
        gLogger.error('The provided site (%s) is not known.' % site)
        DIRAC.exit(-1)
    ses.extend(res['Value']['SE'].replace(' ', '').split(','))

if not ses:
    gLogger.error('There were no SEs provided')
    DIRAC.exit(-1)

readBanned = []
writeBanned = []
checkBanned = []
removeBanned = []

resourceStatus = ResourceStatus()

res = resourceStatus.getElementStatus(ses, "StorageElement")
if not res['OK']:
    gLogger.error("Storage Element %s does not exist" % ses)
    DIRAC.exit(-1)

reason = 'Forced with dirac-admin-ban-se by %s' % userName

for se, seOptions in res['Value'].items():

    resW = resC = resR = {'OK': False}

    # Eventually, we will get rid of the notion of InActive, as we always write Banned.
    if read and 'ReadAccess' in seOptions:
Example No. 29
0
class StrategyHandler( object ):
  """
  .. class:: StrategyHandler

  StrategyHandler is a helper class for determining optimal replication tree for given
  source files, their replicas and target storage elements.
  """

  def __init__( self, configSection, bandwidths=None, channels=None, failedFiles=None ):
    """c'tor

    :param self: self reference
    :param str configSection: path on CS to ReplicationScheduler agent
    :param bandwidths: observed throughput on active channels
    :param channels: active channels
    :param dict failedFiles: observed distinct failed files per channel
    """
    ## save config section
    self.configSection = configSection + "/" + self.__class__.__name__
    ## sublogger
    self.log = gLogger.getSubLogger( "StrategyHandler", child=True )
    self.log.setLevel( gConfig.getValue( self.configSection + "/LogLevel", "DEBUG"  ) )
  
    self.supportedStrategies = [ 'Simple', 'DynamicThroughput', 'Swarm', 'MinimiseTotalWait' ]
    self.log.debug( "Supported strategies = %s" % ", ".join( self.supportedStrategies ) )
  
    self.sigma = gConfig.getValue( self.configSection + '/HopSigma', 0.0 )
    self.log.debug( "HopSigma = %s" % self.sigma )
    self.schedulingType = gConfig.getValue( self.configSection + '/SchedulingType', 'File' )
    self.log.debug( "SchedulingType = %s" % self.schedulingType )
    self.activeStrategies = gConfig.getValue( self.configSection + '/ActiveStrategies', ['MinimiseTotalWait'] )
    self.log.debug( "ActiveStrategies = %s" % ", ".join( self.activeStrategies ) )
    self.numberOfStrategies = len( self.activeStrategies )
    self.log.debug( "Number of active strategies = %s" % self.numberOfStrategies )
    self.acceptableFailureRate = gConfig.getValue( self.configSection + '/AcceptableFailureRate', 75 )
    self.log.debug( "AcceptableFailureRate = %s" % self.acceptableFailureRate )
    self.acceptableFailedFiles = gConfig.getValue( self.configSection + "/AcceptableFailedFiles", 5 )
    self.log.debug( "AcceptableFailedFiles = %s" % self.acceptableFailedFiles )

    self.bandwidths = bandwidths if bandwidths else {}
    self.channels = channels if channels else {}
    self.failedFiles = failedFiles if failedFiles else {}
    self.chosenStrategy = 0

    # dispatcher
    self.strategyDispatcher = { re.compile("MinimiseTotalWait") : self.__minimiseTotalWait, 
                                re.compile("DynamicThroughput") : self.__dynamicThroughput,
                                re.compile("Simple") : self.__simple, 
                                re.compile("Swarm") : self.__swarm }

    self.resourceStatus = ResourceStatus()

    self.log.debug( "strategyDispatcher entries:" )
    for key, value in self.strategyDispatcher.items():
      self.log.debug( "%s : %s" % ( key.pattern, value.__name__ ) )

    self.log.debug("%s has been constructed" % self.__class__.__name__ )

  def reset( self ):
    """ reset :chosenStrategy: 

    :param self: self reference
    """
    self.chosenStrategy = 0

  def setFailedFiles( self, failedFiles ):
    """ set the failed FTS files counters

    :param self: self reference
    :param failedFiles: observed distinct failed files
    """
    self.failedFiles = failedFiles if failedFiles else {}

  def setBandwiths( self, bandwidths ):
    """ set the bandwidths 

    :param self: self reference
    :param bandwidths: observed throughput of active FTS channels
    """
  
    self.bandwidths = bandwidths if bandwidths else {}

  def setChannels( self, channels ):
    """ set the channels
    
    :param self: self reference
    :param channels: active channels queues
    """
    self.channels = channels if channels else {}

  def getSupportedStrategies( self ):
    """ Get supported strategies.

    :param self: self reference
    """    
    return self.supportedStrategies

  def determineReplicationTree( self, sourceSE, targetSEs, replicas, size, strategy = None, sigma = None ):
    """ resolve and find replication tree given source and target storage elements, active replicas, 
    and file size.

    :param self: self reference
    :param str sourceSE: source storage element name
    :param list targetSEs: list of target storage elements
    :param dict replicas: active replicas
    :param int size: file size
    :param str strategy: strategy to use
    :param float sigma: hop sigma
    """
    if not strategy:
      strategy = self.__selectStrategy()
    self.log.debug( "determineReplicationTree: will use %s strategy"  % strategy )

    if sigma:
      self.log.debug( "determineReplicationTree: sigma = %s"  % sigma )
      self.sigma = sigma

    # For each strategy implemented an 'if' must be placed here 
    tree = {}
    for reStrategy in self.strategyDispatcher:
      self.log.debug( reStrategy.pattern )
      if reStrategy.search( strategy ):
        if "_" in strategy:
          try:
            self.sigma = float(strategy.split("_")[1])
            self.log.debug("determineReplicationTree: new sigma %s" % self.sigma )
          except ValueError:
            self.log.warn("determineReplicationTree: can't set new sigma value from '%s'" % strategy )
        if reStrategy.pattern in [ "MinimiseTotalWait", "DynamicThroughput" ]:
          replicasToUse = replicas.keys() if sourceSE is None else [ sourceSE ]
          tree = self.strategyDispatcher[ reStrategy ].__call__( replicasToUse, targetSEs  )
        elif reStrategy.pattern == "Simple":
          if sourceSE not in replicas:
            return S_ERROR( "File does not exist at specified source site" )
          tree = self.__simple( sourceSE, targetSEs )
        elif reStrategy.pattern == "Swarm":
          tree = self.__swarm( targetSEs[0], replicas.keys() )
      
    # Now update the queues to reflect the chosen strategies
    for channelID in tree:
      self.channels[channelID]["Files"] += 1
      self.channels[channelID]["Size"] += size

    return S_OK( tree )
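
  # A minimal call sketch (SE names, replica dict and size are made up): the caller
  # supplies the source, the wanted targets and the active replicas, and gets back a
  # replication tree keyed by channelID.
  #
  #   res = strategyHandler.determineReplicationTree( "CERN-RAW",
  #                                                   [ "PIC-RAW", "IN2P3-RAW" ],
  #                                                   { "CERN-RAW" : {} },
  #                                                   1024 ** 3,
  #                                                   strategy = "MinimiseTotalWait" )
  #   if res["OK"]:
  #     tree = res["Value"]   # { channelID : hop dict }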

  def __selectStrategy( self ):
    """ If more than one active strategy use one after the other.

    :param self: self reference
    """
    chosenStrategy = self.activeStrategies[self.chosenStrategy]
    self.chosenStrategy += 1
    if self.chosenStrategy == self.numberOfStrategies:
      self.chosenStrategy = 0
    return chosenStrategy

  def __simple( self, sourceSE, destSEs ):
    """ This just does a simple replication from the source to all the targets.

    :param self: self reference
    :param str sourceSE: source storage element name
    :param list destSEs: destination storage elements  
    """
    tree = {}
    if not self.__getActiveSEs( [ sourceSE ] ):
      return tree
    sourceSites = self.__getChannelSitesForSE( sourceSE )
    for destSE in destSEs:
      destSites = self.__getChannelSitesForSE( destSE )
      for channelID, channelDict in self.channels.items():
        if channelID in tree: 
          continue
        if channelDict["Source"] in sourceSites and channelDict["Destination"] in destSites:
          tree[channelID] = { "Ancestor" : False, 
                              "SourceSE" : sourceSE, 
                              "DestSE" : destSE,
                              "Strategy" : "Simple" }
    return tree

  def __swarm( self, destSE, replicas ):
    """ This strategy is to be used to the data the the target site as quickly as possible from any source.

    :param self: self reference
    :param str destSE: destination storage element
    :param list replicas: replicas dictionary keys
    """
    tree = {}
    res = self.__getTimeToStart()
    if not res["OK"]:
      self.log.error( res["Message"] )
      return tree
    channelInfo = res["Value"]
    minTimeToStart = float( "inf" )

    sourceSEs = self.__getActiveSEs( replicas )
    destSites = self.__getChannelSitesForSE( destSE )

    selectedChannelID = None
    selectedSourceSE = None
    selectedDestSE = None

    for destSite in destSites:
      for sourceSE in sourceSEs:
        for sourceSite in self.__getChannelSitesForSE( sourceSE ):
          channelName = "%s-%s" % ( sourceSite, destSite )
          if channelName not in channelInfo:
            errStr = "__swarm: Channel not defined"
            self.log.warn( errStr, channelName )
            continue
          channelTimeToStart = channelInfo[channelName]["TimeToStart"]
          if channelTimeToStart <= minTimeToStart:
            minTimeToStart = channelTimeToStart
            selectedSourceSE = sourceSE
            selectedDestSE = destSE
            selectedChannelID = channelInfo[channelName]["ChannelID"]
         
    if selectedChannelID and selectedSourceSE and selectedDestSE:
      tree[selectedChannelID] = { "Ancestor" : False,
                                  "SourceSE" : selectedSourceSE,
                                  "DestSE" : selectedDestSE,
                                  "Strategy" : "Swarm" }
    return tree

  def __dynamicThroughput( self, sourceSEs, destSEs ):
    """ This creates a replication tree based on observed throughput on the channels.

    :param self: self reference
    :param list sourceSEs: source storage elements names
    :param list destSEs: destination storage elements names
    """
    tree = {}
    res = self.__getTimeToStart()
    if not res["OK"]:
      self.log.error( res["Message"] )
      return tree
    channelInfo = res["Value"]

    timeToSite = {}   # Maintains time to site including previous hops
    siteAncestor = {} # Maintains the ancestor channel for a site

    while len( destSEs ) > 0:
      try:
        minTotalTimeToStart = float( "inf" )
        candidateChannels = []
        sourceActiveSEs = self.__getActiveSEs( sourceSEs )
        for destSE in destSEs:
          destSites = self.__getChannelSitesForSE( destSE )
          for destSite in destSites:
            for sourceSE in sourceActiveSEs:
              sourceSites = self.__getChannelSitesForSE( sourceSE )
              for sourceSite in sourceSites:
                channelName = "%s-%s" % ( sourceSite, destSite )
                if channelName not in channelInfo:
                  self.log.warn( "dynamicThroughput: bailing out! channel %s not defined " % channelName )
                  raise StrategyHandlerChannelNotDefined( channelName )

                channelID = channelInfo[channelName]["ChannelID"]
                if channelID in tree:
                  continue
                channelTimeToStart = channelInfo[channelName]["TimeToStart"]

                totalTimeToStart = channelTimeToStart
                if sourceSE in timeToSite:
                  totalTimeToStart += timeToSite[sourceSE] + self.sigma
                  
                if ( sourceSite == destSite ) :
                  selectedPathTimeToStart = totalTimeToStart
                  candidateChannels = [ ( sourceSE, destSE, channelID ) ]
                  raise StrategyHandlerLocalFound( candidateChannels )

                if totalTimeToStart < minTotalTimeToStart:
                  minTotalTimeToStart = totalTimeToStart
                  selectedPathTimeToStart = totalTimeToStart
                  candidateChannels = [ ( sourceSE, destSE, channelID ) ]
                elif totalTimeToStart == minTotalTimeToStart and totalTimeToStart < float("inf"):
                  minTotalTimeToStart = totalTimeToStart
                  selectedPathTimeToStart = totalTimeToStart
                  candidateChannels.append( ( sourceSE, destSE, channelID ) )
               
      except StrategyHandlerLocalFound:
        pass

      if not candidateChannels:
        return tree
      random.shuffle( candidateChannels )
      selectedSourceSE, selectedDestSE, selectedChannelID = candidateChannels[0]
      timeToSite[selectedDestSE] = selectedPathTimeToStart
      siteAncestor[selectedDestSE] = selectedChannelID
      
      waitingChannel = False if selectedSourceSE not in siteAncestor else siteAncestor[selectedSourceSE]
    
      tree[selectedChannelID] = { "Ancestor" : waitingChannel,
                                  "SourceSE" : selectedSourceSE,
                                  "DestSE" : selectedDestSE,
                                  "Strategy" : "DynamicThroughput" }
      sourceSEs.append( selectedDestSE )
      destSEs.remove( selectedDestSE )
    return tree

  def __minimiseTotalWait( self, sourceSEs, destSEs ):
    """ This creates a replication tree based on observed throughput on the channels.

    :param self: self reference
    :param list sourceSEs: source storage elements names
    :param list destSEs: destination storage elements names
    """

    self.log.debug( "sourceSEs = %s" % sourceSEs )
    self.log.debug( "destSEs = %s" % destSEs )
    
    tree = {}
    res = self.__getTimeToStart()
    if not res["OK"]:
      self.log.error( res["Message"] )
      return tree
    channelInfo = res["Value"]

    timeToSite = {}                # Maintains time to site including previous hops
    siteAncestor = {}              # Maintains the ancestor channel for a site
    primarySources = sourceSEs

    while destSEs:
      try:
        minTotalTimeToStart = float( "inf" )
        candidateChannels = []
        sourceActiveSEs = self.__getActiveSEs( sourceSEs )
        for destSE in destSEs:
          destSites = self.__getChannelSitesForSE( destSE )
          for destSite in destSites:
            for sourceSE in sourceActiveSEs:
              sourceSites = self.__getChannelSitesForSE( sourceSE )
              for sourceSite in sourceSites:
                channelName = "%s-%s" % ( sourceSite, destSite )

                if channelName not in channelInfo:
                  continue
                
                channelID = channelInfo[channelName]["ChannelID"]
                # If this channel is already used, look for another sourceSE
                if channelID in tree:
                  continue
                channelTimeToStart = channelInfo[channelName]["TimeToStart"]
                if sourceSE not in primarySources:
                  channelTimeToStart += self.sigma
                ## local transfer found
                if sourceSite == destSite:
                  selectedPathTimeToStart = channelTimeToStart
                  candidateChannels = [ ( sourceSE, destSE, channelID ) ]
                  ## bail out to save rainforests
                  raise StrategyHandlerLocalFound( candidateChannels )
                if channelTimeToStart < minTotalTimeToStart:
                  minTotalTimeToStart = channelTimeToStart
                  selectedPathTimeToStart = channelTimeToStart
                  candidateChannels = [ ( sourceSE, destSE, channelID ) ]
                elif channelTimeToStart == minTotalTimeToStart and channelTimeToStart != float("inf"):
                  minTotalTimeToStart = channelTimeToStart
                  selectedPathTimeToStart = channelTimeToStart
                  candidateChannels.append( ( sourceSE, destSE, channelID ) )

      except StrategyHandlerLocalFound:
        pass

      if not candidateChannels:
        return tree
      
      ## shuffle candidates and pick the 1st one
      random.shuffle( candidateChannels )
      selectedSourceSE, selectedDestSE, selectedChannelID = candidateChannels[0]
      timeToSite[selectedDestSE] = selectedPathTimeToStart
      siteAncestor[selectedDestSE] = selectedChannelID
      waitingChannel = False if selectedSourceSE not in siteAncestor else siteAncestor[selectedSourceSE]

      tree[selectedChannelID] = { "Ancestor" : waitingChannel,
                                  "SourceSE" : selectedSourceSE,
                                  "DestSE" : selectedDestSE,
                                  "Strategy" : "MinimiseTotalWait" }
      sourceSEs.append( selectedDestSE )
      destSEs.remove( selectedDestSE )
      
    return tree

  def __getTimeToStart( self ):
    """ Generate the dictionary of times to start based on task queue contents and observed throughput.

    :param self: self reference
    """

    if self.schedulingType not in ( "File", "Throughput" ):
      errStr = "__getTimeToStart: CS SchedulingType entry must be either 'File' or 'Throughput'"
      self.log.error( errStr )
      return S_ERROR( errStr )

    channelInfo = {}
    for channelID, bandwidth in self.bandwidths.items():

      channelDict = self.channels[channelID] 
      channelName = channelDict["ChannelName"]

      # initially equal to 0.0
      timeToStart = 0.0

      channelStatus = channelDict["Status"]

      ## channel is active?
      if channelStatus == "Active":
        
        channelFileSuccess = bandwidth["SuccessfulFiles"]
        channelFileFailed = bandwidth["FailedFiles"]
        attempted = channelFileSuccess + channelFileFailed
        

        successRate = 100.0
        if attempted != 0:
          successRate = 100.0 * ( channelFileSuccess / float( attempted ) )
    
        ## get distinct failed files counter
        distinctFailedFiles = self.failedFiles.get( channelID, 0 )      
    
        ## if the success rate is too low and more than the acceptable number of distinct files are affected, make the channel unattractive
        if ( successRate < self.acceptableFailureRate ) and ( distinctFailedFiles > self.acceptableFailedFiles ):
          timeToStart = float( "inf" ) 
        else:

          ## scheduling type == Throughput
          transferSpeed = bandwidth["Throughput"] 
          waitingTransfers = channelDict["Size"]

          ## scheduling type == File, overwrite transferSpeed and waitingTransfer
          if self.schedulingType == "File":
            transferSpeed = bandwidth["Fileput"] 
            waitingTransfers = channelDict["Files"]

          if transferSpeed > 0:
            timeToStart = waitingTransfers / float( transferSpeed )
            
      else:
        ## channel not active, make it unattractive
        timeToStart = float( "inf" ) 

      channelInfo.setdefault( channelName, { "ChannelID" : channelID, 
                                             "TimeToStart" : timeToStart } )

    return S_OK( channelInfo )
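
  # Worked example of the estimate above (made-up numbers): with SchedulingType = "File",
  # a channel holding 120 waiting files and an observed fileput of 0.4 files/s gets
  # timeToStart = 120 / 0.4 = 300 s. An inactive channel, or one whose success rate drops
  # below AcceptableFailureRate while more than AcceptableFailedFiles distinct files have
  # failed, is pushed to float("inf") and thus becomes unattractive to every strategy.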

  def __getActiveSEs( self, seList, access = "Read" ):
    """Get active storage elements.

    :param self: self reference
    :param list seList: storage element list
    :param str access: storage element access, could be 'Read' (default) or 'Write'
    """
    res = self.resourceStatus.getStorageElementStatus( seList, statusType = access, default = 'Unknown' )
    if not res["OK"]:
      return []
    return [ k for k, v in res["Value"].items() if access in v and v[access] in ( "Active", "Bad" ) ]
   
  def __getChannelSitesForSE( self, storageElement ):
    """Get sites for given storage element.
    
    :param self: self reference
    :param str storageElement: storage element name
    """
    res = getSitesForSE( storageElement )
    if not res["OK"]:
      return []
    sites = []
    for site in res["Value"]:
      siteName = site.split( "." )
      if len( siteName ) > 1:
        if not siteName[1] in sites:
          sites.append( siteName[1] )
    return sites
Example No. 30
0
class FTS3ServerPolicy(object):
  """
  This class manages the policy for choosing a server
  """

  def __init__(self, serverDict, serverPolicy="Random"):
    """
        Initialize the list of FTS3 servers and the server selection policy
    """

    self.log = gLogger.getSubLogger("FTS3ServerPolicy")

    self._serverDict = serverDict
    self._serverList = serverDict.keys()
    self._maxAttempts = len(self._serverList)
    self._nextServerID = 0
    self._resourceStatus = ResourceStatus()

    methName = "_%sServerPolicy" % serverPolicy.lower()
    if not hasattr(self, methName):
      self.log.error('Unknown server policy %s. Using Random instead' % serverPolicy)
      methName = "_randomServerPolicy"

    self._policyMethod = getattr(self, methName)

  def _failoverServerPolicy(self, _attempt):
    """
       Always returns the server at a given position (normally the first one)

       :param _attempt: position of the server in the list
    """
    if _attempt >= len(self._serverList):
      raise Exception(
          "FTS3ServerPolicy.__failoverServerPolicy: attempt to reach non existing server index")
    return self._serverList[_attempt]

  def _sequenceServerPolicy(self, _attempt):
    """
       Every time this policy is called, return the next server in the list
    """

    fts3server = self._serverList[self._nextServerID]
    self._nextServerID = (self._nextServerID + 1) % len(self._serverList)
    return fts3server

  def _randomServerPolicy(self, _attempt):
    """
      return a server from shuffledServerList
    """

    if getattr(threadLocal, 'shuffledServerList', None) is None:
      threadLocal.shuffledServerList = self._serverList[:]
      random.shuffle(threadLocal.shuffledServerList)

    fts3Server = threadLocal.shuffledServerList[_attempt]

    if _attempt == self._maxAttempts - 1:
      random.shuffle(threadLocal.shuffledServerList)

    return fts3Server

  def _getFTSServerStatus(self, ftsServer):
    """ Fetch the status of the FTS server from RSS """

    res = self._resourceStatus.getElementStatus(ftsServer, 'FTS')
    if not res['OK']:
      return res

    result = res['Value']
    if ftsServer not in result:
      return S_ERROR("No FTS Server %s known to RSS" % ftsServer)

    if result[ftsServer]['all'] == 'Active':
      return S_OK(True)

    return S_OK(False)

  def chooseFTS3Server(self):
    """
      Choose the appropriate FTS3 server depending on the policy
    """

    fts3Server = None
    attempt = 0

    while not fts3Server and attempt < self._maxAttempts:

      fts3Server = self._policyMethod(attempt)
      res = self._getFTSServerStatus(fts3Server)

      if not res['OK']:
        self.log.warn("Error getting the RSS status for %s: %s" % (fts3Server, res))
        fts3Server = None
        attempt += 1
        continue

      ftsServerStatus = res['Value']

      if not ftsServerStatus:
        self.log.warn('FTS server %s is not in good shape. Choose another one' % fts3Server)
        fts3Server = None
        attempt += 1

    if fts3Server:
      return S_OK(self._serverDict[fts3Server])

    return S_ERROR("Could not find an FTS3 server (max attempt reached)")
Example No. 31
0
class InputDataAgent( OptimizerModule ):
  """
      The specific Optimizer must provide the following methods:
      - initializeOptimizer() before each execution cycle
      - checkJob() - the main method called for each job
  """

  #############################################################################
  def initializeOptimizer( self ):
    """Initialize specific parameters for JobSanityAgent.
    """
    self.failedMinorStatus = self.am_getOption( '/FailedJobStatus', 'Input Data Not Available' )
    #this will ignore failover SE files
    self.checkFileMetadata = self.am_getOption( 'CheckFileMetadata', True )

    self.dataManager = DataManager()
    self.resourceStatus = ResourceStatus()
    self.fc = FileCatalog()

    self.seToSiteMapping = {}
    self.lastCScheck = 0
    self.cacheLength = 600

    return S_OK()

  #############################################################################
  def checkJob( self, job, classAdJob ):
    """
    This method does the optimization corresponding to this Agent;
    it is called for each job by the Optimizer framework
    """

    result = self.jobDB.getInputData( job )
    if not result['OK']:
      self.log.warn( 'Failed to get input data from JobDB for %s' % ( job ) )
      self.log.warn( result['Message'] )
      return result
    if not result['Value']:
      self.log.verbose( 'Job %s has no input data requirement' % ( job ) )
      return self.setNextOptimizer( job )

    #Check if we already executed this Optimizer and the input data is resolved
    res = self.getOptimizerJobInfo( job, self.am_getModuleParam( 'optimizerName' ) )
    if res['OK'] and len( res['Value'] ):
      pass
    else:
      self.log.verbose( 'Job %s has an input data requirement and will be processed' % ( job ) )
      inputData = result['Value']
      result = self.__resolveInputData( job, inputData )
      if not result['OK']:
        self.log.warn( result['Message'] )
        return result

    return self.setNextOptimizer( job )

  #############################################################################
  def __resolveInputData( self, job, inputData ):
    """This method checks the file catalog for replica information.
    """
    lfns = [ fname.replace( 'LFN:', '' ) for fname in inputData ]

    start = time.time()
    # In order to place jobs on Hold if a certain SE is banned, we first need to check
    # whether the replicas are really available
    replicas = self.dataManager.getActiveReplicas( lfns )
    timing = time.time() - start
    self.log.verbose( 'Catalog Replicas Lookup Time: %.2f seconds ' % ( timing ) )
    if not replicas['OK']:
      self.log.warn( replicas['Message'] )
      return replicas

    replicaDict = replicas['Value']

    siteCandidates = self.__checkReplicas( job, replicaDict )

    if not siteCandidates['OK']:
      self.log.warn( siteCandidates['Message'] )
      return siteCandidates

    if self.checkFileMetadata:
      guids = True
      start = time.time()
      guidDict = self.fc.getFileMetadata( lfns )
      timing = time.time() - start
      self.log.info( 'Catalog Metadata Lookup Time: %.2f seconds ' % ( timing ) )

      if not guidDict['OK']:
        self.log.warn( guidDict['Message'] )
        guids = False

      failed = guidDict['Value']['Failed']
      if failed:
        self.log.warn( 'Failed to establish some GUIDs' )
        self.log.warn( failed )
        guids = False

      if guids:
        for lfn, reps in replicaDict['Successful'].items():
          guidDict['Value']['Successful'][lfn].update( reps )
        replicas = guidDict

    resolvedData = {}
    resolvedData['Value'] = replicas
    resolvedData['SiteCandidates'] = siteCandidates['Value']
    result = self.setOptimizerJobInfo( job, self.am_getModuleParam( 'optimizerName' ), resolvedData )
    if not result['OK']:
      self.log.warn( result['Message'] )
      return result
    return S_OK( resolvedData )

  #############################################################################
  def __checkReplicas( self, job, replicaDict ):
    """Check that all input lfns have valid replicas and can all be found at least in one single site.
    """
    badLFNs = []

    if 'Successful' in replicaDict:
      for lfn, reps in replicaDict['Successful'].items():
        if not reps:
          badLFNs.append( 'LFN:%s Problem: No replicas available' % ( lfn ) )
    else:
      return S_ERROR( 'No replica Info available' )

    if 'Failed' in replicaDict:
      for lfn, cause in replicaDict['Failed'].items():
        badLFNs.append( 'LFN:%s Problem: %s' % ( lfn, cause ) )

    if badLFNs:
      self.log.info( 'Found %s problematic LFN(s) for job %s' % ( len( badLFNs ), job ) )
      param = '\n'.join( badLFNs )
      self.log.info( param )
      result = self.setJobParam( job, self.am_getModuleParam( 'optimizerName' ), param )
      if not result['OK']:
        self.log.error( result['Message'] )
      return S_ERROR( 'Input Data Not Available' )

    return self.__getSiteCandidates( replicaDict['Successful'] )

  #############################################################################
  # FIXME: right now this is unused...
  def __checkActiveSEs( self, job, replicaDict ):
    """
    Check active SE and replicas and identify possible Site candidates for 
    the execution of the job
    """
    # Now let's check if some replicas might not be available due to banned SE's
    activeReplicas = self.dataManager.checkActiveReplicas( replicaDict )
    if not activeReplicas['OK']:
      # due to banned SEs, input data might not be available
      msg = "On Hold: Missing replicas due to banned SE"
      self.log.info( msg )
      self.log.warn( activeReplicas['Message'] )
      return S_ERROR( msg )

    activeReplicaDict = activeReplicas['Value']

    siteCandidates = self.__checkReplicas( job, activeReplicaDict )

    if not siteCandidates['OK']:
      # due to banned SEs, input data is not available at a single site
      msg = "On Hold: Input data not Available due to banned SE"
      self.log.info( msg )
      self.log.warn( siteCandidates['Message'] )
      return S_ERROR( msg )

    resolvedData = {}
    resolvedData['Value'] = activeReplicas
    resolvedData['SiteCandidates'] = siteCandidates['Value']
    result = self.setOptimizerJobInfo( job, self.am_getModuleParam( 'optimizerName' ), resolvedData )
    if not result['OK']:
      self.log.warn( result['Message'] )
      return result
    return S_OK( resolvedData )


  #############################################################################
  def __getSitesForSE( self, se ):
    """ Returns a list of sites having the given SE as a local one.
        Uses the local cache of the site-se information
    """

    # Empty the cache if too old
    if ( time.time() - self.lastCScheck ) > self.cacheLength:
      self.log.verbose( 'Resetting the SE to site mapping cache' )
      self.seToSiteMapping = {}
      self.lastCScheck = time.time()

    if se not in self.seToSiteMapping:
      sites = getSitesForSE( se )
      if sites['OK']:
        self.seToSiteMapping[se] = list( sites['Value'] )
      return sites
    else:
      return S_OK( self.seToSiteMapping[se] )

  #############################################################################
  def __getSiteCandidates( self, inputData ):
    """This method returns a list of possible site candidates based on the
       job input data requirement.  For each site candidate, the number of files
       on disk and tape is resolved.
    """

    fileSEs = {}
    for lfn, replicas in inputData.items():
      siteList = []
      for se in replicas.keys():
        sites = self.__getSitesForSE( se )
        if sites['OK']:
          siteList += sites['Value']
      fileSEs[lfn] = uniqueElements( siteList )

    siteCandidates = []
    i = 0
    for _fileName, sites in fileSEs.items():
      if not i:
        siteCandidates = sites
      else:
        tempSite = []
        for site in siteCandidates:
          if site in sites:
            tempSite.append( site )
        siteCandidates = tempSite
      i += 1
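
    # The loop above is effectively an intersection over all per-file site lists,
    # roughly: siteCandidates = set.intersection(*map(set, fileSEs.values()))
    # (assuming fileSEs is non-empty; the explicit loop is kept as it preserves order).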

    if not siteCandidates:
      return S_ERROR( 'No candidate sites available' )

    #In addition, check number of files on tape and disk for each site
    #for optimizations during scheduling
    siteResult = {}
    for site in siteCandidates:
      siteResult[site] = { 'disk': [], 'tape': [] }

    seDict = {}
    for lfn, replicas in inputData.items():
      for se in replicas.keys():
        if se not in seDict:
          sites = self.__getSitesForSE( se )
          if not sites['OK']:
            continue
          try:
            #storageElement = StorageElement( se )
            result = self.resourceStatus.getStorageElementStatus( se, statusType = 'ReadAccess' )
            if not result['OK']:
              continue
            seDict[se] = { 'Sites': sites['Value'], 'SEParams': result['Value'][se] }
            result = getStorageElementOptions( se )
            if not result['OK']:
              continue
            seDict[se]['SEParams'].update(result['Value'])
          except Exception:
            self.log.exception( 'Failed to instantiate StorageElement( %s )' % se )
            continue
        for site in seDict[se]['Sites']:
          if site in siteCandidates:
            if seDict[se]['SEParams']['ReadAccess'] and seDict[se]['SEParams']['DiskSE']:
              if lfn not in siteResult[site]['disk']:
                siteResult[site]['disk'].append( lfn )
                if lfn in siteResult[site]['tape']:
                  siteResult[site]['tape'].remove( lfn )
            if seDict[se]['SEParams']['ReadAccess'] and seDict[se]['SEParams']['TapeSE']:
              if lfn not in siteResult[site]['tape'] and lfn not in siteResult[site]['disk']:
                siteResult[site]['tape'].append( lfn )

    for site in siteResult:
      siteResult[site]['disk'] = len( siteResult[site]['disk'] )
      siteResult[site]['tape'] = len( siteResult[site]['tape'] )
    return S_OK( siteResult )
Example No. 32
0
from DIRAC.Core.Utilities.List import sortList

storageCFGBase = "/Resources/StorageElements"

res = gConfig.getSections(storageCFGBase, True)
if not res['OK']:
    gLogger.error('Failed to get storage element info')
    gLogger.error(res['Message'])
    DIRAC.exit(-1)

gLogger.info("%s %s %s" % ('Storage Element'.ljust(25),
                           'Read Status'.rjust(15), 'Write Status'.rjust(15)))

seList = sortList(res['Value'])

resourceStatus = ResourceStatus()

res = resourceStatus.getStorageElementStatus(seList)
if not res['OK']:
    gLogger.error("Failed to get StorageElement status for %s" % str(seList))

for k, v in res['Value'].items():

    readState = v.get('ReadAccess', 'Active')
    writeState = v.get('WriteAccess', 'Active')
    gLogger.notice("%s %s %s" %
Example No. 33
0
if __name__ == "__main__":
  
  result = getVOfromProxyGroup()
  if not result['OK']:
    gLogger.notice( 'Error:', result['Message'] )
    DIRAC.exit( 1 )
  vo = result['Value']  
  resources = Resources( vo = vo )
  result = resources.getEligibleStorageElements()
  if not result['OK']:
    gLogger.notice( 'Error:', result['Message'] )
    DIRAC.exit( 2 )
  seList = sortList( result[ 'Value' ] )

  resourceStatus = ResourceStatus()
 
  result = resourceStatus.getStorageStatus( seList )
  if not result['OK']:
    gLogger.notice( 'Error:', result['Message'] )
    DIRAC.exit( 3 )

  for k,v in result[ 'Value' ].items():
    
    readState = v.get( 'ReadAccess', 'Active' )
    writeState = v.get( 'WriteAccess', 'Active' )
Example No. 34
0
class StrategyHandler(object):
    """
  .. class:: StrategyHandler

  StrategyHandler is a helper class for determining optimal replication tree for given
  source files, their replicas and target storage elements.
  """
    def __init__(self,
                 configSection,
                 channels=None,
                 bandwidths=None,
                 failedFiles=None):
        """c'tor

    :param self: self reference
    :param str configSection: path on CS to ReplicationScheduler agent
    :param bandwidths: observed throughput on active channels
    :param channels: active channels
    :param dict failedFiles: observed distinct failed files per channel
    """
        ## save config section
        self.configSection = configSection + "/" + self.__class__.__name__
        ##

        ## sublogger
        self.log = gLogger.getSubLogger("StrategyHandler", child=True)
        self.log.setLevel(
            gConfig.getValue(self.configSection + "/LogLevel", "DEBUG"))

        self.supportedStrategies = [
            'Simple', 'DynamicThroughput', 'Swarm', 'MinimiseTotalWait'
        ]
        self.log.info("Supported strategies = %s" %
                      ", ".join(self.supportedStrategies))

        self.sigma = gConfig.getValue(self.configSection + '/HopSigma', 0.0)
        self.log.info("HopSigma = %s" % self.sigma)
        self.schedulingType = gConfig.getValue(
            self.configSection + '/SchedulingType', 'File')
        self.log.info("SchedulingType = %s" % self.schedulingType)
        self.activeStrategies = gConfig.getValue(
            self.configSection + '/ActiveStrategies', ['MinimiseTotalWait'])
        self.log.info("ActiveStrategies = %s" %
                      ", ".join(self.activeStrategies))
        self.numberOfStrategies = len(self.activeStrategies)
        self.log.info("Number of active strategies = %s" %
                      self.numberOfStrategies)
        self.acceptableFailureRate = gConfig.getValue(
            self.configSection + '/AcceptableFailureRate', 75)
        self.log.info("AcceptableFailureRate = %s" %
                      self.acceptableFailureRate)
        self.acceptableFailedFiles = gConfig.getValue(
            self.configSection + "/AcceptableFailedFiles", 5)
        self.log.info("AcceptableFailedFiles = %s" %
                      self.acceptableFailedFiles)
        self.rwUpdatePeriod = gConfig.getValue(
            self.configSection + "/RssRWUpdatePeriod", 600)
        self.log.info("RSSUpdatePeriod = %s s" % self.rwUpdatePeriod)
        self.rwUpdatePeriod = datetime.timedelta(seconds=self.rwUpdatePeriod)
        ## bandwithds
        self.bandwidths = bandwidths if bandwidths else {}
        ## channels
        self.channels = channels if channels else {}
        ## distinct failed files per channel
        self.failedFiles = failedFiles if failedFiles else {}
        ## chosen strategy
        self.chosenStrategy = 0
        ## fts graph
        self.ftsGraph = None
        ## timestamp for last update
        self.lastRssUpdate = datetime.datetime.now()
        # dispatcher
        self.strategyDispatcher = {
            "MinimiseTotalWait": self.minimiseTotalWait,
            "DynamicThroughput": self.dynamicThroughput,
            "Simple": self.simple,
            "Swarm": self.swarm
        }
        ## own RSS client
        self.resourceStatus = ResourceStatus()
        ## create fts graph
        ftsGraph = self.setup(self.channels, self.bandwidths, self.failedFiles)
        if not ftsGraph["OK"]:
            raise SHGraphCreationError(ftsGraph["Message"])
        self.log.info("%s has been constructed" % self.__class__.__name__)

    def setup(self, channels, bandwidths, failedFiles):
        """ prepare fts graph 

    :param dict channels: { channelID : { "Files" : long, "Size" : long, "ChannelName" : str,
                                          "Source" : str, "Destination" : str, "Status" : str } }
    :param dict bandwidths: { channelID : { "Throughput" : float, "Fileput" : float, "SuccessfulFiles" : long, "FailedFiles" : long } }
    :param dict failedFiles: { channelID : int }

    channelInfo { channelName : { "ChannelID" : int, "TimeToStart" : float} }
    """
        graph = FTSGraph("sites")

        result = getStorageElementSiteMapping()
        if not result['OK']:
            return result
        sitesDict = result['Value']

        ## create nodes
        for site, ses in sitesDict.items():
            rwDict = self.__getRWAccessForSE(ses)
            if not rwDict["OK"]:
                return rwDict
            siteName = site
            if '.' in site:
                siteName = site.split('.')[1]
            graph.addNode(LCGSite(siteName, {"SEs": rwDict["Value"]}))
        ## channels { channelID : { "Files" : long , Size = long, "ChannelName" : str,
        ##                          "Source" : str, "Destination" : str ,
        ##                          "ChannelName" : str, "Status" : str  } }
        ## bandwidths { channelID { "Throughput" : float, "Fileput" : float,
        ##                           "SuccessfulFiles" : long, "FailedFiles" : long  } }
        ## channelInfo { channelName : { "ChannelID" : int, "TimeToStart" : float} }
        for channelID, channelDict in channels.items():
            sourceName = channelDict["Source"]
            destName = channelDict["Destination"]
            fromNode = graph.getNode(sourceName)
            toNode = graph.getNode(destName)
            if fromNode and toNode:
                rwAttrs = {
                    "status": channels[channelID]["Status"],
                    "files": channelDict["Files"],
                    "size": channelDict["Size"],
                    "successfulAttempts":
                    bandwithds[channelID]["SuccessfulFiles"],
                    "failedAttempts": bandwithds[channelID]["FailedFiles"],
                    "distinctFailedFiles": failedFiles.get(channelID, 0),
                    "fileput": bandwithds[channelID]["Fileput"],
                    "throughput": bandwithds[channelID]["Throughput"]
                }
                roAttrs = {
                    "channelID": channelID,
                    "channelName": channelDict["ChannelName"],
                    "acceptableFailureRate": self.acceptableFailureRate,
                    "acceptableFailedFiles": self.acceptableFailedFiles,
                    "schedulingType": self.schedulingType
                }
                ftsChannel = FTSChannel(fromNode, toNode, rwAttrs, roAttrs)
                graph.addEdge(ftsChannel)
        self.ftsGraph = graph
        self.lastRssUpdate = datetime.datetime.now()
        return S_OK()

    def updateGraph(self, rwAccess=False, replicationTree=None, size=0.0):
        """ update rw access for nodes (sites) and size anf files for edges (channels) """
        replicationTree = replicationTree if replicationTree else {}
        size = size if size else 0.0
        ## update nodes rw access for SEs
        if rwAccess:
            for lcgSite in self.ftsGraph.nodes():
                rwDict = self.__getRWAccessForSE(lcgSite.SEs.keys())
                if not rwDict["OK"]:
                    return rwDict
                lcgSite.SEs = rwDict["Value"]
        ## update channels size and files
        if replicationTree:
            for channel in self.ftsGraph.edges():
                if channel.channelID in replicationTree:
                    channel.size += size
                    channel.files += 1
        return S_OK()

    def simple(self, sourceSEs, targetSEs):
        """ simple strategy - one source, many targets

    :param list sourceSEs: list with only one sourceSE name
    :param list targetSEs: list with target SE names
    :param str lfn: logical file name
    :param dict metadata: file metadata read from catalogue
    """
        ## exactly one source SE is allowed here
        if len(sourceSEs) != 1:
            return S_ERROR(
                "simple: wrong argument supplied for sourceSEs, only one sourceSE allowed"
            )
        sourceSE = sourceSEs[0]
        tree = {}
        for targetSE in targetSEs:
            channel = self.ftsGraph.findChannel(sourceSE, targetSE)
            if not channel["OK"]:
                return S_ERROR(channel["Message"])
            channel = channel["Value"]
            if not channel.fromNode.SEs[sourceSE]["read"]:
                return S_ERROR(
                    "simple: sourceSE '%s' in banned for reading rigth now" %
                    sourceSE)
            if not channel.toNode.SEs[targetSE]["write"]:
                return S_ERROR(
                    "simple: targetSE '%s' is banned for writing rigth now" %
                    targetSE)
            if channel.channelID in tree:
                return S_ERROR( "simple: unable to create replication tree, channel '%s' cannot be used twice" %\
                                  channel.channelName )
            tree[channel.channelID] = {
                "Ancestor": False,
                "SourceSE": sourceSE,
                "DestSE": targetSE,
                "Strategy": "Simple"
            }

        return S_OK(tree)

    def swarm(self, sourceSEs, targetSEs):
        """ swarm strategy - one target, many sources, pick up the fastest 
    
    :param list sourceSEs: list of source SE 
    :param list targetSEs: one-element list with the target SE name
    """
        tree = {}
        channels = []
        if len(targetSEs) > 1:
            return S_ERROR(
                "swarm: wrong argument supplied for targetSEs, only one targetSE allowed"
            )
        targetSE = targetSEs[0]
        ## find channels
        for sourceSE in sourceSEs:
            channel = self.ftsGraph.findChannel(sourceSE, targetSE)
            if not channel["OK"]:
                self.log.warn("swarm: %s" % channel["Message"])
                continue
            channels.append((sourceSE, channel["Value"]))
        ## exit - no channels
        if not channels:
            return S_ERROR(
                "swarm: unable to find FTS channels between '%s' and '%s'" %
                (",".join(sourceSEs), targetSE))
        ## filter out non active channels
        channels = [
            (sourceSE, channel) for sourceSE, channel in channels
            if channel.fromNode.SEs[sourceSE]["read"]
            and channel.toNode.SEs[targetSE]["write"] and channel.status ==
            "Active" and channel.timeToStart < float("inf")
        ]
        ## exit - no active channels
        if not channels:
            return S_ERROR(
                "swarm: no active channels found between %s and %s" %
                (sourceSEs, targetSE))

        ## find min timeToStart
        minTimeToStart = float("inf")
        selSourceSE = selChannel = None
        for sourceSE, ftsChannel in channels:
            if ftsChannel.timeToStart < minTimeToStart:
                minTimeToStart = ftsChannel.timeToStart
                selSourceSE = sourceSE
                selChannel = ftsChannel

        if not selSourceSE:
            return S_ERROR(
                "swarm: no active channels found between %s and %s" %
                (sourceSEs, targetSE))

        tree[selChannel.channelID] = {
            "Ancestor": False,
            "SourceSE": selSourceSE,
            "DestSE": targetSE,
            "Strategy": "Swarm"
        }
        return S_OK(tree)

    def minimiseTotalWait(self, sourceSEs, targetSEs):
        """ find dag that minimises start time 
    
    :param list sourceSEs: list of available source SEs
    :param list targetSEs: list of target SEs
    """
        tree = {}
        primarySources = sourceSEs
        while targetSEs:
            minTimeToStart = float("inf")
            channels = []
            for targetSE in targetSEs:
                for sourceSE in sourceSEs:
                    ftsChannel = self.ftsGraph.findChannel(sourceSE, targetSE)
                    if not ftsChannel["OK"]:
                        self.log.warn("minimiseTotalWait: %s" %
                                      ftsChannel["Message"])
                        continue
                    ftsChannel = ftsChannel["Value"]
                    channels.append((ftsChannel, sourceSE, targetSE))
            if not channels:
                msg = "minimiseTotalWait: FTS channels between %s and %s not defined" % (
                    ",".join(sourceSEs), ",".join(targetSEs))
                self.log.error(msg)
                return S_ERROR(msg)
            ## filter out already used channels
            channels = [(channel, sourceSE, targetSE)
                        for channel, sourceSE, targetSE in channels
                        if channel.channelID not in tree]
            if not channels:
                msg = "minimiseTotalWait: all FTS channels between %s and %s are already used in tree" % (
                    ",".join(sourceSEs), ",".join(targetSEs))
                self.log.error(msg)
                return S_ERROR(msg)

            self.log.debug(
                "minimiseTotalWait: found %s candiate channels, checking activity"
                % len(channels))
            channels = [
                (channel, sourceSE, targetSE)
                for channel, sourceSE, targetSE in channels
                if channel.fromNode.SEs[sourceSE]["read"]
                and channel.toNode.SEs[targetSE]["write"] and channel.status ==
                "Active" and channel.timeToStart < float("inf")
            ]

            if not channels:
                self.log.error(
                    "minimiseTotalWait: no active FTS channels found")
                return S_ERROR(
                    "minimiseTotalWait: no active FTS channels found")

            candidates = []
            for channel, sourceSE, targetSE in channels:
                timeToStart = channel.timeToStart
                if sourceSE not in primarySources:
                    timeToStart += self.sigma
                ## local found
                if channel.fromNode == channel.toNode:
                    self.log.debug(
                        "minimiseTotalWait: found local channel '%s'" %
                        channel.channelName)
                    candidates = [(channel, sourceSE, targetSE)]
                    break
                if timeToStart < minTimeToStart:
                    minTimeToStart = timeToStart
                    candidates = [(channel, sourceSE, targetSE)]
                elif timeToStart == minTimeToStart:
                    candidates.append((channel, sourceSE, targetSE))

            if not candidates:
                return S_ERROR(
                    "minimiseTotalWait: unable to find candidate FTS channels minimising total wait time"
                )

            random.shuffle(candidates)
            selChannel, selSourceSE, selTargetSE = candidates[0]
            ancestor = False
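            ## an earlier hop whose destination matches the selected source becomes
            ## the ancestor: this transfer has to wait for that hop to finish first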
            for channelID, treeItem in tree.items():
                if selSourceSE in treeItem["DestSE"]:
                    ancestor = channelID
            tree[selChannel.channelID] = {
                "Ancestor": ancestor,
                "SourceSE": selSourceSE,
                "DestSE": selTargetSE,
                "Strategy": "MinimiseTotalWait"
            }
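            ## the freshly scheduled target can now act as a source for the remaining targets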
            sourceSEs.append(selTargetSE)
            targetSEs.remove(selTargetSE)

        return S_OK(tree)

    def dynamicThroughput(self, sourceSEs, targetSEs):
        """ dynamic throughput - many sources, many targets - find dag that minimises overall throughput 

    :param list sourceSEs: list of available source SE names
    :param list targetSEs: list of target SE names
    """
        tree = {}
        primarySources = sourceSEs
        timeToSite = {}
        while targetSEs:
            minTimeToStart = float("inf")
            channels = []
            for targetSE in targetSEs:
                for sourceSE in sourceSEs:
                    ftsChannel = self.ftsGraph.findChannel(sourceSE, targetSE)
                    if not ftsChannel["OK"]:
                        self.log.warn("dynamicThroughput: %s" %
                                      ftsChannel["Message"])
                        continue
                    ftsChannel = ftsChannel["Value"]
                    channels.append((ftsChannel, sourceSE, targetSE))
            ## no candidate channels found
            if not channels:
                msg = "dynamicThroughput: FTS channels between %s and %s are not defined" % (
                    ",".join(sourceSEs), ",".join(targetSEs))
                self.log.error(msg)
                return S_ERROR(msg)
            ## filter out already used channels
            channels = [(channel, sourceSE, targetSE)
                        for channel, sourceSE, targetSE in channels
                        if channel.channelID not in tree]
            if not channels:
                msg = "dynamicThroughput: all FTS channels between %s and %s are already used in tree" % (
                    ",".join(sourceSEs), ",".join(targetSEs))
                self.log.error(msg)
                return S_ERROR(msg)
            ## filter out non-active channels
            self.log.debug(
                "dynamicThroughput: found %s candidate channels, checking activity"
                % len(channels))
            channels = [
                (channel, sourceSE, targetSE)
                for channel, sourceSE, targetSE in channels
                if channel.fromNode.SEs[sourceSE]["read"]
                and channel.toNode.SEs[targetSE]["write"] and channel.status ==
                "Active" and channel.timeToStart < float("inf")
            ]
            if not channels:
                self.log.info(
                    "dynamicThroughput: active candidate channels not found")
                return S_ERROR(
                    "dynamicThroughput: no active candidate FTS channels")

            candidates = []
            selTimeToStart = None
            for channel, sourceSE, targetSE in channels:
                timeToStart = channel.timeToStart
                if sourceSE not in primarySources:
                    timeToStart += self.sigma
                if sourceSE in timeToSite:
                    timeToStart += timeToSite[sourceSE]
                ## local found
                if channel.fromNode == channel.toNode:
                    self.log.debug(
                        "dynamicThroughput: found local channel '%s'" %
                        channel.channelName)
                    candidates = [(channel, sourceSE, targetSE)]
                    selTimeToStart = timeToStart
                    break
                if timeToStart < minTimeToStart:
                    selTimeToStart = timeToStart
                    minTimeToStart = timeToStart
                    candidates = [(channel, sourceSE, targetSE)]
                elif timeToStart == minTimeToStart:
                    candidates.append((channel, sourceSE, targetSE))

            if not candidates:
                return S_ERROR(
                    "dynamicThroughput: unable to find candidate FTS channels")

            random.shuffle(candidates)
            selChannel, selSourceSE, selTargetSE = candidates[0]
            ancestor = False
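            ## as in minimiseTotalWait: an earlier hop delivering the selected source becomes the ancestor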
            for channelID, treeItem in tree.items():
                if selSourceSE in treeItem["DestSE"]:
                    ancestor = channelID
            tree[selChannel.channelID] = {
                "Ancestor": ancestor,
                "SourceSE": selSourceSE,
                "DestSE": selTargetSE,
                "Strategy": "DynamicThroughput"
            }
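            ## remember the accumulated time needed to reach this target; it now also acts as a source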
            timeToSite[selTargetSE] = selTimeToStart
            sourceSEs.append(selTargetSE)
            targetSEs.remove(selTargetSE)

        return S_OK(tree)

    def reset(self):
        """ reset :chosenStrategy: 

    :param self: self reference
    """
        self.chosenStrategy = 0

    def getSupportedStrategies(self):
        """ Get supported strategies.

    :param self: self reference
    """
        return self.supportedStrategies

    def replicationTree(self, sourceSEs, targetSEs, size, strategy=None):
        """ get replication tree

    :param list sourceSEs: list of source SE names to use
    :param list targetSEs: list of target SE names to use
    :param long size: file size
    :param str strategy: strategy name
    """
        ## update SEs rwAccess every rwUpdatePeriod timedelta (default 300 s)
        now = datetime.datetime.now()
        if now - self.lastRssUpdate > self.rwUpdatePeriod:
            update = self.updateGraph(rwAccess=True)
            if not update["OK"]:
                self.log.warn(
                    "replicationTree: unable to update FTS graph: %s" %
                    update["Message"])
            else:
                self.lastRssUpdate = now
        ## get strategy
        strategy = strategy if strategy else self.__selectStrategy()
        if strategy not in self.getSupportedStrategies():
            return S_ERROR("replicationTree: unsupported strategy '%s'" %
                           strategy)

        self.log.info( "replicationTree: strategy=%s sourceSEs=%s targetSEs=%s size=%s" %\
                         ( strategy, sourceSEs, targetSEs, size ) )
        ## fire action from dispatcher
        tree = self.strategyDispatcher[strategy](sourceSEs, targetSEs)
        if not tree["OK"]:
            self.log.error("replicationTree: %s" % tree["Message"])
            return tree
        ## update graph edges
        update = self.updateGraph(replicationTree=tree["Value"], size=size)
        if not update["OK"]:
            self.log.error("replicationTree: unable to update FTS graph: %s" %
                           update["Message"])
            return update
        return tree

    def __selectStrategy(self):
        """ If more than one active strategy use one after the other.

    :param self: self reference
    """
        chosenStrategy = self.activeStrategies[self.chosenStrategy]
        self.chosenStrategy += 1
        if self.chosenStrategy == self.numberOfStrategies:
            self.chosenStrategy = 0
        return chosenStrategy

    def __getRWAccessForSE(self, seList):
        """ get RSS R/W for :seList: 

    :param list seList: SE list
    """
        rwDict = dict.fromkeys(seList)
        for se in rwDict:
            rwDict[se] = {"read": False, "write": False}
        rAccess = self.resourceStatus.getStorageElementStatus(
            seList, statusType="ReadAccess", default='Unknown')
        if not rAccess["OK"]:
            return rAccess["Message"]
        rAccess = [
            k for k, v in rAccess["Value"].items()
            if "ReadAccess" in v and v["ReadAccess"] in ("Active", "Degraded")
        ]
        wAccess = self.resourceStatus.getStorageElementStatus(
            seList, statusType="WriteAccess", default='Unknown')
        if not wAccess["OK"]:
            return wAccess["Message"]
        wAccess = [
            k for k, v in wAccess["Value"].items()
            if "WriteAccess" in v and v["WriteAccess"] in ("Active",
                                                           "Degraded")
        ]
        for se in rwDict:
            rwDict[se]["read"] = se in rAccess
            rwDict[se]["write"] = se in wAccess
        return S_OK(rwDict)
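
The strategy methods above all return a replication tree keyed by channel ID, and replicationTree() cycles through the configured strategies one call after another. A minimal standalone sketch of both ideas, using plain Python with hypothetical channel IDs and SE names (an illustration, not DIRAC code):

class RoundRobin(object):
    """Cycle through the active strategies, mimicking __selectStrategy."""

    def __init__(self, strategies):
        self.strategies = strategies
        self.index = 0

    def next(self):
        strategy = self.strategies[self.index]
        self.index = (self.index + 1) % len(self.strategies)
        return strategy

## shape of a tree returned by the Simple strategy: one source, two targets
tree = {
    101: {"Ancestor": False, "SourceSE": "CERN-DST", "DestSE": "PIC-DST", "Strategy": "Simple"},
    102: {"Ancestor": False, "SourceSE": "CERN-DST", "DestSE": "GRIDKA-DST", "Strategy": "Simple"},
}

if __name__ == "__main__":
    selector = RoundRobin(["MinimiseTotalWait", "DynamicThroughput"])
    print(selector.next())  # MinimiseTotalWait
    print(selector.next())  # DynamicThroughput
    print(selector.next())  # MinimiseTotalWait again
    for channelID, item in sorted(tree.items()):
        print("%s: %s -> %s" % (channelID, item["SourceSE"], item["DestSE"]))
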
Exemplo n.º 35
0
class FTS3Placement( FTSAbstractPlacement ):

  """
  This class manages the FTS3 placement: strategies, routes and server selection
  """


  __serverPolicy = "Random"
  __nextServerID = 0
  __serverList = None
  __maxAttempts = 0


  def __init__( self, csPath = None, ftsHistoryViews = None ):
    """
        Call the init of the parent, and initialize the list of FTS3 servers
    """

    self.log = gLogger.getSubLogger( "FTS3Placement" )
    super( FTS3Placement, self ).__init__( csPath = csPath, ftsHistoryViews = ftsHistoryViews )
    srvList = getFTS3Servers()
    if not srvList['OK']:
      self.log.error( srvList['Message'] )

    self.__serverList = srvList.get( 'Value', [] )
    self.maxAttempts = len( self.__serverList )

    self.rssClient = ResourceStatus()



  def getReplicationTree( self, sourceSEs, targetSEs, size, strategy = None ):
    """ For multiple source to multiple destination, find the optimal replication
        strategy.

       :param sourceSEs : list of source SE
       :param targetSEs : list of destination SE
       :param size : size of the File
       :param strategy : which strategy to use

       :returns S_OK(dict) < route name :  { dict with key Ancestor, SourceSE, TargetSE, Strategy } >

       For the time being, we are waiting for FTS3 to provide advisory mechanisms. So we just use
       simple techniques
    """

    # We will use a single random source
    sourceSE = random.choice( sourceSEs )

    tree = {}
    for targetSE in targetSEs:
      tree["%s#%s" % ( sourceSE, targetSE )] = { "Ancestor" : False, "SourceSE" : sourceSE,
                           "TargetSE" : targetSE, "Strategy" : "FTS3Simple" }

    return S_OK( tree )



  def refresh( self, ftsHistoryViews ):
    """
    Refresh the placement: recalculate what is needed from the latest
    configuration and the provided FTS history views.
    """
    return super( FTS3Placement, self ).refresh( ftsHistoryViews = ftsHistoryViews )



  def __failoverServerPolicy(self, attempt = 0):
    """
       Always returns the server at a given position (normally the first one)

       :param attempt: position of the server in the list
    """
    if attempt >= len( self.__serverList ):
      raise Exception( "FTS3Placement.__failoverServerPolicy: attempt to reach non existing server index" )

    return self.__serverList[attempt]

  def __sequenceServerPolicy( self ):
    """
       Every time this policy is called, return the next server in the list
    """

    fts3server = self.__serverList[self.__nextServerID]
    self.__nextServerID = ( self.__nextServerID + 1 ) % len( self.__serverList )
    return fts3server

  def __randomServerPolicy(self):
    """
      return a random server from the list
    """
    return random.choice( self.__serverList )


  def __chooseFTS3Server( self ):
    """
      Choose the appropriate FTS3 server depending on the policy
    """

    fts3Server = None
    attempt = 0
    # FIXME : need to get real value from RSS
    ftsServerStatus = True

    while not fts3Server and attempt < self.maxAttempts:
      if self.__serverPolicy == 'Random':
        fts3Server = self.__randomServerPolicy()
      elif self.__serverPolicy == 'Sequence':
        fts3Server = self.__sequenceServerPolicy()
      elif self.__serverPolicy == 'Failover':
        fts3Server = self.__failoverServerPolicy( attempt = attempt )
      else:
        self.log.error( 'Unknown server policy %s. Using Random instead' % self.__serverPolicy )
        fts3Server = self.__randomServerPolicy()

      if not ftsServerStatus:
        self.log.warn( 'FTS server %s is not in good shape. Choose another one' % fts3Server )
        fts3Server = None
      attempt += 1

        # FIXME : I need to get the FTS server status from RSS
#       ftsStatusFromRss = rss.ftsStatusOrSomethingLikeThat

    if fts3Server:
      return S_OK( fts3Server )

    return S_ERROR ( "Could not find an FTS3 server (max attempt reached)" )

  def findRoute( self, sourceSE, targetSE ):
    """ Find the appropriate route from point A to B
      :param sourceSE : source SE
      :param targetSE : destination SE

      :returns S_OK(FTSRoute)

    """

    fts3server = self.__chooseFTS3Server()

    if not fts3server['OK']:
      return fts3server

    fts3server = fts3server['Value']

    route = FTSRoute( sourceSE, targetSE, fts3server )

    return S_OK( route )

  def isRouteValid( self, route ):
    """
        FIXME: until RSS is ready, the status is checked manually here.
        In FTS3, all routes are valid a priori.
        If a route was not valid for some reason, then FTS would know it
        thanks to the blacklist sent by RSS, and would deal with it itself.
       :param route : FTSRoute

       :returns S_OK or S_ERROR(reason)
    """
    
    rAccess = self.rssClient.getStorageElementStatus( route.sourceSE, "ReadAccess" )
    self.log.debug( "se read %s %s" % ( route.sourceSE, rAccess ) )
    if not rAccess["OK"]:
      self.log.error( rAccess["Message"] )
      return rAccess

    if rAccess["Value"][route.sourceSE]["ReadAccess"] not in ( "Active", "Degraded" ):
      return S_ERROR( "Source SE is not readable" )

    wAccess = self.rssClient.getStorageElementStatus( route.targetSE, "WriteAccess" )
    self.log.debug( "se write %s %s" % ( route.targetSE, wAccess ) )
    if not wAccess["OK"]:
      self.log.error( wAccess["Message"] )
      return wAccess
    if wAccess["Value"][route.targetSE]["WriteAccess"] not in ( "Active", "Degraded" ):
      return S_ERROR( "Target SE is not writable" )

    return S_OK()
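
The check in isRouteValid reduces to comparing the RSS status of the two endpoints against the two usable states. A standalone sketch with assumed status dictionaries (not the RSS client API):

USABLE = ("Active", "Degraded")

def route_is_valid(source_status, target_status):
    """source_status / target_status are plain dicts standing in for RSS answers."""
    if source_status.get("ReadAccess") not in USABLE:
        return False, "Source SE is not readable"
    if target_status.get("WriteAccess") not in USABLE:
        return False, "Target SE is not writable"
    return True, ""

if __name__ == "__main__":
    print(route_is_valid({"ReadAccess": "Active"}, {"WriteAccess": "Banned"}))
    print(route_is_valid({"ReadAccess": "Degraded"}, {"WriteAccess": "Active"}))
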
Exemplo n.º 36
0
if site:
  res = gConfig.getOptionsDict( '/Resources/Sites/LCG/%s' % site )
  if not res[ 'OK' ]:
    gLogger.error( 'The provided site (%s) is not known.' % site )
    DIRAC.exit( -1 )
  ses.extend( res[ 'Value' ][ 'SE' ].replace( ' ', '' ).split( ',' ) )

if not ses:
  gLogger.error( 'There were no SEs provided' )
  DIRAC.exit( -1 )

readBanned = []
writeBanned = []
checkBanned = []

resourceStatus = ResourceStatus()

res = resourceStatus.getStorageElementStatus( ses )
if not res['OK']:
  gLogger.error( "Storage Element %s does not exist" % ses )
  DIRAC.exit( -1 )

reason = 'Forced with dirac-admin-ban-se by %s' % userName

for se, seOptions in res[ 'Value' ].items():

  resW = resC = resR = { 'OK' : False }

  # Eventually, we will get rid of the notion of InActive, as we always write Banned.
  if read and seOptions.has_key( 'Read' ):
Exemplo n.º 37
0
    def __init__(self,
                 configSection,
                 channels=None,
                 bandwidths=None,
                 failedFiles=None):
        """c'tor

    :param self: self reference
    :param str configSection: path on CS to ReplicationScheduler agent
    :param dict bandwidths: observed throughput on active channels
    :param dict channels: active channels
    :param dict failedFiles: distinct failed files per channel
    """
        ## save config section
        self.configSection = configSection + "/" + self.__class__.__name__
        ##

        ## sublogger
        self.log = gLogger.getSubLogger("StrategyHandler", child=True)
        self.log.setLevel(
            gConfig.getValue(self.configSection + "/LogLevel", "DEBUG"))

        self.supportedStrategies = [
            'Simple', 'DynamicThroughput', 'Swarm', 'MinimiseTotalWait'
        ]
        self.log.info("Supported strategies = %s" %
                      ", ".join(self.supportedStrategies))

        self.sigma = gConfig.getValue(self.configSection + '/HopSigma', 0.0)
        self.log.info("HopSigma = %s" % self.sigma)
        self.schedulingType = gConfig.getValue(
            self.configSection + '/SchedulingType', 'File')
        self.log.info("SchedulingType = %s" % self.schedulingType)
        self.activeStrategies = gConfig.getValue(
            self.configSection + '/ActiveStrategies', ['MinimiseTotalWait'])
        self.log.info("ActiveStrategies = %s" %
                      ", ".join(self.activeStrategies))
        self.numberOfStrategies = len(self.activeStrategies)
        self.log.info("Number of active strategies = %s" %
                      self.numberOfStrategies)
        self.acceptableFailureRate = gConfig.getValue(
            self.configSection + '/AcceptableFailureRate', 75)
        self.log.info("AcceptableFailureRate = %s" %
                      self.acceptableFailureRate)
        self.acceptableFailedFiles = gConfig.getValue(
            self.configSection + "/AcceptableFailedFiles", 5)
        self.log.info("AcceptableFailedFiles = %s" %
                      self.acceptableFailedFiles)
        self.rwUpdatePeriod = gConfig.getValue(
            self.configSection + "/RssRWUpdatePeriod", 600)
        self.log.info("RSSUpdatePeriod = %s s" % self.rwUpdatePeriod)
        self.rwUpdatePeriod = datetime.timedelta(seconds=self.rwUpdatePeriod)
        ## bandwidths
        self.bandwidths = bandwidths if bandwidths else {}
        ## channels
        self.channels = channels if channels else {}
        ## distinct failed files per channel
        self.failedFiles = failedFiles if failedFiles else {}
        ## chosen strategy
        self.chosenStrategy = 0
        ## fts graph
        self.ftsGraph = None
        ## timestamp for last update
        self.lastRssUpdate = datetime.datetime.now()
        # dispatcher
        self.strategyDispatcher = {
            "MinimiseTotalWait": self.minimiseTotalWait,
            "DynamicThroughput": self.dynamicThroughput,
            "Simple": self.simple,
            "Swarm": self.swarm
        }
        ## own RSS client
        self.resourceStatus = ResourceStatus()
        ## create fts graph
        ftsGraph = self.setup(self.channels, self.bandwidths, self.failedFiles)
        if not ftsGraph["OK"]:
            raise SHGraphCreationError(ftsGraph["Message"])
        self.log.info("%s has been constructed" % self.__class__.__name__)
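
The constructor above wires strategy names taken from the CS to bound methods, so replicationTree() later dispatches with a single dictionary lookup. A minimal sketch of that dispatcher pattern with hypothetical strategies:

class TinyHandler(object):
    """Toy illustration of the strategyDispatcher pattern, not the real handler."""

    def __init__(self):
        self.strategyDispatcher = {"Simple": self.simple, "Swarm": self.swarm}

    def simple(self, sourceSEs, targetSEs):
        return {"Strategy": "Simple", "Sources": sourceSEs, "Targets": targetSEs}

    def swarm(self, sourceSEs, targetSEs):
        return {"Strategy": "Swarm", "Sources": sourceSEs, "Targets": targetSEs}

    def replicationTree(self, sourceSEs, targetSEs, strategy):
        if strategy not in self.strategyDispatcher:
            raise ValueError("unsupported strategy '%s'" % strategy)
        return self.strategyDispatcher[strategy](sourceSEs, targetSEs)

if __name__ == "__main__":
    handler = TinyHandler()
    print(handler.replicationTree(["SRC-SE"], ["DST-SE"], "Simple"))
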
Exemplo n.º 38
0
if site:
    res = gConfig.getOptionsDict('/Resources/Sites/LCG/%s' % site)
    if not res['OK']:
        gLogger.error('The provided site (%s) is not known.' % site)
        DIRAC.exit(-1)
    ses.extend(res['Value']['SE'].replace(' ', '').split(','))
if not ses:
    gLogger.error('There were no SEs provided')
    DIRAC.exit()

readAllowed = []
writeAllowed = []
checkAllowed = []
removeAllowed = []

resourceStatus = ResourceStatus()

res = resourceStatus.getStorageElementStatus(ses)
if not res['OK']:
    gLogger.error('Storage Element %s does not exist' % ses)
    DIRAC.exit(-1)

reason = 'Forced with dirac-admin-allow-se by %s' % userName

for se, seOptions in res['Value'].items():

    resW = resC = resR = {'OK': False}

    # InActive is used on the CS model, Banned is the equivalent in RSS
    if read and seOptions.has_key('ReadAccess'):
Exemplo n.º 39
0
  def __init__( self, name, protocols = None, vo = None ):
    """ c'tor

    :param str name: SE name
    :param list protocols: requested protocols
    """

    self.vo = vo
    if not vo:
      result = getVOfromProxyGroup()
      if not result['OK']:
        return result
      self.vo = result['Value']
    self.opHelper = Operations( vo = self.vo )
    self.resources = Resources( vo = self.vo )

    proxiedProtocols = gConfig.getValue( '/LocalSite/StorageElements/ProxyProtocols', "" ).split( ',' )
    useProxy = False
    result = self.resources.getAccessProtocols( name )
    if result['OK']:
      ap = result['Value'][0]
      useProxy = ( self.resources.getAccessProtocolValue( ap, "Protocol", "UnknownProtocol" )
                   in proxiedProtocols )

    #print "Proxy", name, proxiedProtocols, \
    #gConfig.getValue( "/Resources/StorageElements/%s/AccessProtocol.1/Protocol" % name, "xxx" )

    if not useProxy:
      useProxy = gConfig.getValue( '/LocalSite/StorageElements/%s/UseProxy' % name, False )
    if not useProxy:
      useProxy = self.opHelper.getValue( '/Services/StorageElements/%s/UseProxy' % name, False )

    self.valid = True
    if protocols is None:
      res = StorageFactory( useProxy ).getStorages( name, protocolList = [] )
    else:
      res = StorageFactory( useProxy ).getStorages( name, protocolList = protocols )
    if not res['OK']:
      self.valid = False
      self.name = name
      self.errorReason = res['Message']
    else:
      factoryDict = res['Value']
      self.name = factoryDict['StorageName']
      self.options = factoryDict['StorageOptions']
      self.localProtocols = factoryDict['LocalProtocols']
      self.remoteProtocols = factoryDict['RemoteProtocols']
      self.storages = factoryDict['StorageObjects']
      self.protocolOptions = factoryDict['ProtocolOptions']
      self.turlProtocols = factoryDict['TurlProtocols']

    self.log = gLogger.getSubLogger( "SE[%s]" % self.name )

    self.readMethods = [ 'getFile',
                         'getAccessUrl',
                         'getTransportURL',
                         'prestageFile',
                         'prestageFileStatus',
                         'getDirectory']

    self.writeMethods = [ 'retransferOnlineFile',
                          'putFile',
                          'replicateFile',
                          'pinFile',
                          'releaseFile',
                          'createDirectory',
                          'putDirectory' ]

    self.removeMethods = [ 'removeFile', 'removeDirectory' ]

    self.checkMethods = [ 'exists',
                          'getDirectoryMetadata',
                          'getDirectorySize',
                          'getFileSize',
                          'getFileMetadata',
                          'listDirectory',
                          'isDirectory',
                          'isFile',
                           ]

    self.okMethods = [ 'getLocalProtocols',
                       'getPfnForProtocol',
                       'getPfnForLfn',
                       'getPfnPath',
                       'getProtocols',
                       'getRemoteProtocols',
                       'getStorageElementName',
                       'getStorageElementOption',
                       'getStorageParameters',
                       'isLocalSE' ]

    self.__resourceStatus = ResourceStatus()
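
The method lists above effectively define which RSS status type gates each StorageElement operation. A standalone sketch of that mapping (an assumption drawn from the lists, not the StorageElement API):

READ_METHODS = ["getFile", "getAccessUrl", "getTransportURL", "prestageFile", "prestageFileStatus", "getDirectory"]
WRITE_METHODS = ["retransferOnlineFile", "putFile", "replicateFile", "pinFile", "releaseFile", "createDirectory", "putDirectory"]
REMOVE_METHODS = ["removeFile", "removeDirectory"]
CHECK_METHODS = ["exists", "getDirectoryMetadata", "getDirectorySize", "getFileSize", "getFileMetadata", "listDirectory", "isDirectory", "isFile"]

def statusTypeFor(method):
    """Return the RSS status type that should be checked before running a given method."""
    if method in READ_METHODS:
        return "ReadAccess"
    if method in WRITE_METHODS:
        return "WriteAccess"
    if method in REMOVE_METHODS:
        return "RemoveAccess"
    if method in CHECK_METHODS:
        return "CheckAccess"
    return None  # okMethods and anything unknown are not gated here

if __name__ == "__main__":
    for m in ("putFile", "getFile", "removeFile", "isLocalSE"):
        print("%s -> %s" % (m, statusTypeFor(m)))
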
Exemplo n.º 40
0
    DIRAC.exit()

STATUS_TYPES = ["ReadAccess", "WriteAccess", "CheckAccess", "RemoveAccess"]
ALLOWED_STATUSES = ["Unknown", "InActive", "Banned", "Probing", "Degraded"]

statusAllowedDict = {}
for statusType in STATUS_TYPES:
    statusAllowedDict[statusType] = []

statusFlagDict = {}
statusFlagDict["ReadAccess"] = read
statusFlagDict["WriteAccess"] = write
statusFlagDict["CheckAccess"] = check
statusFlagDict["RemoveAccess"] = remove

resourceStatus = ResourceStatus()

res = resourceStatus.getStorageElementStatus(ses)
if not res["OK"]:
    gLogger.error("Storage Element %s does not exist" % ses)
    DIRAC.exit(-1)

reason = "Forced with dirac-admin-allow-se by %s" % userName

for se, seOptions in res["Value"].items():

    resW = resC = resR = {"OK": False}

    # InActive is used on the CS model, Banned is the equivalent in RSS
    for statusType in STATUS_TYPES:
        if statusFlagDict[statusType]:
Exemplo n.º 41
0
class StorageFactory:

  def __init__( self, useProxy = False, vo = None ):
    self.rootConfigPath = '/Resources/StorageElements'
    self.proxy = False
    self.proxy = useProxy
    self.resourceStatus = ResourceStatus()
    self.vo = vo
    if self.vo is None:
      result = getVOfromProxyGroup()
      if result['OK']:
        self.vo = result['Value']
      else:
        raise RuntimeError( "Can not get the current VO context" )
    self.remotePlugins = []
    self.localPlugins = []
    self.name = ''
    self.options = {}
    self.protocolDetails = []
    self.storages = []

  ###########################################################################################
  #
  # Below are public methods for obtaining storage objects
  #

  def getStorageName( self, initialName ):
    return self._getConfigStorageName( initialName, 'Alias' )

  def getStorage( self, parameterDict ):
    """ This instantiates a single storage for the details provided and doesn't check the CS.
    """
    # The storage name must be supplied.
    if parameterDict.has_key( 'StorageName' ):
      storageName = parameterDict['StorageName']
    else:
      errStr = "StorageFactory.getStorage: StorageName must be supplied"
      gLogger.error( errStr )
      return S_ERROR( errStr )

    # PluginName must be supplied otherwise nothing with work.
    if parameterDict.has_key( 'PluginName' ):
      pluginName = parameterDict['PluginName']
    # Temporary fix for backward compatibility
    elif parameterDict.has_key( 'ProtocolName' ):
      pluginName = parameterDict['ProtocolName']
    else:
      errStr = "StorageFactory.getStorage: PluginName must be supplied"
      gLogger.error( errStr )
      return S_ERROR( errStr )

    return self.__generateStorageObject( storageName, pluginName, parameterDict )

  def getStorages( self, storageName, pluginList = None ):
    """ Get an instance of a Storage based on the DIRAC SE name based on the CS entries CS

        'storageName' is the DIRAC SE name i.e. 'CERN-RAW'
        'pluginList' is an optional list of protocols if a sub-set is desired i.e ['SRM2','SRM1']
    """
    self.remotePlugins = []
    self.localPlugins = []
    self.name = ''
    self.options = {}
    self.protocolDetails = []
    self.storages = []
    if pluginList is None:
      pluginList = []
    if not self.vo:
      return S_ERROR( 'Mandatory vo parameter is not defined' )

    # Get the name of the storage provided
    res = self._getConfigStorageName( storageName, 'Alias' )
    if not res['OK']:
      return res
    storageName = res['Value']
    self.name = storageName

    # In case the storage is made from a base SE, get this information
    res = self._getConfigStorageName( storageName, 'BaseSE' )
    if not res['OK']:
      return res
    storageName = res['Value']

    # Get the options defined in the CS for this storage
    res = self._getConfigStorageOptions( storageName )
    if not res['OK']:
      return res
    self.options = res['Value']

    # Get the protocol specific details
    res = self._getConfigStorageProtocols( storageName )
    if not res['OK']:
      return res
    self.protocolDetails = res['Value']

    requestedLocalPlugins = []
    requestedRemotePlugins = []
    requestedProtocolDetails = []
    turlProtocols = []
    # Generate the protocol specific plug-ins
    for protocolDict in self.protocolDetails:
      pluginName = protocolDict.get( 'PluginName' )
      if pluginList and pluginName not in pluginList:
        continue
      protocol = protocolDict['Protocol']
      result = self.__generateStorageObject( storageName, pluginName, protocolDict )
      if result['OK']:
        self.storages.append( result['Value'] )
        if pluginName in self.localPlugins:
          turlProtocols.append( protocol )
          requestedLocalPlugins.append( pluginName )
        if pluginName in self.remotePlugins:
          requestedRemotePlugins.append( pluginName )
        requestedProtocolDetails.append( protocolDict )
      else:
        gLogger.info( result['Message'] )

    if len( self.storages ) > 0:
      resDict = {}
      resDict['StorageName'] = self.name
      resDict['StorageOptions'] = self.options
      resDict['StorageObjects'] = self.storages
      resDict['LocalPlugins'] = requestedLocalPlugins
      resDict['RemotePlugins'] = requestedRemotePlugins
      resDict['ProtocolOptions'] = requestedProtocolDetails
      resDict['TurlProtocols'] = turlProtocols
      return S_OK( resDict )
    else:
      errStr = "StorageFactory.getStorages: Failed to instantiate any storage protocols."
      gLogger.error( errStr, self.name )
      return S_ERROR( errStr )
  ###########################################################################################
  #
  # Below are internal methods for obtaining section/option/value configuration
  #

  def _getConfigStorageName( self, storageName, referenceType ):
    """
      This gets the name of the storage from the configuration service.
      If the storage is a reference to another SE the resolution is performed.

      'storageName' is the storage section to check in the CS
    """
    configPath = '%s/%s' % ( self.rootConfigPath, storageName )
    res = gConfig.getOptions( configPath )
    if not res['OK']:
      errStr = "StorageFactory._getConfigStorageName: Failed to get storage options"
      gLogger.error( errStr, res['Message'] )
      return S_ERROR( errStr )
    if not res['Value']:
      errStr = "StorageFactory._getConfigStorageName: Supplied storage doesn't exist."
      gLogger.error( errStr, configPath )
      return S_ERROR( errStr )
    if referenceType in res['Value']:
      configPath = cfgPath( self.rootConfigPath, storageName, referenceType )
      referenceName = gConfig.getValue( configPath )
      result = self._getConfigStorageName( referenceName, 'Alias' )
      if not result['OK']:
        return result
      resolvedName = result['Value']
    else:
      resolvedName = storageName
    return S_OK( resolvedName )

  def _getConfigStorageOptions( self, storageName ):
    """ Get the options associated to the StorageElement as defined in the CS
    """
    storageConfigPath = cfgPath( self.rootConfigPath, storageName )
    res = gConfig.getOptions( storageConfigPath )
    if not res['OK']:
      errStr = "StorageFactory._getStorageOptions: Failed to get storage options."
      gLogger.error( errStr, "%s: %s" % ( storageName, res['Message'] ) )
      return S_ERROR( errStr )
    options = res['Value']
    optionsDict = {}
    for option in options:

      if option in [ 'ReadAccess', 'WriteAccess', 'CheckAccess', 'RemoveAccess']:
        continue
      optionConfigPath = cfgPath( storageConfigPath, option )
      if option in [ 'VO' ]:
        optionsDict[option] = gConfig.getValue( optionConfigPath, [] )
      else:
        optionsDict[option] = gConfig.getValue( optionConfigPath, '' )

    res = self.resourceStatus.getStorageElementStatus( storageName )
    if not res[ 'OK' ]:
      errStr = "StorageFactory._getStorageOptions: Failed to get storage status"
      gLogger.error( errStr, "%s: %s" % ( storageName, res['Message'] ) )
      return S_ERROR( errStr )

    # For safety, we did not add the ${statusType}Access keys
    # this requires modifications in the StorageElement class

    # We add the dictionary with the statusTypes and values
    # { 'statusType1' : 'status1', 'statusType2' : 'status2' ... }
    optionsDict.update( res[ 'Value' ][ storageName ] )

    return S_OK( optionsDict )

  def _getConfigStorageProtocols( self, storageName ):
    """ Protocol specific information is present as sections in the Storage configuration
    """
    storageConfigPath = cfgPath( self.rootConfigPath, storageName )
    res = gConfig.getSections( storageConfigPath )
    if not res['OK']:
      errStr = "StorageFactory._getConfigStorageProtocols: Failed to get storage sections"
      gLogger.error( errStr, "%s: %s" % ( storageName, res['Message'] ) )
      return S_ERROR( errStr )
    protocolSections = res['Value']
    sortedProtocolSections = sortList( protocolSections )
    protocolDetails = []
    for protocolSection in sortedProtocolSections:
      res = self._getConfigStorageProtocolDetails( storageName, protocolSection )
      if not res['OK']:
        return res
      protocolDetails.append( res['Value'] )
    return S_OK( protocolDetails )

  def _getConfigStorageProtocolDetails( self, storageName, protocolSection ):
    """
      Parse the contents of the protocol block
    """
    # First obtain the options that are available
    protocolConfigPath = cfgPath( self.rootConfigPath, storageName, protocolSection )
    res = gConfig.getOptions( protocolConfigPath )
    if not res['OK']:
      errStr = "StorageFactory.__getProtocolDetails: Failed to get protocol options."
      gLogger.error( errStr, "%s: %s" % ( storageName, protocolSection ) )
      return S_ERROR( errStr )
    options = res['Value']

    # We must have certain values internally even if not supplied in CS
    protocolDict = {'Access':'', 'Host':'', 'Path':'', 'Port':'', 'Protocol':'', 'PluginName':'', 'SpaceToken':'', 'WSUrl':''}
    for option in options:
      configPath = cfgPath( protocolConfigPath, option )
      optionValue = gConfig.getValue( configPath, '' )
      protocolDict[option] = optionValue

    # This is a temporary fix for backward compatibility
    if "ProtocolName" in protocolDict and not protocolDict['PluginName']:
      protocolDict['PluginName'] = protocolDict['ProtocolName']
    protocolDict.pop( 'ProtocolName', None )

    # Evaluate the base path taking into account possible VO specific setting
    if self.vo:
      result = gConfig.getOptionsDict( cfgPath( protocolConfigPath, 'VOPath' ) )
      voPath = ''
      if result['OK']:
        voPath = result['Value'].get( self.vo, '' )
      if voPath:
        protocolDict['Path'] = voPath

    # Now update the local and remote protocol lists.
    # A warning will be given if the Access option is not set.
    if protocolDict['Access'].lower() == 'remote':
      self.remotePlugins.append( protocolDict['PluginName'] )
    elif protocolDict['Access'].lower() == 'local':
      self.localPlugins.append( protocolDict['PluginName'] )
    else:
      errStr = "StorageFactory.__getProtocolDetails: The 'Access' option for %s:%s is neither 'local' or 'remote'." % ( storageName, protocolSection )
      gLogger.warn( errStr )

    # The PluginName option must be defined
    if not protocolDict['PluginName']:
      errStr = "StorageFactory.__getProtocolDetails: 'PluginName' option is not defined."
      gLogger.error( errStr, "%s: %s" % ( storageName, protocolSection ) )
      return S_ERROR( errStr )

    return S_OK( protocolDict )

  ###########################################################################################
  #
  # Below is the method for obtaining the object instantiated for a provided storage configuration
  #

  def __generateStorageObject( self, storageName, pluginName, parameters ):

    storageType = pluginName
    if self.proxy:
      storageType = 'Proxy'

    objectLoader = ObjectLoader()
    result = objectLoader.loadObject( 'Resources.Storage.%sStorage' % storageType, storageType + 'Storage' )
    if not result['OK']:
      gLogger.error( 'Failed to load storage object: %s' % result['Message'] )
      return result

    storageClass = result['Value']
    try:
      storage = storageClass( storageName, parameters )
    except Exception, x:
      errStr = "StorageFactory._generateStorageObject: Failed to instantiate %s: %s" % ( storageName, x )
      gLogger.exception( errStr )
      return S_ERROR( errStr )

    return S_OK( storage )
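
__generateStorageObject loads a plugin class by name through DIRAC's ObjectLoader. A rough standalone analogue using importlib (module and class names here are hypothetical, and this is not the ObjectLoader API):

import importlib

def load_storage_class(plugin_name, package="Resources.Storage"):
    """Import <package>.<plugin_name>Storage and return the class of the same name."""
    module = importlib.import_module("%s.%sStorage" % (package, plugin_name))
    return getattr(module, plugin_name + "Storage")

## usage sketch (would raise ImportError unless such a module is importable):
## storage_cls = load_storage_class("GFAL2_SRM2")
## storage = storage_cls("CERN-DST", {})
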
Exemplo n.º 42
0
def main():
    read = False
    write = False
    check = False
    remove = False
    site = ""
    mute = False

    Script.registerSwitch("r", "AllowRead", "     Allow only reading from the storage element")
    Script.registerSwitch("w", "AllowWrite", "     Allow only writing to the storage element")
    Script.registerSwitch("k", "AllowCheck", "     Allow only check access to the storage element")
    Script.registerSwitch("v", "AllowRemove", "    Allow only remove access to the storage element")
    Script.registerSwitch("a", "All", "    Allow all access to the storage element")
    Script.registerSwitch("m", "Mute", "     Do not send email")
    Script.registerSwitch("S:", "Site=", "     Allow all SEs associated to site")
    # Registering arguments will automatically add their description to the help menu
    Script.registerArgument(["seGroupList: list of SEs or comma-separated SEs"])

    switches, ses = Script.parseCommandLine(ignoreErrors=True)

    for switch in switches:
        if switch[0].lower() in ("r", "allowread"):
            read = True
        if switch[0].lower() in ("w", "allowwrite"):
            write = True
        if switch[0].lower() in ("k", "allowcheck"):
            check = True
        if switch[0].lower() in ("v", "allowremove"):
            remove = True
        if switch[0].lower() in ("a", "all"):
            read = True
            write = True
            check = True
            remove = True
        if switch[0].lower() in ("m", "mute"):
            mute = True
        if switch[0].lower() in ("s", "site"):
            site = switch[1]

    # imports
    from DIRAC import gConfig, gLogger
    from DIRAC.ConfigurationSystem.Client.Helpers.Operations import Operations
    from DIRAC.ConfigurationSystem.Client.Helpers.Resources import getSites
    from DIRAC.Core.Security.ProxyInfo import getProxyInfo
    from DIRAC.DataManagementSystem.Utilities.DMSHelpers import resolveSEGroup
    from DIRAC.Interfaces.API.DiracAdmin import DiracAdmin
    from DIRAC.ResourceStatusSystem.Client.ResourceStatus import ResourceStatus

    if not (read or write or check or remove):
        # No switch was specified, means we need all of them
        gLogger.notice("No option given, all accesses will be allowed if they were not")
        read = True
        write = True
        check = True
        remove = True

    ses = resolveSEGroup(ses)
    diracAdmin = DiracAdmin()
    errorList = []
    setup = gConfig.getValue("/DIRAC/Setup", "")
    if not setup:
        print("ERROR: Could not contact Configuration Service")
        DIRAC.exit(2)

    res = getProxyInfo()
    if not res["OK"]:
        gLogger.error("Failed to get proxy information", res["Message"])
        DIRAC.exit(2)

    userName = res["Value"].get("username")
    if not userName:
        gLogger.error("Failed to get username for proxy")
        DIRAC.exit(2)

    if site:
        res = getSites()
        if not res["OK"]:
            gLogger.error(res["Message"])
            DIRAC.exit(-1)
        if site not in res["Value"]:
            gLogger.error("The provided site (%s) is not known." % site)
            DIRAC.exit(-1)
        ses.extend(res["Value"]["SE"].replace(" ", "").split(","))
    if not ses:
        gLogger.error("There were no SEs provided")
        DIRAC.exit()

    STATUS_TYPES = ["ReadAccess", "WriteAccess", "CheckAccess", "RemoveAccess"]
    ALLOWED_STATUSES = ["Unknown", "InActive", "Banned", "Probing", "Degraded"]

    statusAllowedDict = {}
    for statusType in STATUS_TYPES:
        statusAllowedDict[statusType] = []

    statusFlagDict = {}
    statusFlagDict["ReadAccess"] = read
    statusFlagDict["WriteAccess"] = write
    statusFlagDict["CheckAccess"] = check
    statusFlagDict["RemoveAccess"] = remove

    resourceStatus = ResourceStatus()

    res = resourceStatus.getElementStatus(ses, "StorageElement")
    if not res["OK"]:
        gLogger.error("Storage Element %s does not exist" % ses)
        DIRAC.exit(-1)

    reason = "Forced with dirac-admin-allow-se by %s" % userName

    for se, seOptions in res["Value"].items():
        # InActive is used on the CS model, Banned is the equivalent in RSS
        for statusType in STATUS_TYPES:
            if statusFlagDict[statusType]:
                if seOptions.get(statusType) == "Active":
                    gLogger.notice("%s status of %s is already Active" % (statusType, se))
                    continue
                if statusType in seOptions:
                    if not seOptions[statusType] in ALLOWED_STATUSES:
                        gLogger.notice(
                            "%s option for %s is %s, instead of %s"
                            % (statusType, se, seOptions["ReadAccess"], ALLOWED_STATUSES)
                        )
                        gLogger.notice("Try specifying the command switches")
                    else:
                        resR = resourceStatus.setElementStatus(
                            se, "StorageElement", statusType, "Active", reason, userName
                        )
                        if not resR["OK"]:
                            gLogger.fatal(
                                "Failed to update %s %s to Active, exit -" % (se, statusType), resR["Message"]
                            )
                            DIRAC.exit(-1)
                        else:
                            gLogger.notice("Successfully updated %s %s to Active" % (se, statusType))
                            statusAllowedDict[statusType].append(se)

    totalAllowed = 0
    totalAllowedSEs = []
    for statusType in STATUS_TYPES:
        totalAllowed += len(statusAllowedDict[statusType])
        totalAllowedSEs += statusAllowedDict[statusType]
    totalAllowedSEs = list(set(totalAllowedSEs))

    if not totalAllowed:
        gLogger.info("No storage elements were allowed")
        DIRAC.exit(-1)

    if mute:
        gLogger.notice("Email is muted by script switch")
        DIRAC.exit(0)

    subject = "%s storage elements allowed for use" % len(totalAllowedSEs)
    addressPath = "EMail/Production"
    address = Operations().getValue(addressPath, "")

    body = ""
    if read:
        body = "%s\n\nThe following storage elements were allowed for reading:" % body
        for se in statusAllowedDict["ReadAccess"]:
            body = "%s\n%s" % (body, se)
    if write:
        body = "%s\n\nThe following storage elements were allowed for writing:" % body
        for se in statusAllowedDict["WriteAccess"]:
            body = "%s\n%s" % (body, se)
    if check:
        body = "%s\n\nThe following storage elements were allowed for checking:" % body
        for se in statusAllowedDict["CheckAccess"]:
            body = "%s\n%s" % (body, se)
    if remove:
        body = "%s\n\nThe following storage elements were allowed for removing:" % body
        for se in statusAllowedDict["RemoveAccess"]:
            body = "%s\n%s" % (body, se)

    if not address:
        gLogger.notice("'%s' not defined in Operations, can not send Mail\n" % addressPath, body)
        DIRAC.exit(0)

    res = diracAdmin.sendMail(address, subject, body)
    gLogger.notice("Notifying %s" % address)
    if res["OK"]:
        gLogger.notice(res["Value"])
    else:
        gLogger.notice(res["Message"])

    DIRAC.exit(0)
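
The loop in main() changes a status type only if it was requested, is not already Active, and currently holds one of the states the script is allowed to override. A pure-Python sketch of that decision logic with hypothetical SE statuses:

STATUS_TYPES = ["ReadAccess", "WriteAccess", "CheckAccess", "RemoveAccess"]
ALLOWED_STATUSES = ["Unknown", "InActive", "Banned", "Probing", "Degraded"]

def plan_changes(seOptions, requested):
    """Return the status types of one SE that should be switched to Active."""
    toActivate = []
    for statusType in STATUS_TYPES:
        if not requested.get(statusType):
            continue
        current = seOptions.get(statusType)
        if current == "Active" or current not in ALLOWED_STATUSES:
            continue
        toActivate.append(statusType)
    return toActivate

if __name__ == "__main__":
    options = {"ReadAccess": "Banned", "WriteAccess": "Active", "CheckAccess": "Probing"}
    wanted = {"ReadAccess": True, "WriteAccess": True, "CheckAccess": True, "RemoveAccess": True}
    print(plan_changes(options, wanted))  # ['ReadAccess', 'CheckAccess']
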
Exemplo n.º 43
0
from DIRAC.ResourceStatusSystem.Client.ResourceStatus import ResourceStatus
from DIRAC.Core.Utilities.List                        import sortList

storageCFGBase = "/Resources/StorageElements"

res = gConfig.getSections( storageCFGBase, True )
if not res[ 'OK' ]:
  gLogger.error( 'Failed to get storage element info' )
  gLogger.error( res[ 'Message' ] )
  DIRAC.exit( -1 )
  
gLogger.info( "%s %s %s" % ( 'Storage Element'.ljust( 25 ), 'Read Status'.rjust( 15 ), 'Write Status'.rjust( 15 ) ) )

seList = sortList( res[ 'Value' ] )

resourceStatus = ResourceStatus()
 
res    = resourceStatus.getStorageElementStatus( seList )
if not res[ 'OK' ]:
  gLogger.error( "Failed to get StorageElement status for %s" % str( seList ) )

for k,v in res[ 'Value' ].items():
  
  readState, writeState = 'Active', 'Active'
  
  if v.has_key( 'Read' ):
    readState = v[ 'Read' ]  
  
  if v.has_key( 'Write' ):
    writeState = v[ 'Write']
  gLogger.notice("%s %s %s" % ( k.ljust(25),readState.rjust(15),writeState.rjust(15)) )
Exemplo n.º 44
0
    def executeForVO(self, vo):
        """
        Perform the synchronisation for one VO.

        :param vo: VO name
        :return: S_OK or S_ERROR
        """

        rSS = ResourceStatus()

        try:
            try:
                self.log.info(
                    "Login to Rucio as privileged user with host cert/key")
                certKeyTuple = Locations.getHostCertificateAndKeyLocation()
                if not certKeyTuple:
                    self.log.error("Hostcert/key location not set")
                    return S_ERROR("Hostcert/key location not set")
                hostcert, hostkey = certKeyTuple

                self.log.info("Logging in with a host cert/key pair:")
                self.log.debug("account: ",
                               self.clientConfig[vo]["privilegedAccount"])
                self.log.debug("rucio host: ",
                               self.clientConfig[vo]["rucioHost"])
                self.log.debug("auth  host: ",
                               self.clientConfig[vo]["authHost"])
                self.log.debug("CA cert path: ", self.caCertPath)
                self.log.debug("Cert location: ", hostcert)
                self.log.debug("Key location: ", hostkey)
                self.log.debug("VO: ", vo)

                client = Client(
                    account=self.clientConfig[vo]["privilegedAccount"],
                    rucio_host=self.clientConfig[vo]["rucioHost"],
                    auth_host=self.clientConfig[vo]["authHost"],
                    ca_cert=self.caCertPath,
                    auth_type="x509",
                    creds={
                        "client_cert": hostcert,
                        "client_key": hostkey
                    },
                    timeout=600,
                    user_agent="rucio-clients",
                    vo=vo,
                )
            except Exception as err:
                self.log.info(
                    "Login to Rucio as privileged user with host cert/key failed. Try username/password"
                )
                client = Client(account="root", auth_type="userpass")
        except Exception as exc:
            # login exception, skip this VO
            self.log.exception("Login for VO failed. VO skipped ",
                               "VO=%s" % vo,
                               lException=exc)
            return S_ERROR(str(format_exc()))

        self.log.info(
            " Rucio login successful - continue with the RSS synchronisation")
        # return S_OK()
        try:
            for rse in client.list_rses():
                thisSe = rse["rse"]
                self.log.info("Checking Dirac SE status for %s" % thisSe)
                resStatus = rSS.getElementStatus(thisSe,
                                                 "StorageElement",
                                                 vO=vo)
                dictSe = client.get_rse(thisSe)
                if resStatus["OK"]:
                    self.log.debug("SE status ", resStatus["Value"])
                    seAccessValue = resStatus["Value"][thisSe]
                    availabilityRead = seAccessValue["ReadAccess"] in ["Active", "Degraded"]
                    availabilityWrite = seAccessValue["WriteAccess"] in ["Active", "Degraded"]
                    availabilityDelete = seAccessValue["RemoveAccess"] in ["Active", "Degraded"]
                    isUpdated = False
                    if dictSe["availability_read"] != availabilityRead:
                        self.log.info(
                            "Set availability_read for RSE",
                            "RSE: %s, availability: %s" %
                            (thisSe, availabilityRead))
                        client.update_rse(
                            thisSe, {"availability_read": availabilityRead})
                        isUpdated = True
                    if dictSe["availability_write"] != availabilityWrite:
                        self.log.info(
                            "Set availability_write for RSE",
                            "RSE: %s, availability: %s" %
                            (thisSe, availabilityWrite))
                        client.update_rse(
                            thisSe, {"availability_write": availabilityWrite})
                        isUpdated = True
                    if dictSe["availability_delete"] != availabilityDelete:
                        self.log.info(
                            "Set availability_delete for RSE",
                            "RSE: %s, availability: %s" %
                            (thisSe, availabilityDelete),
                        )
                        client.update_rse(
                            thisSe,
                            {"availability_delete": availabilityDelete})
                        isUpdated = True
        except Exception as err:
            return S_ERROR(str(err))
        return S_OK()
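
The synchronisation above boils down to translating RSS access statuses into the three Rucio availability booleans and updating only the flags that differ. A pure-Python sketch with hypothetical values (no Rucio or DIRAC calls):

USABLE = ("Active", "Degraded")

def rucio_updates(seAccess, rseDict):
    """Return the availability flags that need to be pushed to the RSE."""
    wanted = {
        "availability_read": seAccess["ReadAccess"] in USABLE,
        "availability_write": seAccess["WriteAccess"] in USABLE,
        "availability_delete": seAccess["RemoveAccess"] in USABLE,
    }
    return dict((k, v) for k, v in wanted.items() if rseDict.get(k) != v)

if __name__ == "__main__":
    access = {"ReadAccess": "Active", "WriteAccess": "Banned", "RemoveAccess": "Degraded"}
    rse = {"availability_read": True, "availability_write": True, "availability_delete": True}
    print(rucio_updates(access, rse))  # {'availability_write': False}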