Example 1
0
class StorageElementCache(object):
  """Factory returning cached StorageElementItem instances, one per
  (thread, name, plugins, vo, proxy-location) combination."""

  def __init__(self):
    self.seCache = DictCache()

  def __call__(self, name, plugins=None, vo=None, hideExceptions=False):
    """Return the cached StorageElementItem for the given parameters,
    creating and caching it on a miss."""
    self.seCache.purgeExpired(expiredInSeconds=60)

    # Resolve the VO from the current proxy group when not given explicitly
    if not vo:
      voRes = getVOfromProxyGroup()
      if not voRes['OK']:
        return
      vo = voRes['Value']

    # Because the gfal2 context caches the proxy location,
    # we also use the proxy location as a key.
    # In practice, there should almost always be one, except for the REA
    # If we see its memory consumption exploding, this might be a place to look
    cacheKey = (threading.current_thread().ident, name, plugins, vo, getProxyLocation())

    seObj = self.seCache.get(cacheKey)
    if seObj:
      return seObj

    seObj = StorageElementItem(name, plugins, vo, hideExceptions=hideExceptions)
    # Keep the StorageElement cached for half an hour
    self.seCache.add(cacheKey, 1800, seObj)
    return seObj
Example 2
0
class PlotCache:
  """ Cache of generated plot files, purged periodically by a daemon thread. """

  def __init__( self, plotsLocation = False ):
    self.plotsLocation = plotsLocation
    self.alive = True
    self.__graphCache = DictCache( deleteFunction = _deleteGraph )
    # Lifetime (seconds) of a cached plot entry
    self.__graphLifeTime = 600
    self.purgeThread = threading.Thread( target = self.purgeExpired )
    self.purgeThread.setDaemon( 1 )
    self.purgeThread.start()

  def setPlotsLocation( self, plotsDir ):
    """ Set the directory where plots are written and delete any stale .png files in it.

        :param str plotsDir: path to the plots directory
    """
    self.plotsLocation = plotsDir
    for plot in os.listdir( self.plotsLocation ):
      if plot.find( ".png" ) > 0:
        plotLocation = "%s/%s" % ( self.plotsLocation, plot )
        gLogger.verbose( "Purging %s" % plotLocation )
        os.unlink( plotLocation )

  def purgeExpired( self ):
    """ Background loop: evict expired graphs every graph lifetime until shutdown. """
    while self.alive:
      time.sleep( self.__graphLifeTime )
      self.__graphCache.purgeExpired()

  def getPlot( self, plotHash, plotData, plotMetadata, subplotMetadata ):
    """
    Get plot from the cache if exists, else generate it
    """

    plotDict = self.__graphCache.get( plotHash )
    if plotDict == False:
      basePlotFileName = "%s/%s.png" % ( self.plotsLocation, plotHash )
      if subplotMetadata:
        retVal = graph( plotData, basePlotFileName, plotMetadata, metadata = subplotMetadata )
      else:
        retVal = graph( plotData, basePlotFileName, plotMetadata )
      if not retVal[ 'OK' ]:
        return retVal
      plotDict = retVal[ 'Value' ]
      if plotDict[ 'plot' ]:
        # Callers only need the file name, not the absolute path
        plotDict[ 'plot' ] = os.path.basename( basePlotFileName )
      self.__graphCache.add( plotHash, self.__graphLifeTime, plotDict )
    return S_OK( plotDict )

  def getPlotData( self, plotFileName ):
    """ Return the raw bytes of a generated plot file.

        :param str plotFileName: file name relative to the plots directory
        :return: S_OK( bytes )/S_ERROR()
    """
    filename = "%s/%s" % ( self.plotsLocation, plotFileName )
    try:
      # open() replaces the Python2-only builtin file(); the context manager
      # guarantees the descriptor is closed even if read() raises.
      with open( filename, "rb" ) as fd:
        data = fd.read()
    except Exception as v:
      return S_ERROR( "Can't open file %s: %s" % ( plotFileName, str( v ) ) )
    return S_OK( data )
Example 3
0
class StorageElementCache(object):
    """Factory returning cached StorageElementItem instances keyed by
    (name, protocols, vo)."""

    def __init__(self):
        self.seCache = DictCache()

    def __call__(self, name, protocols=None, vo=None, hideExceptions=False):
        """Return the cached StorageElementItem, building and caching it on a miss."""
        self.seCache.purgeExpired(expiredInSeconds=60)
        cacheKey = (name, protocols, vo)

        seObj = self.seCache.get(cacheKey)
        if seObj:
            return seObj

        seObj = StorageElementItem(name, protocols, vo, hideExceptions=hideExceptions)
        # Keep the StorageElement cached for half an hour
        self.seCache.add(cacheKey, 1800, seObj)
        return seObj
Example 4
0
class StorageElementCache( object ):
  """ Factory returning cached StorageElementItem instances, one per
      (thread, name, plugins, vo) combination. """

  def __init__( self ):
    self.seCache = DictCache()

  def __call__( self, name, plugins = None, vo = None, hideExceptions = False ):
    """ Return the cached StorageElementItem, building and caching it on a miss. """
    self.seCache.purgeExpired( expiredInSeconds = 60 )

    # Resolve the VO from the current proxy group when not given explicitly
    if not vo:
      voRes = getVOfromProxyGroup()
      if not voRes['OK']:
        return
      vo = voRes['Value']

    cacheKey = ( threading.current_thread().ident, name, plugins, vo )

    seObj = self.seCache.get( cacheKey )
    if seObj:
      return seObj

    seObj = StorageElementItem( name, plugins, vo, hideExceptions = hideExceptions )
    # Keep the StorageElement cached for half an hour
    self.seCache.add( cacheKey, 1800, seObj )
    return seObj
Example 5
0
class DataCache:
    """ Cache of report data and generated graph files, purged by a daemon thread. """

    def __init__(self):
        self.graphsLocation = os.path.join(
            gConfig.getValue('/LocalSite/InstancePath', rootPath), 'data',
            'accountingPlots')
        self.cachedGraphs = {}
        self.alive = True
        self.purgeThread = threading.Thread(target=self.purgeExpired)
        self.purgeThread.setDaemon(1)
        self.purgeThread.start()
        self.__dataCache = DictCache()
        # Evicted graph entries have their files removed via _deleteGraph
        self.__graphCache = DictCache(deleteFunction=self._deleteGraph)
        # Lifetimes (seconds) for cached report data and graph files
        self.__dataLifeTime = 600
        self.__graphLifeTime = 3600

    def setGraphsLocation(self, graphsDir):
        """ Set the graphs directory and delete any stale .png files in it.

            :param str graphsDir: path to the graphs directory
        """
        self.graphsLocation = graphsDir
        for graphName in os.listdir(self.graphsLocation):
            if graphName.find(".png") > 0:
                graphLocation = "%s/%s" % (self.graphsLocation, graphName)
                gLogger.verbose("Purging %s" % graphLocation)
                os.unlink(graphLocation)

    def purgeExpired(self):
        """ Background loop: evict expired graphs and data every 10 minutes. """
        while self.alive:
            time.sleep(600)
            self.__graphCache.purgeExpired()
            self.__dataCache.purgeExpired()

    def getReportData(self, reportRequest, reportHash, dataFunc):
        """ Get report data from cache if it exists, else generate and cache it.

            :param reportRequest: request passed through to dataFunc
            :param reportHash: cache key for this report
            :param dataFunc: callable producing S_OK(data)/S_ERROR()
            :return: S_OK(data)/S_ERROR()
        """
        reportData = self.__dataCache.get(reportHash)
        if reportData == False:
            retVal = dataFunc(reportRequest)
            if not retVal['OK']:
                return retVal
            reportData = retVal['Value']
            self.__dataCache.add(reportHash, self.__dataLifeTime, reportData)
        return S_OK(reportData)

    def getReportPlot(self, reportRequest, reportHash, reportData, plotFunc):
        """ Get report plot from cache if it exists, else generate and cache it.

            :param reportRequest: request passed through to plotFunc
            :param reportHash: cache key (also used as the plot base file name)
            :param reportData: data passed through to plotFunc
            :param plotFunc: callable producing S_OK(plotDict)/S_ERROR()
            :return: S_OK(plotDict)/S_ERROR()
        """
        plotDict = self.__graphCache.get(reportHash)
        if plotDict == False:
            basePlotFileName = "%s/%s" % (self.graphsLocation, reportHash)
            retVal = plotFunc(reportRequest, reportData, basePlotFileName)
            if not retVal['OK']:
                return retVal
            plotDict = retVal['Value']
            # Expose file names relative to the graphs directory
            if plotDict['plot']:
                plotDict['plot'] = "%s.png" % reportHash
            if plotDict['thumbnail']:
                plotDict['thumbnail'] = "%s.thb.png" % reportHash
            self.__graphCache.add(reportHash, self.__graphLifeTime, plotDict)
        return S_OK(plotDict)

    def getPlotData(self, plotFileName):
        """ Return the raw bytes of a generated plot file.

            :param str plotFileName: file name relative to the graphs directory
            :return: S_OK(bytes)/S_ERROR()
        """
        filename = "%s/%s" % (self.graphsLocation, plotFileName)
        try:
            # open() replaces the Python2-only builtin file(); the context
            # manager guarantees the descriptor is closed even if read() raises.
            with open(filename, "rb") as fd:
                data = fd.read()
        except Exception as e:
            return S_ERROR("Can't open file %s: %s" % (plotFileName, str(e)))
        return S_OK(data)
Example 6
0
class GatewayService( Service ):
  """ Gateway service: relays RPC calls, file transfers and messaging
      connections from clients to target services, using credentials
      delegated by the client.
  """

  GATEWAY_NAME = "Framework/Gateway"

  def __init__( self ):
    Service.__init__( self, GatewayService.GATEWAY_NAME )
    # Cached client connection args keyed by (DN, group, extraCredentials, isLimitedProxy)
    self.__delegatedCredentials = DictCache()
    # Hard cap for forwarded file transfers: 100 MiB
    self.__transferBytesLimit = 1024 * 1024 * 100

  def initialize( self ):
    """ Build the service URL and set up monitoring, thread pool and message broker.

        :return: S_OK()/S_ERROR()
    """
    #Build the URLs
    self._url = self._cfg.getURL()
    if not self._url:
      return S_ERROR( "Could not build service URL for %s" % GatewayService.GATEWAY_NAME )
    gLogger.verbose( "Service URL is %s" % self._url )
    #Discover Handler
    self._initMonitoring()
    self._threadPool = ThreadPool( 1,
                                    max( 0, self._cfg.getMaxThreads() ),
                                    self._cfg.getMaxWaitingPetitions() )
    self._threadPool.daemonize()
    self._msgBroker = MessageBroker( "%sMSB" % GatewayService.GATEWAY_NAME, threadPool = self._threadPool )
    #Messages are just forwarded, so don't instantiate message objects
    self._msgBroker.useMessageObjects( False )
    getGlobalMessageBroker().useMessageObjects( False )
    self._msgForwarder = MessageForwarder( self._msgBroker )
    return S_OK()

  #Threaded process function
  def _processInThread( self, clientTransport ):
    """ Handle one client connection end-to-end: handshake, proposal check,
        delegated-credential setup and action execution.
    """
    #Handshake
    try:
      clientTransport.handshake()
    except Exception:
      # Was a bare except: never swallow SystemExit/KeyboardInterrupt
      return
    #Add to the transport pool
    trid = self._transportPool.add( clientTransport )
    if not trid:
      return
    #Receive and check proposal
    result = self._receiveAndCheckProposal( trid )
    if not result[ 'OK' ]:
      self._transportPool.sendAndClose( trid, result )
      return
    proposalTuple = result[ 'Value' ]
    #Instantiate handler
    result = self.__getClientInitArgs( trid, proposalTuple )
    if not result[ 'OK' ]:
      self._transportPool.sendAndClose( trid, result )
      return
    clientInitArgs = result[ 'Value' ]
    #Execute the action
    result = self._processProposal( trid, proposalTuple, clientInitArgs )
    #Close the connection if required
    if result[ 'closeTransport' ]:
      self._transportPool.close( trid )
    return result

  def _receiveAndCheckProposal( self, trid ):
    """ Receive the client's action proposal and register any extra credentials.

        :param trid: transport id in the transport pool
        :return: S_OK( proposalTuple )/S_ERROR()
    """
    clientTransport = self._transportPool.get( trid )
    #Get the peer credentials
    credDict = clientTransport.getConnectingCredentials()
    #Receive the action proposal
    retVal = clientTransport.receiveData( 1024 )
    if not retVal[ 'OK' ]:
      gLogger.error( "Invalid action proposal", "%s %s" % ( self._createIdentityString( credDict,
                                                                                        clientTransport ),
                                                            retVal[ 'Message' ] ) )
      return S_ERROR( "Invalid action proposal" )
    proposalTuple = retVal[ 'Value' ]
    gLogger.debug( "Received action from client", "/".join( list( proposalTuple[1] ) ) )
    #Check if there are extra credentials
    if proposalTuple[2]:
      clientTransport.setExtraCredentials( proposalTuple[2] )
    return S_OK( proposalTuple )

  def __getClientInitArgs( self, trid, proposalTuple ):
    """ Build the connection arguments used to impersonate the client,
        requesting a delegated proxy (and caching it) when needed.

        :param trid: transport id in the transport pool
        :param proposalTuple: proposal received from the client
        :return: S_OK( dict )/S_ERROR(); S_OK() without value when the
                 client presented no x509 chain
    """
    clientTransport = self._transportPool.get( trid )
    #Get the peer credentials
    credDict = clientTransport.getConnectingCredentials()
    if 'x509Chain' not in credDict:
      return S_OK()
    cKey = ( credDict[ 'DN' ],
             credDict.get( 'group', False ),
             credDict.get( 'extraCredentials', False ),
             credDict[ 'isLimitedProxy' ] )
    dP = self.__delegatedCredentials.get( cKey, 3600 )
    idString = self._createIdentityString( credDict, clientTransport )
    if dP:
      gLogger.verbose( "Proxy for %s is cached" % idString )
      return S_OK( dP )
    result = self.__requestDelegation( clientTransport, credDict )
    if not result[ 'OK' ]:
      gLogger.warn( "Could not get proxy for %s: %s" % ( idString, result[ 'Message' ] ) )
      return result
    delChain = result[ 'Value' ]
    delegatedChain = delChain.dumpAllToString()[ 'Value' ]
    secsLeft = delChain.getRemainingSecs()[ 'Value' ] - 1
    clientInitArgs = {
                        BaseClient.KW_SETUP : proposalTuple[0][1],
                        BaseClient.KW_TIMEOUT : 600,
                        BaseClient.KW_IGNORE_GATEWAYS : True,
                        BaseClient.KW_USE_CERTIFICATES : False,
                        BaseClient.KW_PROXY_STRING : delegatedChain
                        }
    if BaseClient.KW_EXTRA_CREDENTIALS in credDict:
      clientInitArgs[ BaseClient.KW_EXTRA_CREDENTIALS ] = credDict[ BaseClient.KW_EXTRA_CREDENTIALS ]
    gLogger.warn( "Got delegated proxy for %s: %s secs left" % ( idString, secsLeft ) )
    #Cache for as long as the delegated proxy remains valid
    self.__delegatedCredentials.add( cKey, secsLeft, clientInitArgs )
    return S_OK( clientInitArgs )

  def __requestDelegation( self, clientTransport, credDict ):
    """ Ask the connecting client to delegate its proxy over the transport.

        :param clientTransport: transport to the client
        :param dict credDict: peer credentials (must contain 'x509Chain')
        :return: S_OK( X509Chain )/S_ERROR()
    """
    peerChain = credDict[ 'x509Chain' ]
    retVal = peerChain.getCertInChain()[ 'Value' ].generateProxyRequest()
    if not retVal[ 'OK' ]:
      return retVal
    delegationRequest = retVal[ 'Value' ]
    retVal = delegationRequest.dumpRequest()
    if not retVal[ 'OK' ]:
      retVal = S_ERROR( "Server Error: Can't generate delegation request" )
      clientTransport.sendData( retVal )
      return retVal
    gLogger.info( "Sending delegation request for %s" % delegationRequest.getSubjectDN()[ 'Value' ] )
    clientTransport.sendData( S_OK( { 'delegate' : retVal[ 'Value' ] } ) )
    delegatedCertChain = clientTransport.receiveData()
    delegatedChain = X509Chain( keyObj = delegationRequest.getPKey() )
    retVal = delegatedChain.loadChainFromString( delegatedCertChain )
    if not retVal[ 'OK' ]:
      retVal = S_ERROR( "Error in receiving delegated proxy: %s" % retVal[ 'Message' ] )
      clientTransport.sendData( retVal )
      return retVal
    return S_OK( delegatedChain )

  #Msg

  def _mbConnect( self, trid, clientInitArgs ):
    """ Messaging connect hook: nothing to do on the gateway side. """
    return S_OK()

  def _mbReceivedMsg( self, cliTrid, msgObj ):
    """ Forward a message received from a client to the target service. """
    return self._msgForwarder.msgFromClient( cliTrid, msgObj )

  def _mbDisconnect( self, cliTrid ):
    """ Tear down the forwarding state for a disconnected client. """
    self._msgForwarder.cliDisconnect( cliTrid )

  #Execute action

  def _executeAction( self, trid, proposalTuple, clientInitArgs ):
    """ Dispatch the proposed action (FileTransfer / RPC / Connection) to the
        target service and send the result back to the client.
    """
    clientTransport = self._transportPool.get( trid )
    credDict = clientTransport.getConnectingCredentials()
    targetService = proposalTuple[0][0]
    actionType = proposalTuple[1][0]
    actionMethod = proposalTuple[1][1]
    idString = self._createIdentityString( credDict, clientTransport )
    #OOkay! Lets do the magic!
    retVal = clientTransport.receiveData()
    if not retVal[ 'OK' ]:
      gLogger.error( "Error while receiving file description", retVal[ 'Message' ] )
      clientTransport.sendData( S_ERROR( "Error while receiving file description: %s" % retVal[ 'Message' ] ) )
      return
    if actionType == "FileTransfer":
      gLogger.warn( "Received a file transfer action from %s" % idString )
      clientTransport.sendData( S_OK( "Accepted" ) )
      retVal = self.__forwardFileTransferCall( targetService, clientInitArgs,
                                                actionMethod, retVal[ 'Value' ], clientTransport )
    elif actionType == "RPC":
      gLogger.info( "Forwarding %s/%s action to %s for %s" % ( actionType, actionMethod, targetService, idString ) )
      retVal = self.__forwardRPCCall( targetService, clientInitArgs, actionMethod, retVal[ 'Value' ] )
    elif actionType == "Connection" and actionMethod == "new":
      gLogger.info( "Initiating a messaging connection to %s for %s" % ( targetService, idString ) )
      retVal = self._msgForwarder.addClient( trid, targetService, clientInitArgs, retVal[ 'Value' ] )
    else:
      gLogger.warn( "Received an invalid %s/%s action from %s" % ( actionType, actionMethod, idString ) )
      retVal = S_ERROR( "Unknown type of action (%s)" % actionType )
    #TODO: Send back the data?
    #Strip the rpcStub before returning the result to the client
    if 'rpcStub' in retVal:
      retVal.pop( 'rpcStub' )
    clientTransport.sendData( retVal )
    return retVal

  def __forwardRPCCall( self, targetService, clientInitArgs, method, params ):
    """ Forward an RPC call to the target service; CS data queries are
        answered directly from the local configuration cache.
    """
    if targetService == "Configuration/Server":
      if method == "getCompressedDataIfNewer":
        #Relay CS data directly
        serviceVersion = gConfigurationData.getVersion()
        retDict = { 'newestVersion' : serviceVersion }
        clientVersion = params[0]
        if clientVersion < serviceVersion:
          retDict[ 'data' ] = gConfigurationData.getCompressedData()
        return S_OK( retDict )
    #Default
    rpcClient = RPCClient( targetService, **clientInitArgs )
    methodObj = getattr( rpcClient, method )
    return methodObj( *params )

  def __forwardFileTransferCall( self, targetService, clientInitArgs, method,
                                 params, clientTransport ):
    """ Forward a file transfer between the client and the target service,
        enforcing the configured transfer size limit on uploads.
    """
    transferRelay = TransferRelay( targetService, **clientInitArgs )
    transferRelay.setTransferLimit( self.__transferBytesLimit )
    cliFH = FileHelper( clientTransport )
    #Check file size
    if method.find( "ToClient" ) > -1:
      cliFH.setDirection( "send" )
    elif method.find( "FromClient" ) > -1:
      cliFH.setDirection( "receive" )
      if not self.__ftCheckMaxTransferSize( params[2] ):
        cliFH.markAsTransferred()
        return S_ERROR( "Transfer size is too big" )
    #Forward queries
    try:
      relayMethodObject = getattr( transferRelay, 'forward%s' % method )
    except Exception:
      # Was a bare except: only an unknown method should be reported here
      return S_ERROR( "Cannot forward unknown method %s" % method )
    result = relayMethodObject( cliFH, params )
    return result

  def __ftCheckMaxTransferSize( self, requestedTransferSize ):
    """ Return True when the requested transfer size is allowed
        (no limit configured, unknown size, or within the limit).
    """
    if not self.__transferBytesLimit:
      return True
    if not requestedTransferSize:
      return True
    if requestedTransferSize <= self.__transferBytesLimit:
      return True
    return False
Example 7
0
class CredentialsClient:
  """ Client for the WebAPI/Credentials service with local caching of
      consumers, requests and tokens.
  """

  CONSUMER_GRACE_TIME = 3600
  REQUEST_GRACE_TIME = 900

  def __init__( self, RPCFunctor = None ):
    # Allow injecting an RPC factory (used by tests); default to RPCClient
    if not RPCFunctor:
      self.__RPCFunctor = RPCClient
    else:
      self.__RPCFunctor = RPCFunctor
    self.__tokens = DictCache()
    self.__requests = DictCache()
    self.__consumers = DictCache( deleteFunction = self.__cleanConsumerCache )

  def __getRPC( self ):
    """ Return an RPC client for the credentials service. """
    return self.__RPCFunctor( "WebAPI/Credentials" )

  def __cleanReturn( self, result ):
    """ Strip the rpcStub from an RPC result before returning it to the caller. """
    if 'rpcStub' in result:
      result.pop( 'rpcStub' )
    return result

  ##
  # Consumer
  ##

  def generateConsumerPair( self, name, callback, icon, consumerKey = "" ):
    """ Create a consumer key/secret pair and cache the consumer data. """
    result = self.__getRPC().generateConsumerPair( name, callback, icon, consumerKey )
    if not result[ 'OK' ]:
      return self.__cleanReturn( result )
    self.__consumers.add( consumerKey, self.CONSUMER_GRACE_TIME, result[ 'Value' ] )
    return self.__cleanReturn( result )

  def getConsumerData( self, consumerKey ):
    """ Return consumer data, from the cache when possible. """
    cData = self.__consumers.get( consumerKey )
    if cData:
      return S_OK( cData )
    result = self.__getRPC().getConsumerData( consumerKey )
    if not result[ 'OK' ]:
      return self.__cleanReturn( result )
    self.__consumers.add( consumerKey, self.CONSUMER_GRACE_TIME, result[ 'Value' ] )
    return self.__cleanReturn( result )

  def deleteConsumer( self, consumerKey ):
    """ Delete a consumer and purge its cached tokens/requests. """
    self.__consumers.delete( consumerKey )
    result = self.__getRPC().deleteConsumer( consumerKey )
    if result[ 'OK' ]:
      self.__cleanConsumerCache( { 'key' : consumerKey } )
    return self.__cleanReturn( result )

  def getAllConsumers( self ):
    """ Retrieve all consumers and refresh the consumer cache from the records. """
    result = self.__getRPC().getAllConsumers()
    if not result[ 'OK' ]:
      return self.__cleanReturn( result )
    data = result[ 'Value' ]
    consIndex = { 'key': 0,
                  'name' : 0,
                  'callback' : 0,
                  'secret' : 0,
                  'icon' : 0 }
    # Map each field name to its column position in the returned records
    for key in consIndex:
      consIndex[ key ] = data[ 'Parameters' ].find( key )
    for record in data[ 'Records' ]:
      consData = {}
      for key in consIndex:
        consData[ key ] = record[ consIndex[ key ] ]
      self.__consumers.add( consData[ 'key' ], self.CONSUMER_GRACE_TIME, consData )
    return self.__cleanReturn( result )

  def __cleanConsumerCache( self, cData ):
    """ Drop all cached tokens and requests belonging to a consumer. """
    consumerKey = cData[ 'key' ]
    for dc in ( self.__tokens, self.__requests ):
      cKeys = dc.getKeys()
      for cKey in cKeys:
        if cKey[0] == consumerKey:
          dc.delete( cKey )

  ##
  # Requests
  ##

  def generateRequest( self, consumerKey, callback = "" ):
    """ Generate an OAuth request and cache its data. """
    result = self.__getRPC().generateRequest( consumerKey, callback )
    if not result[ 'OK' ]:
      return self.__cleanReturn( result )
    requestData = result[ 'Value' ]
    self.__requests.add( requestData[ 'request' ], result[ 'lifeTime' ] - 5, requestData )
    return self.__cleanReturn( result )

  def getRequestData( self, request ):
    """ Return request data, from the cache when possible. """
    data = self.__requests.get( request )
    if data:
      return S_OK( data )
    result = self.__getRPC().getRequestData( request )
    if not result[ 'OK' ]:
      return self.__cleanReturn( result )
    # Fix: cache the fetched request in the requests cache (was wrongly
    # added to the tokens cache, so this cache never got hits)
    self.__requests.add( request, result[ 'lifeTime' ] - 5, result[ 'Value' ] )
    return self.__cleanReturn( result )

  def deleteRequest( self, request ):
    """ Delete a request server-side and evict matching cached entries. """
    result = self.__getRPC().deleteRequest( request )
    if not result[ 'OK' ]:
      return self.__cleanReturn( result )
    cKeys = self.__requests.getKeys()
    for cKey in cKeys:
      # NOTE(review): request-cache keys are the request strings themselves,
      # so cKey[1] indexes a character here — verify the intended key layout
      if cKey[1] == request:
        self.__requests.delete( cKey )
    return self.__cleanReturn( result )

  ##
  # Verifiers
  ##

  def generateVerifier( self, consumerKey, request, userDN, userGroup, lifeTime = 3600 ):
    """ Generate a verifier for a request. """
    result = self.__getRPC().generateVerifier( consumerKey, request, userDN, userGroup, lifeTime )
    return self.__cleanReturn( result )

  def getVerifierData( self, verifier ):
    """ Return the data associated with a verifier. """
    result = self.__getRPC().getVerifierData( verifier )
    return self.__cleanReturn( result )

  def deleteVerifier( self, verifier ):
    """ Delete a verifier. """
    result = self.__getRPC().deleteVerifier( verifier )
    return self.__cleanReturn( result )

  def findVerifier( self, consumerKey, request ):
    """ Find the verifier for a consumer/request pair. """
    result = self.__getRPC().findVerifier( consumerKey, request )
    return self.__cleanReturn( result )

  def setVerifierProperties( self, consumerKey, request, verifier,
                                 userDN, userGroup, lifeTime ):
    """ Set the user/lifetime properties of a verifier. """
    result = self.__getRPC().setVerifierProperties( consumerKey, request, verifier,
                                                  userDN, userGroup, lifeTime )
    return self.__cleanReturn( result )


  ##
  # Tokens
  ##

  def generateToken( self, consumerKey, request, verifier ):
    """ Generate an access token and cache it under (consumerKey, token). """
    result = self.__getRPC().generateToken( consumerKey, request, verifier )
    if not result[ 'OK' ]:
      return self.__cleanReturn( result )
    tokenData = result[ 'Value' ]
    cKey = ( consumerKey, tokenData[ 'token' ] )
    self.__tokens.add( cKey, tokenData[ 'lifeTime' ] - 5, tokenData )
    return S_OK( tokenData )

  def getTokenData( self, consumerKey, token ):
    """ Return token data, from the cache when possible. """
    cKey = ( consumerKey, token )
    tokenData = self.__tokens.get( cKey )
    if tokenData:
      return S_OK( tokenData )
    result = self.__getRPC().getTokenData( consumerKey, token )
    if not result[ 'OK' ]:
      return self.__cleanReturn( result )
    tokenData = result[ 'Value' ]
    self.__tokens.add( cKey, tokenData[ 'lifeTime' ] - 5, tokenData )
    return self.__cleanReturn( result )

  def revokeUserToken( self, userDN, userGroup, token ):
    """ Revoke a user's token server-side and evict it from the cache. """
    result = self.__getRPC().revokeUserToken( userDN, userGroup, token )
    if not result[ 'OK' ]:
      return self.__cleanReturn( result )
    cKeys = self.__tokens.getKeys()
    for cKey in cKeys:
      # Fix: token-cache keys are (consumerKey, token) 2-tuples, so the old
      # cKey[3] lookup raised IndexError; DN/group are not part of the key,
      # so match on the token component only
      if cKey[1] == token:
        self.__tokens.delete( cKey )
    return self.__cleanReturn( result )

  def revokeToken( self, token ):
    """ Revoke a token server-side and evict it from the cache. """
    result = self.__getRPC().revokeToken( token )
    if not result[ 'OK' ]:
      return self.__cleanReturn( result )
    cKeys = self.__tokens.getKeys()
    for cKey in cKeys:
      # Fix: keys are (consumerKey, token) 2-tuples; cKey[3] raised IndexError
      if cKey[1] == token:
        self.__tokens.delete( cKey )
    return self.__cleanReturn( result )

  def cleanExpired( self ):
    """ Ask the service to clean expired credentials. """
    return self.__getRPC().cleanExpired()

  def getTokens( self, condDict = None ):
    """ Retrieve tokens matching condDict and refresh the token cache. """
    # Avoid the mutable default argument; {} keeps the original RPC behavior
    result = self.__getRPC().getTokens( condDict or {} )
    if not result[ 'OK' ]:
      return self.__cleanReturn( result )
    params = result[ 'Value' ][ 'Parameters']
    data = result[ 'Value' ][ 'Records' ]
    consumerKey = "unknown"
    # Fix: 'unknown' was an undefined name (NameError), not a string literal
    token = "unknown"
    for record in data:
      tokenData = {}
      for iPos in range( len( params ) ):
        if params[iPos] == "UserDN":
          tokenData[ 'userDN' ] = record[iPos]
        elif params[iPos] == "UserGroup":
          tokenData[ 'userGroup' ] = record[iPos]
        elif params[iPos] == "ConsumerKey":
          consumerKey = record[iPos]
        elif params[iPos] == "Token":
          token = record[iPos]
        elif params[iPos] == "Secret":
          tokenData[ 'secret' ] = record[iPos]
        elif params[iPos] == "LifeTime":
          tokenData[ 'lifeTime' ] = record[iPos]
      self.__tokens.add( ( consumerKey, token ), tokenData[ 'lifeTime' ], tokenData )
    return self.__cleanReturn( result )
Example 8
0
class ProxyManagerClient(object):
  def __init__(self):
    # Caches of registered-user records and of the plain / VOMS / pilot
    # proxy flavours; keys are (DN, group)-style tuples built by the methods
    self.__usersCache = DictCache()
    self.__proxiesCache = DictCache()
    self.__vomsProxiesCache = DictCache()
    self.__pilotProxiesCache = DictCache()
    # Proxy files dumped to disk; evicted entries are removed from disk
    # via __deleteTemporalFile
    self.__filesCache = DictCache(self.__deleteTemporalFile)

  def __deleteTemporalFile(self, path):
    """ Best-effort removal of a temporary proxy file; failures are ignored.

        :param str path: path to the file to remove
    """
    try:
      os.remove(path)
    except Exception:
      pass

  def clearCaches(self):
    """ Empty all in-memory proxy caches (cached proxy files are untouched)
    """
    for cache in (self.__usersCache,
                  self.__proxiesCache,
                  self.__vomsProxiesCache,
                  self.__pilotProxiesCache):
      cache.purgeAll()

  def __getSecondsLeftToExpiration(self, expiration, utc=True):
    """ Number of whole seconds from now until expiration.

        :param datetime expiration: expiration timestamp
        :param boolean utc: compare against UTC now instead of local now

        :return: int -- seconds remaining
    """
    now = datetime.datetime.utcnow() if utc else datetime.datetime.now()
    delta = expiration - now
    return delta.days * 86400 + delta.seconds

  def __refreshUserCache(self, validSeconds=0):
    """ Reload the (DN, group) -> record user cache from the ProxyManager service.

        :param int validSeconds: required seconds the proxy is valid for

        :return: S_OK()/S_ERROR()
    """
    rpcClient = RPCClient("Framework/ProxyManager", timeout=120)
    result = rpcClient.getRegisteredUsers(validSeconds)
    if not result['OK']:
      return result
    # Each entry expires together with the proxy it describes
    for record in result['Value']:
      key = (record['DN'], record['group'])
      lifetime = self.__getSecondsLeftToExpiration(record['expirationtime'])
      self.__usersCache.add(key, lifetime, record)
    return S_OK()

  @gUsersSync
  def userHasProxy(self, userDN, userGroup, validSeconds=0):
    """ Check if a user(DN-group) has a proxy in the proxy management
        Updates internal cache if needed to minimize queries to the service

        :param str userDN: user DN
        :param str userGroup: user group
        :param int validSeconds: proxy valid time in a seconds

        :return: S_OK()/S_ERROR()
    """
    # For backward compatibility reasons with versions prior to v7r1
    # we need to check for proxy with a group
    # AND for groupless proxy even if not specified
    candidateKeys = ((userDN, userGroup), (userDN, ''))

    if any(self.__usersCache.exists(key, validSeconds) for key in candidateKeys):
      return S_OK(True)

    # Cache miss: refresh the user list from the service and look again
    gLogger.verbose("Updating list of users in proxy management")
    refreshResult = self.__refreshUserCache(validSeconds)
    if not refreshResult['OK']:
      return refreshResult

    found = any(self.__usersCache.exists(key, validSeconds) for key in candidateKeys)
    return S_OK(found)

  @gUsersSync
  def getUserPersistence(self, userDN, userGroup, validSeconds=0):
    """ Check if a user(DN-group) has a persistent proxy in the proxy management
        Updates internal cache if needed to minimize queries to the service

        :param str userDN: user DN
        :param str userGroup: user group
        :param int validSeconds: proxy valid time in a seconds

        :return: S_OK()/S_ERROR()
    """
    cacheKey = (userDN, userGroup)
    cached = self.__usersCache.get(cacheKey, validSeconds)
    if cached and cached['persistent']:
      return S_OK(True)
    # Cache miss (or non-persistent entry): refresh from the service and retry
    gLogger.verbose("Updating list of users in proxy management")
    refreshResult = self.__refreshUserCache(validSeconds)
    if not refreshResult['OK']:
      return refreshResult
    cached = self.__usersCache.get(cacheKey, validSeconds)
    if cached:
      return S_OK(cached['persistent'])
    return S_OK(False)

  def setPersistency(self, userDN, userGroup, persistent):
    """ Set the persistency for user/group

        :param str userDN: user DN
        :param str userGroup: user group
        :param boolean persistent: persistent flag

        :return: S_OK()/S_ERROR()
    """
    # Ensure a real bool goes over the wire (replaces the two-branch "hack")
    persistentFlag = bool(persistent)
    rpcClient = RPCClient("Framework/ProxyManager", timeout=120)
    retVal = rpcClient.setPersistency(userDN, userGroup, persistentFlag)
    if not retVal['OK']:
      return retVal
    # Keep the cached user record in sync with the new flag
    cacheKey = (userDN, userGroup)
    record = self.__usersCache.get(cacheKey, 0)
    if record:
      record['persistent'] = persistentFlag
      self.__usersCache.add(cacheKey,
                            self.__getSecondsLeftToExpiration(record['expirationtime']),
                            record)
    return retVal

  def uploadProxy(self, proxy=None, restrictLifeTime=0, rfcIfPossible=False):
    """ Upload a proxy to the proxy management service using delegation

        :param X509Chain proxy: proxy as a chain
        :param int restrictLifeTime: proxy live time in a seconds
        :param boolean rfcIfPossible: make rfc proxy if possible

        :return: S_OK(dict)/S_ERROR() -- dict contain proxies
    """
    # Discover proxy location
    if isinstance(proxy, X509Chain):
      # Already a loaded chain; no file path involved
      chain = proxy
      proxyLocation = ""
    else:
      if not proxy:
        # No proxy given: look it up in the standard locations
        proxyLocation = Locations.getProxyLocation()
        if not proxyLocation:
          return S_ERROR("Can't find a valid proxy")
      elif isinstance(proxy, six.string_types):
        # A string is interpreted as a path to a proxy file
        proxyLocation = proxy
      else:
        return S_ERROR("Can't find a valid proxy")
      chain = X509Chain()
      result = chain.loadProxyFromFile(proxyLocation)
      if not result['OK']:
        return S_ERROR("Can't load %s: %s " % (proxyLocation, result['Message']))

    # Make sure it's valid
    if chain.hasExpired().get('Value'):
      return S_ERROR("Proxy %s has expired" % proxyLocation)
    # Only "bare" proxies may be uploaded; group/VOMS decorated ones are refused
    if chain.getDIRACGroup().get('Value') or chain.isVOMS().get('Value'):
      return S_ERROR("Cannot upload proxy with DIRAC group or VOMS extensions")

    rpcClient = RPCClient("Framework/ProxyManager", timeout=120)
    # Get a delegation request
    result = rpcClient.requestDelegationUpload(chain.getRemainingSecs()['Value'])
    if not result['OK']:
      return result
    reqDict = result['Value']
    # Generate delegated chain
    # Leave a 60s safety margin below the source chain's remaining lifetime
    chainLifeTime = chain.getRemainingSecs()['Value'] - 60
    if restrictLifeTime and restrictLifeTime < chainLifeTime:
      chainLifeTime = restrictLifeTime
    retVal = chain.generateChainFromRequestString(reqDict['request'],
                                                  lifetime=chainLifeTime,
                                                  rfc=rfcIfPossible)
    if not retVal['OK']:
      return retVal
    # Upload!
    result = rpcClient.completeDelegationUpload(reqDict['id'], retVal['Value'])
    if not result['OK']:
      return result
    # Older servers return the proxies under 'proxies'; fall back to 'Value'
    return S_OK(result.get('proxies') or result['Value'])

  @gProxiesSync
  def downloadProxy(self, userDN, userGroup, limited=False, requiredTimeLeft=1200,
                    cacheTime=14400, proxyToConnect=None, token=None):
    """ Get a proxy Chain from the proxy management

        :param str userDN: user DN
        :param str userGroup: user group
        :param boolean limited: if need limited proxy
        :param int requiredTimeLeft: required proxy live time in a seconds
        :param int cacheTime: store in a cache time in a seconds
        :param X509Chain proxyToConnect: proxy as a chain
        :param str token: valid token to get a proxy

        :return: S_OK(X509Chain)/S_ERROR()
    """
    # Serve from the cache when a proxy with enough time left is available
    cacheKey = (userDN, userGroup)
    if self.__proxiesCache.exists(cacheKey, requiredTimeLeft):
      return S_OK(self.__proxiesCache.get(cacheKey))
    # Generate a fresh request; the private key stays local, only the
    # request string travels to the service
    req = X509Request()
    req.generateProxyRequest(limited=limited)
    if proxyToConnect:
      rpcClient = RPCClient("Framework/ProxyManager", proxyChain=proxyToConnect, timeout=120)
    else:
      rpcClient = RPCClient("Framework/ProxyManager", timeout=120)
    if token:
      retVal = rpcClient.getProxyWithToken(userDN, userGroup, req.dumpRequest()['Value'],
                                           int(cacheTime + requiredTimeLeft), token)
    else:
      retVal = rpcClient.getProxy(userDN, userGroup, req.dumpRequest()['Value'],
                                  int(cacheTime + requiredTimeLeft))
    if not retVal['OK']:
      return retVal
    # Combine the returned certificates with the locally kept private key
    chain = X509Chain(keyObj=req.getPKey())
    retVal = chain.loadChainFromString(retVal['Value'])
    if not retVal['OK']:
      return retVal
    # Cache the chain for as long as it remains valid
    self.__proxiesCache.add(cacheKey, chain.getRemainingSecs()['Value'], chain)
    return S_OK(chain)

  def downloadProxyToFile(self, userDN, userGroup, limited=False, requiredTimeLeft=1200,
                          cacheTime=14400, filePath=None, proxyToConnect=None, token=None):
    """ Get a proxy Chain from the proxy management and write it to file

        :param str userDN: user DN
        :param str userGroup: user group
        :param boolean limited: if need limited proxy
        :param int requiredTimeLeft: required proxy live time in a seconds
        :param int cacheTime: store in a cache time in a seconds
        :param str filePath: path to save proxy
        :param X509Chain proxyToConnect: proxy as a chain
        :param str token: valid token to get a proxy

        :return: S_OK(X509Chain)/S_ERROR()
    """
    # Obtain (possibly cached) proxy chain first
    result = self.downloadProxy(userDN, userGroup, limited, requiredTimeLeft, cacheTime, proxyToConnect, token)
    if not result['OK']:
      return result
    chain = result['Value']
    # Dump to disk; on success the chain object is attached to the result
    result = self.dumpProxyToFile(chain, filePath)
    if result['OK']:
      result['chain'] = chain
    return result

  @gVOMSProxiesSync
  def downloadVOMSProxy(self, userDN, userGroup, limited=False, requiredTimeLeft=1200,
                        cacheTime=14400, requiredVOMSAttribute=None,
                        proxyToConnect=None, token=None):
    """ Download a proxy if needed and transform it into a VOMS one

        :param str userDN: user DN
        :param str userGroup: user group
        :param boolean limited: if need limited proxy
        :param int requiredTimeLeft: required proxy live time in a seconds
        :param int cacheTime: store in a cache time in a seconds
        :param str requiredVOMSAttribute: VOMS attr to add to the proxy
        :param X509Chain proxyToConnect: proxy as a chain
        :param str token: valid token to get a proxy

        :return: S_OK(X509Chain)/S_ERROR()
    """
    # The key includes the VOMS attribute and the limited flag, since both
    # change the contents of the returned chain
    cacheKey = (userDN, userGroup, requiredVOMSAttribute, limited)
    if self.__vomsProxiesCache.exists(cacheKey, requiredTimeLeft):
      return S_OK(self.__vomsProxiesCache.get(cacheKey))
    # No usable cached proxy: generate a fresh request key pair
    req = X509Request()
    req.generateProxyRequest(limited=limited)
    # Connect with the supplied proxy chain when given, default credentials otherwise
    if proxyToConnect:
      rpcClient = RPCClient("Framework/ProxyManager", proxyChain=proxyToConnect, timeout=120)
    else:
      rpcClient = RPCClient("Framework/ProxyManager", timeout=120)
    if token:
      retVal = rpcClient.getVOMSProxyWithToken(userDN, userGroup, req.dumpRequest()['Value'],
                                               int(cacheTime + requiredTimeLeft), token, requiredVOMSAttribute)

    else:
      retVal = rpcClient.getVOMSProxy(userDN, userGroup, req.dumpRequest()['Value'],
                                      int(cacheTime + requiredTimeLeft), requiredVOMSAttribute)
    if not retVal['OK']:
      return retVal
    # Attach the delegated chain to the locally held private key
    chain = X509Chain(keyObj=req.getPKey())
    retVal = chain.loadChainFromString(retVal['Value'])
    if not retVal['OK']:
      return retVal
    # Cache for as long as the proxy itself remains valid
    self.__vomsProxiesCache.add(cacheKey, chain.getRemainingSecs()['Value'], chain)
    return S_OK(chain)

  def downloadVOMSProxyToFile(self, userDN, userGroup, limited=False, requiredTimeLeft=1200,
                              cacheTime=14400, requiredVOMSAttribute=None, filePath=None,
                              proxyToConnect=None, token=None):
    """ Download a proxy if needed, transform it into a VOMS one and write it to file

        :param str userDN: user DN
        :param str userGroup: user group
        :param boolean limited: if need limited proxy
        :param int requiredTimeLeft: required proxy live time in a seconds
        :param int cacheTime: store in a cache time in a seconds
        :param str requiredVOMSAttribute: VOMS attr to add to the proxy
        :param str filePath: path to save proxy
        :param X509Chain proxyToConnect: proxy as a chain
        :param str token: valid token to get a proxy

        :return: S_OK(X509Chain)/S_ERROR()
    """
    # Obtain (possibly cached) VOMS proxy chain first
    result = self.downloadVOMSProxy(userDN, userGroup, limited, requiredTimeLeft, cacheTime,
                                    requiredVOMSAttribute, proxyToConnect, token)
    if not result['OK']:
      return result
    chain = result['Value']
    # Dump to disk; on success the chain object is attached to the result
    result = self.dumpProxyToFile(chain, filePath)
    if result['OK']:
      result['chain'] = chain
    return result

  def getPilotProxyFromDIRACGroup(self, userDN, userGroup, requiredTimeLeft=43200, proxyToConnect=None):
    """ Download a pilot proxy with VOMS extensions depending on the group

        :param str userDN: user DN
        :param str userGroup: user group
        :param int requiredTimeLeft: required proxy live time in seconds
        :param X509Chain proxyToConnect: proxy as a chain

        :return: S_OK(X509Chain)/S_ERROR()
    """
    # When the group carries a VOMS attribute, get a VOMS proxy; otherwise
    # fall back to a plain proxy with a warning
    vomsAttr = Registry.getVOMSAttributeForGroup(userGroup)
    if vomsAttr:
      return self.downloadVOMSProxy(userDN, userGroup, limited=False, requiredTimeLeft=requiredTimeLeft,
                                    requiredVOMSAttribute=vomsAttr, proxyToConnect=proxyToConnect)
    gLogger.warn("No voms attribute assigned to group %s when requested pilot proxy" % userGroup)
    return self.downloadProxy(userDN, userGroup, limited=False, requiredTimeLeft=requiredTimeLeft,
                              proxyToConnect=proxyToConnect)

  def getPilotProxyFromVOMSGroup(self, userDN, vomsAttr, requiredTimeLeft=43200, proxyToConnect=None):
    """ Download a pilot proxy with VOMS extensions depending on the group

        :param str userDN: user DN
        :param str vomsAttr: VOMS attribute
        :param int requiredTimeLeft: required proxy live time in a seconds
        :param X509Chain proxyToConnect: proxy as a chain

        :return: S_OK(X509Chain)/S_ERROR()
    """
    groups = Registry.getGroupsWithVOMSAttribute(vomsAttr)
    if not groups:
      return S_ERROR("No group found that has %s as voms attrs" % vomsAttr)

    # Try every matching group: return on the first success, otherwise
    # propagate the last failure
    for group in groups:
      result = self.downloadVOMSProxy(userDN, group,
                                      limited=False,
                                      requiredTimeLeft=requiredTimeLeft,
                                      requiredVOMSAttribute=vomsAttr,
                                      proxyToConnect=proxyToConnect)
      if result['OK']:
        break
    return result

  def getPayloadProxyFromDIRACGroup(self, userDN, userGroup, requiredTimeLeft, token=None, proxyToConnect=None):
    """ Download a payload proxy with VOMS extensions depending on the group

        :param str userDN: user DN
        :param str userGroup: user group
        :param int requiredTimeLeft: required proxy live time in a seconds
        :param str token: valid token to get a proxy
        :param X509Chain proxyToConnect: proxy as a chain

        :return: S_OK(X509Chain)/S_ERROR()
    """
    # Payload proxies are always limited; add the VOMS extension when the
    # group has one assigned, otherwise fall back to a plain limited proxy
    vomsAttr = Registry.getVOMSAttributeForGroup(userGroup)
    if vomsAttr:
      return self.downloadVOMSProxy(userDN, userGroup, limited=True, requiredTimeLeft=requiredTimeLeft,
                                    requiredVOMSAttribute=vomsAttr, proxyToConnect=proxyToConnect,
                                    token=token)
    gLogger.verbose("No voms attribute assigned to group %s when requested payload proxy" % userGroup)
    return self.downloadProxy(userDN, userGroup, limited=True, requiredTimeLeft=requiredTimeLeft,
                              proxyToConnect=proxyToConnect, token=token)

  def getPayloadProxyFromVOMSGroup(self, userDN, vomsAttr, token, requiredTimeLeft, proxyToConnect=None):
    """ Download a payload proxy with VOMS extensions depending on the VOMS attr

        :param str userDN: user DN
        :param str vomsAttr: VOMS attribute
        :param str token: valid token to get a proxy
        :param int requiredTimeLeft: required proxy live time in a seconds
        :param X509Chain proxyToConnect: proxy as a chain

        :return: S_OK(X509Chain)/S_ERROR()
    """
    matchingGroups = Registry.getGroupsWithVOMSAttribute(vomsAttr)
    if not matchingGroups:
      return S_ERROR("No group found that has %s as voms attrs" % vomsAttr)

    # Any group carrying the attribute will do: take the first one
    return self.downloadVOMSProxy(userDN,
                                  matchingGroups[0],
                                  limited=True,
                                  requiredTimeLeft=requiredTimeLeft,
                                  requiredVOMSAttribute=vomsAttr,
                                  proxyToConnect=proxyToConnect,
                                  token=token)

  def dumpProxyToFile(self, chain, destinationFile=None, requiredTimeLeft=600):
    """ Dump a proxy to a file. It's cached so multiple calls won't generate extra files

        :param X509Chain chain: proxy as a chain
        :param str destinationFile: path to store proxy
        :param int requiredTimeLeft: required proxy live time in a seconds

        :return: S_OK(str)/S_ERROR() -- value is the path of the written file
    """
    # The files cache is keyed by the hash of the chain
    result = chain.hash()
    if not result['OK']:
      return result
    cHash = result['Value']
    if self.__filesCache.exists(cHash, requiredTimeLeft):
      filepath = self.__filesCache.get(cHash)
      # The cached path may have been removed externally; verify it still exists
      if filepath and os.path.isfile(filepath):
        return S_OK(filepath)
      self.__filesCache.delete(cHash)
    retVal = chain.dumpAllToFile(destinationFile)
    if not retVal['OK']:
      return retVal
    filename = retVal['Value']
    # Keep the cache entry alive for as long as the proxy itself is valid
    self.__filesCache.add(cHash, chain.getRemainingSecs()['Value'], filename)
    return S_OK(filename)

  def deleteGeneratedProxyFile(self, chain):
    """ Delete a file generated by a dump

        The files cache is keyed by the chain hash (see dumpProxyToFile), so
        the hash must be computed before deleting the entry: deleting by the
        chain object itself never matched any key.

        :param X509Chain chain: proxy as a chain

        :return: S_OK()/S_ERROR()
    """
    result = chain.hash()
    if not result['OK']:
      return result
    self.__filesCache.delete(result['Value'])
    return S_OK()

  def deleteProxyBundle(self, idList):
    """ delete a list of id's

        :param list,tuple idList: list of identity numbers

        :return: S_OK(int)/S_ERROR()
    """
    # Delegate straight to the ProxyManager service
    return RPCClient("Framework/ProxyManager", timeout=120).deleteProxyBundle(idList)

  def requestToken(self, requesterDN, requesterGroup, numUses=1):
    """ Request a proxy token with the given number of allowed uses

        :param str requesterDN: user DN
        :param str requesterGroup: user group
        :param int numUses: number of uses

        :return: S_OK(tuple)/S_ERROR() -- tuple contain token, number uses
    """
    # Delegate straight to the ProxyManager service
    return RPCClient("Framework/ProxyManager", timeout=120).generateToken(requesterDN, requesterGroup, numUses)

  def renewProxy(self, proxyToBeRenewed=None, minLifeTime=3600, newProxyLifeTime=43200, proxyToConnect=None):
    """ Renew a proxy using the ProxyManager

        :param X509Chain proxyToBeRenewed: proxy to renew
        :param int minLifeTime: if proxy life time is less than this, renew. Skip otherwise
        :param int newProxyLifeTime: life time of new proxy
        :param X509Chain proxyToConnect: proxy to use for connecting to the service

        :return: S_OK(X509Chain)/S_ERROR()
    """
    retVal = multiProxyArgument(proxyToBeRenewed)
    # BUGFIX: check the operation status ('OK'), not 'Value' -- an S_ERROR
    # result carries no 'Value' key and would raise a KeyError here
    if not retVal['OK']:
      return retVal
    proxyToRenewDict = retVal['Value']

    # Nothing to do when the proxy still has enough life time left
    secs = proxyToRenewDict['chain'].getRemainingSecs()['Value']
    if secs > minLifeTime:
      deleteMultiProxy(proxyToRenewDict)
      return S_OK()

    if not proxyToConnect:
      proxyToConnectDict = {'chain': False, 'tempFile': False}
    else:
      retVal = multiProxyArgument(proxyToConnect)
      # BUGFIX: same 'OK' vs 'Value' check as above
      if not retVal['OK']:
        deleteMultiProxy(proxyToRenewDict)
        return retVal
      proxyToConnectDict = retVal['Value']

    # Extract identity, group and limitation flag from the proxy being renewed
    userDN = proxyToRenewDict['chain'].getIssuerCert()['Value'].getSubjectDN()['Value']
    retVal = proxyToRenewDict['chain'].getDIRACGroup()
    if not retVal['OK']:
      deleteMultiProxy(proxyToRenewDict)
      deleteMultiProxy(proxyToConnectDict)
      return retVal
    userGroup = retVal['Value']
    limited = proxyToRenewDict['chain'].isLimitedProxy()['Value']

    # Preserve any VOMS extension the old proxy carried
    voms = VOMS()
    retVal = voms.getVOMSAttributes(proxyToRenewDict['chain'])
    if not retVal['OK']:
      deleteMultiProxy(proxyToRenewDict)
      deleteMultiProxy(proxyToConnectDict)
      return retVal
    vomsAttrs = retVal['Value']
    if vomsAttrs:
      retVal = self.downloadVOMSProxy(userDN,
                                      userGroup,
                                      limited=limited,
                                      requiredTimeLeft=newProxyLifeTime,
                                      requiredVOMSAttribute=vomsAttrs[0],
                                      proxyToConnect=proxyToConnectDict['chain'])
    else:
      retVal = self.downloadProxy(userDN,
                                  userGroup,
                                  limited=limited,
                                  requiredTimeLeft=newProxyLifeTime,
                                  proxyToConnect=proxyToConnectDict['chain'])

    # Temporary proxy files are no longer needed, whatever the outcome
    deleteMultiProxy(proxyToRenewDict)
    deleteMultiProxy(proxyToConnectDict)

    if not retVal['OK']:
      return retVal

    chain = retVal['Value']

    # If the original proxy came from a real file, overwrite it in place
    if not proxyToRenewDict['tempFile']:
      return chain.dumpAllToFile(proxyToRenewDict['file'])

    return S_OK(chain)

  def getDBContents(self, condDict=None, sorting=None, start=0, limit=0):
    """ Get the contents of the db

        :param dict condDict: search condition
        :param list sorting: sorting specification, e.g. [['UserDN', 'DESC']]
        :param int start: first record to return
        :param int limit: maximum number of records (0 means no limit)

        :return: S_OK(dict)/S_ERROR() -- dict contain fields, record list, total records
    """
    # Avoid mutable default arguments: build fresh defaults on every call
    if condDict is None:
      condDict = {}
    if sorting is None:
      sorting = [['UserDN', 'DESC']]
    rpcClient = RPCClient("Framework/ProxyManager", timeout=120)
    return rpcClient.getContents(condDict, sorting, start, limit)

  def getVOMSAttributes(self, chain):
    """ Get the voms attributes for a chain

        :param X509Chain chain: proxy as a chain

        :return: S_OK(str)/S_ERROR()
    """
    # Thin wrapper around the VOMS helper
    voms = VOMS()
    return voms.getVOMSAttributes(chain)

  def getUploadedProxyLifeTime(self, DN, group):
    """ Get the remaining seconds for an uploaded proxy

        :param str DN: user DN
        :param str group: group

        :return: S_OK(int)/S_ERROR() -- 0 when no matching proxy is stored
    """
    result = self.getDBContents({'UserDN': [DN], 'UserGroup': [group]})
    if not result['OK']:
      return result
    contents = result['Value']
    records = contents['Records']
    if not records:
      return S_OK(0)
    # Resolve column positions from the returned parameter names
    names = list(contents['ParameterNames'])
    dnIdx = names.index('UserDN')
    groupIdx = names.index('UserGroup')
    expiryIdx = names.index('ExpirationTime')
    for record in records:
      if record[dnIdx] == DN and record[groupIdx] == group:
        delta = record[expiryIdx] - datetime.datetime.utcnow()
        remaining = delta.days * 86400 + delta.seconds
        # Never report a negative life time
        return S_OK(max(0, remaining))
    return S_OK(0)

  def getUserProxiesInfo(self):
    """ Get the user proxies uploaded info

        :return: S_OK(dict)/S_ERROR()
    """
    rpcClient = RPCClient("Framework/ProxyManager", timeout=120)
    result = rpcClient.getUserProxiesInfo()
    # Strip the RPC bookkeeping entry before handing the result back
    result.pop('rpcStub', None)
    return result
# Esempio n. 9 (Example no. 9)
# 0
class DataCache:
  """ Cache for accounting report data and rendered plot files.

      Holds two DictCache instances -- one for raw report data and one for
      generated graphs -- and runs a daemon thread that periodically purges
      expired entries from both.
  """

  def __init__( self ):
    # Plots are written under the instance path (falls back to rootPath)
    self.graphsLocation = os.path.join( gConfig.getValue( '/LocalSite/InstancePath', rootPath ), 'data', 'accountingPlots' )
    self.cachedGraphs = {}
    self.alive = True
    # Daemon thread so it does not prevent interpreter shutdown
    self.purgeThread = threading.Thread( target = self.purgeExpired )
    self.purgeThread.setDaemon( 1 )
    self.purgeThread.start()
    self.__dataCache = DictCache()
    self.__graphCache = DictCache( deleteFunction = self._deleteGraph )
    self.__dataLifeTime = 600
    self.__graphLifeTime = 3600

  def setGraphsLocation( self, graphsDir ):
    """ Set the plots directory and remove any stale .png files found in it """
    self.graphsLocation = graphsDir
    for graphName in os.listdir( self.graphsLocation ):
      if graphName.find( ".png" ) > 0:
        graphLocation = "%s/%s" % ( self.graphsLocation, graphName )
        gLogger.verbose( "Purging %s" % graphLocation )
        os.unlink( graphLocation )

  def purgeExpired( self ):
    """ Background loop: purge both caches every 10 minutes while alive """
    while self.alive:
      time.sleep( 600 )
      self.__graphCache.purgeExpired()
      self.__dataCache.purgeExpired()

  def getReportData( self, reportRequest, reportHash, dataFunc ):
    """
    Get report data from cache if exists, else generate it
    """
    reportData = self.__dataCache.get( reportHash )
    # DictCache.get returns False on a cache miss -- presumably no cached
    # value is ever the boolean False itself; TODO confirm
    if reportData == False:
      retVal = dataFunc( reportRequest )
      if not retVal[ 'OK' ]:
        return retVal
      reportData = retVal[ 'Value' ]
      self.__dataCache.add( reportHash, self.__dataLifeTime, reportData )
    return S_OK( reportData )

  def getReportPlot( self, reportRequest, reportHash, reportData, plotFunc ):
    """
    Get report plot from cache if exists, else generate it
    """
    plotDict = self.__graphCache.get( reportHash )
    if plotDict == False:
      basePlotFileName = "%s/%s" % ( self.graphsLocation, reportHash )
      retVal = plotFunc( reportRequest, reportData, basePlotFileName )
      if not retVal[ 'OK' ]:
        return retVal
      plotDict = retVal[ 'Value' ]
      # Store bare file names (not full paths) in the cached dict
      if plotDict[ 'plot' ]:
        plotDict[ 'plot' ] = "%s.png" % reportHash
      if plotDict[ 'thumbnail' ]:
        plotDict[ 'thumbnail' ] = "%s.thb.png" % reportHash
      self.__graphCache.add( reportHash, self.__graphLifeTime, plotDict )
    return S_OK( plotDict )

  def getPlotData( self, plotFileName ):
    """ Read a generated plot file and return its raw contents """
    filename = "%s/%s" % ( self.graphsLocation, plotFileName )
    try:
      # BUGFIX: the Python-2-only builtin file() and the 'except E, e' syntax
      # were replaced with open() and 'except E as e' (valid on 2.6+ and 3.x);
      # the with-block also guarantees the handle is closed on error
      with open( filename, "rb" ) as fd:
        data = fd.read()
    except Exception as e:
      return S_ERROR( "Can't open file %s: %s" % ( plotFileName, str( e ) ) )
    return S_OK( data )
# Esempio n. 10 (Example no. 10)
# 0
class GridPilotDirector(PilotDirector):
    """
    Base Grid PilotDirector class
    Derived classes must declare:

      * self.Middleware: It must correspond to the string before "PilotDirector".
          (For proper naming of the logger)
      * self.ResourceBrokers: list of Brokers used by the Director.
          (For proper error reporting)
  """
    def __init__(self, submitPool):
        """
        Define some defaults and call parent __init__

        :param submitPool: name of the submit pool this director serves
        """
        # Grid environment setup (module-level default; may be overridden
        # from the CS in configureFromSection)
        self.gridEnv = GRIDENV

        # JDL-related defaults, all overridable from the CS
        self.cpuPowerRef = CPU_POWER_REF
        self.requirements = REQUIREMENTS
        self.rank = RANK
        self.fuzzyRank = FUZZY_RANK

        # Caches of failing WMS endpoints, of already-reported tickets and of
        # list-match results
        self.__failingWMSCache = DictCache()
        self.__ticketsWMSCache = DictCache()
        self.__listMatchWMSCache = DictCache()

        PilotDirector.__init__(self, submitPool)

    def configure(self, csSection, submitPool):
        """
        Here goes common configuration for all Grid PilotDirectors

        Reloads the configuration, drops resource brokers currently cached as
        failing, and randomizes the remaining broker list.

        :param str csSection: CS section to configure from
        :param str submitPool: submit pool name
        """
        PilotDirector.configure(self, csSection, submitPool)
        self.reloadConfiguration(csSection, submitPool)

        self.__failingWMSCache.purgeExpired()
        self.__ticketsWMSCache.purgeExpired()
        # Exclude brokers that recently failed (still present in the cache)
        for rb in self.__failingWMSCache.getKeys():
            if rb in self.resourceBrokers:
                try:
                    self.resourceBrokers.remove(rb)
                except ValueError:
                    # BUGFIX: narrowed from a bare except; list.remove only
                    # raises ValueError when the item is already gone
                    pass

        # Spread the load over the remaining brokers
        self.resourceBrokers = List.randomize(self.resourceBrokers)

        if self.gridEnv:
            self.log.info(' GridEnv:        ', self.gridEnv)
        if self.resourceBrokers:
            self.log.info(' ResourceBrokers:', ', '.join(self.resourceBrokers))

    def configureFromSection(self, mySection):
        """
        Reload this director's configuration values from the given CS section.

        :param str mySection: CS section path
        """
        PilotDirector.configureFromSection(self, mySection)

        self.gridEnv = gConfig.getValue(mySection + '/GridEnv', self.gridEnv)
        if not self.gridEnv:
            # No specific option found, try a general one
            setup = gConfig.getValue('/DIRAC/Setup', '')
            if setup:
                instance = gConfig.getValue(
                    '/DIRAC/Setups/%s/WorkloadManagement' % setup, '')
                if instance:
                    self.gridEnv = gConfig.getValue(
                        '/Systems/WorkloadManagement/%s/GridEnv' % instance, '')

        # All remaining options default to the current attribute values
        self.resourceBrokers = gConfig.getValue(mySection + '/ResourceBrokers', self.resourceBrokers)
        self.cpuPowerRef = gConfig.getValue(mySection + '/CPUPowerRef', self.cpuPowerRef)
        self.requirements = gConfig.getValue(mySection + '/Requirements', self.requirements)
        self.rank = gConfig.getValue(mySection + '/Rank', self.rank)
        self.fuzzyRank = gConfig.getValue(mySection + '/FuzzyRank', self.fuzzyRank)

    def _submitPilots(self, workDir, taskQueueDict, pilotOptions,
                      pilotsToSubmit, ceMask, submitPrivatePilot, privateTQ,
                      proxy, pilotsPerJob):
        """
      This method does the actual pilot submission to the Grid RB
      The logic is as follows:
      - If there are no available RB it return error
      - If there is no VOMS extension in the proxy, return error
      - It creates a temp directory
      - Prepare a JDL
        it has some part common to gLite and LCG (the payload description)
        it has some part specific to each middleware

      Recurses (with a fresh proxy token) when pilotsToSubmit exceeds
      pilotsPerJob; returns S_OK(<number of submitted pilots>).
    """
        taskQueueID = taskQueueDict['TaskQueueID']
        # ownerDN = taskQueueDict['OwnerDN']
        credDict = proxy.getCredentials()['Value']
        ownerDN = credDict['identity']
        ownerGroup = credDict['group']

        if not self.resourceBrokers:
            # Since we can exclude RBs from the list, it may become empty
            return S_ERROR(ERROR_RB)

        # Need to get VOMS extension for the later interactions with WMS
        ret = gProxyManager.getVOMSAttributes(proxy)
        if not ret['OK']:
            self.log.error(ERROR_VOMS, ret['Message'])
            return S_ERROR(ERROR_VOMS)
        if not ret['Value']:
            return S_ERROR(ERROR_VOMS)

        workingDirectory = tempfile.mkdtemp(prefix='TQ_%s_' % taskQueueID,
                                            dir=workDir)
        self.log.verbose('Using working Directory:', workingDirectory)

        # Write JDL
        retDict = self._prepareJDL(taskQueueDict, workingDirectory,
                                   pilotOptions, pilotsPerJob, ceMask,
                                   submitPrivatePilot, privateTQ)
        jdl = retDict['JDL']
        pilotRequirements = retDict['Requirements']
        rb = retDict['RB']
        if not jdl:
            # JDL preparation failed; clean up the temp dir best-effort
            try:
                shutil.rmtree(workingDirectory)
            except:
                pass
            return S_ERROR(ERROR_JDL)

        # Check that there are available queues for the Job:
        if self.enableListMatch:
            availableCEs = []
            now = Time.dateTime()
            # Cached list-match result; None means no cached entry
            availableCEs = self.listMatchCache.get(pilotRequirements)
            if availableCEs is None:
                availableCEs = self._listMatch(proxy, jdl, taskQueueID, rb)
                if availableCEs != False:
                    self.log.verbose('LastListMatch', now)
                    self.log.verbose('AvailableCEs ', availableCEs)
                    self.listMatchCache.add(
                        pilotRequirements,
                        self.listMatchDelay * 60,
                        value=availableCEs)  # it is given in minutes
            if not availableCEs:
                # No CE can run this pilot; clean up and bail out
                try:
                    shutil.rmtree(workingDirectory)
                except:
                    pass
                return S_ERROR(ERROR_CE + ' TQ: %d' % taskQueueID)

        # Now we are ready for the actual submission, so

        self.log.verbose('Submitting Pilots for TaskQueue', taskQueueID)

        # FIXME: what is this?? If it goes on the super class, it is doomed
        submitRet = self._submitPilot(proxy, pilotsPerJob, jdl, taskQueueID,
                                      rb)
        try:
            shutil.rmtree(workingDirectory)
        except:
            pass
        if not submitRet:
            return S_ERROR('Pilot Submission Failed for TQ %d ' % taskQueueID)
        # pilotReference, resourceBroker = submitRet

        submittedPilots = 0

        if pilotsPerJob != 1 and len(submitRet) != pilotsPerJob:
            # Parametric jobs are used
            for pilotReference, resourceBroker in submitRet:
                # Expand the parent reference into the per-pilot child references
                pilotReference = self._getChildrenReferences(
                    proxy, pilotReference, taskQueueID)
                submittedPilots += len(pilotReference)
                pilotAgentsDB.addPilotTQReference(pilotReference, taskQueueID,
                                                  ownerDN, ownerGroup,
                                                  resourceBroker,
                                                  self.gridMiddleware,
                                                  pilotRequirements)
        else:
            for pilotReference, resourceBroker in submitRet:
                pilotReference = [pilotReference]
                submittedPilots += len(pilotReference)
                pilotAgentsDB.addPilotTQReference(pilotReference, taskQueueID,
                                                  ownerDN, ownerGroup,
                                                  resourceBroker,
                                                  self.gridMiddleware,
                                                  pilotRequirements)

        # add some sleep here
        time.sleep(0.1 * submittedPilots)

        if pilotsToSubmit > pilotsPerJob:
            # Additional submissions are necessary, need to get a new token and iterate.
            pilotsToSubmit -= pilotsPerJob
            result = gProxyManager.requestToken(
                ownerDN, ownerGroup, max(pilotsToSubmit,
                                         self.maxJobsInFillMode))
            if not result['OK']:
                self.log.error(ERROR_TOKEN, result['Message'])
                result = S_ERROR(ERROR_TOKEN)
                result['Value'] = submittedPilots
                return result
            (token, numberOfUses) = result['Value']
            # Replace any previous ProxyToken option with the fresh token
            for option in pilotOptions:
                if option.find('-o /Security/ProxyToken=') == 0:
                    pilotOptions.remove(option)
            pilotOptions.append('-o /Security/ProxyToken=%s' % token)
            pilotsPerJob = max(
                1, min(pilotsPerJob,
                       int(numberOfUses / self.maxJobsInFillMode)))
            # Recurse for the remaining pilots; accumulate the submitted count
            result = self._submitPilots(workDir, taskQueueDict, pilotOptions,
                                        pilotsToSubmit, ceMask,
                                        submitPrivatePilot, privateTQ, proxy,
                                        pilotsPerJob)
            if not result['OK']:
                if 'Value' not in result:
                    result['Value'] = 0
                result['Value'] += submittedPilots
                return result
            submittedPilots += result['Value']

        return S_OK(submittedPilots)

    def _prepareJDL(self, taskQueueDict, workingDirectory, pilotOptions,
                    pilotsToSubmit, ceMask, submitPrivatePilot, privateTQ):
        """
        Prepare the submission JDL; must be overridden in a subclass.
        """
        # Abstract-method guard: reaching this code is a programming error
        self.log.error('_prepareJDL() method should be implemented in a subclass')
        sys.exit()

    def _JobJDL(self, taskQueueDict, pilotOptions, ceMask):
        """
     The Job JDL is the same for LCG and GLite

     :return: (pilotJDL, pilotRequirements) tuple of JDL text and the
              requirements expression used in it
    """
        # The pilot script is shipped in the input sandbox, so only its base
        # name appears as the Executable
        pilotJDL = 'Executable     = "%s";\n' % os.path.basename(self.pilot)
        executable = self.pilot

        pilotJDL += 'Arguments     = "%s";\n' % ' '.join(pilotOptions)

        pilotJDL += 'CPUTimeRef    = %s;\n' % taskQueueDict['CPUTime']

        pilotJDL += 'CPUPowerRef   = %s;\n' % self.cpuPowerRef

        # ClassAd snippet computing the queue's normalized CPU work from the
        # published GlueCECapability entries (falling back to
        # GlueHostBenchmarkSI00 when no scaling reference is published)
        pilotJDL += """CPUWorkRef    = real( CPUTimeRef * CPUPowerRef );

Lookup        = "CPUScalingReferenceSI00=*";
cap = isList( other.GlueCECapability ) ? other.GlueCECapability : { "dummy" };
i0 = regexp( Lookup, cap[0] ) ? 0 : undefined;
i1 = isString( cap[1] ) && regexp( Lookup, cap[1] ) ? 1 : i0;
i2 = isString( cap[2] ) && regexp( Lookup, cap[2] ) ? 2 : i1;
i3 = isString( cap[3] ) && regexp( Lookup, cap[3] ) ? 3 : i2;
i4 = isString( cap[4] ) && regexp( Lookup, cap[4] ) ? 4 : i3;
i5 = isString( cap[5] ) && regexp( Lookup, cap[5] ) ? 5 : i4;
index = isString( cap[6] ) && regexp( Lookup, cap[6] ) ? 6 : i5;
i = isUndefined( index ) ? 0 : index;

QueuePowerRef = real( ! isUndefined( index ) ? int( substr( cap[i], size( Lookup ) - 1 ) ) : other.GlueHostBenchmarkSI00 );
QueueTimeRef  = real( other.GlueCEPolicyMaxCPUTime * 60 );
QueueWorkRef  = QueuePowerRef * QueueTimeRef;
"""

        requirements = list(self.requirements)
        if 'GridCEs' in taskQueueDict and taskQueueDict['GridCEs']:
            # if there an explicit Grig CE requested by the TQ, remove the Ranking requirement
            for req in self.requirements:
                if req.strip().lower()[:6] == 'rank >':
                    requirements.remove(req)

        requirements.append('QueueWorkRef > CPUWorkRef')

        # Restrict to the CE host names in the mask
        siteRequirements = '\n || '.join(
            ['other.GlueCEInfoHostName == "%s"' % s for s in ceMask])
        requirements.append("( %s\n )" % siteRequirements)

        pilotRequirements = '\n && '.join(requirements)

        pilotJDL += 'pilotRequirements  = %s;\n' % pilotRequirements

        pilotJDL += 'Rank          = %s;\n' % self.rank
        pilotJDL += 'FuzzyRank     = %s;\n' % self.fuzzyRank
        pilotJDL += 'StdOutput     = "%s";\n' % outputSandboxFiles[0]
        pilotJDL += 'StdError      = "%s";\n' % outputSandboxFiles[1]

        pilotJDL += 'InputSandbox  = { "%s" };\n' % '", "'.join(
            [self.install, executable] + self.extraModules)

        pilotJDL += 'OutputSandbox = { %s };\n' % ', '.join(
            ['"%s"' % f for f in outputSandboxFiles])

        self.log.verbose(pilotJDL)

        return (pilotJDL, pilotRequirements)

    def parseListMatchStdout(self, proxy, cmd, taskQueueID, rb):
        """
      Parse List Match stdout to return list of matched CE's

      :return: list of CE endpoints (possibly empty), or False when the
               command itself failed to run
    """
        self.log.verbose('Executing List Match for TaskQueue', taskQueueID)

        startTime = time.time()
        ret = executeGridCommand(proxy, cmd, self.gridEnv)

        if not ret['OK']:
            self.log.error('Failed to execute List Match:', ret['Message'])
            self.__sendErrorMail(rb, 'List Match', cmd, ret, proxy)
            return False
        if ret['Value'][0] != 0:
            self.log.error('Error executing List Match:',
                           str(ret['Value'][0]) + '\n'.join(ret['Value'][1:3]))
            self.__sendErrorMail(rb, 'List Match', cmd, ret, proxy)
            return False
        self.log.info('List Match Execution Time: %.2f for TaskQueue %d' %
                      ((time.time() - startTime), taskQueueID))

        stdout = ret['Value'][1]
        stderr = ret['Value'][2]
        # Keep only the lines that look like CE endpoints
        # TODO: the line has to be stripped from extra info
        availableCEs = [line for line in List.fromChar(stdout, '\n')
                        if re.search('/jobmanager-', line) or re.search('/cream-', line)]

        if not availableCEs:
            self.log.info('List-Match failed to find CEs for TaskQueue',
                          taskQueueID)
            self.log.info(stdout)
            self.log.info(stderr)
        else:
            self.log.debug('List-Match returns:',
                           str(ret['Value'][0]) + '\n'.join(ret['Value'][1:3]))
            self.log.info(
                'List-Match found %s CEs for TaskQueue' % len(availableCEs),
                taskQueueID)
            self.log.verbose(', '.join(availableCEs))

        return availableCEs

    def parseJobSubmitStdout(self, proxy, cmd, taskQueueID, rb):
        """
        Parse Job Submit stdout to return the pilot reference.

        :param proxy: proxy used to run the grid command
        :param cmd: command (list) to execute
        :param taskQueueID: TaskQueue id, used for logging
        :param rb: resource broker, used for error reporting
        :return: (glite_id, rb) tuple on success, False otherwise
        """
        start = time.time()
        self.log.verbose('Executing Job Submit for TaskQueue', taskQueueID)

        ret = executeGridCommand(proxy, cmd, self.gridEnv)

        if not ret['OK']:
            self.log.error('Failed to execute Job Submit:', ret['Message'])
            self.__sendErrorMail(rb, 'Job Submit', cmd, ret, proxy)
            return False
        if ret['Value'][0] != 0:
            self.log.error('Error executing Job Submit:',
                           str(ret['Value'][0]) + '\n'.join(ret['Value'][1:3]))
            self.__sendErrorMail(rb, 'Job Submit', cmd, ret, proxy)
            return False
        self.log.info('Job Submit Execution Time: %.2f for TaskQueue %d' %
                      ((time.time() - start), taskQueueID))

        stdout = ret['Value'][1]

        failed = 1
        # the rb parameter is deliberately re-derived from the submit output
        rb = ''
        glite_id = ''
        for line in List.fromChar(stdout, '\n'):
            # raw strings: "\S" is an invalid escape in a plain string literal
            m = re.search(r"(https:\S+)", line)
            if m:
                glite_id = m.group(1)
                if not rb:
                    m = re.search(r"https://(.+):.+", glite_id)
                    if m:  # guard: the host pattern may not match
                        rb = m.group(1)
                failed = 0
        if failed:
            self.log.error('Job Submit returns no Reference:',
                           str(ret['Value'][0]) + '\n'.join(ret['Value'][1:3]))
            return False

        self.log.info('Reference %s for TaskQueue %s' %
                      (glite_id, taskQueueID))

        return glite_id, rb

    def _writeJDL(self, filename, jdlList):
        """
        Write the JDL entries to `filename`, one per line.

        :param filename: destination file path
        :param jdlList: list of JDL lines to join with newlines
        :return: the filename on success, '' on failure
        """
        try:
            # context manager guarantees the handle is closed even when
            # the write itself raises (the original leaked it in that case)
            with open(filename, 'w') as f:
                f.write('\n'.join(jdlList))
        except Exception as x:
            self.log.exception(x)
            return ''

        return filename

    def __sendErrorMail(self, rb, name, command, result, proxy):
        """
        Handle an error with an RB/WMS:
         - check if the RB/WMS is still in use
          - remove the RB/WMS from the current list
          - if the RB/WMS is not yet in the failing cache:
            - add it to the cache (disables it for self.errorClearTime seconds)
            - send an error mail (alarm mail if it was already failing recently)
        """
        if rb in self.resourceBrokers:
            try:
                self.resourceBrokers.remove(rb)
                self.log.info('Removed RB from list', rb)
            except:
                # best effort: ignore if it was removed in the meantime
                pass
            if not self.__failingWMSCache.exists(rb):
                self.__failingWMSCache.add(
                    rb, self.errorClearTime)  # disable for 30 minutes
                mailAddress = self.errorMailAddress
                msg = ''
                if not result['OK']:
                    subject = "%s: timeout executing %s" % (rb, name)
                    msg += '\n%s' % result['Message']
                elif result['Value'][0] != 0:
                    # known transient server-side conditions: do not mail
                    if re.search('the server is temporarily drained',
                                 ' '.join(result['Value'][1:3])):
                        return
                    if re.search('System load is too high:',
                                 ' '.join(result['Value'][1:3])):
                        return
                    subject = "%s: error executing %s" % (rb, name)
                else:
                    # command succeeded: nothing to report
                    return
                msg += ' '.join(command)
                msg += '\nreturns: %s\n' % str(result['Value'][0]) + '\n'.join(
                    result['Value'][1:3])
                msg += '\nUsing Proxy:\n' + getProxyInfoAsString(
                    proxy)['Value']

                #msg += '\nUsing Proxy:\n' + gProxyManager.

                ticketTime = self.errorClearTime + self.errorTicketTime

                if self.__ticketsWMSCache.exists(rb):
                    mailAddress = self.alarmMailAddress
                    # the RB was already detected failing a short time ago
                    msg = 'Submit GGUS Ticket for this error if not already opened\n' + \
                                 'It has been failing at least for %s hours\n' % ( ticketTime / 60 / 60 ) + msg
                else:
                    self.__ticketsWMSCache.add(rb, ticketTime)

                if mailAddress:
                    result = NotificationClient().sendMail(
                        mailAddress,
                        subject,
                        msg,
                        fromAddress=self.mailFromAddress)
                    if not result['OK']:
                        self.log.error("Mail could not be sent")

        return
# Esempio n. 11
# 0
class Limiter(object):
  """
  Computes negative matching conditions for sites from the Operations CS
  sections JobScheduling/RunningLimit (per-site running-job limits) and
  JobScheduling/MatchingDelay (per-site matching delays), caching both the
  CS data and the computed conditions.
  """

  def __init__(self, jobDB=None, opsHelper=None):
    """ Constructor
    """
    self.__runningLimitSection = "JobScheduling/RunningLimit"
    self.__matchingDelaySection = "JobScheduling/MatchingDelay"
    # cache of limit dictionaries extracted from the CS, keyed by section
    self.csDictCache = DictCache()
    # cache of computed conditions and of per-site running counters
    self.condCache = DictCache()
    # per-site DictCache of (attName, attValue) delay counters
    self.delayMem = {}

    if jobDB:
      self.jobDB = jobDB
    else:
      self.jobDB = JobDB()

    self.log = gLogger.getSubLogger("Limiter")

    if opsHelper:
      self.__opsHelper = opsHelper
    else:
      self.__opsHelper = Operations()

  def getNegativeCond(self):
    """ Get negative condition for ALL sites

        :return: list of per-site negative condition dicts, each including
                 a 'Site' key; cached for 10 seconds under "GLOBAL"
    """
    orCond = self.condCache.get("GLOBAL")
    if orCond:
      return orCond
    negCond = {}
    # Run Limit
    result = self.__opsHelper.getSections(self.__runningLimitSection)
    sites = []
    if result['OK']:
      sites = result['Value']
    for siteName in sites:
      result = self.__getRunningCondition(siteName)
      if not result['OK']:
        continue
      data = result['Value']
      if data:
        negCond[siteName] = data
    # Delay limit
    result = self.__opsHelper.getSections(self.__matchingDelaySection)
    sites = []
    if result['OK']:
      sites = result['Value']
    for siteName in sites:
      result = self.__getDelayCondition(siteName)
      if not result['OK']:
        continue
      data = result['Value']
      if not data:
        continue
      if siteName in negCond:
        negCond[siteName] = self.__mergeCond(negCond[siteName], data)
      else:
        negCond[siteName] = data
    orCond = []
    for siteName in negCond:
      negCond[siteName]['Site'] = siteName
      orCond.append(negCond[siteName])
    # cache the computed global condition for 10 seconds
    self.condCache.add("GLOBAL", 10, orCond)
    return orCond

  def getNegativeCondForSite(self, siteName):
    """ Generate a negative query based on the limits set on the site
    """
    # Check if Limits are imposed onto the site
    negativeCond = {}
    if self.__opsHelper.getValue("JobScheduling/CheckJobLimits", True):
      result = self.__getRunningCondition(siteName)
      if result['OK']:
        negativeCond = result['Value']
      self.log.verbose('Negative conditions for site',
                       '%s after checking limits are: %s' % (siteName, str(negativeCond)))

    if self.__opsHelper.getValue("JobScheduling/CheckMatchingDelay", True):
      result = self.__getDelayCondition(siteName)
      if result['OK']:
        delayCond = result['Value']
        self.log.verbose('Negative conditions for site',
                         '%s after delay checking are: %s' % (siteName, str(delayCond)))
        negativeCond = self.__mergeCond(negativeCond, delayCond)

    if negativeCond:
      self.log.info('Negative conditions for site',
                    '%s are: %s' % (siteName, str(negativeCond)))

    return negativeCond

  def __mergeCond(self, negCond, addCond):
    """ Merge two negative dicts

        Values from addCond are appended to negCond's lists (mutating and
        returning negCond), skipping duplicates.
    """
    # Merge both negative dicts
    for attr in addCond:
      if attr not in negCond:
        negCond[attr] = []
      for value in addCond[attr]:
        if value not in negCond[attr]:
          negCond[attr].append(value)
    return negCond

  def __extractCSData(self, section):
    """ Extract limiting information from the CS in the form:
        { 'JobType' : { 'Merge' : 20, 'MCGen' : 1000 } }

        Results are cached for 300 seconds per section.
    """
    stuffDict = self.csDictCache.get(section)
    if stuffDict:
      return S_OK(stuffDict)

    result = self.__opsHelper.getSections(section)
    if not result['OK']:
      return result
    attribs = result['Value']
    stuffDict = {}
    for attName in attribs:
      result = self.__opsHelper.getOptionsDict("%s/%s" % (section, attName))
      if not result['OK']:
        return result
      attLimits = result['Value']
      try:
        # CS values arrive as strings; limits must be integers
        attLimits = dict([(k, int(attLimits[k])) for k in attLimits])
      except Exception as excp:
        errMsg = "%s/%s has to contain numbers: %s" % (section, attName, str(excp))
        self.log.error(errMsg)
        return S_ERROR(errMsg)
      stuffDict[attName] = attLimits

    self.csDictCache.add(section, 300, stuffDict)
    return S_OK(stuffDict)

  def __getRunningCondition(self, siteName):
    """ Get extra conditions allowing site throttling
    """
    siteSection = "%s/%s" % (self.__runningLimitSection, siteName)
    result = self.__extractCSData(siteSection)
    if not result['OK']:
      return result
    limitsDict = result['Value']
    # limitsDict is something like { 'JobType' : { 'Merge' : 20, 'MCGen' : 1000 } }
    if not limitsDict:
      return S_OK({})
    # Check if the site exceeding the given limits
    negCond = {}
    for attName in limitsDict:
      if attName not in self.jobDB.jobAttributeNames:
        self.log.error("Attribute does not exist",
                       "(%s). Check the job limits" % attName)
        continue
      cK = "Running:%s:%s" % (siteName, attName)
      data = self.condCache.get(cK)
      if not data:
        # count jobs per attribute value at this site; cached for 10 seconds
        result = self.jobDB.getCounters(
            'Jobs', [attName], {
                'Site': siteName, 'Status': [
                    'Running', 'Matched', 'Stalled']})
        if not result['OK']:
          return result
        data = result['Value']
        data = dict([(k[0][attName], k[1]) for k in data])
        self.condCache.add(cK, 10, data)
      for attValue in limitsDict[attName]:
        limit = limitsDict[attName][attValue]
        running = data.get(attValue, 0)
        if running >= limit:
          self.log.verbose('Job Limit imposed',
                           'at %s on %s/%s=%d, %d jobs already deployed' % (siteName,
                                                                            attName, attValue, limit, running))
          if attName not in negCond:
            negCond[attName] = []
          negCond[attName].append(attValue)
    # negCond is something like : {'JobType': ['Merge']}
    return S_OK(negCond)

  def updateDelayCounters(self, siteName, jid):
    """ Record matching delays for the attributes of job `jid` at `siteName`,
        based on the JobScheduling/MatchingDelay CS section.
    """
    # Get the info from the CS
    siteSection = "%s/%s" % (self.__matchingDelaySection, siteName)
    result = self.__extractCSData(siteSection)
    if not result['OK']:
      return result
    delayDict = result['Value']
    # limitsDict is something like { 'JobType' : { 'Merge' : 20, 'MCGen' : 1000 } }
    if not delayDict:
      return S_OK()
    attNames = []
    for attName in delayDict:
      if attName not in self.jobDB.jobAttributeNames:
        self.log.error("Attribute does not exist in the JobDB. Please fix it!",
                       "(%s)" % attName)
      else:
        attNames.append(attName)
    result = self.jobDB.getJobAttributes(jid, attNames)
    if not result['OK']:
      self.log.error("Error while retrieving attributes",
                     "coming from %s: %s" % (siteSection, result['Message']))
      return result
    atts = result['Value']
    # Create the DictCache if not there
    if siteName not in self.delayMem:
      self.delayMem[siteName] = DictCache()
    # Update the counters
    delayCounter = self.delayMem[siteName]
    for attName in atts:
      attValue = atts[attName]
      if attValue in delayDict[attName]:
        delayTime = delayDict[attName][attValue]
        self.log.notice("Adding delay for %s/%s=%s of %s secs" % (siteName, attName,
                                                                  attValue, delayTime))
        delayCounter.add((attName, attValue), delayTime)
    return S_OK()

  def __getDelayCondition(self, siteName):
    """ Get extra conditions allowing matching delay

        Any (attName, attValue) key still alive in the site's delay cache
        becomes a negative condition.
    """
    if siteName not in self.delayMem:
      return S_OK({})
    lastRun = self.delayMem[siteName].getKeys()
    negCond = {}
    for attName, attValue in lastRun:
      if attName not in negCond:
        negCond[attName] = []
      negCond[attName].append(attValue)
    return S_OK(negCond)
# Esempio n. 12
# 0
class Cache:
    """
    Basic cache class.

    WARNING: None of its methods is thread safe. Acquire / Release lock when
    using them !
    """
    def __init__(self, lifeTime, updateFunc):
        """
        Constructor.

        :Parameters:
          **lifeTime** - `int`
            Lifetime of the elements in the cache ( seconds ! )
          **updateFunc** - `function`
            This function MUST return a S_OK | S_ERROR object. In the case of the first,
            its value must be a dictionary.

        """

        # Randomly extend the lifetime by up to 20% so that caches created
        # together (e.g. thousands of jobs starting at once) do not all
        # expire at the same moment.
        lifeTimeJitter = 0.2 * random.random()

        self.log = gLogger.getSubLogger(self.__class__.__name__)

        self.__lifeTime = int(lifeTime * (1 + lifeTimeJitter))
        self.__updateFunc = updateFunc
        # Records returned from the cache must be valid at least 30 seconds.
        self.__validSeconds = 30

        # Internal cache object and its named lock
        self.__cache = DictCache()
        self.__cacheLock = LockRing()
        self.__cacheLock.getLock(self.__class__.__name__)

    def cacheKeys(self):
        """
        Cache keys getter.

        :returns: keys valid for at least twice the validity period of an
            element. The matching logic first lists the keys and then fetches
            each one with validity T, so elements right at the T limit would
            otherwise expire between the two steps.
        """
        return self.__cache.getKeys(validSeconds=self.__validSeconds * 2)

    def acquireLock(self):
        """
        Acquires Cache lock
        """
        self.__cacheLock.acquire(self.__class__.__name__)

    def releaseLock(self):
        """
        Releases Cache lock
        """
        self.__cacheLock.release(self.__class__.__name__)

    def get(self, cacheKeys):
        """
        Fetch every key in `cacheKeys` from the cache. Returns S_OK with a
        key/value dictionary when all keys are present and valid; returns
        S_ERROR as soon as one key is missing or expired.

        :Parameters:
          **cacheKeys** - `list`
            list of keys to be extracted from the cache

        :return: S_OK | S_ERROR
        """

        found = {}
        for key in cacheKeys:
            row = self.__cache.get(key, validSeconds=self.__validSeconds)
            if not row:
                return S_ERROR("Cannot get %s" % str(key))
            found[key] = row
        return S_OK(found)

    def check(self, cacheKeys, vO):
        """
        Modified get() method. For each 'flattened' key (no VO component),
        first tries the key extended with 'all', then extended with the given
        vO. Returns S_OK mapping the matched (extended) keys to their rows;
        returns S_ERROR as soon as neither variant resolves.

        :Parameters:
          **cacheKeys** - `list`
            list of keys to be extracted from the cache

        :return: S_OK | S_ERROR
        """

        found = {}
        for key in cacheKeys:
            extendedKey = key + ("all", )
            row = self.__cache.get(extendedKey,
                                   validSeconds=self.__validSeconds)
            if not row:
                extendedKey = key + (vO, )
                row = self.__cache.get(extendedKey,
                                       validSeconds=self.__validSeconds)
            if not row:
                return S_ERROR(
                    'Cannot get extended %s (neither for VO = %s nor for "all" Vos)'
                    % (str(key), vO))
            found[extendedKey] = row
        return S_OK(found)

    def refreshCache(self):
        """
        Purges the cache and gets fresh data from the update function.

        :return: S_OK | S_ERROR. If the first, its content is the new cache.
        """

        self.log.verbose("refreshing...")

        self.__cache.purgeAll()

        freshData = self.__updateFunc()
        if not freshData["OK"]:
            self.log.error(freshData["Message"])
            return freshData

        refreshed = self.__updateCache(freshData["Value"])

        self.log.verbose("refreshed")

        return refreshed

    def __updateCache(self, newCache):
        """
        Inserts every entry of `newCache` into the internal cache with a
        lifetime of <self.__lifeTime> seconds.

        :Parameters:
          **newCache** - `dict`
            dictionary containing a new cache

        :return: S_OK wrapping the newCache argument.
        """

        for key, value in newCache.items():
            self.__cache.add(key, self.__lifeTime, value=value)

        # We assume nothing fails while inserting into the cache; there is
        # no apparent reason to suspect that piece of code.
        return S_OK(newCache)
# Esempio n. 13
# 0
class GridPilotDirector( PilotDirector ):
  """
    Base Grid PilotDirector class
    Derived classes must declare:
      self.Middleware: It must correspond to the string before "PilotDirector".
        (For proper naming of the logger)
      self.ResourceBrokers: list of Brokers used by the Director.
        (For proper error reporting)
  """
  def __init__( self, submitPool ):
    """
     Define some defaults and call parent __init__
    """
    self.gridEnv = GRIDENV

    self.cpuPowerRef = CPU_POWER_REF
    self.requirements = REQUIREMENTS
    self.rank = RANK
    self.fuzzyRank = FUZZY_RANK

    # caches used to temporarily disable failing RB/WMS endpoints
    self.__failingWMSCache = DictCache()
    self.__ticketsWMSCache = DictCache()
    self.__listMatchWMSCache = DictCache()

    PilotDirector.__init__( self, submitPool )

  def configure( self, csSection, submitPool ):
    """
     Here goes common configuration for all Grid PilotDirectors
    """
    PilotDirector.configure( self, csSection, submitPool )
    self.reloadConfiguration( csSection, submitPool )

    # Drop any RB recently flagged as failing from the working list
    self.__failingWMSCache.purgeExpired()
    self.__ticketsWMSCache.purgeExpired()
    for rb in self.__failingWMSCache.getKeys():
      if rb in self.resourceBrokers:
        try:
          self.resourceBrokers.remove( rb )
        except:
          pass

    self.resourceBrokers = List.randomize( self.resourceBrokers )

    if self.gridEnv:
      self.log.info( ' GridEnv:        ', self.gridEnv )
    if self.resourceBrokers:
      self.log.info( ' ResourceBrokers:', ', '.join( self.resourceBrokers ) )

  def configureFromSection( self, mySection ):
    """
      reload from CS
    """
    PilotDirector.configureFromSection( self, mySection )

    self.gridEnv = gConfig.getValue( mySection + '/GridEnv', self.gridEnv )
    if not self.gridEnv:
      # No specific option found, try a general one
      setup = gConfig.getValue( '/DIRAC/Setup', '' )
      if setup:
        instance = gConfig.getValue( '/DIRAC/Setups/%s/WorkloadManagement' % setup, '' )
        if instance:
          self.gridEnv = gConfig.getValue( '/Systems/WorkloadManagement/%s/GridEnv' % instance, '' )

    self.resourceBrokers = gConfig.getValue( mySection + '/ResourceBrokers'      , self.resourceBrokers )

    self.cpuPowerRef = gConfig.getValue( mySection + '/CPUPowerRef'           , self.cpuPowerRef )
    self.requirements = gConfig.getValue( mySection + '/Requirements'         , self.requirements )
    self.rank = gConfig.getValue( mySection + '/Rank'                 , self.rank )
    self.fuzzyRank = gConfig.getValue( mySection + '/FuzzyRank'            , self.fuzzyRank )

  def _submitPilots( self, workDir, taskQueueDict, pilotOptions, pilotsToSubmit,
                     ceMask, submitPrivatePilot, privateTQ, proxy, pilotsPerJob ):
    """
      This method does the actual pilot submission to the Grid RB
      The logic is as follows:
      - If there are no available RB it return error
      - If there is no VOMS extension in the proxy, return error
      - It creates a temp directory
      - Prepare a JDL
        it has some part common to gLite and LCG (the payload description)
        it has some part specific to each middleware
    """
    taskQueueID = taskQueueDict['TaskQueueID']
    # ownerDN = taskQueueDict['OwnerDN']
    credDict = proxy.getCredentials()['Value']
    ownerDN = credDict['identity']
    ownerGroup = credDict[ 'group' ]

    if not self.resourceBrokers:
      # Since we can exclude RBs from the list, it may become empty
      return S_ERROR( ERROR_RB )

    # Need to get VOMS extension for the later interactions with WMS
    ret = gProxyManager.getVOMSAttributes( proxy )
    if not ret['OK']:
      self.log.error( ERROR_VOMS, ret['Message'] )
      return S_ERROR( ERROR_VOMS )
    if not ret['Value']:
      return S_ERROR( ERROR_VOMS )

    workingDirectory = tempfile.mkdtemp( prefix = 'TQ_%s_' % taskQueueID, dir = workDir )
    self.log.verbose( 'Using working Directory:', workingDirectory )

    # Write JDL
    retDict = self._prepareJDL( taskQueueDict, workingDirectory, pilotOptions, pilotsPerJob,
                                ceMask, submitPrivatePilot, privateTQ )
    jdl = retDict['JDL']
    pilotRequirements = retDict['Requirements']
    rb = retDict['RB']
    if not jdl:
      try:
        shutil.rmtree( workingDirectory )
      except:
        pass
      return S_ERROR( ERROR_JDL )

    # Check that there are available queues for the Job:
    if self.enableListMatch:
      availableCEs = []
      now = Time.dateTime()
      availableCEs = self.listMatchCache.get( pilotRequirements )
      if availableCEs is None:
        availableCEs = self._listMatch( proxy, jdl, taskQueueID, rb )
        if availableCEs != False:
          self.log.verbose( 'LastListMatch', now )
          self.log.verbose( 'AvailableCEs ', availableCEs )
          self.listMatchCache.add( pilotRequirements, self.listMatchDelay * 60,
                                   value = availableCEs )                      # it is given in minutes
      if not availableCEs:
        try:
          shutil.rmtree( workingDirectory )
        except:
          pass
        return S_ERROR( ERROR_CE + ' TQ: %d' % taskQueueID )

    # Now we are ready for the actual submission, so

    self.log.verbose( 'Submitting Pilots for TaskQueue', taskQueueID )

    # FIXME: what is this?? If it goes on the super class, it is doomed
    submitRet = self._submitPilot( proxy, pilotsPerJob, jdl, taskQueueID, rb )
    try:
      shutil.rmtree( workingDirectory )
    except:
      pass
    if not submitRet:
      return S_ERROR( 'Pilot Submission Failed for TQ %d ' % taskQueueID )
    # pilotReference, resourceBroker = submitRet

    submittedPilots = 0

    if pilotsPerJob != 1 and len( submitRet ) != pilotsPerJob:
      # Parametric jobs are used
      for pilotReference, resourceBroker in submitRet:
        pilotReference = self._getChildrenReferences( proxy, pilotReference, taskQueueID )
        submittedPilots += len( pilotReference )
        pilotAgentsDB.addPilotTQReference( pilotReference, taskQueueID, ownerDN,
                      ownerGroup, resourceBroker, self.gridMiddleware,
                      pilotRequirements )
    else:
      for pilotReference, resourceBroker in submitRet:
        pilotReference = [pilotReference]
        submittedPilots += len( pilotReference )
        pilotAgentsDB.addPilotTQReference( pilotReference, taskQueueID, ownerDN,
                      ownerGroup, resourceBroker, self.gridMiddleware, pilotRequirements )

    # add some sleep here
    time.sleep( 0.1 * submittedPilots )

    if pilotsToSubmit > pilotsPerJob:
      # Additional submissions are necessary, need to get a new token and iterate.
      pilotsToSubmit -= pilotsPerJob
      result = gProxyManager.requestToken( ownerDN, ownerGroup, max( pilotsToSubmit, self.maxJobsInFillMode ) )
      if not result[ 'OK' ]:
        self.log.error( ERROR_TOKEN, result['Message'] )
        result = S_ERROR( ERROR_TOKEN )
        result['Value'] = submittedPilots
        return result
      ( token, numberOfUses ) = result[ 'Value' ]
      for option in pilotOptions:
        if option.find( '-o /Security/ProxyToken=' ) == 0:
          pilotOptions.remove( option )
      pilotOptions.append( '-o /Security/ProxyToken=%s' % token )
      pilotsPerJob = max( 1, min( pilotsPerJob, int( numberOfUses / self.maxJobsInFillMode ) ) )
      result = self._submitPilots( workDir, taskQueueDict, pilotOptions,
                                   pilotsToSubmit, ceMask,
                                   submitPrivatePilot, privateTQ,
                                   proxy, pilotsPerJob )
      if not result['OK']:
        if 'Value' not in result:
          result['Value'] = 0
        result['Value'] += submittedPilots
        return result
      submittedPilots += result['Value']

    return S_OK( submittedPilots )

  def _prepareJDL( self, taskQueueDict, workingDirectory, pilotOptions, pilotsToSubmit, ceMask, submitPrivatePilot, privateTQ ):
    """
      This method should be overridden in a subclass
    """
    self.log.error( '_prepareJDL() method should be implemented in a subclass' )
    sys.exit()

  def _JobJDL( self, taskQueueDict, pilotOptions, ceMask ):
    """
     The Job JDL is the same for LCG and GLite
    """
    pilotJDL = 'Executable     = "%s";\n' % os.path.basename( self.pilot )
    executable = self.pilot

    pilotJDL += 'Arguments     = "%s";\n' % ' '.join( pilotOptions )

    pilotJDL += 'CPUTimeRef    = %s;\n' % taskQueueDict['CPUTime']

    pilotJDL += 'CPUPowerRef   = %s;\n' % self.cpuPowerRef

    pilotJDL += """CPUWorkRef    = real( CPUTimeRef * CPUPowerRef );

Lookup        = "CPUScalingReferenceSI00=*";
cap = isList( other.GlueCECapability ) ? other.GlueCECapability : { "dummy" };
i0 = regexp( Lookup, cap[0] ) ? 0 : undefined;
i1 = isString( cap[1] ) && regexp( Lookup, cap[1] ) ? 1 : i0;
i2 = isString( cap[2] ) && regexp( Lookup, cap[2] ) ? 2 : i1;
i3 = isString( cap[3] ) && regexp( Lookup, cap[3] ) ? 3 : i2;
i4 = isString( cap[4] ) && regexp( Lookup, cap[4] ) ? 4 : i3;
i5 = isString( cap[5] ) && regexp( Lookup, cap[5] ) ? 5 : i4;
index = isString( cap[6] ) && regexp( Lookup, cap[6] ) ? 6 : i5;
i = isUndefined( index ) ? 0 : index;

QueuePowerRef = real( ! isUndefined( index ) ? int( substr( cap[i], size( Lookup ) - 1 ) ) : other.GlueHostBenchmarkSI00 );
QueueTimeRef  = real( other.GlueCEPolicyMaxCPUTime * 60 );
QueueWorkRef  = QueuePowerRef * QueueTimeRef;
"""

    requirements = list( self.requirements )
    if 'GridCEs' in taskQueueDict and taskQueueDict['GridCEs']:
      # if there an explicit Grig CE requested by the TQ, remove the Ranking requirement
      for req in self.requirements:
        if req.strip().lower()[:6] == 'rank >':
          requirements.remove( req )

    requirements.append( 'QueueWorkRef > CPUWorkRef' )

    siteRequirements = '\n || '.join( [ 'other.GlueCEInfoHostName == "%s"' % s for s in ceMask ] )
    requirements.append( "( %s\n )" % siteRequirements )

    pilotRequirements = '\n && '.join( requirements )

    pilotJDL += 'pilotRequirements  = %s;\n' % pilotRequirements

    pilotJDL += 'Rank          = %s;\n' % self.rank
    pilotJDL += 'FuzzyRank     = %s;\n' % self.fuzzyRank
    pilotJDL += 'StdOutput     = "%s";\n' % outputSandboxFiles[0]
    pilotJDL += 'StdError      = "%s";\n' % outputSandboxFiles[1]

    pilotJDL += 'InputSandbox  = { "%s" };\n' % '", "'.join( [ self.install, executable ] + self.extraModules )

    pilotJDL += 'OutputSandbox = { %s };\n' % ', '.join( [ '"%s"' % f for f in outputSandboxFiles ] )

    self.log.verbose( pilotJDL )

    return ( pilotJDL, pilotRequirements )


  def parseListMatchStdout( self, proxy, cmd, taskQueueID, rb ):
    """
      Parse List Match stdout to return list of matched CE's
    """
    self.log.verbose( 'Executing List Match for TaskQueue', taskQueueID )

    start = time.time()
    ret = executeGridCommand( proxy, cmd, self.gridEnv )

    if not ret['OK']:
      self.log.error( 'Failed to execute List Match:', ret['Message'] )
      self.__sendErrorMail( rb, 'List Match', cmd, ret, proxy )
      return False
    if ret['Value'][0] != 0:
      self.log.error( 'Error executing List Match:', str( ret['Value'][0] ) + '\n'.join( ret['Value'][1:3] ) )
      self.__sendErrorMail( rb, 'List Match', cmd, ret, proxy )
      return False
    self.log.info( 'List Match Execution Time: %.2f for TaskQueue %d' % ( ( time.time() - start ), taskQueueID ) )

    stdout = ret['Value'][1]
    stderr = ret['Value'][2]
    availableCEs = []
    # Parse std.out
    for line in List.fromChar( stdout, '\n' ):
      if re.search( '/jobmanager-', line ) or re.search( '/cream-', line ):
        # TODO: the line has to be stripped from extra info
        availableCEs.append( line )

    if not availableCEs:
      self.log.info( 'List-Match failed to find CEs for TaskQueue', taskQueueID )
      self.log.info( stdout )
      self.log.info( stderr )
    else:
      self.log.debug( 'List-Match returns:', str( ret['Value'][0] ) + '\n'.join( ret['Value'][1:3] ) )
      self.log.info( 'List-Match found %s CEs for TaskQueue' % len( availableCEs ), taskQueueID )
      self.log.verbose( ', '.join( availableCEs ) )


    return availableCEs

  def parseJobSubmitStdout( self, proxy, cmd, taskQueueID, rb ):
    """
      Parse Job Submit stdout to return pilot reference

      :return: ( glite_id, rb ) tuple on success, False otherwise
    """
    start = time.time()
    self.log.verbose( 'Executing Job Submit for TaskQueue', taskQueueID )

    ret = executeGridCommand( proxy, cmd, self.gridEnv )

    if not ret['OK']:
      self.log.error( 'Failed to execute Job Submit:', ret['Message'] )
      self.__sendErrorMail( rb, 'Job Submit', cmd, ret, proxy )
      return False
    if ret['Value'][0] != 0:
      self.log.error( 'Error executing Job Submit:', str( ret['Value'][0] ) + '\n'.join( ret['Value'][1:3] ) )
      self.__sendErrorMail( rb, 'Job Submit', cmd, ret, proxy )
      return False
    self.log.info( 'Job Submit Execution Time: %.2f for TaskQueue %d' % ( ( time.time() - start ), taskQueueID ) )

    stdout = ret['Value'][1]

    failed = 1
    # the rb parameter is deliberately re-derived from the submit output
    rb = ''
    glite_id = ''
    for line in List.fromChar( stdout, '\n' ):
      # raw strings: "\S" is an invalid escape in a plain string literal
      m = re.search( r"(https:\S+)", line )
      if ( m ):
        glite_id = m.group( 1 )
        if not rb:
          m = re.search( r"https://(.+):.+", glite_id )
          if m:  # guard: the host pattern may not match
            rb = m.group( 1 )
        failed = 0
    if failed:
      self.log.error( 'Job Submit returns no Reference:', str( ret['Value'][0] ) + '\n'.join( ret['Value'][1:3] ) )
      return False

    self.log.info( 'Reference %s for TaskQueue %s' % ( glite_id, taskQueueID ) )

    return glite_id, rb

  def _writeJDL( self, filename, jdlList ):
    """
      Write the JDL entries to `filename`, one per line.

      :return: the filename on success, '' on failure
    """
    try:
      # context manager guarantees the handle is closed even if write fails
      with open( filename, 'w' ) as f:
        f.write( '\n'.join( jdlList ) )
    except Exception as x:
      self.log.exception( x )
      return ''

    return filename

  def __sendErrorMail( self, rb, name, command, result, proxy ):
    """
     In case of error with an RB/WMS:
     - check if the RB/WMS is still in use
      - remove the RB/WMS from the current list
      - if the RB/WMS is not yet in the failing cache:
        - add it to the cache (disables it temporarily)
        - send an error mail (alarm mail if it was already failing recently)
    """
    if rb in self.resourceBrokers:
      try:
        self.resourceBrokers.remove( rb )
        self.log.info( 'Removed RB from list', rb )
      except:
        pass
      if not self.__failingWMSCache.exists( rb ):
        self.__failingWMSCache.add( rb, self.errorClearTime ) # disable for 30 minutes
        mailAddress = self.errorMailAddress
        msg = ''
        if not result['OK']:
          subject = "%s: timeout executing %s" % ( rb, name )
          msg += '\n%s' % result['Message']
        elif result['Value'][0] != 0:
          # known transient server-side conditions: do not mail
          if re.search( 'the server is temporarily drained', ' '.join( result['Value'][1:3] ) ):
            return
          if re.search( 'System load is too high:', ' '.join( result['Value'][1:3] ) ):
            return
          subject = "%s: error executing %s" % ( rb, name )
        else:
          return
        msg += ' '.join( command )
        msg += '\nreturns: %s\n' % str( result['Value'][0] ) + '\n'.join( result['Value'][1:3] )
        msg += '\nUsing Proxy:\n' + getProxyInfoAsString( proxy )['Value']

        #msg += '\nUsing Proxy:\n' + gProxyManager.

        ticketTime = self.errorClearTime + self.errorTicketTime

        if self.__ticketsWMSCache.exists( rb ):
          mailAddress = self.alarmMailAddress
          # the RB was already detected failing a short time ago
          msg = 'Submit GGUS Ticket for this error if not already opened\n' + \
                       'It has been failing at least for %s hours\n' % ( ticketTime / 60 / 60 ) + msg
        else:
          self.__ticketsWMSCache.add( rb, ticketTime )

        if mailAddress:
          result = NotificationClient().sendMail( mailAddress, subject, msg, fromAddress = self.mailFromAddress )
          if not result[ 'OK' ]:
            self.log.error( "Mail could not be sent" )

    return
# Esempio n. 14
# 0
class StorageUsageAgent(AgentModule):
    ''' .. class:: StorageUsageAgent

    Agent that recursively walks a File Catalog namespace starting from a
    configured base directory, collects per-directory file counts, total
    sizes and per-SE usage, publishes them to the StorageUsageDB (directly
    or over RPC) and removes directories found empty.

  :param FileCatalog catalog: FileCatalog instance
  :param mixed storageUsage: StorageUsageDB instance or its rpc client
  :param int pollingTime: polling time
  :param int activePeriod: active period in weeks
  :param threading.Lock dataLock: data lock
  :param threading.Lock replicaListLock: replica list lock
  :param DictCache proxyCache: creds cache
  '''
    catalog = None
    storageUsage = None
    pollingTime = 43200
    activePeriod = 0
    dataLock = None  # threading.Lock()
    replicaListLock = None  # threading.Lock()
    proxyCache = None  # DictCache()
    enableStartupSleep = True  # Enable a random sleep so not all the user agents start together

    def __init__(self, *args, **kwargs):
        ''' c'tor: set up counters, catalog client, locks, caches and the
        publication queues.
    '''
        AgentModule.__init__(self, *args, **kwargs)

        self.__baseDir = '/lhcb'
        self.__baseDirLabel = "_".join(List.fromChar(self.__baseDir, "/"))
        self.__ignoreDirsList = []
        self.__keepDirLevels = 4

        # NOTE(review): long()/xrange()/file() used in this module make it
        # Python 2 only.
        self.__startExecutionTime = long(time.time())
        self.__dirExplorer = DirectoryExplorer(reverse=True)
        self.__processedDirs = 0
        self.__directoryOwners = {}
        self.catalog = FileCatalog()
        self.__maxToPublish = self.am_getOption('MaxDirectories', 5000)
        if self.am_getOption('DirectDB', False):
            self.storageUsage = StorageUsageDB()
        else:
            # Set a timeout of 0.1 seconds per directory (factor 5 margin)
            self.storageUsage = RPCClient('DataManagement/StorageUsage',
                                          timeout=self.am_getOption(
                                              'Timeout',
                                              int(self.__maxToPublish * 0.1)))
        self.activePeriod = self.am_getOption('ActivePeriod',
                                              self.activePeriod)
        self.dataLock = threading.Lock()
        self.replicaListLock = threading.Lock()
        # removeProxy is the DictCache delete hook: cached proxy files are
        # unlinked when their entry expires.
        self.proxyCache = DictCache(removeProxy)
        self.__noProxy = set()
        self.__catalogType = None
        self.__recalculateUsage = Operations().getValue(
            'DataManagement/RecalculateDirSize', False)
        self.enableStartupSleep = self.am_getOption('EnableStartupSleep',
                                                    self.enableStartupSleep)
        self.__publishDirQueue = {}
        self.__dirsToPublish = {}
        self.__replicaFilesUsed = set()
        self.__replicaListFilesDir = ""

    def initialize(self):
        ''' agent initialisation '''

        self.am_setOption("PollingTime", self.pollingTime)

        if self.enableStartupSleep:
            # Random stagger so concurrent agent instances do not all hit
            # the catalog at the same moment.
            rndSleep = random.randint(1, self.pollingTime)
            self.log.info("Sleeping for %s seconds" % rndSleep)
            time.sleep(rndSleep)

        # This sets the Default Proxy to used as that defined under
        # /Operations/Shifter/DataManager
        # the shifterProxy option in the Configuration can be used to change this default.
        self.am_setOption('shifterProxy', 'DataManager')

        return S_OK()

    def __writeReplicasListFiles(self, dirPathList):
        ''' dump replicas list to files '''
        self.replicaListLock.acquire()
        try:
            self.log.info("Dumping replicas for %s dirs" % len(dirPathList))
            result = self.catalog.getDirectoryReplicas(dirPathList)
            if not result['OK']:
                self.log.error("Could not get directory replicas",
                               "%s -> %s" % (dirPathList, result['Message']))
                return result
            resData = result['Value']
            filesOpened = {}
            for dirPath in dirPathList:
                if dirPath in result['Value']['Failed']:
                    self.log.error(
                        "Could not get directory replicas",
                        "%s -> %s" % (dirPath, resData['Failed'][dirPath]))
                    continue
                dirData = resData['Successful'][dirPath]
                for lfn in dirData:
                    for seName in dirData[lfn]:
                        if seName not in filesOpened:
                            filePath = os.path.join(
                                self.__replicaListFilesDir,
                                "replicas.%s.%s.filling" %
                                (seName, self.__baseDirLabel))
                            # Check if file is opened and if not open it
                            # NOTE(review): this inner check is always true
                            # (same condition as the enclosing if).
                            if seName not in filesOpened:
                                # First use of this SE in the whole cycle
                                # truncates; later cycles append.
                                if seName not in self.__replicaFilesUsed:
                                    self.__replicaFilesUsed.add(seName)
                                    # NOTE(review): file() is a Python 2
                                    # builtin (use open() in Python 3).
                                    filesOpened[seName] = file(filePath, "w")
                                else:
                                    filesOpened[seName] = file(filePath, "a")
                        # seName file is opened. Write
                        filesOpened[seName].write("%s -> %s\n" %
                                                  (lfn, dirData[lfn][seName]))
            # Close the files
            for seName in filesOpened:
                filesOpened[seName].close()
            return S_OK()
        finally:
            self.replicaListLock.release()

    def __resetReplicaListFiles(self):
        ''' prepare directories for replica list files '''
        self.__replicaFilesUsed = set()
        self.__replicaListFilesDir = os.path.join(
            self.am_getOption("WorkDirectory"), "replicaLists")
        mkDir(self.__replicaListFilesDir)
        self.log.info("Replica Lists directory is %s" %
                      self.__replicaListFilesDir)

    def __replicaListFilesDone(self):
        ''' rotate replicas list files: delete *.old, current -> *.old,
        *.filling -> current '''
        self.replicaListLock.acquire()
        try:
            old = re.compile(r"^replicas\.([a-zA-Z0-9\-_]*)\.%s\.old$" %
                             self.__baseDirLabel)
            current = re.compile(r"^replicas\.([a-zA-Z0-9\-_]*)\.%s$" %
                                 self.__baseDirLabel)
            filling = re.compile(
                r"^replicas\.([a-zA-Z0-9\-_]*)\.%s\.filling$" %
                self.__baseDirLabel)
            # Delete old
            for fileName in os.listdir(self.__replicaListFilesDir):
                match = old.match(fileName)
                if match:
                    os.unlink(
                        os.path.join(self.__replicaListFilesDir, fileName))
            # Current -> old
            for fileName in os.listdir(self.__replicaListFilesDir):
                match = current.match(fileName)
                if match:
                    newFileName = "replicas.%s.%s.old" % (match.group(1),
                                                          self.__baseDirLabel)
                    self.log.info(
                        "Moving \n %s\n to \n %s" %
                        (os.path.join(self.__replicaListFilesDir, fileName),
                         os.path.join(self.__replicaListFilesDir,
                                      newFileName)))
                    os.rename(
                        os.path.join(self.__replicaListFilesDir, fileName),
                        os.path.join(self.__replicaListFilesDir, newFileName))
            # filling to current
            for fileName in os.listdir(self.__replicaListFilesDir):
                match = filling.match(fileName)
                if match:
                    newFileName = "replicas.%s.%s" % (match.group(1),
                                                      self.__baseDirLabel)
                    self.log.info(
                        "Moving \n %s\n to \n %s" %
                        (os.path.join(self.__replicaListFilesDir, fileName),
                         os.path.join(self.__replicaListFilesDir,
                                      newFileName)))
                    os.rename(
                        os.path.join(self.__replicaListFilesDir, fileName),
                        os.path.join(self.__replicaListFilesDir, newFileName))

            return S_OK()
        finally:
            self.replicaListLock.release()

    def __printSummary(self):
        ''' pretty print summary of the storage usage DB and feed the
        per-SE monitoring activities '''
        res = self.storageUsage.getStorageSummary()
        if res['OK']:
            self.log.notice("Storage Usage Summary")
            self.log.notice(
                "============================================================")
            self.log.notice(
                "%-40s %20s %20s" %
                ('Storage Element', 'Number of files', 'Total size'))

            for se in sorted(res['Value']):
                site = se.split('_')[0].split('-')[0]
                gMonitor.registerActivity("%s-used" % se,
                                          "%s usage" % se,
                                          "StorageUsage/%s usage" % site,
                                          "",
                                          gMonitor.OP_MEAN,
                                          bucketLength=600)
                gMonitor.registerActivity("%s-files" % se,
                                          "%s files" % se,
                                          "StorageUsage/%s files" % site,
                                          "Files",
                                          gMonitor.OP_MEAN,
                                          bucketLength=600)

            # NOTE(review): presumably gives gMonitor time to commit the
            # registrations before marks are added — TODO confirm.
            time.sleep(2)

            for se in sorted(res['Value']):
                usage = res['Value'][se]['Size']
                files = res['Value'][se]['Files']
                self.log.notice("%-40s %20s %20s" %
                                (se, str(files), str(usage)))
                gMonitor.addMark("%s-used" % se, usage)
                gMonitor.addMark("%s-files" % se, files)

    def execute(self):
        ''' execution in one cycle: walk the namespace from the base dir,
        publish usage, rotate replica files, purge outdated DB records '''
        self.__publishDirQueue = {}
        self.__dirsToPublish = {}
        self.__baseDir = self.am_getOption('BaseDirectory', '/lhcb')
        self.__baseDirLabel = "_".join(List.fromChar(self.__baseDir, "/"))
        self.__ignoreDirsList = self.am_getOption('Ignore', [])
        self.__keepDirLevels = self.am_getOption("KeepDirLevels", 4)

        self.__startExecutionTime = long(time.time())
        self.__dirExplorer = DirectoryExplorer(reverse=True)
        self.__resetReplicaListFiles()
        self.__noProxy = set()
        self.__processedDirs = 0
        self.__directoryOwners = {}

        self.__printSummary()

        self.__dirExplorer.addDir(self.__baseDir)
        self.log.notice("Initiating with %s as base directory." %
                        self.__baseDir)
        # Loop over all the directories and sub-directories
        totalIterTime = 0.0
        numIterations = 0.0
        iterMaxDirs = 100
        while self.__dirExplorer.isActive():
            startT = time.time()
            # Pull up to iterMaxDirs pending directories per query batch
            d2E = [
                self.__dirExplorer.getNextDir() for _i in xrange(iterMaxDirs)
                if self.__dirExplorer.isActive()
            ]
            self.__exploreDirList(d2E)
            iterTime = time.time() - startT
            totalIterTime += iterTime
            numIterations += len(d2E)
            self.log.verbose("Query took %.2f seconds for %s dirs" %
                             (iterTime, len(d2E)))
        self.log.verbose("Average query time: %2.f secs/dir" %
                         (totalIterTime / numIterations))

        # Publish remaining directories
        self.__publishData(background=False)

        # Move replica list files
        self.__replicaListFilesDone()

        # Clean records older than 1 day
        self.log.info("Finished recursive directory search.")

        if self.am_getOption("PurgeOutdatedRecords", True):
            elapsedTime = time.time() - self.__startExecutionTime
            # Keep records for at least twice the cycle length, and never
            # less than one day
            outdatedSeconds = max(
                max(self.am_getOption("PollingTime"), elapsedTime) * 2, 86400)
            result = self.storageUsage.purgeOutdatedEntries(
                self.__baseDir, long(outdatedSeconds), self.__ignoreDirsList)
            if not result['OK']:
                return result
            self.log.notice("Purged %s outdated records" % result['Value'])
        return S_OK()

    def __exploreDirList(self, dirList):
        ''' collect directory size for directory in :dirList: '''
        # Normalise dirList first
        dirList = [os.path.realpath(d) for d in dirList]
        self.log.notice("Retrieving info for %s dirs" % len(dirList))
        # For top directories, no files anyway, hence no need to get full size
        dirContents = {}
        failed = {}
        successfull = {}
        startTime = time.time()
        nbDirs = len(dirList)
        chunkSize = 10
        if self.__catalogType == 'DFC' or dirList == [self.__baseDir]:
            # Get the content of the directory as anyway this is needed
            for dirChunk in breakListIntoChunks(dirList, chunkSize):
                res = self.catalog.listDirectory(dirChunk, True, timeout=600)
                if not res['OK']:
                    failed.update(dict.fromkeys(dirChunk, res['Message']))
                else:
                    failed.update(res['Value']['Failed'])
                    dirContents.update(res['Value']['Successful'])
            self.log.info(
                'Time to retrieve content of %d directories: %.1f seconds' %
                (nbDirs, time.time() - startTime))
            for dirPath in failed:
                dirList.remove(dirPath)
        # We don't need to get the storage usage if there are no files...
        dirListSize = [
            d for d in dirList if dirContents.get(d, {}).get('Files')
        ]

        startTime1 = time.time()
        # __recalculateUsage enables to recompute the directory usage in case the internal table is wrong
        for args in [(d, True, self.__recalculateUsage)
                     for d in breakListIntoChunks(dirListSize, chunkSize)]:
            res = self.catalog.getDirectorySize(*args, timeout=600)
            if not res['OK']:
                failed.update(dict.fromkeys(args[0], res['Message']))
            else:
                failed.update(res['Value']['Failed'])
                successfull.update(res['Value']['Successful'])
        # Group failures by error string to log each reason only once
        errorReason = {}
        for dirPath in failed:
            error = str(failed[dirPath])
            errorReason.setdefault(error, []).append(dirPath)
        for error in errorReason:
            self.log.error(
                'Failed to get directory info',
                '- %s for:\n\t%s' % (error, '\n\t'.join(errorReason[error])))
        self.log.info('Time to retrieve size of %d directories: %.1f seconds' %
                      (len(dirListSize), time.time() - startTime1))
        for dirPath in [d for d in dirList if d not in failed]:
            metadata = successfull.get(dirPath, {})
            # Presence of 'SubDirs' distinguishes old-FC-style metadata from
            # the DFC flavour, which needs the listDirectory content as well.
            if 'SubDirs' in metadata:
                self.__processDir(dirPath, metadata)
            else:
                if not self.__catalogType:
                    self.log.info('Catalog type determined to be DFC')
                    self.__catalogType = 'DFC'
                self.__processDirDFC(dirPath, metadata, dirContents[dirPath])
        self.log.info('Time to process %d directories: %.1f seconds' %
                      (nbDirs, time.time() - startTime))
        notCommited = len(self.__publishDirQueue) + len(self.__dirsToPublish)
        self.log.notice(
            "%d dirs to be explored, %d done. %d not yet committed." %
            (self.__dirExplorer.getNumRemainingDirs(), self.__processedDirs,
             notCommited))

    def __processDirDFC(self, dirPath, metadata, subDirectories):
        ''' gets the list of subdirs that the DFC doesn't return, set the metadata like the FC
    and then call the same method as for the FC '''
        if 'SubDirs' not in subDirectories:
            self.log.error('No subdirectory item for directory', dirPath)
            return
        dirMetadata = {
            'Files': 0,
            'TotalSize': 0,
            'ClosedDirs': [],
            'SiteUsage': {}
        }
        if 'PhysicalSize' in metadata:
            dirMetadata['Files'] = metadata['LogicalFiles']
            dirMetadata['TotalSize'] = metadata['LogicalSize']
            dirMetadata['SiteUsage'] = metadata['PhysicalSize'].copy()
            dirMetadata['SiteUsage'].pop('TotalFiles', None)
            dirMetadata['SiteUsage'].pop('TotalSize', None)
        subDirs = subDirectories['SubDirs'].copy()
        dirMetadata['SubDirs'] = subDirs
        dirUsage = dirMetadata['SiteUsage']
        errorReason = {}
        for subDir in subDirs:
            self.__directoryOwners.setdefault(
                subDir,
                (subDirs[subDir]['Owner'], subDirs[subDir]['OwnerGroup']))
            subDirs[subDir] = subDirs[subDir].get('CreationTime', dateTime())
            if dirUsage:
                # This part here is for removing the recursivity introduced by the DFC
                args = [subDir]
                if len(subDir.split('/')) > self.__keepDirLevels:
                    args += [True, self.__recalculateUsage]
                result = self.catalog.getDirectorySize(*args)
                if not result['OK']:
                    errorReason.setdefault(str(result['Message']),
                                           []).append(subDir)
                else:
                    # 'metadata' is rebound here to the sub-directory's info
                    metadata = result['Value']['Successful'].get(subDir)
                    if metadata:
                        dirMetadata['Files'] -= metadata['LogicalFiles']
                        dirMetadata['TotalSize'] -= metadata['LogicalSize']
                    else:
                        # NOTE(review): parenthesis bug — this calls
                        # str(failedMsg, []) and setdefault() with a single
                        # argument; the intended form is
                        # setdefault(str(...), []).append(subDir).
                        errorReason.setdefault(
                            str(result['Value']['Failed'][subDir],
                                [])).append(subDir)
                # NOTE(review): if the call above failed, 'metadata' still
                # holds a previous iteration's (or the caller's) value here
                # — TODO confirm this is intended.
                if 'PhysicalSize' in metadata and dirUsage:
                    seUsage = metadata['PhysicalSize']
                    seUsage.pop('TotalFiles', None)
                    seUsage.pop('TotalSize', None)
                    for se in seUsage:
                        if se not in dirUsage:
                            self.log.error('SE used in subdir but not in dir',
                                           se)
                        else:
                            # Subtract the subdir usage to undo the DFC's
                            # recursive accounting
                            dirUsage[se]['Files'] -= seUsage[se]['Files']
                            dirUsage[se]['Size'] -= seUsage[se]['Size']
        for error in errorReason:
            self.log.error(
                'Failed to get directory info',
                '- %s for:\n\t%s' % (error, '\n\t'.join(errorReason[error])))
        # NOTE(review): popping from dirUsage while iterating items() is
        # only safe on Python 2, where items() returns a list snapshot.
        for se, usage in dirUsage.items():
            # Both info should be 0 or #0
            if not usage['Files'] and not usage['Size']:
                dirUsage.pop(se)
            elif not usage['Files'] * usage['Size']:
                self.log.error('Directory inconsistent',
                               '%s @ %s: %s' % (dirPath, se, str(usage)))
        return self.__processDir(dirPath, dirMetadata)

    def __processDir(self, dirPath, dirMetadata):
        ''' calculate nb of files and size of :dirPath:, remove it if it's empty '''
        subDirs = dirMetadata['SubDirs']
        closedDirs = dirMetadata['ClosedDirs']
        ##############################
        # FIXME: Until we understand while closed dirs are not working...
        ##############################
        closedDirs = []
        prStr = "%s: found %s sub-directories" % (dirPath, len(subDirs)
                                                  if subDirs else 'no')
        if closedDirs:
            prStr += ", %s are closed (ignored)" % len(closedDirs)
        for rmDir in closedDirs + self.__ignoreDirsList:
            subDirs.pop(rmDir, None)
        numberOfFiles = long(dirMetadata['Files'])
        totalSize = long(dirMetadata['TotalSize'])
        if numberOfFiles:
            prStr += " and %s files (%s bytes)" % (numberOfFiles, totalSize)
        else:
            prStr += " and no files"
        self.log.notice(prStr)
        if closedDirs:
            self.log.verbose("Closed dirs:\n %s" % '\n'.join(closedDirs))
        siteUsage = dirMetadata['SiteUsage']
        if numberOfFiles > 0:
            dirData = {
                'Files': numberOfFiles,
                'TotalSize': totalSize,
                'SEUsage': siteUsage
            }
            self.__addDirToPublishQueue(dirPath, dirData)
            # Print statistics
            self.log.verbose(
                "%-40s %20s %20s" %
                ('Storage Element', 'Number of files', 'Total size'))
            for storageElement in sorted(siteUsage):
                usageDict = siteUsage[storageElement]
                self.log.verbose(
                    "%-40s %20s %20s" % (storageElement, str(
                        usageDict['Files']), str(usageDict['Size'])))
        # If it's empty delete it
        elif len(subDirs) == 0 and len(closedDirs) == 0:
            if dirPath != self.__baseDir:
                self.removeEmptyDir(dirPath)
                return
        # We don't need the cached information about owner
        self.__directoryOwners.pop(dirPath, None)
        rightNow = dateTime()
        # Only keep exploring subdirs created within the active period
        # (activePeriod == 0 disables the filter)
        chosenDirs = [
            subDir
            for subDir in subDirs if not self.activePeriod or timeInterval(
                subDirs[subDir], self.activePeriod * week).includes(rightNow)
        ]

        self.__dirExplorer.addDirList(chosenDirs)
        self.__processedDirs += 1

    def __getOwnerProxy(self, dirPath):
        ''' get owner creds for :dirPath:

        Tries the cached owner first, falls back to catalog metadata; on
        success returns S_OK(proxyFilePath), caching the dumped proxy file.
        '''
        self.log.verbose("Retrieving dir metadata...")
        # get owner form the cached information, if not, try getDirectoryMetadata
        ownerName, ownerGroup = self.__directoryOwners.pop(
            dirPath, (None, None))
        if not ownerName or not ownerGroup:
            result = returnSingleResult(
                self.catalog.getDirectoryMetadata(dirPath))
            if not result['OK'] or 'OwnerRole' not in result['Value']:
                self.log.error("Could not get metadata info",
                               result['Message'])
                return result
            ownerRole = result['Value']['OwnerRole']
            ownerDN = result['Value']['OwnerDN']
            if ownerRole[0] != "/":
                ownerRole = "/%s" % ownerRole
            cacheKey = (ownerDN, ownerRole)
            ownerName = 'unknown'
            byGroup = False
        else:
            ownerDN = Registry.getDNForUsername(ownerName)
            if not ownerDN['OK']:
                self.log.error("Could not get DN from user name",
                               ownerDN['Message'])
                return ownerDN
            ownerDN = ownerDN['Value'][0]
            # This bloody method returns directly a string!!!!
            ownerRole = Registry.getVOMSAttributeForGroup(ownerGroup)
            byGroup = True
            # Get all groups for that VOMS Role, and add lhcb_user as in DFC this is a safe value
        ownerGroups = Registry.getGroupsWithVOMSAttribute(ownerRole) + [
            'lhcb_user'
        ]

        downErrors = []
        for ownerGroup in ownerGroups:
            if byGroup:
                ownerRole = None
                cacheKey = (ownerDN, ownerGroup)
            if cacheKey in self.__noProxy:
                return S_ERROR("Proxy not available")
                # Getting the proxy...
            # Require at least one hour of validity on the cached entry
            upFile = self.proxyCache.get(cacheKey, 3600)
            if upFile and os.path.exists(upFile):
                self.log.verbose(
                    'Returning cached proxy for %s %s@%s [%s] in %s' %
                    (ownerName, ownerDN, ownerGroup, ownerRole, upFile))
                return S_OK(upFile)
            if ownerRole:
                result = gProxyManager.downloadVOMSProxy(
                    ownerDN,
                    ownerGroup,
                    limited=False,
                    requiredVOMSAttribute=ownerRole)
            else:
                result = gProxyManager.downloadProxy(ownerDN,
                                                     ownerGroup,
                                                     limited=False)
            if not result['OK']:
                downErrors.append("%s : %s" % (cacheKey, result['Message']))
                continue
            userProxy = result['Value']
            secsLeft = max(0, userProxy.getRemainingSecs()['Value'])
            upFile = userProxy.dumpAllToFile()
            if upFile['OK']:
                upFile = upFile['Value']
            else:
                return upFile
            # Cache the proxy file for exactly as long as it stays valid
            self.proxyCache.add(cacheKey, secsLeft, upFile)
            self.log.info("Got proxy for %s %s@%s [%s]" %
                          (ownerName, ownerDN, ownerGroup, ownerRole))
            return S_OK(upFile)
        self.__noProxy.add(cacheKey)
        return S_ERROR("Could not download proxy for user (%s, %s):\n%s " %
                       (ownerDN, ownerRole, "\n ".join(downErrors)))

    def removeEmptyDir(self, dirPath):
        ''' remove an empty directory, first with the agent's own
        credentials, then retrying with the directory owner's proxy '''
        self.log.notice("Deleting empty directory %s" % dirPath)
        for useOwnerProxy in (False, True):
            result = self.__removeEmptyDir(dirPath,
                                           useOwnerProxy=useOwnerProxy)
            if result['OK']:
                self.log.info(
                    "Successfully removed empty directory from File Catalog and StorageUsageDB"
                )
                break
        return result

    def __removeEmptyDir(self, dirPath, useOwnerProxy=True):
        ''' unlink empty folder :dirPath: '''
        from DIRAC.ConfigurationSystem.Client.ConfigurationData import gConfigurationData
        # Never delete directories above the configured keep level
        if len(List.fromChar(dirPath, "/")) < self.__keepDirLevels:
            return S_OK()

        if useOwnerProxy:
            result = self.__getOwnerProxy(dirPath)
            if not result['OK']:
                if 'Proxy not available' not in result['Message']:
                    self.log.error(result['Message'])
                return result

            upFile = result['Value']
            # NOTE(review): raises KeyError if X509_USER_PROXY is not set in
            # the environment — TODO confirm callers guarantee it is.
            prevProxyEnv = os.environ['X509_USER_PROXY']
            os.environ['X509_USER_PROXY'] = upFile
        try:
            # Temporarily switch off the server certificate so the catalog
            # call is made with the (owner's) proxy instead
            gConfigurationData.setOptionInCFG(
                '/DIRAC/Security/UseServerCertificate', 'false')
            # res = self.catalog.removeDirectory( dirPath )
            res = self.catalog.writeCatalogs[0][1].removeDirectory(dirPath)
            if not res['OK']:
                self.log.error(
                    "Error removing empty directory from File Catalog.",
                    res['Message'])
                return res
            elif dirPath in res['Value']['Failed']:
                self.log.error(
                    "Failed to remove empty directory from File Catalog.",
                    res['Value']['Failed'][dirPath])
                self.log.debug(str(res))
                return S_ERROR(res['Value']['Failed'][dirPath])
            res = self.storageUsage.removeDirectory(dirPath)
            if not res['OK']:
                self.log.error(
                    "Failed to remove empty directory from Storage Usage database.",
                    res['Message'])
                return res
            return S_OK()
        finally:
            gConfigurationData.setOptionInCFG(
                '/DIRAC/Security/UseServerCertificate', 'true')
            if useOwnerProxy:
                os.environ['X509_USER_PROXY'] = prevProxyEnv

    def __addDirToPublishQueue(self, dirName, dirData):
        ''' enqueue :dirName: and :dirData: for publishing '''
        self.__publishDirQueue[dirName] = dirData
        numDirsToPublish = len(self.__publishDirQueue)
        # Flush in the background every PublishClusterSize directories
        if numDirsToPublish and numDirsToPublish % self.am_getOption(
                "PublishClusterSize", 100) == 0:
            self.__publishData(background=True)

    def __publishData(self, background=True):
        ''' publish data in a separate deamon thread '''
        self.dataLock.acquire()
        try:
            # Dump to file
            # NOTE(review): this branch is currently a no-op — the replica
            # dump thread is commented out below.
            if self.am_getOption("DumpReplicasToFile", False):
                pass
                # repThread = threading.Thread( target = self.__writeReplicasListFiles,
                #                              args = ( list( self.__publishDirQueue ), ) )
            self.__dirsToPublish.update(self.__publishDirQueue)
            self.__publishDirQueue = {}
        finally:
            self.dataLock.release()
        if background:
            pubThread = threading.Thread(target=self.__executePublishData)
            pubThread.setDaemon(1)
            pubThread.start()
        else:
            self.__executePublishData()

    def __executePublishData(self):
        ''' publication thread target '''
        self.dataLock.acquire()
        try:
            if not self.__dirsToPublish:
                self.log.info("No data to be published")
                return
            # Cap a single publication at __maxToPublish entries; the rest
            # stays queued for the next call
            if len(self.__dirsToPublish) > self.__maxToPublish:
                toPublish = {}
                for dirName in sorted(
                        self.__dirsToPublish)[:self.__maxToPublish]:
                    toPublish[dirName] = self.__dirsToPublish.pop(dirName)
            else:
                toPublish = self.__dirsToPublish
            self.log.info("Publishing usage for %d directories" %
                          len(toPublish))
            res = self.storageUsage.publishDirectories(toPublish)
            if res['OK']:
                # All is OK, reset the dictionary, even if data member!
                toPublish.clear()
            else:
                # Put back dirs to be published, due to the error
                self.__dirsToPublish.update(toPublish)
                self.log.error("Failed to publish directories", res['Message'])
            return res
        finally:
            self.dataLock.release()
Esempio n. 15
0
class DataCache:
    """Cache of accounting report data and rendered plot files.

    Keeps two DictCache instances — one for raw report data, one for plot
    metadata — plus the plot image files on disk under
    ``<rootPath>/data/<dirName>``.  A daemon thread purges expired entries
    every 10 minutes; expired graph entries delete their image files via
    :meth:`_deleteGraph`.
    """

    def __init__(self, dirName="accountingPlots"):
        """Create the caches and start the background purge thread.

        :param str dirName: subdirectory of ``<rootPath>/data`` where plot
                            files are stored
        """
        self.graphsLocation = os.path.join(rootPath, "data", dirName)
        self.cachedGraphs = {}
        self.alive = True
        # Cache lifetimes (seconds): raw data is cheap to regenerate, plots
        # are kept longer.
        self.__dataLifeTime = 600
        self.__graphLifeTime = 3600
        self.__dataCache = DictCache()
        self.__graphCache = DictCache(deleteFunction=self._deleteGraph)
        # Fix: start the purge thread only AFTER the caches exist.  The
        # original started it first and only avoided an AttributeError in
        # purgeExpired because that loop happens to sleep before touching
        # the caches.
        self.purgeThread = threading.Thread(target=self.purgeExpired)
        self.purgeThread.setDaemon(1)
        self.purgeThread.start()

    def setGraphsLocation(self, graphsDir):
        """Point the cache at *graphsDir* and delete any leftover .png files."""
        self.graphsLocation = graphsDir
        for graphName in os.listdir(self.graphsLocation):
            if graphName.find(".png") > 0:
                graphLocation = "%s/%s" % (self.graphsLocation, graphName)
                gLogger.verbose("Purging %s" % graphLocation)
                os.unlink(graphLocation)

    def purgeExpired(self):
        """Purge-thread target: drop expired entries every 10 minutes."""
        while self.alive:
            time.sleep(600)
            self.__graphCache.purgeExpired()
            self.__dataCache.purgeExpired()

    def getReportData(self, reportRequest, reportHash, dataFunc):
        """
        Get report data from cache if exists, else generate it with *dataFunc*.
        """
        reportData = self.__dataCache.get(reportHash)
        if not reportData:
            retVal = dataFunc(reportRequest)
            if not retVal["OK"]:
                return retVal
            reportData = retVal["Value"]
            self.__dataCache.add(reportHash, self.__dataLifeTime, reportData)
        return S_OK(reportData)

    def getReportPlot(self, reportRequest, reportHash, reportData, plotFunc):
        """
        Get plot metadata from cache if it exists, else render it with
        *plotFunc* and cache the resulting file names.
        """
        plotDict = self.__graphCache.get(reportHash)
        if not plotDict:
            basePlotFileName = "%s/%s" % (self.graphsLocation, reportHash)
            retVal = plotFunc(reportRequest, reportData, basePlotFileName)
            if not retVal["OK"]:
                return retVal
            plotDict = retVal["Value"]
            # Store file names relative to graphsLocation
            if plotDict["plot"]:
                plotDict["plot"] = "%s.png" % reportHash
            if plotDict["thumbnail"]:
                plotDict["thumbnail"] = "%s.thb.png" % reportHash
            self.__graphCache.add(reportHash, self.__graphLifeTime, plotDict)
        return S_OK(plotDict)

    def getPlotData(self, plotFileName):
        """Return S_OK(bytes) with the content of a cached plot file."""
        filename = "%s/%s" % (self.graphsLocation, plotFileName)
        try:
            fd = open(filename, "rb")
            data = fd.read()
            fd.close()
        except Exception as e:
            return S_ERROR("Can't open file %s: %s" % (plotFileName, str(e)))
        return S_OK(data)

    def _deleteGraph(self, plotDict):
        """DictCache delete hook: best-effort removal of the plot files
        referenced by *plotDict* (values are file names relative to
        graphsLocation)."""
        try:
            for key in plotDict:
                value = plotDict[key]
                if value:
                    fPath = os.path.join(self.graphsLocation, str(value))
                    if os.path.isfile(fPath):
                        gLogger.info("Deleting plot from cache", value)
                        os.unlink(fPath)
                    else:
                        gLogger.info("Plot has already been deleted", value)
        except Exception:
            # Deliberately best-effort: cache eviction must never raise
            pass
Esempio n. 16
0
class ProxyManagerClient:
    """
    Client-side interface to the Framework/ProxyManager service.

    Keeps per-process caches of registered users, downloaded proxy chains
    (plain and VOMS flavours) and of proxy files dumped to disk, so that
    repeated requests do not hit the service again before the cached
    entries expire.
    """
    # Singleton: every instantiation returns the same object (py2-style metaclass)
    __metaclass__ = DIRACSingleton.DIRACSingleton

    def __init__(self):
        # Caches keyed by (DN, group) — or (DN, group, vomsAttr, limited) for VOMS
        self.__usersCache = DictCache()
        self.__proxiesCache = DictCache()
        self.__vomsProxiesCache = DictCache()
        self.__pilotProxiesCache = DictCache()
        # Entries expiring from this cache trigger deletion of the dumped proxy file
        self.__filesCache = DictCache(self.__deleteTemporalFile)

    def __deleteTemporalFile(self, filename):
        """Best-effort removal of a dumped proxy file (DictCache delete callback)."""
        try:
            os.unlink(filename)
        # NOTE(review): bare except also swallows SystemExit/KeyboardInterrupt;
        # catching OSError would be the minimal correct handler here.
        except:
            pass

    def clearCaches(self):
        """Drop all cached user records and proxy chains."""
        self.__usersCache.purgeAll()
        self.__proxiesCache.purgeAll()
        self.__vomsProxiesCache.purgeAll()
        self.__pilotProxiesCache.purgeAll()
        # NOTE(review): __filesCache is not purged here — presumably to keep
        # already-dumped proxy files alive on disk; confirm this is intentional.

    def __getSecondsLeftToExpiration(self, expiration, utc=True):
        # Whole seconds from now until `expiration`; microseconds are discarded.
        if utc:
            td = expiration - datetime.datetime.utcnow()
        else:
            td = expiration - datetime.datetime.now()
        return td.days * 86400 + td.seconds

    def __refreshUserCache(self, validSeconds=0):
        # Reload the (DN, group) -> record map from the service; each entry
        # is cached until its proxy expires.
        rpcClient = RPCClient("Framework/ProxyManager", timeout=120)
        retVal = rpcClient.getRegisteredUsers(validSeconds)
        if not retVal['OK']:
            return retVal
        data = retVal['Value']
        #Update the cache
        for record in data:
            cacheKey = (record['DN'], record['group'])
            self.__usersCache.add(
                cacheKey,
                self.__getSecondsLeftToExpiration(record['expirationtime']),
                record)
        return S_OK()

    @gUsersSync
    def userHasProxy(self, userDN, userGroup, validSeconds=0):
        """
    Check if a user(DN-group) has a proxy in the proxy management
      - Updates internal cache if needed to minimize queries to the
          service
    """
        cacheKey = (userDN, userGroup)
        if self.__usersCache.exists(cacheKey, validSeconds):
            return S_OK(True)
        #Get list of users from the DB with proxys at least 300 seconds
        gLogger.verbose("Updating list of users in proxy management")
        retVal = self.__refreshUserCache(validSeconds)
        if not retVal['OK']:
            return retVal
        return S_OK(self.__usersCache.exists(cacheKey, validSeconds))

    @gUsersSync
    def getUserPersistence(self, userDN, userGroup, validSeconds=0):
        """
    Check if a user(DN-group) has a proxy in the proxy management
      - Updates internal cache if needed to minimize queries to the
          service
    """
        cacheKey = (userDN, userGroup)
        userData = self.__usersCache.get(cacheKey, validSeconds)
        if userData:
            if userData['persistent']:
                return S_OK(True)
        # A missing or non-persistent record may be stale: refresh and re-check
        #Get list of users from the DB with proxys at least 300 seconds
        gLogger.verbose("Updating list of users in proxy management")
        retVal = self.__refreshUserCache(validSeconds)
        if not retVal['OK']:
            return retVal
        userData = self.__usersCache.get(cacheKey, validSeconds)
        if userData:
            return S_OK(userData['persistent'])
        return S_OK(False)

    def setPersistency(self, userDN, userGroup, persistent):
        """
    Set the persistency for user/group
    """
        #Hack to ensure bool in the rpc call
        persistentFlag = True
        if not persistent:
            persistentFlag = False
        rpcClient = RPCClient("Framework/ProxyManager", timeout=120)
        retVal = rpcClient.setPersistency(userDN, userGroup, persistentFlag)
        if not retVal['OK']:
            return retVal
        #Update internal persistency cache
        cacheKey = (userDN, userGroup)
        record = self.__usersCache.get(cacheKey, 0)
        if record:
            record['persistent'] = persistentFlag
            self.__usersCache.add(
                cacheKey,
                self.__getSecondsLeftToExpiration(record['expirationtime']),
                record)
        return retVal

    def uploadProxy(self,
                    proxy=False,
                    diracGroup=False,
                    chainToConnect=False,
                    restrictLifeTime=0):
        """
    Upload a proxy to the proxy management service using delgation
    """
        #Discover proxy location
        # `proxy` may be an X509Chain, a file path (py2 str/unicode), or False
        # (meaning: discover the proxy location from the environment).
        if type(proxy) == g_X509ChainType:
            chain = proxy
            proxyLocation = ""
        else:
            if not proxy:
                proxyLocation = Locations.getProxyLocation()
                if not proxyLocation:
                    return S_ERROR("Can't find a valid proxy")
            elif type(proxy) in (types.StringType, types.UnicodeType):
                proxyLocation = proxy
            else:
                return S_ERROR("Can't find a valid proxy")
            chain = X509Chain()
            result = chain.loadProxyFromFile(proxyLocation)
            if not result['OK']:
                return S_ERROR("Can't load %s: %s " %
                               (proxyLocation, result['Message']))

        if not chainToConnect:
            chainToConnect = chain

        #Make sure it's valid
        if chain.hasExpired()['Value']:
            return S_ERROR("Proxy %s has expired" % proxyLocation)

        #rpcClient = RPCClient( "Framework/ProxyManager", proxyChain = chainToConnect )
        rpcClient = RPCClient("Framework/ProxyManager", timeout=120)
        #Get a delegation request
        result = rpcClient.requestDelegationUpload(
            chain.getRemainingSecs()['Value'], diracGroup)
        if not result['OK']:
            return result
        #Check if the delegation has been granted
        # An empty 'Value' means the service already holds a valid proxy
        if 'Value' not in result or not result['Value']:
            if 'proxies' in result:
                return S_OK(result['proxies'])
            else:
                return S_OK()
        reqDict = result['Value']
        #Generate delegated chain
        # Leave a 60-second safety margin below the parent chain's lifetime
        chainLifeTime = chain.getRemainingSecs()['Value'] - 60
        if restrictLifeTime and restrictLifeTime < chainLifeTime:
            chainLifeTime = restrictLifeTime
        retVal = chain.generateChainFromRequestString(reqDict['request'],
                                                      lifetime=chainLifeTime,
                                                      diracGroup=diracGroup)
        if not retVal['OK']:
            return retVal
        #Upload!
        result = rpcClient.completeDelegationUpload(reqDict['id'],
                                                    retVal['Value'])
        if not result['OK']:
            return result
        if 'proxies' in result:
            return S_OK(result['proxies'])
        return S_OK()

    @gProxiesSync
    def downloadProxy(self,
                      userDN,
                      userGroup,
                      limited=False,
                      requiredTimeLeft=1200,
                      cacheTime=43200,
                      proxyToConnect=False,
                      token=False):
        """
    Get a proxy Chain from the proxy management
    """
        # Serve from cache when the cached chain still has requiredTimeLeft seconds
        cacheKey = (userDN, userGroup)
        if self.__proxiesCache.exists(cacheKey, requiredTimeLeft):
            return S_OK(self.__proxiesCache.get(cacheKey))
        # Generate a fresh key pair locally; the service signs our request,
        # so the private key never leaves this process.
        req = X509Request()
        req.generateProxyRequest(limited=limited)
        if proxyToConnect:
            rpcClient = RPCClient("Framework/ProxyManager",
                                  proxyChain=proxyToConnect,
                                  timeout=120)
        else:
            rpcClient = RPCClient("Framework/ProxyManager", timeout=120)
        if token:
            retVal = rpcClient.getProxyWithToken(
                userDN, userGroup,
                req.dumpRequest()['Value'], long(cacheTime + requiredTimeLeft),
                token)
        else:
            retVal = rpcClient.getProxy(userDN, userGroup,
                                        req.dumpRequest()['Value'],
                                        long(cacheTime + requiredTimeLeft))
        if not retVal['OK']:
            return retVal
        chain = X509Chain(keyObj=req.getPKey())
        retVal = chain.loadChainFromString(retVal['Value'])
        if not retVal['OK']:
            return retVal
        self.__proxiesCache.add(cacheKey,
                                chain.getRemainingSecs()['Value'], chain)
        return S_OK(chain)

    def downloadProxyToFile(self,
                            userDN,
                            userGroup,
                            limited=False,
                            requiredTimeLeft=1200,
                            cacheTime=43200,
                            filePath=False,
                            proxyToConnect=False,
                            token=False):
        """
    Get a proxy Chain from the proxy management and write it to file
    """
        retVal = self.downloadProxy(userDN, userGroup, limited,
                                    requiredTimeLeft, cacheTime,
                                    proxyToConnect, token)
        if not retVal['OK']:
            return retVal
        chain = retVal['Value']
        retVal = self.dumpProxyToFile(chain, filePath)
        if not retVal['OK']:
            return retVal
        # Callers expect the chain alongside the file path
        retVal['chain'] = chain
        return retVal

    @gVOMSProxiesSync
    def downloadVOMSProxy(self,
                          userDN,
                          userGroup,
                          limited=False,
                          requiredTimeLeft=1200,
                          cacheTime=43200,
                          requiredVOMSAttribute=False,
                          proxyToConnect=False,
                          token=False):
        """
    Download a proxy if needed and transform it into a VOMS one
    """

        # VOMS proxies are cached per attribute and per limited flag
        cacheKey = (userDN, userGroup, requiredVOMSAttribute, limited)
        if self.__vomsProxiesCache.exists(cacheKey, requiredTimeLeft):
            return S_OK(self.__vomsProxiesCache.get(cacheKey))
        req = X509Request()
        req.generateProxyRequest(limited=limited)
        if proxyToConnect:
            rpcClient = RPCClient("Framework/ProxyManager",
                                  proxyChain=proxyToConnect,
                                  timeout=120)
        else:
            rpcClient = RPCClient("Framework/ProxyManager", timeout=120)
        if token:
            retVal = rpcClient.getVOMSProxyWithToken(
                userDN, userGroup,
                req.dumpRequest()['Value'], long(cacheTime + requiredTimeLeft),
                token, requiredVOMSAttribute)

        else:
            retVal = rpcClient.getVOMSProxy(userDN, userGroup,
                                            req.dumpRequest()['Value'],
                                            long(cacheTime + requiredTimeLeft),
                                            requiredVOMSAttribute)
        if not retVal['OK']:
            return retVal
        chain = X509Chain(keyObj=req.getPKey())
        retVal = chain.loadChainFromString(retVal['Value'])
        if not retVal['OK']:
            return retVal
        self.__vomsProxiesCache.add(cacheKey,
                                    chain.getRemainingSecs()['Value'], chain)
        return S_OK(chain)

    def downloadVOMSProxyToFile(self,
                                userDN,
                                userGroup,
                                limited=False,
                                requiredTimeLeft=1200,
                                cacheTime=43200,
                                requiredVOMSAttribute=False,
                                filePath=False,
                                proxyToConnect=False,
                                token=False):
        """
    Download a proxy if needed, transform it into a VOMS one and write it to file
    """
        retVal = self.downloadVOMSProxy(userDN, userGroup, limited,
                                        requiredTimeLeft, cacheTime,
                                        requiredVOMSAttribute, proxyToConnect,
                                        token)
        if not retVal['OK']:
            return retVal
        chain = retVal['Value']
        retVal = self.dumpProxyToFile(chain, filePath)
        if not retVal['OK']:
            return retVal
        retVal['chain'] = chain
        return retVal

    def getPilotProxyFromDIRACGroup(self,
                                    userDN,
                                    userGroup,
                                    requiredTimeLeft=43200,
                                    proxyToConnect=False):
        """
    Download a pilot proxy with VOMS extensions depending on the group
    """
        #Assign VOMS attribute
        vomsAttr = CS.getVOMSAttributeForGroup(userGroup)
        if not vomsAttr:
            gLogger.verbose(
                "No voms attribute assigned to group %s when requested pilot proxy"
                % userGroup)
            return self.downloadProxy(userDN,
                                      userGroup,
                                      limited=False,
                                      requiredTimeLeft=requiredTimeLeft,
                                      proxyToConnect=proxyToConnect)
        else:
            return self.downloadVOMSProxy(userDN,
                                          userGroup,
                                          limited=False,
                                          requiredTimeLeft=requiredTimeLeft,
                                          requiredVOMSAttribute=vomsAttr,
                                          proxyToConnect=proxyToConnect)

    def getPilotProxyFromVOMSGroup(self,
                                   userDN,
                                   vomsAttr,
                                   requiredTimeLeft=43200,
                                   proxyToConnect=False):
        """
    Download a pilot proxy with VOMS extensions depending on the group
    """
        groups = CS.getGroupsWithVOMSAttribute(vomsAttr)
        if not groups:
            return S_ERROR("No group found that has %s as voms attrs" %
                           vomsAttr)

        # Try every group carrying the attribute; return on the first success.
        # NOTE(review): if all downloads fail, the *last* error is returned.
        for userGroup in groups:
            result = self.downloadVOMSProxy(userDN,
                                            userGroup,
                                            limited=False,
                                            requiredTimeLeft=requiredTimeLeft,
                                            requiredVOMSAttribute=vomsAttr,
                                            proxyToConnect=proxyToConnect)
            if result['OK']:
                return result
        return result

    def getPayloadProxyFromDIRACGroup(self,
                                      userDN,
                                      userGroup,
                                      requiredTimeLeft,
                                      token=False,
                                      proxyToConnect=False):
        """
    Download a payload proxy with VOMS extensions depending on the group
    """
        #Assign VOMS attribute
        vomsAttr = CS.getVOMSAttributeForGroup(userGroup)
        if not vomsAttr:
            gLogger.verbose(
                "No voms attribute assigned to group %s when requested payload proxy"
                % userGroup)
            return self.downloadProxy(userDN,
                                      userGroup,
                                      limited=True,
                                      requiredTimeLeft=requiredTimeLeft,
                                      proxyToConnect=proxyToConnect,
                                      token=token)
        else:
            return self.downloadVOMSProxy(userDN,
                                          userGroup,
                                          limited=True,
                                          requiredTimeLeft=requiredTimeLeft,
                                          requiredVOMSAttribute=vomsAttr,
                                          proxyToConnect=proxyToConnect,
                                          token=token)

    def getPayloadProxyFromVOMSGroup(self,
                                     userDN,
                                     vomsAttr,
                                     token,
                                     requiredTimeLeft,
                                     proxyToConnect=False):
        """
    Download a payload proxy with VOMS extensions depending on the VOMS attr
    """
        groups = CS.getGroupsWithVOMSAttribute(vomsAttr)
        if not groups:
            return S_ERROR("No group found that has %s as voms attrs" %
                           vomsAttr)
        # Unlike the pilot variant, only the first matching group is tried
        userGroup = groups[0]

        return self.downloadVOMSProxy(userDN,
                                      userGroup,
                                      limited=True,
                                      requiredTimeLeft=requiredTimeLeft,
                                      requiredVOMSAttribute=vomsAttr,
                                      proxyToConnect=proxyToConnect,
                                      token=token)

    def dumpProxyToFile(self,
                        chain,
                        destinationFile=False,
                        requiredTimeLeft=600):
        """
    Dump a proxy to a file. It's cached so multiple calls won't generate extra files
    """
        result = chain.hash()
        if not result['OK']:
            return result
        # NOTE(review): `hash` shadows the builtin within this method
        hash = result['Value']
        if self.__filesCache.exists(hash, requiredTimeLeft):
            filepath = self.__filesCache.get(hash)
            if os.path.isfile(filepath):
                return S_OK(filepath)
            # Cached file vanished from disk: drop the stale entry and re-dump
            self.__filesCache.delete(hash)
        retVal = chain.dumpAllToFile(destinationFile)
        if not retVal['OK']:
            return retVal
        filename = retVal['Value']
        self.__filesCache.add(hash,
                              chain.getRemainingSecs()['Value'], filename)
        return S_OK(filename)

    def deleteGeneratedProxyFile(self, chain):
        """
    Delete a file generated by a dump
    """
        # NOTE(review): the files cache is keyed by chain.hash() in
        # dumpProxyToFile, but deleted here by the chain object itself —
        # verify the intended key; as written this likely never matches.
        self.__filesCache.delete(chain)
        return S_OK()

    def requestToken(self, requesterDN, requesterGroup, numUses=1):
        """
    Request a number of tokens. usesList must be a list of integers and each integer is the number of uses a token
    must have
    """
        rpcClient = RPCClient("Framework/ProxyManager", timeout=120)
        return rpcClient.generateToken(requesterDN, requesterGroup, numUses)

    def renewProxy(self,
                   proxyToBeRenewed=False,
                   minLifeTime=3600,
                   newProxyLifeTime=43200,
                   proxyToConnect=False):
        """
    Renew a proxy using the ProxyManager
    Arguments:
      proxyToBeRenewed : proxy to renew
      minLifeTime : if proxy life time is less than this, renew. Skip otherwise
      newProxyLifeTime : life time of new proxy
      proxyToConnect : proxy to use for connecting to the service
    """
        retVal = File.multiProxyArgument(proxyToBeRenewed)
        if not retVal['Value']:
            return retVal
        proxyToRenewDict = retVal['Value']

        # Nothing to do if the proxy still has enough lifetime
        secs = proxyToRenewDict['chain'].getRemainingSecs()['Value']
        if secs > minLifeTime:
            File.deleteMultiProxy(proxyToRenewDict)
            return S_OK()

        if not proxyToConnect:
            proxyToConnectDict = {'chain': False, 'tempFile': False}
        else:
            retVal = File.multiProxyArgument(proxyToConnect)
            if not retVal['Value']:
                File.deleteMultiProxy(proxyToRenewDict)
                return retVal
            proxyToConnectDict = retVal['Value']

        # Extract identity (DN, DIRAC group, limited flag) from the old proxy
        userDN = proxyToRenewDict['chain'].getIssuerCert(
        )['Value'].getSubjectDN()['Value']
        retVal = proxyToRenewDict['chain'].getDIRACGroup()
        if not retVal['OK']:
            File.deleteMultiProxy(proxyToRenewDict)
            File.deleteMultiProxy(proxyToConnectDict)
            return retVal
        userGroup = retVal['Value']
        limited = proxyToRenewDict['chain'].isLimitedProxy()['Value']

        # Keep the same VOMS flavour as the proxy being renewed
        voms = VOMS()
        retVal = voms.getVOMSAttributes(proxyToRenewDict['chain'])
        if not retVal['OK']:
            File.deleteMultiProxy(proxyToRenewDict)
            File.deleteMultiProxy(proxyToConnectDict)
            return retVal
        vomsAttrs = retVal['Value']
        if vomsAttrs:
            retVal = self.downloadVOMSProxy(
                userDN,
                userGroup,
                limited=limited,
                requiredTimeLeft=newProxyLifeTime,
                requiredVOMSAttribute=vomsAttrs[0],
                proxyToConnect=proxyToConnectDict['chain'])
        else:
            retVal = self.downloadProxy(
                userDN,
                userGroup,
                limited=limited,
                requiredTimeLeft=newProxyLifeTime,
                proxyToConnect=proxyToConnectDict['chain'])

        # Temporary files are always cleaned up, success or failure
        File.deleteMultiProxy(proxyToRenewDict)
        File.deleteMultiProxy(proxyToConnectDict)

        if not retVal['OK']:
            return retVal

        chain = retVal['Value']

        # If the argument was a file path, overwrite it in place
        if not proxyToRenewDict['tempFile']:
            return chain.dumpAllToFile(proxyToRenewDict['file'])

        return S_OK(chain)

    def getDBContents(self, condDict={}):
        """
    Get the contents of the db
    """
        # NOTE(review): mutable default argument; harmless while condDict is
        # only read, but fragile if the service ever mutates it.
        rpcClient = RPCClient("Framework/ProxyManager", timeout=120)
        return rpcClient.getContents(condDict, [['UserDN', 'DESC']], 0, 0)

    def getVOMSAttributes(self, chain):
        """
    Get the voms attributes for a chain
    """
        return VOMS().getVOMSAttributes(chain)

    def getUploadedProxyLifeTime(self, DN, group):
        """
    Get the remaining seconds for an uploaded proxy
    """
        result = self.getDBContents({'UserDN': [DN], 'UserGroup': [group]})
        if not result['OK']:
            return result
        data = result['Value']
        if len(data['Records']) == 0:
            return S_OK(0)
        # Records are positional rows; resolve the column indices by name
        pNames = list(data['ParameterNames'])
        dnPos = pNames.index('UserDN')
        groupPos = pNames.index('UserGroup')
        expiryPos = pNames.index('ExpirationTime')
        for row in data['Records']:
            if DN == row[dnPos] and group == row[groupPos]:
                td = row[expiryPos] - datetime.datetime.utcnow()
                secondsLeft = td.days * 86400 + td.seconds
                return S_OK(max(0, secondsLeft))
        return S_OK(0)

    def getUserProxiesInfo(self):
        """ Get the user proxies uploaded info
    """
        result = RPCClient("Framework/ProxyManager",
                           timeout=120).getUserProxiesInfo()
        # Strip the RPC bookkeeping before handing the result to the caller
        if 'rpcStub' in result:
            result.pop('rpcStub')
        return result
# Esempio n. 17
# 0
class ProxyManagerClient:
  __metaclass__ = DIRACSingleton.DIRACSingleton

  def __init__( self ):
    """Create the per-process caches used by the client."""
    # Caches keyed by (DN, group) — or (DN, group, vomsAttr, limited) for VOMS
    self.__usersCache = DictCache()
    self.__proxiesCache = DictCache()
    self.__vomsProxiesCache = DictCache()
    self.__pilotProxiesCache = DictCache()
    # Entries expiring from this cache trigger deletion of the dumped proxy file
    self.__filesCache = DictCache( self.__deleteTemporalFile )

  def __deleteTemporalFile( self, filename ):
    """
    Best-effort removal of a temporary proxy file (DictCache delete callback).

    :param filename: path of the file to remove
    """
    try:
      os.unlink( filename )
    except OSError:
      # Narrowed from a bare except (which also swallowed SystemExit and
      # KeyboardInterrupt): a missing or unremovable file is not an error here.
      pass

  def clearCaches( self ):
    """
    Drop all cached user records and proxy chains.
    NOTE(review): __filesCache is not purged — presumably to keep dumped
    proxy files alive on disk; confirm this is intentional.
    """
    for cache in ( self.__usersCache,
                   self.__proxiesCache,
                   self.__vomsProxiesCache,
                   self.__pilotProxiesCache ):
      cache.purgeAll()

  def __getSecondsLeftToExpiration( self, expiration, utc = True ):
    """
    Seconds from now until ``expiration`` (microseconds are discarded).

    :param expiration: naive datetime to compare against
    :param utc: compare against utcnow() when True, now() otherwise
    """
    now = datetime.datetime.utcnow() if utc else datetime.datetime.now()
    delta = expiration - now
    # timedelta normalises so 0 <= seconds < 86400; days carries the sign
    return delta.days * 86400 + delta.seconds

  def __refreshUserCache( self, validSeconds = 0 ):
    """
    Reload the registered-users cache from the ProxyManager service.

    :param validSeconds: minimum proxy validity requested from the service
    """
    rpcClient = RPCClient( "Framework/ProxyManager", timeout = 120 )
    result = rpcClient.getRegisteredUsers( validSeconds )
    if not result[ 'OK' ]:
      return result
    # Cache each DN/group record until its proxy expires
    for userRecord in result[ 'Value' ]:
      key = ( userRecord[ 'DN' ], userRecord[ 'group' ] )
      lifetime = self.__getSecondsLeftToExpiration( userRecord[ 'expirationtime' ] )
      self.__usersCache.add( key, lifetime, userRecord )
    return S_OK()

  @gUsersSync
  def userHasProxy( self, userDN, userGroup, validSeconds = 0 ):
    """
    Check whether a DN/group pair has a proxy stored in the proxy management.
    Refreshes the local user cache from the service on a miss, so queries
    to the service are minimized.
    """
    key = ( userDN, userGroup )
    if self.__usersCache.exists( key, validSeconds ):
      return S_OK( True )
    # Not cached: refresh the user list from the service and look again
    gLogger.verbose( "Updating list of users in proxy management" )
    refreshResult = self.__refreshUserCache( validSeconds )
    if not refreshResult[ 'OK' ]:
      return refreshResult
    return S_OK( self.__usersCache.exists( key, validSeconds ) )

  @gUsersSync
  def getUserPersistence( self, userDN, userGroup, validSeconds = 0 ):
    """
    Return the persistency flag of a DN/group pair in the proxy management.
    Refreshes the local user cache when the record is missing or
    non-persistent, to make sure a stale value is not reported.
    """
    key = ( userDN, userGroup )
    userData = self.__usersCache.get( key, validSeconds )
    if userData and userData[ 'persistent' ]:
      return S_OK( True )
    # Missing or non-persistent: refresh from the service and re-check
    gLogger.verbose( "Updating list of users in proxy management" )
    refreshResult = self.__refreshUserCache( validSeconds )
    if not refreshResult[ 'OK' ]:
      return refreshResult
    userData = self.__usersCache.get( key, validSeconds )
    if userData:
      return S_OK( userData[ 'persistent' ] )
    return S_OK( False )

  def setPersistency( self, userDN, userGroup, persistent ):
    """
    Set the persistency flag for a user/group in the proxy management.
    """
    # Coerce to a genuine bool before the RPC call
    persistentFlag = bool( persistent )
    rpcClient = RPCClient( "Framework/ProxyManager", timeout = 120 )
    rpcResult = rpcClient.setPersistency( userDN, userGroup, persistentFlag )
    if not rpcResult[ 'OK' ]:
      return rpcResult
    # Mirror the change in the local cache, keeping the original expiration
    key = ( userDN, userGroup )
    record = self.__usersCache.get( key, 0 )
    if record:
      record[ 'persistent' ] = persistentFlag
      self.__usersCache.add( key,
                             self.__getSecondsLeftToExpiration( record[ 'expirationtime' ] ),
                             record )
    return rpcResult

  def uploadProxy( self, proxy = False, diracGroup = False, chainToConnect = False, restrictLifeTime = 0, rfcIfPossible = False ):
    """
    Upload a proxy to the proxy management service using delegation
    """
    #Discover proxy location
    # `proxy` may be an X509Chain, a file path (py2 str/unicode), or False
    # (meaning: discover the proxy location from the environment).
    if type( proxy ) == g_X509ChainType:
      chain = proxy
      proxyLocation = ""
    else:
      if not proxy:
        proxyLocation = Locations.getProxyLocation()
        if not proxyLocation:
          return S_ERROR( "Can't find a valid proxy" )
      elif isinstance( proxy, basestring ):
        proxyLocation = proxy
      else:
        return S_ERROR( "Can't find a valid proxy" )
      chain = X509Chain()
      result = chain.loadProxyFromFile( proxyLocation )
      if not result[ 'OK' ]:
        return S_ERROR( "Can't load %s: %s " % ( proxyLocation, result[ 'Message' ] ) )

    if not chainToConnect:
      chainToConnect = chain

    #Make sure it's valid
    if chain.hasExpired()[ 'Value' ]:
      return S_ERROR( "Proxy %s has expired" % proxyLocation )

    #rpcClient = RPCClient( "Framework/ProxyManager", proxyChain = chainToConnect )
    rpcClient = RPCClient( "Framework/ProxyManager", timeout = 120 )
    #Get a delegation request
    result = rpcClient.requestDelegationUpload( chain.getRemainingSecs()['Value'], diracGroup )
    if not result[ 'OK' ]:
      return result
    #Check if the delegation has been granted
    # An empty 'Value' means the service already holds a valid proxy
    if 'Value' not in result or not result[ 'Value' ]:
      if 'proxies' in result:
        return S_OK( result[ 'proxies' ] )
      else:
        return S_OK()
    reqDict = result[ 'Value' ]
    #Generate delegated chain
    # Leave a 60-second safety margin below the parent chain's lifetime
    chainLifeTime = chain.getRemainingSecs()[ 'Value' ] - 60
    if restrictLifeTime and restrictLifeTime < chainLifeTime:
      chainLifeTime = restrictLifeTime
    retVal = chain.generateChainFromRequestString( reqDict[ 'request' ],
                                                   lifetime = chainLifeTime,
                                                   diracGroup = diracGroup, rfc = rfcIfPossible)
    if not retVal[ 'OK' ]:
      return retVal
    #Upload!
    result = rpcClient.completeDelegationUpload( reqDict[ 'id' ], retVal[ 'Value' ] )
    if not result[ 'OK' ]:
      return result
    if 'proxies' in result:
      return S_OK( result[ 'proxies' ] )
    return S_OK()


  @gProxiesSync
  def downloadProxy( self, userDN, userGroup, limited = False, requiredTimeLeft = 1200,
                     cacheTime = 43200, proxyToConnect = False, token = False ):
    """
    Get a proxy Chain from the proxy management
    """
    # Serve from cache when the cached chain still has requiredTimeLeft seconds
    cacheKey = ( userDN, userGroup )
    if self.__proxiesCache.exists( cacheKey, requiredTimeLeft ):
      return S_OK( self.__proxiesCache.get( cacheKey ) )
    # Generate a fresh key pair locally; the service only signs the request,
    # so the private key never leaves this process.
    req = X509Request()
    req.generateProxyRequest( limited = limited )
    if proxyToConnect:
      rpcClient = RPCClient( "Framework/ProxyManager", proxyChain = proxyToConnect, timeout = 120 )
    else:
      rpcClient = RPCClient( "Framework/ProxyManager", timeout = 120 )
    if token:
      retVal = rpcClient.getProxyWithToken( userDN, userGroup, req.dumpRequest()['Value'],
                                   long( cacheTime + requiredTimeLeft ), token )
    else:
      retVal = rpcClient.getProxy( userDN, userGroup, req.dumpRequest()['Value'],
                                   long( cacheTime + requiredTimeLeft ) )
    if not retVal[ 'OK' ]:
      return retVal
    chain = X509Chain( keyObj = req.getPKey() )
    retVal = chain.loadChainFromString( retVal[ 'Value' ] )
    if not retVal[ 'OK' ]:
      return retVal
    self.__proxiesCache.add( cacheKey, chain.getRemainingSecs()['Value'], chain )
    return S_OK( chain )

  def downloadProxyToFile( self, userDN, userGroup, limited = False, requiredTimeLeft = 1200,
                           cacheTime = 43200, filePath = False, proxyToConnect = False, token = False ):
    """
    Get a proxy Chain from the proxy management and write it to file
    """
    downloadResult = self.downloadProxy( userDN, userGroup, limited, requiredTimeLeft,
                                         cacheTime, proxyToConnect, token )
    if not downloadResult[ 'OK' ]:
      return downloadResult
    chain = downloadResult[ 'Value' ]
    dumpResult = self.dumpProxyToFile( chain, filePath )
    if not dumpResult[ 'OK' ]:
      return dumpResult
    # Callers expect the chain alongside the dumped file path
    dumpResult[ 'chain' ] = chain
    return dumpResult

  @gVOMSProxiesSync
  def downloadVOMSProxy( self, userDN, userGroup, limited = False, requiredTimeLeft = 1200,
                         cacheTime = 43200, requiredVOMSAttribute = False, proxyToConnect = False, token = False ):
    """
    Download a proxy if needed and transform it into a VOMS one
    """

    # VOMS proxies are cached per attribute and per limited flag
    cacheKey = ( userDN, userGroup, requiredVOMSAttribute, limited )
    if self.__vomsProxiesCache.exists( cacheKey, requiredTimeLeft ):
      return S_OK( self.__vomsProxiesCache.get( cacheKey ) )
    # Generate a fresh key pair locally; the service only signs the request
    req = X509Request()
    req.generateProxyRequest( limited = limited )
    if proxyToConnect:
      rpcClient = RPCClient( "Framework/ProxyManager", proxyChain = proxyToConnect, timeout = 120 )
    else:
      rpcClient = RPCClient( "Framework/ProxyManager", timeout = 120 )
    if token:
      retVal = rpcClient.getVOMSProxyWithToken( userDN, userGroup, req.dumpRequest()['Value'],
                                                long( cacheTime + requiredTimeLeft ), token, requiredVOMSAttribute )

    else:
      retVal = rpcClient.getVOMSProxy( userDN, userGroup, req.dumpRequest()['Value'],
                                       long( cacheTime + requiredTimeLeft ), requiredVOMSAttribute )
    if not retVal[ 'OK' ]:
      return retVal
    chain = X509Chain( keyObj = req.getPKey() )
    retVal = chain.loadChainFromString( retVal[ 'Value' ] )
    if not retVal[ 'OK' ]:
      return retVal
    self.__vomsProxiesCache.add( cacheKey, chain.getRemainingSecs()['Value'], chain )
    return S_OK( chain )

  def downloadVOMSProxyToFile( self, userDN, userGroup, limited = False, requiredTimeLeft = 1200, cacheTime = 43200,
                               requiredVOMSAttribute = False, filePath = False, proxyToConnect = False, token = False ):
    """
    Download a proxy if needed, transform it into a VOMS one and dump it to a file.

    On success the S_OK structure carries the file name in 'Value' and the
    chain object under the extra 'chain' key.
    """
    downloadResult = self.downloadVOMSProxy( userDN, userGroup, limited, requiredTimeLeft,
                                             cacheTime, requiredVOMSAttribute, proxyToConnect, token )
    if not downloadResult[ 'OK' ]:
      return downloadResult
    vomsChain = downloadResult[ 'Value' ]
    dumpResult = self.dumpProxyToFile( vomsChain, filePath )
    if not dumpResult[ 'OK' ]:
      return dumpResult
    dumpResult[ 'chain' ] = vomsChain
    return dumpResult

  def getPilotProxyFromDIRACGroup( self, userDN, userGroup, requiredTimeLeft = 43200, proxyToConnect = False ):
    """
    Download a pilot proxy, with VOMS extensions when the group has a VOMS
    attribute assigned, plain otherwise.
    """
    vomsAttr = CS.getVOMSAttributeForGroup( userGroup )
    if vomsAttr:
      return self.downloadVOMSProxy( userDN, userGroup, limited = False, requiredTimeLeft = requiredTimeLeft,
                                     requiredVOMSAttribute = vomsAttr, proxyToConnect = proxyToConnect )
    gLogger.verbose( "No voms attribute assigned to group %s when requested pilot proxy" % userGroup )
    return self.downloadProxy( userDN, userGroup, limited = False, requiredTimeLeft = requiredTimeLeft,
                               proxyToConnect = proxyToConnect )

  def getPilotProxyFromVOMSGroup( self, userDN, vomsAttr, requiredTimeLeft = 43200, proxyToConnect = False ):
    """
    Download a pilot proxy with VOMS extensions, trying every DIRAC group that
    carries the given VOMS attribute until one download succeeds. Returns the
    last failure if none does.
    """
    groups = CS.getGroupsWithVOMSAttribute( vomsAttr )
    if not groups:
      return S_ERROR( "No group found that has %s as voms attrs" % vomsAttr )

    lastResult = None
    for candidateGroup in groups:
      lastResult = self.downloadVOMSProxy( userDN,
                                           candidateGroup,
                                           limited = False,
                                           requiredTimeLeft = requiredTimeLeft,
                                           requiredVOMSAttribute = vomsAttr,
                                           proxyToConnect = proxyToConnect )
      if lastResult['OK']:
        break
    return lastResult

  def getPayloadProxyFromDIRACGroup( self, userDN, userGroup, requiredTimeLeft, token = False, proxyToConnect = False ):
    """
    Download a limited payload proxy, with VOMS extensions when the group has
    a VOMS attribute assigned, plain otherwise.
    """
    vomsAttr = CS.getVOMSAttributeForGroup( userGroup )
    if vomsAttr:
      return self.downloadVOMSProxy( userDN, userGroup, limited = True, requiredTimeLeft = requiredTimeLeft,
                                     requiredVOMSAttribute = vomsAttr, proxyToConnect = proxyToConnect,
                                     token = token )
    gLogger.verbose( "No voms attribute assigned to group %s when requested payload proxy" % userGroup )
    return self.downloadProxy( userDN, userGroup, limited = True, requiredTimeLeft = requiredTimeLeft,
                               proxyToConnect = proxyToConnect, token = token )

  def getPayloadProxyFromVOMSGroup( self, userDN, vomsAttr, token, requiredTimeLeft, proxyToConnect = False ):
    """
    Download a limited payload proxy with VOMS extensions, using the first
    DIRAC group that carries the given VOMS attribute.
    """
    groups = CS.getGroupsWithVOMSAttribute( vomsAttr )
    if not groups:
      return S_ERROR( "No group found that has %s as voms attrs" % vomsAttr )

    return self.downloadVOMSProxy( userDN,
                                   groups[0],
                                   limited = True,
                                   requiredTimeLeft = requiredTimeLeft,
                                   requiredVOMSAttribute = vomsAttr,
                                   proxyToConnect = proxyToConnect,
                                   token = token )


  def dumpProxyToFile( self, chain, destinationFile = False, requiredTimeLeft = 600 ):
    """
    Dump a proxy to a file. It's cached so multiple calls won't generate extra files
    """
    hashResult = chain.hash()
    if not hashResult[ 'OK' ]:
      return hashResult
    chainHash = hashResult[ 'Value' ]

    if self.__filesCache.exists( chainHash, requiredTimeLeft ):
      cachedPath = self.__filesCache.get( chainHash )
      if cachedPath and os.path.isfile( cachedPath ):
        return S_OK( cachedPath )
      # Stale entry: the file on disk is gone, drop it and dump again
      self.__filesCache.delete( chainHash )

    dumpResult = chain.dumpAllToFile( destinationFile )
    if not dumpResult[ 'OK' ]:
      return dumpResult
    proxyFile = dumpResult[ 'Value' ]
    self.__filesCache.add( chainHash, chain.getRemainingSecs()['Value'], proxyFile )
    return S_OK( proxyFile )

  def deleteGeneratedProxyFile( self, chain ):
    """
    Delete a file generated by a dump

    :param chain: X509Chain whose dumped file entry should be removed

    The files cache is keyed by the chain hash (see dumpProxyToFile, which
    adds entries under cHash), so deleting with the chain object itself could
    never match an entry: compute the hash first and delete that key.
    """
    result = chain.hash()
    if not result[ 'OK' ]:
      return result
    self.__filesCache.delete( result[ 'Value' ] )
    return S_OK()

  def requestToken( self, requesterDN, requesterGroup, numUses = 1 ):
    """
    Request a token with the given number of uses from the ProxyManager service.
    """
    proxyManager = RPCClient( "Framework/ProxyManager", timeout = 120 )
    return proxyManager.generateToken( requesterDN, requesterGroup, numUses )

  def renewProxy( self, proxyToBeRenewed = False, minLifeTime = 3600, newProxyLifeTime = 43200, proxyToConnect = False ):
    """
    Renew a proxy using the ProxyManager

    :param proxyToBeRenewed: proxy to renew
    :param minLifeTime: if proxy life time is less than this, renew. Skip otherwise
    :param newProxyLifeTime: life time of new proxy
    :param proxyToConnect: proxy to use for connecting to the service
    """
    retVal = multiProxyArgument( proxyToBeRenewed )
    # An S_ERROR structure carries no 'Value' key, so failure must be detected
    # through 'OK' (the previous check of retVal['Value'] raised KeyError on
    # error instead of propagating it)
    if not retVal[ 'OK' ]:
      return retVal
    proxyToRenewDict = retVal[ 'Value' ]

    secs = proxyToRenewDict[ 'chain' ].getRemainingSecs()[ 'Value' ]
    if secs > minLifeTime:
      # Still valid long enough: nothing to do, just clean up temp material
      deleteMultiProxy( proxyToRenewDict )
      return S_OK()

    if not proxyToConnect:
      proxyToConnectDict = { 'chain': False, 'tempFile': False }
    else:
      retVal = multiProxyArgument( proxyToConnect )
      if not retVal[ 'OK' ]:
        deleteMultiProxy( proxyToRenewDict )
        return retVal
      proxyToConnectDict = retVal[ 'Value' ]

    # Identify the credentials of the proxy being renewed
    userDN = proxyToRenewDict[ 'chain' ].getIssuerCert()[ 'Value' ].getSubjectDN()[ 'Value' ]
    retVal = proxyToRenewDict[ 'chain' ].getDIRACGroup()
    if not retVal[ 'OK' ]:
      deleteMultiProxy( proxyToRenewDict )
      deleteMultiProxy( proxyToConnectDict )
      return retVal
    userGroup = retVal[ 'Value' ]
    limited = proxyToRenewDict[ 'chain' ].isLimitedProxy()[ 'Value' ]

    # Renew with VOMS extensions if the current proxy carries any
    voms = VOMS()
    retVal = voms.getVOMSAttributes( proxyToRenewDict[ 'chain' ] )
    if not retVal[ 'OK' ]:
      deleteMultiProxy( proxyToRenewDict )
      deleteMultiProxy( proxyToConnectDict )
      return retVal
    vomsAttrs = retVal[ 'Value' ]
    if vomsAttrs:
      retVal = self.downloadVOMSProxy( userDN,
                                       userGroup,
                                       limited = limited,
                                       requiredTimeLeft = newProxyLifeTime,
                                       requiredVOMSAttribute = vomsAttrs[0],
                                       proxyToConnect = proxyToConnectDict[ 'chain' ] )
    else:
      retVal = self.downloadProxy( userDN,
                                   userGroup,
                                   limited = limited,
                                   requiredTimeLeft = newProxyLifeTime,
                                   proxyToConnect = proxyToConnectDict[ 'chain' ] )

    deleteMultiProxy( proxyToRenewDict )
    deleteMultiProxy( proxyToConnectDict )

    if not retVal[ 'OK' ]:
      return retVal

    chain = retVal['Value']

    # If the original proxy came from a real file, overwrite it in place
    if not proxyToRenewDict[ 'tempFile' ]:
      return chain.dumpAllToFile( proxyToRenewDict[ 'file' ] )

    return S_OK( chain )

  def getDBContents( self, condDict = None ):
    """
    Get the contents of the db

    :param condDict: optional selection dictionary forwarded to the service;
                     None (the default) means no selection
    """
    # None sentinel instead of a mutable default argument; an empty dict is
    # built per call, preserving the previous behaviour for callers that
    # relied on the default
    if condDict is None:
      condDict = {}
    rpcClient = RPCClient( "Framework/ProxyManager", timeout = 120 )
    return rpcClient.getContents( condDict, [ [ 'UserDN', 'DESC' ] ], 0, 0 )

  def getVOMSAttributes( self, chain ):
    """
    Return the VOMS attributes carried by a proxy chain.
    """
    vomsHelper = VOMS()
    return vomsHelper.getVOMSAttributes( chain )

  def getUploadedProxyLifeTime( self, DN, group ):
    """
    Get the remaining seconds for an uploaded proxy

    Returns S_OK( 0 ) when no matching record exists or the proxy expired.
    """
    result = self.getDBContents( { 'UserDN' : [ DN ], 'UserGroup' : [ group ] } )
    if not result[ 'OK' ]:
      return result
    data = result[ 'Value' ]
    records = data[ 'Records' ]
    if not records:
      return S_OK( 0 )
    paramNames = list( data[ 'ParameterNames' ] )
    dnIndex = paramNames.index( 'UserDN' )
    groupIndex = paramNames.index( 'UserGroup' )
    expiryIndex = paramNames.index( 'ExpirationTime' )
    for record in records:
      if record[ dnIndex ] == DN and record[ groupIndex ] == group:
        remaining = record[ expiryIndex ] - datetime.datetime.utcnow()
        secondsLeft = remaining.days * 86400 + remaining.seconds
        # Never report a negative lifetime for an already-expired proxy
        return S_OK( max( 0, secondsLeft ) )
    return S_OK( 0 )

  def getUserProxiesInfo( self ):
    """ Get the user proxies uploaded info
    """
    result = RPCClient( "Framework/ProxyManager", timeout = 120 ).getUserProxiesInfo()
    # Strip the DISET stub so callers only see the payload
    result.pop( 'rpcStub', None )
    return result
Esempio n. 18
0
class Cache(object):
    """
    Cache basic class, wrapping a DictCache plus a named LockRing lock.

    WARNING: None of its methods is thread safe. Acquire / Release lock when
    using them !
    """

    def __init__(self, lifeTime, updateFunc):
        """
        Constructor.

        :Parameters:
          **lifeTime** - `int`
            Lifetime of the elements in the cache ( seconds ! )
          **updateFunc** - `function`
            This function MUST return a S_OK | S_ERROR object. In the case of
            the first, its value must be a dictionary.
        """

        self.log = gLogger.getSubLogger(self.__class__.__name__)

        # Stretch the lifetime by a random 0-20% so that thousands of caches
        # created at the same moment do not all expire simultaneously.
        jitter = 0.2 * random.random()
        self.__lifeTime = int(lifeTime * (1 + jitter))
        self.__updateFunc = updateFunc
        # Records served from the cache must stay valid at least 30 seconds.
        self.__validSeconds = 30

        # Backing store plus its named lock
        self.__cache = DictCache()
        self.__cacheLock = LockRing()
        self.__cacheLock.getLock(self.__class__.__name__)

    #.............................................................................
    # internal cache object getter

    def cacheKeys(self):
        """
        Cache keys getter.

        :returns: list with valid keys on the cache
        """
        return self.__cache.getKeys(validSeconds=self.__validSeconds)

    #.............................................................................
    # acquire / release Locks

    def acquireLock(self):
        """
        Acquires Cache lock
        """
        self.__cacheLock.acquire(self.__class__.__name__)

    def releaseLock(self):
        """
        Releases Cache lock
        """
        self.__cacheLock.release(self.__class__.__name__)

    #.............................................................................
    # Cache getters

    def get(self, cacheKeys):
        """
        Gets values for the given keys. Returns S_OK with all results only if
        every key is present and valid; S_ERROR on the first missing one.

        :Parameters:
          **cacheKeys** - `list`
            list of keys to be extracted from the cache

        :return: S_OK | S_ERROR
        """
        collected = {}
        for key in cacheKeys:
            row = self.__cache.get(key, validSeconds=self.__validSeconds)
            if not row:
                return S_ERROR('Cannot get %s' % str(key))
            collected[key] = row
        return S_OK(collected)

    #.............................................................................
    # Cache refreshers

    def refreshCache(self):
        """
        Purges the cache and gets fresh data from the update function.

        :return: S_OK | S_ERROR. If the first, its content is the new cache.
        """
        self.log.verbose('refreshing...')

        self.__cache.purgeAll()

        freshData = self.__updateFunc()
        if not freshData['OK']:
            self.log.error(freshData['Message'])
            return freshData

        updated = self.__updateCache(freshData['Value'])

        self.log.verbose('refreshed')
        return updated

    #.............................................................................
    # Private methods

    def __updateCache(self, newCache):
        """
        Inserts every entry of newCache with a duration of <self.__lifeTime>
        seconds.

        :Parameters:
          **newCache** - `dict`
            dictionary containing a new cache

        :return: S_OK( newCache )
        """
        for key, value in newCache.items():
            self.__cache.add(key, self.__lifeTime, value=value)

        # Insertion into DictCache is assumed not to fail here.
        return S_OK(newCache)
Esempio n. 19
0
class GatewayService(Service):
    """Inherits from Service so it can (and should) be run as a DIRAC service,
    but replaces several of the internal methods
    """

    # Fixed system/component name the gateway runs under
    GATEWAY_NAME = "Framework/Gateway"

    def __init__(self):
        """Initialize like a real service"""
        super(GatewayService, self).__init__(
            {
                "modName": GatewayService.GATEWAY_NAME,
                "loadName": GatewayService.GATEWAY_NAME,
                "standalone": True,
                "moduleObj": sys.modules[DIRAC.Core.DISET.private.GatewayService.GatewayService.__module__],
                "classObj": self.__class__,
            }
        )
        # Cache of client init args keyed by the peer's credential tuple
        self.__delegatedCredentials = DictCache()
        # Max accepted size (bytes) for forwarded file transfers: 100 MiB
        self.__transferBytesLimit = 1024 * 1024 * 100
        # to be resolved
        self._url = None
        self._handler = None
        self._threadPool = None
        self._msgBroker = None
        self._msgForwarder = None

    def initialize(self):
        """This replaces the standard initialize from Service:
        resolves the URL, loads the handler, and sets up the thread pool
        and message broker/forwarder used to relay client messages.
        """
        # Build the URLs
        self._url = self._cfg.getURL()
        if not self._url:
            return S_ERROR("Could not build service URL for %s" % GatewayService.GATEWAY_NAME)
        gLogger.verbose("Service URL is %s" % self._url)
        # Load handler
        result = self._loadHandlerInit()
        if not result["OK"]:
            return result
        self._handler = result["Value"]
        # Discover Handler
        self._threadPool = ThreadPoolExecutor(max(0, self._cfg.getMaxThreads()))

        self._msgBroker = MessageBroker("%sMSB" % GatewayService.GATEWAY_NAME, threadPool=self._threadPool)
        # Relay raw message payloads instead of instantiating message objects
        self._msgBroker.useMessageObjects(False)
        getGlobalMessageBroker().useMessageObjects(False)
        self._msgForwarder = MessageForwarder(self._msgBroker)
        return S_OK()

    def _processInThread(self, clientTransport):
        """Threaded process function: handshake, register the transport,
        receive and validate the proposal, then execute the proposed action.
        Returns None early on any handshake/registration/proposal failure.
        """
        # Handshake
        try:
            clientTransport.handshake()
        except Exception:
            # Failed handshake: drop the connection silently
            return
        # Add to the transport pool
        trid = self._transportPool.add(clientTransport)
        if not trid:
            return
        # Receive and check proposal
        result = self._receiveAndCheckProposal(trid)
        if not result["OK"]:
            self._transportPool.sendAndClose(trid, result)
            return
        proposalTuple = result["Value"]
        # Instantiate handler
        result = self.__getClientInitArgs(trid, proposalTuple)
        if not result["OK"]:
            self._transportPool.sendAndClose(trid, result)
            return
        clientInitArgs = result["Value"]
        # Execute the action
        result = self._processProposal(trid, proposalTuple, clientInitArgs)
        # Close the connection if required
        if result["closeTransport"]:
            self._transportPool.close(trid)
        return result

    def _receiveAndCheckProposal(self, trid):
        """Receive the action proposal tuple from the client transport and
        attach any extra credentials it carries.

        :param trid: transport id in the transport pool
        :return: S_OK( proposalTuple ) | S_ERROR
        """
        clientTransport = self._transportPool.get(trid)
        # Get the peer credentials
        credDict = clientTransport.getConnectingCredentials()
        # Receive the action proposal
        retVal = clientTransport.receiveData(1024)
        if not retVal["OK"]:
            gLogger.error(
                "Invalid action proposal",
                "%s %s" % (self._createIdentityString(credDict, clientTransport), retVal["Message"]),
            )
            return S_ERROR("Invalid action proposal")
        proposalTuple = retVal["Value"]
        gLogger.debug("Received action from client", "/".join(list(proposalTuple[1])))
        # Check if there are extra credentials
        if proposalTuple[2]:
            clientTransport.setExtraCredentials(proposalTuple[2])
        return S_OK(proposalTuple)

    def __getClientInitArgs(self, trid, proposalTuple):
        """Build the keyword arguments used to instantiate forwarding clients
        on behalf of the connecting user, requesting proxy delegation when no
        cached delegated credential is available.

        Returns S_OK() with no value when the peer presented no x509 chain,
        S_OK( clientInitArgs ) otherwise.
        """
        clientTransport = self._transportPool.get(trid)
        # Get the peer credentials
        credDict = clientTransport.getConnectingCredentials()
        if "x509Chain" not in credDict:
            return S_OK()
        # Cache key: who is connecting and with which restrictions
        cKey = (
            credDict["DN"],
            credDict.get("group", False),
            credDict.get("extraCredentials", False),
            credDict["isLimitedProxy"],
        )
        # Serve a cached delegation if still valid for at least 3600 seconds
        dP = self.__delegatedCredentials.get(cKey, 3600)
        idString = self._createIdentityString(credDict, clientTransport)
        if dP:
            gLogger.verbose("Proxy for %s is cached" % idString)
            return S_OK(dP)
        result = self.__requestDelegation(clientTransport, credDict)
        if not result["OK"]:
            gLogger.warn("Could not get proxy for %s: %s" % (idString, result["Message"]))
            return result
        delChain = result["Value"]
        delegatedChain = delChain.dumpAllToString()["Value"]
        secsLeft = delChain.getRemainingSecs()["Value"] - 1
        clientInitArgs = {
            BaseClient.KW_SETUP: proposalTuple[0][1],
            BaseClient.KW_TIMEOUT: 600,
            BaseClient.KW_IGNORE_GATEWAYS: True,
            BaseClient.KW_USE_CERTIFICATES: False,
            BaseClient.KW_PROXY_STRING: delegatedChain,
        }
        if BaseClient.KW_EXTRA_CREDENTIALS in credDict:
            clientInitArgs[BaseClient.KW_EXTRA_CREDENTIALS] = credDict[BaseClient.KW_EXTRA_CREDENTIALS]
        gLogger.warn("Got delegated proxy for %s: %s secs left" % (idString, secsLeft))
        # Cache the init args for the remaining lifetime of the delegation
        self.__delegatedCredentials.add(cKey, secsLeft, clientInitArgs)
        return S_OK(clientInitArgs)

    def __requestDelegation(self, clientTransport, credDict):
        """Ask the connected client to delegate its proxy: send a proxy
        request generated from the peer certificate, then load the signed
        chain the client sends back.

        :return: S_OK( delegatedChain ) | S_ERROR
        """
        peerChain = credDict["x509Chain"]
        retVal = peerChain.getCertInChain()["Value"].generateProxyRequest()
        if not retVal["OK"]:
            return retVal
        delegationRequest = retVal["Value"]
        retVal = delegationRequest.dumpRequest()
        if not retVal["OK"]:
            retVal = S_ERROR("Server Error: Can't generate delegation request")
            clientTransport.sendData(retVal)
            return retVal
        gLogger.info("Sending delegation request for %s" % delegationRequest.getSubjectDN()["Value"])
        clientTransport.sendData(S_OK({"delegate": retVal["Value"]}))
        delegatedCertChain = clientTransport.receiveData()
        delegatedChain = X509Chain(keyObj=delegationRequest.getPKey())
        retVal = delegatedChain.loadChainFromString(delegatedCertChain)
        if not retVal["OK"]:
            retVal = S_ERROR("Error in receiving delegated proxy: %s" % retVal["Message"])
            clientTransport.sendData(retVal)
            return retVal
        return S_OK(delegatedChain)

    # Msg

    def _mbConnect(self, trid, handlerObj=None):
        """Message-broker connect hook: nothing to do on the gateway side."""
        return S_OK()

    def _mbReceivedMsg(self, cliTrid, msgObj):
        """Relay a message received from a client to the forwarder."""
        return self._msgForwarder.msgFromClient(cliTrid, msgObj)

    def _mbDisconnect(self, cliTrid):
        """Notify the forwarder that a client disconnected."""
        self._msgForwarder.cliDisconnect(cliTrid)

    # Execute action

    def _executeAction(self, trid, proposalTuple, clientInitArgs):
        """Dispatch the proposed action (FileTransfer / RPC / Connection) to
        the target service using the delegated client credentials, and send
        the result back over the client transport.
        """
        clientTransport = self._transportPool.get(trid)
        credDict = clientTransport.getConnectingCredentials()
        targetService = proposalTuple[0][0]
        actionType = proposalTuple[1][0]
        actionMethod = proposalTuple[1][1]
        idString = self._createIdentityString(credDict, clientTransport)
        # OOkay! Lets do the magic!
        retVal = clientTransport.receiveData()
        if not retVal["OK"]:
            gLogger.error("Error while receiving file description", retVal["Message"])
            clientTransport.sendData(S_ERROR("Error while receiving file description: %s" % retVal["Message"]))
            return
        if actionType == "FileTransfer":
            gLogger.warn("Received a file transfer action from %s" % idString)
            clientTransport.sendData(S_OK("Accepted"))
            retVal = self.__forwardFileTransferCall(
                targetService, clientInitArgs, actionMethod, retVal["Value"], clientTransport
            )
        elif actionType == "RPC":
            gLogger.info("Forwarding %s/%s action to %s for %s" % (actionType, actionMethod, targetService, idString))
            retVal = self.__forwardRPCCall(targetService, clientInitArgs, actionMethod, retVal["Value"])
        elif actionType == "Connection" and actionMethod == "new":
            gLogger.info("Initiating a messaging connection to %s for %s" % (targetService, idString))
            retVal = self._msgForwarder.addClient(trid, targetService, clientInitArgs, retVal["Value"])
        else:
            gLogger.warn("Received an invalid %s/%s action from %s" % (actionType, actionMethod, idString))
            retVal = S_ERROR("Unknown type of action (%s)" % actionType)
        # TODO: Send back the data?
        if "rpcStub" in retVal:
            retVal.pop("rpcStub")
        clientTransport.sendData(retVal)
        return retVal

    def __forwardRPCCall(self, targetService, clientInitArgs, method, params):
        """Forward an RPC call to the target service. The CS call
        getCompressedDataIfNewer is answered locally from the gateway's own
        configuration data instead of being relayed.
        """
        if targetService == "Configuration/Server":
            if method == "getCompressedDataIfNewer":
                # Relay CS data directly
                serviceVersion = gConfigurationData.getVersion()
                retDict = {"newestVersion": serviceVersion}
                clientVersion = params[0]
                if clientVersion < serviceVersion:
                    retDict["data"] = gConfigurationData.getCompressedData()
                return S_OK(retDict)
        # Default
        rpcClient = RPCClient(targetService, **clientInitArgs)
        methodObj = getattr(rpcClient, method)
        return methodObj(*params)

    def __forwardFileTransferCall(self, targetService, clientInitArgs, method, params, clientTransport):
        """Relay a file transfer between the client and the target service,
        enforcing the configured transfer size limit on uploads.
        """
        transferRelay = TransferRelay(targetService, **clientInitArgs)
        transferRelay.setTransferLimit(self.__transferBytesLimit)
        cliFH = FileHelper(clientTransport)
        # Check file size
        if method.find("ToClient") > -1:
            cliFH.setDirection("send")
        elif method.find("FromClient") > -1:
            cliFH.setDirection("receive")
            # params[2] is presumably the declared upload size — TODO confirm
            if not self.__ftCheckMaxTransferSize(params[2]):
                cliFH.markAsTransferred()
                return S_ERROR("Transfer size is too big")
        # Forward queries
        try:
            relayMethodObject = getattr(transferRelay, "forward%s" % method)
        except Exception:
            return S_ERROR("Cannot forward unknown method %s" % method)
        result = relayMethodObject(cliFH, params)
        return result

    def __ftCheckMaxTransferSize(self, requestedTransferSize):
        """Return True when the requested size is acceptable: no limit set,
        no size declared, or size within the limit.
        """
        if not self.__transferBytesLimit:
            return True
        if not requestedTransferSize:
            return True
        if requestedTransferSize <= self.__transferBytesLimit:
            return True
        return False
Esempio n. 20
0
class RSSCache(object):
    '''
    Cache with purgeThread integrated
    '''

    def __init__(self, lifeTime, updateFunc=None, cacheHistoryLifeTime=None):
        '''
        Constructor

        :param lifeTime: lifetime (seconds) of the cached entries
        :param updateFunc: callable returning S_OK( dict ) | S_ERROR used to
          repopulate the cache; optional (refreshCache fails without it)
        :param cacheHistoryLifeTime: lifetime of the refresh history, in
          hours; defaults to 24
        '''

        self.__lifeTime = lifeTime
        # lifetime of the history on hours
        self.__cacheHistoryLifeTime = cacheHistoryLifeTime or 24
        self.__updateFunc = updateFunc

        # RSSCache
        self.__rssCache = DictCache()
        self.__rssCacheStatus = []  # ( updateTime, refreshResult )
        self.__rssCacheLock = threading.Lock()

        # Create purgeThread
        self.__refreshStop = False
        self.__refreshThread = threading.Thread(
            target=self.__refreshCacheThreadRun)
        self.__refreshThread.setDaemon(True)

    def startRefreshThread(self):
        '''
        Run refresh thread.
        '''
        self.__refreshThread.start()

    def stopRefreshThread(self):
        '''
        Stop refresh thread (takes effect after its current sleep).
        '''
        self.__refreshStop = True

    def isCacheAlive(self):
        '''
        Returns status of the cache refreshing thread
        '''
        # is_alive() works on both Python 2.6+ and 3; isAlive was removed in 3.9
        return S_OK(self.__refreshThread.is_alive())

    def setLifeTime(self, lifeTime):
        '''
        Set cache life time
        '''
        self.__lifeTime = lifeTime

    def setCacheHistoryLifeTime(self, cacheHistoryLifeTime):
        '''
        Set cache history life time (hours)
        '''
        self.__cacheHistoryLifeTime = cacheHistoryLifeTime

    def getCacheKeys(self):
        '''
        List all the keys stored in the cache.
        '''
        with self.__rssCacheLock:
            keys = self.__rssCache.getKeys()
        return S_OK(keys)

    def acquireLock(self):
        '''
        Acquires RSSCache lock
        '''
        self.__rssCacheLock.acquire()

    def releaseLock(self):
        '''
        Releases RSSCache lock
        '''
        self.__rssCacheLock.release()

    def getCacheStatus(self):
        '''
        Return the latest cache status
        '''
        with self.__rssCacheLock:
            if self.__rssCacheStatus:
                res = dict([self.__rssCacheStatus[0]])
            else:
                res = {}
        return S_OK(res)

    def getCacheHistory(self):
        '''
        Return the cache updates history
        '''
        with self.__rssCacheLock:
            res = dict(self.__rssCacheStatus)
        return S_OK(res)

    def get(self, resourceKey):
        '''
        Gets the resource(s) status(es). Every resource can have multiple statuses,
        so in order to speed up things, we store them on the cache as follows:

        { (<resourceName>,<resourceStatusType0>) : whatever0,
          (<resourceName>,<resourceStatusType1>) : whatever1,
        }
        '''
        with self.__rssCacheLock:
            resourceStatus = self.__rssCache.get(resourceKey)

        if resourceStatus:
            return S_OK({resourceKey: resourceStatus})
        return S_ERROR('Cannot get %s' % resourceKey)

    def getBulk(self, resourceKeys):
        '''
        Gets values for resourceKeys in one ATOMIC operation.
        '''
        result = {}
        # The context manager releases the lock on every exit path.
        # Previously an early S_ERROR return left the lock held forever,
        # deadlocking every subsequent cache access.
        with self.__rssCacheLock:
            for resourceKey in resourceKeys:
                resourceRow = self.__rssCache.get(resourceKey)
                if not resourceRow:
                    return S_ERROR('Cannot get %s' % resourceKey)
                result[resourceKey] = resourceRow
        return S_OK(result)

    def resetCache(self):
        '''
        Reset cache.
        '''
        with self.__rssCacheLock:
            self.__rssCache.purgeAll()
        return S_OK()

    def refreshCache(self):
        '''
        Clears the cache and gets its latest version, not Thread safe !
        Acquire a lock before using it ! ( and release it afterwards ! )

        :return: S_OK( number of items added ) | S_ERROR
        '''
        self.__rssCache.purgeAll()

        if self.__updateFunc is None:
            return S_ERROR('RSSCache has no updateFunction')
        newCache = self.__updateFunc()
        if not newCache['OK']:
            return newCache

        return self.__updateCache(newCache['Value'])

    def refreshCacheAndHistory(self):
        '''
        Method that refreshes the cache and updates the history. Not thread safe,
        you must acquire a lock before using it, and release it right after !
        '''
        refreshResult = self.refreshCache()

        now = datetime.datetime.utcnow()

        if self.__rssCacheStatus:
            # Drop the oldest history record once it exceeds the history lifetime
            dateInserted, _message = self.__rssCacheStatus[-1]
            if dateInserted < now - datetime.timedelta(
                    hours=self.__cacheHistoryLifeTime):
                self.__rssCacheStatus.pop()

        self.__rssCacheStatus.insert(0, (now, refreshResult))

    ############################################################################
    # Private methods

    def __updateCache(self, newCache):
        '''
        The new cache must be a dictionary, which should look like:
        { ( <resourceName>,<resourceStatusType0>) : whatever0,
          ( <resourceName>,<resourceStatusType1>) : whatever1,
        }

        :return: S_OK( number of items inserted )
        '''
        itemsCounter = 0

        for cacheKey, cacheValue in newCache.items():
            self.__rssCache.add(cacheKey, self.__lifeTime, value=cacheValue)
            itemsCounter += 1

        return S_OK(itemsCounter)

    def __refreshCacheThreadRun(self):
        '''
        Method that refreshes periodically the cache.
        '''
        while not self.__refreshStop:

            with self.__rssCacheLock:
                self.refreshCacheAndHistory()

            time.sleep(self.__lifeTime)

        self.__refreshStop = False

################################################################################
#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF
Esempio n. 21
0
class Cache( object ):
  """
    Basic cache built on top of a DictCache.

    WARNING: none of the methods below is thread safe on its own; callers
    must wrap them with acquireLock() / releaseLock().
  """

  def __init__( self, lifeTime, updateFunc ):
    """
    Constructor

    :Parameters:
      **lifeTime** - `int`
        lifetime of the cached elements ( seconds ! )
      **updateFunc** - `function`
        callable that MUST return an S_OK | S_ERROR object; on success its
        value must be a dictionary used to repopulate the cache

    """

    # Spread cache expirations by up to 20%, so that thousands of jobs
    # starting together do not all expire their caches at the same moment.
    jitter = 0.2 * random.random()

    self.log = gLogger.getSubLogger( self.__class__.__name__ )

    self.__lifeTime = int( lifeTime * ( 1 + jitter ) )
    self.__updateFunc = updateFunc
    # Records handed out by get() must stay valid for at least 10 seconds.
    self.__validSeconds = 10

    # Cache storage and its named lock
    self.__cache = DictCache()
    self.__cacheLock = LockRing()
    self.__cacheLock.getLock( self.__class__.__name__ )

  #.............................................................................
  # internal cache object getter

  def cacheKeys( self ):
    """
    Cache keys getter

    :returns: list with valid keys on the cache
    """
    return self.__cache.getKeys( validSeconds = self.__validSeconds )

  #.............................................................................
  # acquire / release Locks

  def acquireLock( self ):
    """
    Acquires Cache lock
    """
    self.__cacheLock.acquire( self.__class__.__name__ )

  def releaseLock( self ):
    """
    Releases Cache lock
    """
    self.__cacheLock.release( self.__class__.__name__ )

  #.............................................................................
  # Cache getters

  def get( self, cacheKeys ):
    """
    Fetch the given keys from the cache. Succeeds only if every key is
    present and still valid; fails as soon as one key is missing or stale.

    :Parameters:
      **cacheKeys** - `list`
        list of keys to be extracted from the cache

    :return: S_OK( dict ) | S_ERROR
    """

    collected = {}

    for key in cacheKeys:
      row = self.__cache.get( key, validSeconds = self.__validSeconds )
      if not row:
        self.log.error( str( key ) )
        return S_ERROR( 'Cannot get %s' % str( key ) )
      collected[ key ] = row

    return S_OK( collected )

  #.............................................................................
  # Cache refreshers

  def refreshCache( self ):
    """
    Purges the cache and repopulates it via the update function.

    :return: S_OK | S_ERROR. If the first, its content is the new cache.
    """

    self.log.verbose( 'refreshing...' )
    self.__cache.purgeAll()

    freshData = self.__updateFunc()
    if not freshData[ 'OK' ]:
      self.log.error( freshData[ 'Message' ] )
      return freshData

    refreshed = self.__updateCache( freshData[ 'Value' ] )
    self.log.verbose( 'refreshed' )
    return refreshed

  #.............................................................................
  # Private methods

  def __updateCache( self, newCache ):
    """
    Insert every entry of newCache, each with a lifetime of
    self.__lifeTime seconds.

    :Parameters:
      **newCache** - `dict`
        dictionary containing a new cache

    :return: S_OK wrapping the newCache argument.
    """

    for key, value in newCache.items():
      self.__cache.add( key, self.__lifeTime, value = value )

    # Inserting into DictCache is not expected to fail, so the new cache is
    # simply handed back to the caller.
    return S_OK( newCache )
Esempio n. 22
0
class RSSCache( object ):
  '''
  Cache with purgeThread integrated.

  A DictCache protected by a lock, refreshed every <lifeTime> seconds by a
  background thread which also keeps a history of the refresh results.
  '''

  def __init__( self, lifeTime, updateFunc = None, cacheHistoryLifeTime = None ):
    '''
    Constructor

    :Parameters:
      **lifeTime** - `int`
        lifetime of the cached records ( seconds ), also the refresh period
      **updateFunc** - `function`
        callable returning S_OK | S_ERROR; on success its value must be the
        dictionary used to repopulate the cache
      **cacheHistoryLifeTime** - `int`
        lifetime of the refresh history ( hours ), 24 by default
    '''

    self.__lifeTime             = lifeTime
    # lifetime of the history on hours; plain `x or 24` replaces the
    # obfuscated `( 1 and x ) or 24`, which evaluated to the same thing
    self.__cacheHistoryLifeTime = cacheHistoryLifeTime or 24
    self.__updateFunc           = updateFunc

    # RSSCache
    self.__rssCache       = DictCache()
    self.__rssCacheStatus = [] # ( updateTime, message )
    self.__rssCacheLock   = threading.Lock()

    # Create purgeThread ( started on demand via startRefreshThread )
    self.__refreshStop    = False
    self.__refreshThread  = threading.Thread( target = self.__refreshCacheThreadRun )
    # `daemon` attribute instead of the deprecated setDaemon()
    self.__refreshThread.daemon = True

  def startRefreshThread( self ):
    '''
    Run refresh thread.
    '''
    self.__refreshThread.start()

  def stopRefreshThread( self ):
    '''
    Stop refresh thread ( it exits after its current sleep interval ).
    '''
    self.__refreshStop = True

  def isCacheAlive( self ):
    '''
    Returns status of the cache refreshing thread
    '''
    # is_alive() instead of the camelCase isAlive(), removed in Python 3.9
    return S_OK( self.__refreshThread.is_alive() )

  def setLifeTime( self, lifeTime ):
    '''
    Set cache life time ( seconds )
    '''
    self.__lifeTime = lifeTime

  def setCacheHistoryLifeTime( self, cacheHistoryLifeTime ):
    '''
    Set cache history life time ( hours )
    '''
    self.__cacheHistoryLifeTime = cacheHistoryLifeTime

  def getCacheKeys( self ):
    '''
    List all the keys stored in the cache.
    '''
    with self.__rssCacheLock:
      keys = self.__rssCache.getKeys()

    return S_OK( keys )

  def acquireLock( self ):
    '''
    Acquires RSSCache lock
    '''
    self.__rssCacheLock.acquire()

  def releaseLock( self ):
    '''
    Releases RSSCache lock
    '''
    self.__rssCacheLock.release()

  def getCacheStatus( self ):
    '''
    Return the latest cache status ( most recent history record )
    '''
    with self.__rssCacheLock:
      if self.__rssCacheStatus:
        res = dict( [ self.__rssCacheStatus[ 0 ] ] )
      else:
        res = {}
    return S_OK( res )

  def getCacheHistory( self ):
    '''
    Return the cache updates history
    '''
    with self.__rssCacheLock:
      res = dict( self.__rssCacheStatus )
    return S_OK( res )

  def get( self, resourceKey ):
    '''
    Gets the resource(s) status(es). Every resource can have multiple statuses,
    so in order to speed up things, we store them on the cache as follows::

      { (<resourceName>,<resourceStatusType0>) : whatever0,
        (<resourceName>,<resourceStatusType1>) : whatever1,
      }

    '''

    with self.__rssCacheLock:
      resourceStatus = self.__rssCache.get( resourceKey )

    if resourceStatus:
      return S_OK( { resourceKey : resourceStatus } )
    return S_ERROR( 'Cannot get %s' % resourceKey )

  def getBulk( self, resourceKeys ):
    '''
    Gets values for resourceKeys in one ATOMIC operation.
    '''

    result = {}
    # The `with` block releases the lock on EVERY exit path. The previous
    # implementation released it manually and returned early on a missing
    # key while still holding it, deadlocking all subsequent cache users.
    with self.__rssCacheLock:
      for resourceKey in resourceKeys:

        resourceRow = self.__rssCache.get( resourceKey )
        if not resourceRow:
          return S_ERROR( 'Cannot get %s' % resourceKey )
        result.update( { resourceKey : resourceRow } )

    return S_OK( result )

  def resetCache( self ):
    '''
    Reset cache.
    '''
    with self.__rssCacheLock:
      self.__rssCache.purgeAll()

    return S_OK()

  def refreshCache( self ):
    '''
    Clears the cache and gets its latest version, not Thread safe !
    Acquire a lock before using it ! ( and release it afterwards ! )
    '''

    self.__rssCache.purgeAll()

    if self.__updateFunc is None:
      return S_ERROR( 'RSSCache has no updateFunction' )
    newCache = self.__updateFunc()
    if not newCache[ 'OK' ]:
      return newCache

    return self.__updateCache( newCache[ 'Value' ] )

  def refreshCacheAndHistory( self ):
    '''
    Method that refreshes the cache and updates the history. Not thread safe,
    you must acquire a lock before using it, and release it right after !
    '''

    refreshResult = self.refreshCache()

    now = datetime.datetime.utcnow()

    if self.__rssCacheStatus:
      # Drop the oldest record once it outlives the history lifetime
      dateInserted, _message = self.__rssCacheStatus[ -1 ]
      if dateInserted < now - datetime.timedelta( hours = self.__cacheHistoryLifeTime ):
        self.__rssCacheStatus.pop()

    self.__rssCacheStatus.insert( 0, ( now, refreshResult ) )

################################################################################
# Private methods

  def __updateCache( self, newCache ):
    '''
    The new cache must be a dictionary, which should look like::

      { ( <resourceName>,<resourceStatusType0>) : whatever0,
        ( <resourceName>,<resourceStatusType1>) : whatever1,
      }

    :return: S_OK( number of items inserted )
    '''

    itemsCounter = 0

    for cacheKey, cacheValue in newCache.items():
      self.__rssCache.add( cacheKey, self.__lifeTime, value = cacheValue )
      itemsCounter += 1

    return S_OK( itemsCounter )

  def __refreshCacheThreadRun( self ):
    '''
    Method that refreshes periodically the cache.
    '''

    while not self.__refreshStop:

      self.__rssCacheLock.acquire()
      try:
        self.refreshCacheAndHistory()
      finally:
        # Release the lock even if the refresh raises, otherwise every
        # other cache user would deadlock.
        self.__rssCacheLock.release()

      time.sleep( self.__lifeTime )

    # Reset the flag so the thread could be restarted later
    self.__refreshStop = False
Esempio n. 23
0
class Limiter(object):

  def __init__(self, jobDB=None, opsHelper=None):
    """ Constructor

        :param jobDB: JobDB instance to query; a fresh one is built if None
        :param opsHelper: Operations helper; a fresh one is built if None
    """
    self.__runningLimitSection = "JobScheduling/RunningLimit"
    self.__matchingDelaySection = "JobScheduling/MatchingDelay"
    # Caches for CS-derived limits and computed negative conditions
    self.csDictCache = DictCache()
    self.condCache = DictCache()
    # Per-site DictCache of active matching delays
    self.delayMem = {}

    # Fall back to freshly constructed helpers when none are injected
    self.jobDB = jobDB if jobDB else JobDB()

    self.log = gLogger.getSubLogger("Limiter")

    self.__opsHelper = opsHelper if opsHelper else Operations()

  def getNegativeCond(self):
    """ Get negative condition for ALL sites
    """
    # Serve the cached global condition when still valid
    orCond = self.condCache.get("GLOBAL")
    if orCond:
      return orCond

    negCond = {}

    # Sites with a running limit configured
    result = self.__opsHelper.getSections(self.__runningLimitSection)
    siteList = result['Value'] if result['OK'] else []
    for siteName in siteList:
      condResult = self.__getRunningCondition(siteName)
      if not condResult['OK']:
        continue
      siteCond = condResult['Value']
      if siteCond:
        negCond[siteName] = siteCond

    # Sites with a matching delay configured
    result = self.__opsHelper.getSections(self.__matchingDelaySection)
    siteList = result['Value'] if result['OK'] else []
    for siteName in siteList:
      condResult = self.__getDelayCondition(siteName)
      if not condResult['OK']:
        continue
      siteCond = condResult['Value']
      if not siteCond:
        continue
      if siteName in negCond:
        negCond[siteName] = self.__mergeCond(negCond[siteName], siteCond)
      else:
        negCond[siteName] = siteCond

    # Flatten into a list of site-tagged condition dicts
    orCond = []
    for siteName, siteCond in negCond.items():
      siteCond['Site'] = siteName
      orCond.append(siteCond)

    # Cache the computed global condition for 10 seconds
    self.condCache.add("GLOBAL", 10, orCond)
    return orCond

  def getNegativeCondForSite(self, siteName):
    """ Generate a negative query based on the limits set on the site
    """
    negativeCond = {}

    # Contribution from the running limits, if that check is enabled
    if self.__opsHelper.getValue("JobScheduling/CheckJobLimits", True):
      runResult = self.__getRunningCondition(siteName)
      if runResult['OK']:
        negativeCond = runResult['Value']
      self.log.verbose('Negative conditions for site %s after checking limits are: %s' % (siteName, str(negativeCond)))

    # Contribution from the matching delays, if that check is enabled
    if self.__opsHelper.getValue("JobScheduling/CheckMatchingDelay", True):
      delayResult = self.__getDelayCondition(siteName)
      if delayResult['OK']:
        delayCond = delayResult['Value']
        self.log.verbose('Negative conditions for site %s after delay checking are: %s' % (siteName, str(delayCond)))
        negativeCond = self.__mergeCond(negativeCond, delayCond)

    if negativeCond:
      self.log.info('Negative conditions for site %s are: %s' % (siteName, str(negativeCond)))

    return negativeCond

  def __mergeCond(self, negCond, addCond):
    """ Merge two negative dicts
    """
    # Merge both negative dicts
    for attr in addCond:
      if attr not in negCond:
        negCond[attr] = []
      for value in addCond[attr]:
        if value not in negCond[attr]:
          negCond[attr].append(value)
    return negCond

  def __extractCSData(self, section):
    """ Extract limiting information from the CS in the form:
        { 'JobType' : { 'Merge' : 20, 'MCGen' : 1000 } }
    """
    # Serve the parsed section from the cache when possible
    cached = self.csDictCache.get(section)
    if cached:
      return S_OK(cached)

    result = self.__opsHelper.getSections(section)
    if not result['OK']:
      return result

    sectionLimits = {}
    for attName in result['Value']:
      optResult = self.__opsHelper.getOptionsDict("%s/%s" % (section, attName))
      if not optResult['OK']:
        return optResult
      rawLimits = optResult['Value']
      try:
        # All limit values must be integers
        sectionLimits[attName] = dict((k, int(rawLimits[k])) for k in rawLimits)
      except Exception as excp:
        errMsg = "%s/%s has to contain numbers: %s" % (section, attName, str(excp))
        self.log.error(errMsg)
        return S_ERROR(errMsg)

    # Keep the parsed section for 5 minutes
    self.csDictCache.add(section, 300, sectionLimits)
    return S_OK(sectionLimits)

  def __getRunningCondition(self, siteName):
    """ Get extra conditions allowing site throttling

        :param siteName: site to compute the negative condition for
        :return: S_OK( dict ) mapping attribute name -> list of values whose
                 running limit is reached, e.g. {'JobType': ['Merge']};
                 S_ERROR if the CS or DB query fails
    """
    siteSection = "%s/%s" % (self.__runningLimitSection, siteName)
    result = self.__extractCSData(siteSection)
    if not result['OK']:
      return result
    limitsDict = result['Value']
    # limitsDict is something like { 'JobType' : { 'Merge' : 20, 'MCGen' : 1000 } }
    if not limitsDict:
      return S_OK({})
    # Check if the site exceeding the given limits
    negCond = {}
    for attName in limitsDict:
      if attName not in self.jobDB.jobAttributeNames:
        self.log.error("Attribute %s does not exist. Check the job limits" % attName)
        continue
      # Job counters are cached for 10 seconds to spare the DB
      cK = "Running:%s:%s" % (siteName, attName)
      data = self.condCache.get(cK)
      if not data:
        result = self.jobDB.getCounters(
            'Jobs', [attName], {
                'Site': siteName, 'Status': [
                    'Running', 'Matched', 'Stalled']})
        if not result['OK']:
          return result
        data = result['Value']
        # Flatten [( {attName: value}, count ), ...] into {value: count}
        data = dict([(k[0][attName], k[1]) for k in data])
        self.condCache.add(cK, 10, data)
      for attValue in limitsDict[attName]:
        limit = limitsDict[attName][attValue]
        running = data.get(attValue, 0)
        # Throttle this value once the running count reaches the limit
        if running >= limit:
          self.log.verbose('Job Limit imposed at %s on %s/%s=%d,'
                           ' %d jobs already deployed' % (siteName, attName, attValue, limit, running))
          if attName not in negCond:
            negCond[attName] = []
          negCond[attName].append(attValue)
    # negCond is something like : {'JobType': ['Merge']}
    return S_OK(negCond)

  def updateDelayCounters(self, siteName, jid):
    """ Register matching delays at ``siteName`` for the attributes of job
        ``jid``, as configured in the CS matching-delay section.

        :param siteName: name of the site the job was matched to
        :param jid: job ID whose attributes are checked against the delays
        :return: S_OK() | S_ERROR
    """
    # Get the info from the CS
    siteSection = "%s/%s" % (self.__matchingDelaySection, siteName)
    result = self.__extractCSData(siteSection)
    if not result['OK']:
      return result
    delayDict = result['Value']
    # limitsDict is something like { 'JobType' : { 'Merge' : 20, 'MCGen' : 1000 } }
    if not delayDict:
      return S_OK()
    # Keep only attributes that actually exist in the JobDB schema
    attNames = []
    for attName in delayDict:
      if attName not in self.jobDB.jobAttributeNames:
        self.log.error("Attribute %s does not exist in the JobDB. Please fix it!" % attName)
      else:
        attNames.append(attName)
    result = self.jobDB.getJobAttributes(jid, attNames)
    if not result['OK']:
      self.log.error("While retrieving attributes coming from %s: %s" % (siteSection, result['Message']))
      return result
    atts = result['Value']
    # Create the DictCache if not there
    if siteName not in self.delayMem:
      self.delayMem[siteName] = DictCache()
    # Update the counters
    delayCounter = self.delayMem[siteName]
    for attName in atts:
      attValue = atts[attName]
      if attValue in delayDict[attName]:
        # The cache entry expires after delayTime seconds, which is what
        # implements the delay window read back by __getDelayCondition
        delayTime = delayDict[attName][attValue]
        self.log.notice("Adding delay for %s/%s=%s of %s secs" % (siteName, attName,
                                                                  attValue, delayTime))
        delayCounter.add((attName, attValue), delayTime)
    return S_OK()

  def __getDelayCondition(self, siteName):
    """ Get extra conditions allowing matching delay
    """
    siteCounter = self.delayMem.get(siteName)
    if siteCounter is None:
      return S_OK({})

    # Only non-expired delay entries are returned by getKeys()
    negCond = {}
    for attName, attValue in siteCounter.getKeys():
      negCond.setdefault(attName, []).append(attValue)

    return S_OK(negCond)