Example #1
0
    def __init__(self, lifeTime, updateFunc):
        """Constructor.

        :Parameters:
          **lifeTime** - `int`
            Lifetime of the elements in the cache ( seconds ! )
          **updateFunc** - `function`
            This function MUST return a S_OK | S_ERROR object. In the case of the first,
            its value must be a dictionary.
        """

        # Add up to 20% random jitter to the lifetime so that caches created
        # simultaneously (e.g. by thousands of jobs) do not all expire at once.
        jitter = 0.2 * random.random()
        self.__lifeTime = int(lifeTime * (1 + jitter))

        self.log = gLogger.getSubLogger(self.__class__.__name__)

        self.__updateFunc = updateFunc
        # Records served from the cache must remain valid for at least 30 seconds.
        self.__validSeconds = 30

        # Underlying storage plus a named lock guarding it.
        self.__cache = DictCache()
        self.__cacheLock = LockRing()
        self.__cacheLock.getLock(self.__class__.__name__)
Example #2
0
    def __init__(self, loadDefaultCFG=True):
        """Prepare configuration state; optionally load etc/dirac.cfg and sync."""
        # Locks are enabled unless DIRAC_FEWER_CFG_LOCKS is set to a truthy value.
        fewerLocks = os.environ.get("DIRAC_FEWER_CFG_LOCKS", "no").lower()
        self.__locksEnabled = fewerLocks not in ("y", "yes", "t", "true", "on",
                                                 "1")
        if self.__locksEnabled:
            ring = LockRing()
            self.threadingEvent = ring.getEvent()
            self.threadingEvent.set()
            self.threadingLock = ring.getLock()
            self.runningThreadsNumber = 0

        self.__compressedConfigurationData = None
        self.configurationPath = "/DIRAC/Configuration"
        self.backupsDir = os.path.join(DIRAC.rootPath, "etc", "csbackup")
        self._isService = False
        # Three CFG layers: the local file, the remote service, and their merge.
        self.localCFG = CFG()
        self.remoteCFG = CFG()
        self.mergedCFG = CFG()
        self.remoteServerList = []
        if loadDefaultCFG:
            defaultCFGFile = os.path.join(DIRAC.rootPath, "etc", "dirac.cfg")
            gLogger.debug("dirac.cfg should be at", "%s" % defaultCFGFile)
            if not self.loadFile(defaultCFGFile)["OK"]:
                gLogger.warn("Can't load %s file" % defaultCFGFile)
        self.sync()
Example #3
0
    def __init__(self, deleteFunction=False):
        """Initialize the dict cache.

        If a delete function is specified it will be invoked when deleting
        a cached object.
        """
        # Recursive named lock serialising access to the internal dictionary.
        self.__lock = LockRing()
        self.__lock.getLock(self.__class__.__name__, recursive=True)

        self.__deleteFunction = deleteFunction
        self.__cache = {}
Example #4
0
class Synchronizer:
    """ Class encapsulating a lock
  allowing it to be used as a synchronizing
  decorator making the call thread-safe"""

    def __init__(self, lockName="", recursive=False):
        """
        :param str lockName: name of the lock in the LockRing; a non-empty
            name also enables the debug LOCKING/UNLOCKING printouts
        :param bool recursive: whether the underlying lock is re-entrant
        """
        from DIRAC.Core.Utilities.LockRing import LockRing

        self.__lockName = lockName
        self.__lr = LockRing()
        self.__lock = self.__lr.getLock(lockName, recursive=recursive)

    def __call__(self, funcToCall):
        """Decorate funcToCall so every invocation runs while holding the lock."""

        def lockedFunc(*args, **kwargs):
            try:
                if self.__lockName:
                    # Fixed: this was a Python 2 print statement (a syntax
                    # error under Python 3); the function form emits the
                    # same "LOCKING <name>" output.
                    print("LOCKING", self.__lockName)
                self.__lock.acquire()
                return funcToCall(*args, **kwargs)
            finally:
                if self.__lockName:
                    print("UNLOCKING", self.__lockName)
                self.__lock.release()

        return lockedFunc

    def lock(self):
        """Explicitly acquire the wrapped lock."""
        return self.__lock.acquire()

    def unlock(self):
        """Explicitly release the wrapped lock."""
        return self.__lock.release()
Example #5
0
 def __init__( self, lifeTime, updateFunc ):
   """Constructor.

   :Parameters:
     **lifeTime** - `int`
       Lifetime of the elements in the cache ( seconds ! )
     **updateFunc** - `function`
       This function MUST return a S_OK | S_ERROR object. In the case of the first,
       its value must be a dictionary.
   """

   # Spread cache expirations by adding up to 20% random jitter to the
   # lifetime, so caches created at the same moment do not all expire together.
   jitter = 0.2 * random.random()
   self.__lifeTime = int( lifeTime * ( 1 + jitter ) )

   self.log = gLogger.getSubLogger( self.__class__.__name__ )

   self.__updateFunc = updateFunc
   # Records served from the cache must stay valid for at least 10 seconds.
   self.__validSeconds = 10

   # Internal storage and the named lock protecting it.
   self.__cache = DictCache()
   self.__cacheLock = LockRing()
   self.__cacheLock.getLock( self.__class__.__name__ )
Example #6
0
class Synchronizer(object):
    """ Class encapsulating a lock
  allowing it to be used as a synchronizing
  decorator making the call thread-safe"""

    def __init__(self, lockName="", recursive=False):
        """Create (or fetch) the named lock from the process-wide LockRing."""
        from DIRAC.Core.Utilities.LockRing import LockRing

        self.__lockName = lockName
        self.__lr = LockRing()
        self.__lock = self.__lr.getLock(lockName, recursive=recursive)

    def __call__(self, funcToCall):
        """Return a wrapper running funcToCall under the lock."""

        def wrapper(*args, **kwargs):
            try:
                if self.__lockName:
                    print("LOCKING", self.__lockName)
                self.__lock.acquire()
                return funcToCall(*args, **kwargs)
            finally:
                if self.__lockName:
                    print("UNLOCKING", self.__lockName)
                self.__lock.release()

        return wrapper

    def lock(self):
        """Explicitly acquire the underlying lock."""
        return self.__lock.acquire()

    def unlock(self):
        """Explicitly release the underlying lock."""
        return self.__lock.release()
Example #7
0
 def lock(self):
     """ Lock to assure thread-safe access to the internal connection storage.

     Created lazily on first use.
 """
     if self.__lock:
         return self.__lock
     self.__lock = LockRing().getLock(self.__class__.__name__,
                                      recursive=True)
     return self.__lock
Example #8
0
    def __init__(self, maxReads=10):
        """
        :param int maxReads: maximum number of concurrent readers allowed
        """
        from DIRAC.Core.Utilities.LockRing import LockRing

        # Writer lock plus a counting semaphore bounding simultaneous readers.
        self.__lr = LockRing()
        self.__lock = self.__lr.getLock()
        self.__semaphore = threading.Semaphore(maxReads)
        self.__maxReads = maxReads
Example #9
0
def _putProxy(userDN=None,
              userName=None,
              userGroup=None,
              vomsFlag=None,
              proxyFilePath=None,
              executionLockFlag=False):
    """Download proxy, place in a file and populate X509_USER_PROXY environment variable.

    Parameters like `userProxy` or `executeWithUserProxy`.
    :returns: Tuple of originalUserProxy, useServerCertificate, executionLock
    """
    # Resolve the DN(s) to use: the one given, or every DN of userName.
    if userDN:
        userDNs = [userDN]
    else:
        dnResult = getDNForUsername(userName)
        if not dnResult["OK"]:
            return dnResult
        userDNs = dnResult["Value"]  # a same user may have more than one DN

    vomsAttr = getVOMSAttributeForGroup(userGroup) if vomsFlag else ""

    proxyResult = getProxy(userDNs, userGroup, vomsAttr, proxyFilePath)
    if not proxyResult["OK"]:
        return proxyResult

    # Optionally serialise proxy usage across threads.
    executionLock = None
    if executionLockFlag:
        executionLock = LockRing().getLock("_UseUserProxy_", recursive=True)
        executionLock.acquire()

    # Point X509_USER_PROXY at the downloaded proxy, remembering the old value.
    originalUserProxy = os.environ.get("X509_USER_PROXY")
    os.environ["X509_USER_PROXY"] = proxyResult["Value"]

    # Check if the caller is executing with the host certificate
    useServerCertificate = gConfig.useServerCertificate()
    if useServerCertificate:
        gConfigurationData.setOptionInCFG(
            "/DIRAC/Security/UseServerCertificate", "false")

    return S_OK((originalUserProxy, useServerCertificate, executionLock))
Example #10
0
 def __init__( self, deleteFunction = False ):
   """Initialize the dict cache.

   If a delete function is specified it will be invoked when deleting a
   cached object.
   """
   # Named recursive lock guarding the internal dictionary.
   self.__lock = LockRing()
   self.__lock.getLock( self.__class__.__name__, recursive = True )

   self.__deleteFunction = deleteFunction
   self.__cache = {}
Example #11
0
class ThreadSafeSSLObject:
    """Proxy that serialises method calls on a wrapped object behind a
    single lock shared by every instance of this class."""

    cLock = LockRing().getLock()

    def __init__(self, object):
        # NOTE: the parameter name shadows the builtin `object`; kept for
        # backward compatibility of the signature.
        self.cObject = object

    def __getattr__(self, name):
        attr = getattr(self.cObject, name)
        # Plain attributes pass straight through; callables are wrapped so
        # they execute under cLock.
        if not callable(attr):
            return attr
        return _MagicMethod(self.cLock, attr, name)
Example #12
0
 def __init__( self, loadDefaultCFG = True ):
   """Set up threading primitives and CFG layers; optionally load etc/dirac.cfg."""
   ring = LockRing()
   self.threadingEvent = ring.getEvent()
   self.threadingEvent.set()
   self.threadingLock = ring.getLock()
   self.runningThreadsNumber = 0

   self.compressedConfigurationData = ""
   self.configurationPath = "/DIRAC/Configuration"
   self.backupsDir = os.path.join( DIRAC.rootPath, "etc", "csbackup" )
   self._isService = False
   # Local, remote and merged configuration layers.
   self.localCFG = CFG()
   self.remoteCFG = CFG()
   self.mergedCFG = CFG()
   self.remoteServerList = []
   if loadDefaultCFG:
     defaultCFGFile = os.path.join( DIRAC.rootPath, "etc", "dirac.cfg" )
     gLogger.debug( "dirac.cfg should be at", "%s" % defaultCFGFile )
     if not self.loadFile( defaultCFGFile )[ 'OK' ]:
       gLogger.warn( "Can't load %s file" % defaultCFGFile )
   self.sync()
Example #13
0
 def __init__( self, loadDefaultCFG = True ):
   """Constructor: prepare locks/events, empty CFG objects, then sync."""
   lockRing = LockRing()
   self.threadingEvent = lockRing.getEvent()
   self.threadingEvent.set()
   self.threadingLock = lockRing.getLock()
   self.runningThreadsNumber = 0
   self.compressedConfigurationData = ""
   self.configurationPath = "/DIRAC/Configuration"
   self.backupsDir = os.path.join( DIRAC.rootPath, "etc", "csbackup" )
   self._isService = False
   self.localCFG = CFG()
   self.remoteCFG = CFG()
   self.mergedCFG = CFG()
   self.remoteServerList = []
   if loadDefaultCFG:
     # Best effort: a missing dirac.cfg only produces a warning.
     defaultCFGFile = os.path.join( DIRAC.rootPath, "etc", "dirac.cfg" )
     gLogger.debug( "dirac.cfg should be at", "%s" % defaultCFGFile )
     loadResult = self.loadFile( defaultCFGFile )
     if not loadResult[ 'OK' ]:
       gLogger.warn( "Can't load %s file" % defaultCFGFile )
   self.sync()
Example #14
0
    def wrapped_fcn(*args, **kwargs):
        """Run ``fcn`` while holding the _UseUserProxy_ lock, with the server
        certificate setting temporarily switched off if it was in use."""
        # Serialise proxy usage across threads.
        executionLock = LockRing().getLock("_UseUserProxy_", recursive=True)
        executionLock.acquire()

        # Remember whether the caller runs with the host certificate so the
        # setting can be restored afterwards.
        useServerCertificate = gConfig.useServerCertificate()
        if useServerCertificate:
            gConfigurationData.setOptionInCFG(
                "/DIRAC/Security/UseServerCertificate", "false")

        try:
            return fcn(*args, **kwargs)
        except Exception as lException:  # pylint: disable=broad-except
            exceptType = lException.__class__.__name__
            value = ",".join(str(arg) for arg in lException.args)
            return S_ERROR("Exception - %s: %s" % (exceptType, value))
        finally:
            if useServerCertificate:
                # Restore the default host certificate usage
                gConfigurationData.setOptionInCFG(
                    "/DIRAC/Security/UseServerCertificate", "true")
            # release the lock
            executionLock.release()
Example #15
0
    def __init__(self, systemInstance="Default"):
        """c'tor

        :param self: self reference
        :param str systemInstance: ???
        """
        DB.__init__(self, "FTSDB", "DataManagement/FTSDB")
        # Private lock used when generating new IDs.
        self.getIdLock = LockRing().getLock("FTSDBLock")
        # Maximum number of reschedule attempts.
        self.maxAttempt = 100
Example #16
0
    def lock(self):
        """ Return the lock.
        In practice, if the cache is shared among threads, it is a LockRing.
        Otherwise, it is just a mock object.
    """
        if self.__lock:
            return self.__lock

        # Lazily pick the real lock or the mock depending on thread-locality.
        if self.__threadLocal:
            self.__lock = MockLockRing()
        else:
            self.__lock = LockRing().getLock(self.__class__.__name__,
                                             recursive=True)
        return self.__lock
Example #17
0
  def wrapped_fcn( *args, **kwargs ):
    """Execute ``fcn`` under the _UseUserProxy_ lock, temporarily disabling
    the server certificate if it was enabled."""
    executionLock = LockRing().getLock( '_UseUserProxy_', recursive = True )
    executionLock.acquire()

    # Remember whether the host certificate is in use so it can be restored.
    useServerCertificate = gConfig.useServerCertificate()
    if useServerCertificate:
      gConfigurationData.setOptionInCFG( '/DIRAC/Security/UseServerCertificate', 'false' )

    try:
      return fcn( *args, **kwargs )
    except Exception as lException:  # pylint: disable=broad-except
      excName = lException.__class__.__name__
      excValue = ','.join( str( arg ) for arg in lException.args )
      return S_ERROR( "Exception - %s: %s" % ( excName, excValue ) )
    finally:
      if useServerCertificate:
        # Put the host certificate setting back the way it was.
        gConfigurationData.setOptionInCFG( '/DIRAC/Security/UseServerCertificate', 'true' )
      # release the lock
      executionLock.release()
Example #18
0
def _putProxy(userDN=None, userName=None, userGroup=None, vomsFlag=None, proxyFilePath=None, executionLockFlag=False):
  """Download proxy, place in a file and populate X509_USER_PROXY environment variable.

  Parameters like `userProxy` or `executeWithUserProxy`.
  :returns: Tuple of originalUserProxy, useServerCertificate, executionLock
  """
  # Figure out which DN(s) to request the proxy for.
  if not userDN:
    result = getDNForUsername(userName)
    if not result['OK']:
      return result
    userDNs = result['Value']  # a same user may have more than one DN
  else:
    userDNs = [userDN]

  vomsAttr = getVOMSAttributeForGroup(userGroup) if vomsFlag else ''

  result = getProxy(userDNs, userGroup, vomsAttr, proxyFilePath)
  if not result['OK']:
    return result

  # Optionally serialise proxy usage across threads.
  executionLock = None
  if executionLockFlag:
    executionLock = LockRing().getLock('_UseUserProxy_', recursive=True)
    executionLock.acquire()

  # Point X509_USER_PROXY at the downloaded proxy, remembering the old value.
  originalUserProxy = os.environ.get('X509_USER_PROXY')
  os.environ['X509_USER_PROXY'] = result['Value']

  # Check if the caller is executing with the host certificate
  useServerCertificate = gConfig.useServerCertificate()
  if useServerCertificate:
    gConfigurationData.setOptionInCFG('/DIRAC/Security/UseServerCertificate', 'false')

  return S_OK((originalUserProxy, useServerCertificate, executionLock))
Example #19
0
class SocketInfo:
  """Per-connection SSL state for DISET sockets (legacy GSI/pyGSI based).

  Wraps an ``infoDict`` describing the connection (client/server mode,
  hostname, timeout, credential options, ...), builds the matching SSL
  context, performs the handshake and gathers the peer's credentials.

  NOTE(review): this code predates Python 3 — e.g. it uses the ``file``
  builtin below — confirm the target interpreter before reuse.
  """

  # Class-level cache of (CA certs, CRLs), refreshed at most every 900 s,
  # guarded by a shared lock.
  __cachedCAsCRLs = False
  __cachedCAsCRLsLastLoaded = 0
  __cachedCAsCRLsLoadLock = LockRing().getLock()

  def __init__(self, infoDict, sslContext=None):
    """
    :param dict infoDict: connection description ('clientMode', 'hostname',
        'timeout', credential options, ...)
    :param sslContext: pre-built SSL context to reuse; if None, one is
        generated according to infoDict
    :raises Exception: if context generation fails
    """
    self.__retry = 0
    self.infoDict = infoDict
    if sslContext:
      self.sslContext = sslContext
    else:
      # Pick the context source: host certs, an in-memory proxy string,
      # a proxy file (client side), or the server certificate setup.
      if self.infoDict['clientMode']:
        if 'useCertificates' in self.infoDict and self.infoDict['useCertificates']:
          retVal = self.__generateContextWithCerts()
        elif 'proxyString' in self.infoDict:
          retVal = self.__generateContextWithProxyString()
        else:
          retVal = self.__generateContextWithProxy()
      else:
        retVal = self.__generateServerContext()
      if not retVal['OK']:
        raise Exception(retVal['Message'])

  def __getValue(self, optName, default):
    """Return infoDict[optName] if present, otherwise *default*."""
    if optName not in self.infoDict:
      return default
    return self.infoDict[optName]

  def setLocalCredentialsLocation(self, credTuple):
    """Store the (cert, key) or (proxy, proxy) location tuple for this side."""
    self.infoDict['localCredentialsLocation'] = credTuple

  def getLocalCredentialsLocation(self):
    """Return the tuple stored by setLocalCredentialsLocation."""
    return self.infoDict['localCredentialsLocation']

  def gatherPeerCredentials(self):
    """Extract the peer identity from the SSL socket's certificate chain.

    :returns: dict with DN, CN, x509Chain, isProxy, isLimitedProxy and,
        when present in the chain, the DIRAC group; also stored in
        infoDict['peerCredentials']
    """
    certList = []
    certList = self.sslSocket.get_peer_certificate_chain()
    # Servers don't receive the whole chain, the last cert comes alone
    if not self.infoDict['clientMode']:
      certList.insert(0, self.sslSocket.get_peer_certificate())
    peerChain = X509Chain(certList=certList)
    isProxyChain = peerChain.isProxy()['Value']
    isLimitedProxyChain = peerChain.isLimitedProxy()['Value']
    # For proxies the identity is taken from the issuer (or, for PUSP,
    # the next-to-last cert); otherwise from the first cert in the chain.
    if isProxyChain:
      if peerChain.isPUSP()['Value']:
        identitySubject = peerChain.getCertInChain(
            -2)['Value'].getSubjectNameObject()['Value']
      else:
        identitySubject = peerChain.getIssuerCert(
        )['Value'].getSubjectNameObject()['Value']
    else:
      identitySubject = peerChain.getCertInChain(
          0)['Value'].getSubjectNameObject()['Value']
    credDict = {}
    credDict = {'DN': identitySubject.one_line(),
                'CN': identitySubject.commonName,
                'x509Chain': peerChain,
                'isProxy': isProxyChain,
                'isLimitedProxy': isLimitedProxyChain}
    diracGroup = peerChain.getDIRACGroup()
    if diracGroup['OK'] and diracGroup['Value']:
      credDict['group'] = diracGroup['Value']
    self.infoDict['peerCredentials'] = credDict
    return credDict

  def setSSLSocket(self, sslSocket):
    """Attach the SSL socket this info object describes."""
    self.sslSocket = sslSocket

  def getSSLSocket(self):
    """Return the attached SSL socket."""
    return self.sslSocket

  def getSSLContext(self):
    """Return the SSL context in use."""
    return self.sslContext

  def clone(self):
    """Return S_OK with a copy of this SocketInfo (shared sslContext),
    or S_ERROR if construction fails."""
    try:
      return S_OK(SocketInfo(dict(self.infoDict), self.sslContext))
    except Exception as e:
      return S_ERROR(str(e))

  def verifyCallback(self, *args, **kwargs):
    """Dispatch certificate verification to the client or server callback."""
    # gLogger.debug( "verify Callback %s" % str( args ) )
    if self.infoDict['clientMode']:
      return self._clientCallback(*args, **kwargs)
    else:
      return self._serverCallback(*args, **kwargs)

  def __isSameHost(self, hostCN, hostConn):
    """ Guess if it is the same host or not
    """
    hostCN_m = hostCN
    # Strip a "service/" style prefix from the CN if present.
    if '/' in hostCN:
      hostCN_m = hostCN.split('/')[1]
    if hostCN_m == hostConn:
      return True
    result = checkHostsMatch(hostCN_m, hostConn)
    if not result['OK']:
      return False
    return result['Value']

  def _clientCallback(self, conn, cert, errnum, depth, ok):
    """Client-side verify callback: at depth 0, warn if the server CN does
    not match the hostname we connected to."""
    # This obviously has to be updated
    if depth == 0 and ok == 1:
      hostnameCN = cert.get_subject().commonName
      # if hostnameCN in ( self.infoDict[ 'hostname' ], "host/%s" % self.infoDict[ 'hostname' ]  ):
      if self.__isSameHost(hostnameCN, self.infoDict['hostname']):
        return 1
      else:
        gLogger.warn("Server is not who it's supposed to be",
                     "Connecting to %s and it's %s" % (self.infoDict['hostname'], hostnameCN))
        return ok
    return ok

  def _serverCallback(self, conn, cert, errnum, depth, ok):
    """Server-side verify callback: accept whatever OpenSSL decided."""
    return ok

  def __getCAStore(self):
    """Build an X509 store with the trusted CAs and CRLs.

    The CA/CRL lists are cached at class level (refreshed at most every
    900 s) under __cachedCAsCRLsLoadLock. Returns S_OK(caStore) / S_ERROR.
    """
    SocketInfo.__cachedCAsCRLsLoadLock.acquire()
    try:
      if not SocketInfo.__cachedCAsCRLs or time.time() - SocketInfo.__cachedCAsCRLsLastLoaded > 900:
        # Need to generate the CA Store
        casDict = {}
        crlsDict = {}
        casPath = Locations.getCAsLocation()
        if not casPath:
          return S_ERROR("No valid CAs location found")
        gLogger.debug("CAs location is %s" % casPath)
        casFound = 0
        crlsFound = 0

        # NOTE(review): this class attribute is written but the store
        # returned below is a freshly built local one — looks unused.
        SocketInfo.__caStore = GSI.crypto.X509Store()
        for fileName in os.listdir(casPath):
          filePath = os.path.join(casPath, fileName)
          if not os.path.isfile(filePath):
            continue
          # NOTE(review): 'file' is the Python 2 builtin (removed in
          # Python 3); use open() when porting.
          fObj = file(filePath, "rb")
          pemData = fObj.read()
          fObj.close()
          # Try to load CA Cert
          try:
            caCert = GSI.crypto.load_certificate(GSI.crypto.FILETYPE_PEM, pemData)
            if caCert.has_expired():
              continue
            caID = (caCert.get_subject().one_line(),
                    caCert.get_issuer().one_line())
            caNotAfter = caCert.get_not_after()
            # Keep only the most recent cert per (subject, issuer) pair.
            if caID not in casDict:
              casDict[caID] = (caNotAfter, caCert)
              casFound += 1
            else:
              if casDict[caID][0] < caNotAfter:
                casDict[caID] = (caNotAfter, caCert)
            continue
          except BaseException:
            # Only *.0 files are expected to be CA certs; log those failures.
            if fileName.find(".0") == len(fileName) - 2:
              gLogger.exception("LOADING %s" % filePath)
          if 'IgnoreCRLs' not in self.infoDict or not self.infoDict['IgnoreCRLs']:
            # Try to load CRL
            try:
              crl = GSI.crypto.load_crl(GSI.crypto.FILETYPE_PEM, pemData)
              if crl.has_expired():
                continue
              crlID = crl.get_issuer().one_line()
              crlsDict[crlID] = crl
              crlsFound += 1
              continue
            except Exception as e:
              # Only *.r0 files are expected to be CRLs; log those failures.
              if fileName.find(".r0") == len(fileName) - 2:
                gLogger.exception("LOADING %s ,Exception: %s" %
                                  (filePath, str(e)))

        gLogger.debug("Loaded %s CAs [%s CRLs]" % (casFound, crlsFound))
        SocketInfo.__cachedCAsCRLs = ([casDict[k][1] for k in casDict],
                                      [crlsDict[k] for k in crlsDict])
        SocketInfo.__cachedCAsCRLsLastLoaded = time.time()
    except BaseException:
      gLogger.exception("Failed to init CA store")
    finally:
      SocketInfo.__cachedCAsCRLsLoadLock.release()
    # Generate CA Store
    caStore = GSI.crypto.X509Store()
    caList = SocketInfo.__cachedCAsCRLs[0]
    for caCert in caList:
      caStore.add_cert(caCert)
    crlList = SocketInfo.__cachedCAsCRLs[1]
    for crl in crlList:
      caStore.add_crl(crl)
    return S_OK(caStore)

  def __createContext(self):
    """Create self.sslContext from infoDict options (SSL method, ciphers,
    verification mode, CA store). Returns S_OK / S_ERROR."""
    clientContext = self.__getValue('clientMode', False)
    # Initialize context
    contextOptions = GSI.SSL.OP_ALL
    if clientContext:
      methodSuffix = "CLIENT_METHOD"
    else:
      methodSuffix = "SERVER_METHOD"
      contextOptions |= GSI.SSL.OP_NO_SSLv2 | GSI.SSL.OP_NO_SSLv3
    if 'sslMethod' in self.infoDict:
      methodName = "%s_%s" % (self.infoDict['sslMethod'], methodSuffix)
    else:
      methodName = "TLSv1_%s" % (methodSuffix)
    try:
      method = getattr(GSI.SSL, methodName)
    except BaseException:
      return S_ERROR("SSL method %s is not valid" % self.infoDict['sslMethod'])
    self.sslContext = GSI.SSL.Context(method)
    self.sslContext.set_cipher_list(
        self.infoDict.get('sslCiphers', DEFAULT_SSL_CIPHERS))
    if contextOptions:
      self.sslContext.set_options(contextOptions)
    # self.sslContext.set_read_ahead( 1 )
    # Enable GSI?
    gsiEnable = False
    if not clientContext or self.__getValue('gsiEnable', False):
      gsiEnable = True
    # DO CA Checks?
    if not self.__getValue('skipCACheck', False):
      self.sslContext.set_verify(GSI.SSL.VERIFY_PEER | GSI.SSL.VERIFY_FAIL_IF_NO_PEER_CERT,
                                 None, gsiEnable)  # Demand a certificate
      result = self.__getCAStore()
      if not result['OK']:
        return result
      caStore = result['Value']
      self.sslContext.set_cert_store(caStore)
    else:
      self.sslContext.set_verify(
          GSI.SSL.VERIFY_NONE, None, gsiEnable)  # Demand a certificate
    return S_OK()

  def __generateContextWithCerts(self):
    """Build a context using the host certificate and key. S_OK / S_ERROR."""
    certKeyTuple = Locations.getHostCertificateAndKeyLocation()
    if not certKeyTuple:
      return S_ERROR("No valid certificate or key found")
    self.setLocalCredentialsLocation(certKeyTuple)
    gLogger.debug("Using certificate %s\nUsing key %s" % certKeyTuple)
    retVal = self.__createContext()
    if not retVal['OK']:
      return retVal
    # Verify depth to 20 to ensure accepting proxies of proxies of proxies....
    self.sslContext.set_verify_depth(VERIFY_DEPTH)
    self.sslContext.use_certificate_chain_file(certKeyTuple[0])
    self.sslContext.use_privatekey_file(certKeyTuple[1])
    return S_OK()

  def __generateContextWithProxy(self):
    """Build a context using a proxy file (explicit or discovered). S_OK / S_ERROR."""
    if 'proxyLocation' in self.infoDict:
      proxyPath = self.infoDict['proxyLocation']
      if not os.path.isfile(proxyPath):
        return S_ERROR("Defined proxy is not a file")
    else:
      proxyPath = Locations.getProxyLocation()
      if not proxyPath:
        return S_ERROR("No valid proxy found")
    self.setLocalCredentialsLocation((proxyPath, proxyPath))
    gLogger.debug("Using proxy %s" % proxyPath)
    retVal = self.__createContext()
    if not retVal['OK']:
      return retVal
    self.sslContext.use_certificate_chain_file(proxyPath)
    self.sslContext.use_privatekey_file(proxyPath)
    return S_OK()

  def __generateContextWithProxyString(self):
    """Build a context from an in-memory proxy string. S_OK / S_ERROR."""
    proxyString = self.infoDict['proxyString']
    self.setLocalCredentialsLocation((proxyString, proxyString))
    gLogger.debug("Using string proxy")
    retVal = self.__createContext()
    if not retVal['OK']:
      return retVal
    self.sslContext.use_certificate_chain_string(proxyString)
    self.sslContext.use_privatekey_string(proxyString)
    return S_OK()

  def __generateServerContext(self):
    """Build the server-side context (host certs + session settings). S_OK / S_ERROR."""
    retVal = self.__generateContextWithCerts()
    if not retVal['OK']:
      return retVal
    self.sslContext.set_session_id("DISETConnection%s" % str(time.time()))
    # self.sslContext.get_cert_store().set_flags( GSI.crypto.X509_CRL_CHECK )
    if 'SSLSessionTimeout' in self.infoDict:
      timeout = int(self.infoDict['SSLSessionTimeout'])
      gLogger.debug("Setting session timeout to %s" % timeout)
      self.sslContext.set_session_timeout(timeout)
    return S_OK()

  def doClientHandshake(self):
    """Run the handshake with this side acting as SSL client."""
    self.sslSocket.set_connect_state()
    return self.__sslHandshake()

  def doServerHandshake(self):
    """Run the handshake with this side acting as SSL server."""
    self.sslSocket.set_accept_state()
    return self.__sslHandshake()

  # @gSynchro
  def __sslHandshake(self):
    """
      Do the SSL Handshake

      :return: S_ERROR / S_OK with dictionary of user credentials
    """
    # NOTE(review): on SSL errors the handshake retries itself recursively
    # up to 3 times via self.__retry; the timeout is re-checked each loop
    # iteration but NOT across recursive retries.

    start = time.time()
    timeout = self.infoDict['timeout']
    while True:
      if timeout:
        if time.time() - start > timeout:
          return S_ERROR("Handshake timeout exceeded")
      try:
        self.sslSocket.do_handshake()
        break
      except GSI.SSL.WantReadError:
        time.sleep(0.001)
      except GSI.SSL.WantWriteError:
        time.sleep(0.001)
      except GSI.SSL.Error as v:
        if self.__retry < 3:
          self.__retry += 1
          return self.__sslHandshake()
        else:
          # gLogger.warn( "Error while handshaking", "\n".join( [ stError[2] for stError in v.args[0] ] ) )
          gLogger.warn("Error while handshaking", v)
          return S_ERROR("Error while handshaking")
      except Exception as v:
        gLogger.warn("Error while handshaking", v)
        if self.__retry < 3:
          self.__retry += 1
          return self.__sslHandshake()
        else:
          # gLogger.warn( "Error while handshaking", "\n".join( [ stError[2] for stError in v.args[0] ] ) )
          gLogger.warn("Error while handshaking", v)
          return S_ERROR("Error while handshaking")

    credentialsDict = self.gatherPeerCredentials()
    if self.infoDict['clientMode']:
      hostnameCN = credentialsDict['CN']
      # if hostnameCN.split("/")[-1] != self.infoDict[ 'hostname' ]:
      if not self.__isSameHost(hostnameCN, self.infoDict['hostname']):
        gLogger.warn("Server is not who it's supposed to be",
                     "Connecting to %s and it's %s" % (self.infoDict['hostname'], hostnameCN))
    gLogger.debug("", "Authenticated peer (%s)" % credentialsDict['DN'])
    return S_OK(credentialsDict)
Example #20
0
 def activitiesLock(self):
     """Lazily create and return the lock guarding activity registration."""
     if self.__activitiesLock:
         return self.__activitiesLock
     self.__activitiesLock = LockRing().getLock("activityLock")
     return self.__activitiesLock
Example #21
0
 def flushingLock(self):
     """Lazily create and return the lock guarding flush operations."""
     if self.__flushingLock:
         return self.__flushingLock
     self.__flushingLock = LockRing().getLock("flushingLock")
     return self.__flushingLock
Example #22
0
class Cache(object):
  """
  Cache basic class.

  WARNING: None of its methods is thread safe. Acquire / Release lock when
  using them !
  """

  def __init__(self, lifeTime, updateFunc):
    """
    Constructor.

    :Parameters:
      **lifeTime** - `int`
        Lifetime of the elements in the cache ( seconds ! )
      **updateFunc** - `function`
        This function MUST return a S_OK | S_ERROR object. In the case of the first,
        its value must be a dictionary.
    """
    # Spread expirations by up to 20% of the lifetime so that many caches
    # created at the same moment do not all expire simultaneously.
    bias = 0.2 * random.random()

    self.log = gLogger.getSubLogger(self.__class__.__name__)

    self.__lifeTime = int(lifeTime * (1 + bias))
    self.__updateFunc = updateFunc
    # Records served from the cache must remain valid for at least 10 seconds.
    self.__validSeconds = 10

    # internal cache store and its named lock
    self.__cache = DictCache()
    self.__cacheLock = LockRing()
    self.__cacheLock.getLock(self.__class__.__name__)

  # ............................................................................
  # internal cache object getter

  def cacheKeys(self):
    """
    Cache keys getter.

    :returns: list with valid keys on the cache
    """
    return self.__cache.getKeys(validSeconds=self.__validSeconds)

  # ............................................................................
  # acquire / release Locks

  def acquireLock(self):
    """
    Acquires Cache lock.
    """
    self.__cacheLock.acquire(self.__class__.__name__)

  def releaseLock(self):
    """
    Releases Cache lock.
    """
    self.__cacheLock.release(self.__class__.__name__)

  # ............................................................................
  # Cache getters

  def get(self, cacheKeys):
    """
    Fetch the given keys from the cache. Succeeds only if every key is both
    present and still valid; otherwise an S_ERROR is returned.

    :Parameters:
      **cacheKeys** - `list`
        list of keys to be extracted from the cache

    :return: S_OK | S_ERROR
    """
    found = {}
    for key in cacheKeys:
      row = self.__cache.get(key, validSeconds=self.__validSeconds)
      # A falsy row means the key is missing or its entry expired.
      if not row:
        self.log.error(str(key))
        return S_ERROR('Cannot get %s' % str(key))
      found[key] = row
    return S_OK(found)

  # ............................................................................
  # Cache refreshers

  def refreshCache(self):
    """
    Purges the cache and gets fresh data from the update function.

    :return: S_OK | S_ERROR. If the first, its content is the new cache.
    """
    self.log.verbose('refreshing...')

    self.__cache.purgeAll()

    freshData = self.__updateFunc()
    if not freshData['OK']:
      self.log.error(freshData['Message'])
      return freshData

    result = self.__updateCache(freshData['Value'])

    self.log.verbose('refreshed')

    return result

  # ............................................................................
  # Private methods

  def __updateCache(self, newCache):
    """
    Store the given dictionary into the internal cache, giving every entry a
    duration of <self.__lifeTime> seconds.

    :Parameters:
      **newCache** - `dict`
        dictionary containing a new cache

    :return: S_OK wrapping the newCache argument.
    """
    for key, value in newCache.items():
      self.__cache.add(key, self.__lifeTime, value=value)

    # We assume nothing fails while inserting into the cache; there is no
    # apparent reason to suspect that piece of code.
    return S_OK(newCache)
Example #23
0
    def run(self):
        """Worker main loop.

        Pops ProcessTask objects out of the pending queue, executes each in a
        separate thread (so a timed-out task can be abandoned) and pushes
        tasks that carry callbacks onto the results queue for callback
        execution. Exits when the stop event is set or after 10 consecutive
        idle polls of the pending queue.

        :param self: self reference
        """
        # start the watchdog thread
        self.__watchdogThread = threading.Thread(target=self.__watchdog)
        self.__watchdogThread.daemon = True
        self.__watchdogThread.start()

        # Locks may have been held by other threads of the parent when this
        # worker was created; reset the whole LockRing so this process does
        # not deadlock on locks nobody here owns.
        if LockRing:
            # Reset all locks
            lr = LockRing()
            lr._openAll()
            lr._setAllEvents()

        # number of tasks processed by this worker
        taskCounter = 0
        # consecutive empty-queue polls
        idleLoopCount = 0

        # main loop
        while True:

            # draining: stopEvent is set, exit
            if self.__stopEvent.is_set():
                return

            # clear task
            self.task = None

            # read from queue, blocking up to 10 seconds
            try:
                task = self.__pendingQueue.get(block=True, timeout=10)
            except Queue.Empty:
                # idle loop?
                idleLoopCount += 1
                # 10th idle loop - exit, nothing to do
                if idleLoopCount == 10:
                    return
                continue

            # toggle __working flag so the pool sees this worker as busy
            self.__working.value = 1
            # save task
            self.task = task
            # reset idle loop counter
            idleLoopCount = 0

            # process task in a separate thread so it can be abandoned on timeout
            self.__processThread = threading.Thread(target=self.__processTask)
            self.__processThread.start()

            # join processThread with or without timeout (10 s grace period)
            if self.task.getTimeOut():
                self.__processThread.join(self.task.getTimeOut() + 10)
            else:
                self.__processThread.join()

            # processThread is still alive? stop it!
            # NOTE(review): _Thread__stop is a private CPython 2 API, removed in
            # Python 3, and it only marks the thread stopped rather than killing
            # it -- confirm the intended runtime before porting.
            if self.__processThread.is_alive():
                self.__processThread._Thread__stop()

            # check results and callbacks presence, put task to results queue
            if self.task.hasCallback() or self.task.hasPoolCallback():
                if not self.task.taskResults() and not self.task.taskException(
                ):
                    self.task.setResult(S_ERROR("Timed out"))
                self.__resultsQueue.put(task)
            # increase task counter
            taskCounter += 1
            self.__taskCounter = taskCounter
            # toggle __working flag back to idle
            self.__working.value = 0
Example #24
0
 def __init__(self, msgBroker):
     """
     Keep a reference to the message broker and initialize the forwarding
     tables plus the lock that guards them.
     """
     self.__msgBroker = msgBroker
     # client transport id -> server-end connection info
     self.__byClient = {}
     # server-end trid -> client transport id (reverse mapping)
     self.__srvToCliTrid = {}
     self.__inOutLock = LockRing().getLock()
Example #25
0
  def run( self ):
    """Worker main loop.

    Pops ProcessTask objects out of the pending queue, executes each in a
    separate thread (so a timed-out task can be abandoned) and pushes tasks
    that carry callbacks onto the results queue for callback execution.
    Exits when the stop event is set or after 10 consecutive idle polls of
    the pending queue.

    :param self: self reference
    """
    ## start the watchdog thread
    self.__watchdogThread = threading.Thread( target = self.__watchdog )
    self.__watchdogThread.daemon = True
    self.__watchdogThread.start()

    ## Locks may have been held by other threads of the parent when this
    ## worker was created; reset the whole LockRing so this process does not
    ## deadlock on locks nobody here owns.
    if LockRing:
      # Reset all locks
      lr = LockRing()
      lr._openAll()
      lr._setAllEvents()

    ## number of tasks processed by this worker
    taskCounter = 0
    ## consecutive empty-queue polls
    idleLoopCount = 0

    ## main loop
    while True:

      ## draining: stopEvent is set, exit
      if self.__stopEvent.is_set():
        return

      ## clear task
      self.task = None

      ## read from queue, blocking up to 10 seconds
      try:
        task = self.__pendingQueue.get( block = True, timeout = 10 )
      except Queue.Empty:
        ## idle loop?
        idleLoopCount += 1
        ## 10th idle loop - exit, nothing to do
        if idleLoopCount == 10:
          return
        continue

      ## toggle __working flag so the pool sees this worker as busy
      self.__working.value = 1
      ## save task
      self.task = task
      ## reset idle loop counter
      idleLoopCount = 0

      ## process task in a separate thread so it can be abandoned on timeout
      self.__processThread = threading.Thread( target = self.__processTask )
      self.__processThread.start()

      ## join processThread with or without timeout (10 s grace period)
      if self.task.getTimeOut():
        self.__processThread.join( self.task.getTimeOut()+10 )
      else:
        self.__processThread.join()

      ## processThread is still alive? stop it!
      ## NOTE(review): _Thread__stop is a private CPython 2 API, removed in
      ## Python 3, and only marks the thread stopped rather than killing it
      ## -- confirm the intended runtime before porting.
      if self.__processThread.is_alive():
        self.__processThread._Thread__stop()

      ## check results and callbacks presence, put task to results queue
      if self.task.hasCallback() or self.task.hasPoolCallback():
        if not self.task.taskResults() and not self.task.taskException():
          self.task.setResult( S_ERROR("Timed out") )
        self.__resultsQueue.put( task )
      ## increase task counter
      taskCounter += 1
      self.__taskCounter = taskCounter
      ## toggle __working flag back to idle
      self.__working.value = 0
Example #26
0
 def lock( self ):
   """
   Return the recursive lock named after this class, creating it on first
   access.
   """
   currentLock = self.__lock
   if not currentLock:
     currentLock = LockRing().getLock( self.__class__.__name__, recursive = True )
     self.__lock = currentLock
   return currentLock
Example #27
0
 def __init__(self, lockName="", recursive=False):
     """
     Fetch (or create) the named lock from the process-wide LockRing.

     :param lockName: name of the lock inside the ring ("" for anonymous)
     :param recursive: whether the lock should be re-entrant
     """
     # local import to avoid a module-level dependency cycle
     from DIRAC.Core.Utilities.LockRing import LockRing
     self.__lockName = lockName
     ring = LockRing()
     self.__lr = ring
     self.__lock = ring.getLock(lockName, recursive=recursive)
Example #28
0
    def wrapped_fcn(*args, **kwargs):
        """Call the wrapped function under a freshly obtained user proxy.

        Proxy-control keyword arguments are popped from ``kwargs`` so the
        wrapped function never sees them:

        - proxyUserName / proxyUserDN: identity to impersonate
        - proxyUserGroup: DIRAC group of the identity (required for substitution)
        - proxyWithVOMS: add the group's VOMS attribute (default True)
        - proxyFilePath: where to store the downloaded proxy (optional)
        - executionLock: serialize execution while the environment is modified
          (default False)

        If no identity and group are requested, the function is called as-is.
        """

        userName = kwargs.pop('proxyUserName', '')
        userDN = kwargs.pop('proxyUserDN', '')
        userGroup = kwargs.pop('proxyUserGroup', '')
        vomsFlag = kwargs.pop('proxyWithVOMS', True)
        proxyFilePath = kwargs.pop('proxyFilePath', False)
        executionLockFlag = kwargs.pop('executionLock', False)
        if executionLockFlag:
            # X509_USER_PROXY is process-global state: take a shared recursive
            # lock so concurrent substitutions cannot interleave.
            executionLock = LockRing().getLock('_UseUserProxy_',
                                               recursive=True)

        if (userName or userDN) and userGroup:

            # Setup user proxy; remember the current one so it can be restored.
            originalUserProxy = os.environ.get('X509_USER_PROXY')
            if userDN:
                userDNs = [userDN]
            else:
                result = getDNForUsername(userName)
                if not result['OK']:
                    return result
                userDNs = result[
                    'Value']  # a same user may have more than one DN
            vomsAttr = ''
            if vomsFlag:
                vomsAttr = getVOMSAttributeForGroup(userGroup)

            result = getProxy(userDNs, userGroup, vomsAttr, proxyFilePath)

            if not result['OK']:
                return result

            if executionLockFlag:
                executionLock.acquire()

            # point the environment at the downloaded proxy file
            proxyFile = result['Value']
            os.environ['X509_USER_PROXY'] = proxyFile

            # Check if the caller is executing with the host certificate;
            # if so, switch it off for the duration of the call.
            useServerCertificate = gConfig.useServerCertificate()
            if useServerCertificate:
                gConfigurationData.setOptionInCFG(
                    '/DIRAC/Security/UseServerCertificate', 'false')

            try:
                return fcn(*args, **kwargs)
            except Exception as lException:  # pylint: disable=broad-except
                value = ','.join([str(arg) for arg in lException.args])
                exceptType = lException.__class__.__name__
                return S_ERROR("Exception - %s: %s" % (exceptType, value))
            finally:
                # Restore the default host certificate usage if necessary
                if useServerCertificate:
                    gConfigurationData.setOptionInCFG(
                        '/DIRAC/Security/UseServerCertificate', 'true')
                # restore (or remove) the previous proxy environment variable
                if originalUserProxy:
                    os.environ['X509_USER_PROXY'] = originalUserProxy
                else:
                    os.environ.pop('X509_USER_PROXY')
                if executionLockFlag:
                    executionLock.release()

        else:
            # No proxy substitution requested
            return fcn(*args, **kwargs)
Example #29
0
class WORM(object):
    """
    Write One - Read Many gate.

    Up to ``maxReads`` readers may run concurrently (each holding one
    semaphore slot); a writer drains every slot before running, so it
    executes alone. Writer draining is serialized by an internal lock.
    """
    def __init__(self, maxReads=10):
        # local import to avoid a module-level dependency cycle
        from DIRAC.Core.Utilities.LockRing import LockRing
        self.__lr = LockRing()
        self.__lock = self.__lr.getLock()
        self.__maxReads = maxReads
        self.__semaphore = threading.Semaphore(maxReads)

    def write(self, funcToCall):
        """
        Decorator giving the wrapped callable exclusive (writer) access.
        """
        def __doWriteLock(*args, **kwargs):
            try:
                self.__startWriteZone()
                return funcToCall(*args, **kwargs)
            finally:
                self.__endWriteZone()

        return __doWriteLock

    def read(self, funcToCall):
        """
        Decorator giving the wrapped callable shared (reader) access.
        """
        def __doReadLock(*args, **kwargs):
            try:
                self.__startReadZone()
                return funcToCall(*args, **kwargs)
            finally:
                self.__endReadZone()

        return __doReadLock

    def __startWriteZone(self):
        """
        Drain the semaphore completely so no reader can enter; blocks until
        every active reader has left. Serialized by the internal lock.
        PRIVATE USE
        """
        self.__lock.acquire()
        for _ in range(self.__maxReads):
            self.__semaphore.acquire()
        self.__lock.release()

    def __endWriteZone(self):
        """
        Refill every semaphore slot, letting readers in again.
        PRIVATE USE
        """
        for _ in range(self.__maxReads):
            self.__semaphore.release()

    def __startReadZone(self):
        """
        Take one reader slot; blocks only while a writer holds the gate.
        PRIVATE USE
        """
        self.__semaphore.acquire()

    def __endReadZone(self):
        """
        Give back one reader slot.
        PRIVATE USE
        """
        self.__semaphore.release()
Example #30
0
class Logging(object):
  """
  Logging is a wrapper of the logger object from the standard "logging" library which integrate
  some DIRAC concepts. It is the equivalent to the old gLogger object.

  It is used like an interface to use the logger object of the "logging" library.
  Its purpose is to replace transparently the old gLogger object in the existing code in order to
  minimize the changes.

  In this way, each Logging embed a logger of "logging". It is possible to create sublogger,
  set and get the level of the embedded logger and create log messages with it.

  Logging could delegate the initialization and the configuration to a factory of the root logger
  but it cannot because it has to wrap the old gLogger.

  Logging should not be instantiated directly. It is LoggingRoot which is instantiated and which
  instantiates Logging objects.
  """

  # componentName is a class variable because the component name is the same for every Logging objects
  # its default value is "Framework" but it can be configured in initialize() in LoggingRoot
  # it can be composed by the system name and the component name. For
  # instance: "Monitoring/Atom"
  _componentName = "Framework"
  # use the lockRing singleton to save the Logging object
  _lockRing = LockRing()
  # lock the configuration of the Logging
  _lockConfig = _lockRing.getLock("config")

  def __init__(self, father=None, fatherName='', name='', customName=''):
    """
    Initialization of the Logging object.
    By default, 'fatherName' and 'name' are empty, because getChild accepts only string and the first empty
    string corresponds to the root logger.
    Example:
    logging.getLogger('') == logging.getLogger('root') == root logger
    logging.getLogger('root').getChild('log') == root.log == log child of root

    :params father: Logging, father of this new Logging.
    :params fatherName: string representing the name of the father logger in the chain.
    :params name: string representing the name of the logger in the chain.
    :params customName: string representing the name of the logger in the chain:
                        - "root" does not appear at the beginning of the chain
                        - hierarchy "." are replaced by "\"
                        useful for the display of the Logging name
    """

    # Logging chain
    self._children = {}
    self._parent = father

    # initialize display options and level with the ones of the Logging parent
    if self._parent is not None:
      self._options = self._parent.getDisplayOptions()
      self._level = LogLevels.getLevelValue(father.getLevel())
    else:
      self._options = {'headerIsShown': True,
                       'threadIDIsShown': False, 'Color': False}
      # the native level is not used because it has to be to debug to send all
      # messages to the log central
      self._level = None

    # dictionary of the option state, modified by the user or not
    # this is to give to the options the same behaviour that the "logging" level:
    # - propagation from the parent to the children when their levels are not set by the developer himself
    # - stop the propagation when a developer set a level to a child
    self._optionsModified = {'headerIsShown': False, 'threadIDIsShown': False}
    self._levelModified = False

    self._backendsList = []

    # name of the Logging
    self.name = str(name)
    self._logger = logging.getLogger(fatherName).getChild(self.name)
    # update the custom name of the Logging adding the new Logging name in the
    # entire path
    self._customName = os.path.join("/", customName, self.name)

    # Locks to make Logging thread-safe
    # we use RLock to prevent blocking in the Logging
    # lockInit to protect the initialization of a sublogger
    self._lockInit = self._lockRing.getLock("init")
    # lockOptions to protect the option modifications and the backendsList
    self._lockOptions = self._lockRing.getLock("options", recursive=True)
    # lockLevel to protect the level
    self._lockLevel = self._lockRing.getLock("level", recursive=True)
    # lockObjectLoader to protect the ObjectLoader singleton
    self._lockObjectLoader = self._lockRing.getLock("objectLoader")

  def showHeaders(self, yesno=True):
    """
    Depending on the value, display or not the prefix of the message.

    :params yesno: boolean determining the behaviour of the display
    """
    self._setOption('headerIsShown', yesno)

  def showThreadIDs(self, yesno=True):
    """
    Depending on the value, display or not the thread ID.

    :params yesno: boolean determining the behaviour of the display
    """
    self._setOption('threadIDIsShown', yesno)

  def _setOption(self, optionName, value, directCall=True):
    """
    Depending on the value, modify the value of the option.
    Propagate the option to the children.
    The options of the children will be updated if they were not modified before by a developer.

    :params optionName: string representing the name of the option to modify
    :params value: boolean to give to the option
    :params directCall: boolean indicating if it is a call by the user or not
    """
    # lock to prevent that two threads change the options at the same time
    self._lockOptions.acquire()
    try:
      if self._optionsModified[optionName] and not directCall:
        return

      if directCall:
        self._optionsModified[optionName] = True

      # update option
      self._options[optionName] = value

      # propagate in the children
      # values() works on both Python 2 and 3 (itervalues() broke Python 3)
      for child in self._children.values():
        child._setOption(optionName, value, directCall=False)  # pylint: disable=protected-access
      # update the format to apply the option change
      self._generateBackendFormat()
    finally:
      self._lockOptions.release()

  def registerBackends(self, desiredBackends, backendOptions=None):
    """
    Attach a list of backends to the Logging object.
    Convert backend name to backend class name to a Backend object and add it to the Logging object

    :params desiredBackends: a list of different names attaching to differents backends.
                             list of the possible values: ['stdout', 'stderr', 'file', 'server']
    :params backendOptions: dictionary of different backend options.
                            example: FileName='/tmp/log.txt'
    """
    for backendName in desiredBackends:
      self.registerBackend(backendName, backendOptions)

  def registerBackend(self, desiredBackend, backendOptions=None):
    """
    Attach a backend to the Logging object.
    Convert backend name to backend class name to a Backend object and add it to the Logging object

    :params desiredBackend: a name attaching to a backend type.
                            list of the possible values: ['stdout', 'stderr', 'file', 'server']
    :params backendOptions: dictionary of different backend options.
                            example: FileName='/tmp/log.txt'
    """
    # Remove white space and capitalize the first letter
    desiredBackend = desiredBackend.strip()
    # guard against an empty name: indexing desiredBackend[0] below would
    # raise IndexError
    if not desiredBackend:
      self.warn("%s is not a valid backend name." % desiredBackend)
      return
    desiredBackend = desiredBackend[0].upper() + desiredBackend[1:]
    _class = self.__loadLogClass('Resources.LogBackends.%sBackend' % desiredBackend)
    if _class['OK']:
      # add the backend instance to the Logging
      self._addBackend(_class['Value'](), backendOptions)
      self._generateBackendFormat()
    else:
      self._generateBackendFormat()
      self.warn("%s is not a valid backend name." % desiredBackend)

  def _addBackend(self, backend, backendOptions=None):
    """
    Attach a Backend object to the Logging object.

    :params backend: Backend object that has to be added
    :params backendOptions: a dictionary of different backend options.
                            example: {'FileName': '/tmp/log.txt'}
    """
    backend.createHandler(backendOptions)

    # lock to prevent that the level change before adding the new backend in the backendsList
    # and to prevent a change of the backendsList during the reading of the
    # list
    self._lockLevel.acquire()
    self._lockOptions.acquire()
    try:
      # update the level of the new backend to respect the Logging level
      backend.setLevel(self._level)
      self._logger.addHandler(backend.getHandler())
      self._addFilter(backend, backendOptions)
      self._backendsList.append(backend)
    finally:
      self._lockLevel.release()
      self._lockOptions.release()

  def setLevel(self, levelName):
    """
    Check if the level name exists and get the integer value before setting it.

    :params levelName: string representing the level to give to the logger

    :return: boolean representing if the setting is done or not
    """
    result = False
    if levelName.upper() in LogLevels.getLevelNames():
      self._setLevel(LogLevels.getLevelValue(levelName))
      result = True
    return result

  def _setLevel(self, level, directCall=True):
    """
    Set a level to the backends attached to this Logging.
    Set the level of the Logging too.
    Propagate the level to its children.

    :params level: integer representing the level to give to the logger
    :params directCall: boolean indicating if it is a call by the user or not
    """
    # lock to prevent that two threads change the level at the same time
    self._lockLevel.acquire()
    try:
      # if the level logging level was previously modified by the developer
      # and it is not a direct call from him, then we return in order to stop
      # the propagation
      if self._levelModified and not directCall:
        return

      if directCall:
        self._levelModified = True

      # update Logging level
      self._level = level

      # lock to prevent a modification of the backendsList
      self._lockOptions.acquire()
      try:
        # update backend levels
        for backend in self._backendsList:
          backend.setLevel(self._level)
      finally:
        self._lockOptions.release()

      # propagate in the children
      # values() works on both Python 2 and 3 (itervalues() broke Python 3)
      for child in self._children.values():
        child._setLevel(level, directCall=False)  # pylint: disable=protected-access
    finally:
      self._lockLevel.release()

  def getLevel(self):
    """
    :return: the name of the level
    """
    return LogLevels.getLevel(self._level)

  def shown(self, levelName):
    """
    Determine if messages with a certain level will be displayed or not.

    :params levelName: string representing the level to analyse

    :return: boolean which give the answer
    """
    # lock to prevent a level change
    self._lockLevel.acquire()
    try:
      result = False
      if levelName.upper() in LogLevels.getLevelNames():
        result = self._level <= LogLevels.getLevelValue(levelName)
      return result
    finally:
      self._lockLevel.release()

  @classmethod
  def getName(cls):
    """
    :return: "system name/component name"
    """
    return cls._componentName

  def getSubName(self):
    """
    :return: the name of the logger
    """
    return self.name

  def getDisplayOptions(self):
    """
    :return: the dictionary of the display options and their values. Must not be redefined
    """
    # lock to save the options which can be modified
    self._lockOptions.acquire()
    try:
      # copy the dictionary to avoid that every Logging has the same
      options = self._options.copy()
      return options
    finally:
      self._lockOptions.release()

  def __loadLogClass(self, modulePath):
    """Load class thread-safe.

    :params modulePath: string, module path understood by ObjectLoader

    :return: S_OK with the loaded class | S_ERROR
    """
    # import ObjectLoader here to avoid a dependancy loop
    from DIRAC.Core.Utilities.ObjectLoader import ObjectLoader
    objLoader = ObjectLoader()
    # lock to avoid problem in ObjectLoader which is a singleton not
    # thread-safe
    self._lockObjectLoader.acquire()
    try:
      # load the Backend class
      return objLoader.loadObject(modulePath)
    finally:
      self._lockObjectLoader.release()

  @staticmethod
  def getAllPossibleLevels():
    """
    :return: a list of all levels available
    """
    return LogLevels.getLevelNames()

  def always(self, sMsg, sVarMsg=''):
    """
    Always level
    """
    return self._createLogRecord(LogLevels.ALWAYS, sMsg, sVarMsg)

  def notice(self, sMsg, sVarMsg=''):
    """
    Notice level
    """
    return self._createLogRecord(LogLevels.NOTICE, sMsg, sVarMsg)

  def info(self, sMsg, sVarMsg=''):
    """
    Info level
    """
    return self._createLogRecord(LogLevels.INFO, sMsg, sVarMsg)

  def verbose(self, sMsg, sVarMsg=''):
    """
    Verbose level
    """
    return self._createLogRecord(LogLevels.VERBOSE, sMsg, sVarMsg)

  def debug(self, sMsg, sVarMsg=''):
    """
    Debug level
    """
    return self._createLogRecord(LogLevels.DEBUG, sMsg, sVarMsg)

  def warn(self, sMsg, sVarMsg=''):
    """
    Warn
    """
    return self._createLogRecord(LogLevels.WARN, sMsg, sVarMsg)

  def error(self, sMsg, sVarMsg=''):
    """
    Error level
    """
    return self._createLogRecord(LogLevels.ERROR, sMsg, sVarMsg)

  def exception(self, sMsg="", sVarMsg='', lException=False, lExcInfo=False):
    """
    Exception level
    """
    _ = lException  # Make pylint happy
    _ = lExcInfo
    return self._createLogRecord(LogLevels.ERROR, sMsg, sVarMsg, exc_info=True)

  def fatal(self, sMsg, sVarMsg=''):
    """
    Fatal level
    """
    return self._createLogRecord(LogLevels.FATAL, sMsg, sVarMsg)

  def _createLogRecord(self, level, sMsg, sVarMsg, exc_info=False):
    """
    Create a log record according to the level of the message. The log record is always sent to the different backends
    Backends have their own levels and can manage the display of the message or not according to the level.
    Nevertheless, backends and the logger have the same level value,
    so we can test if the message will be displayed or not.

    :params level: positive integer representing the level of the log record
    :params sMsg: string representing the message
    :params sVarMsg: string representing an optional message
    :params exc_info: boolean representing the stacktrace for the exception

    :return: boolean representing the result of the log record creation
    """

    # lock to prevent a level change after that the log is sent.
    self._lockLevel.acquire()
    try:
      # exc_info is only for exception to add the stack trace
      # extra is a way to add extra attributes to the log record:
      # - 'componentname': the system/component name
      # - 'varmessage': the variable message
      # - 'customname' : the name of the logger for the DIRAC usage: without 'root' and separated with '/'
      # extras attributes are not camel case because log record attributes are
      # not either.
      extra = {'componentname': self._componentName,
               'varmessage': sVarMsg,
               'spacer': '' if not sVarMsg else ' ',
               'customname': self._customName}
      self._logger.log(level, "%s", sMsg, exc_info=exc_info, extra=extra)
      # test to know if the message is displayed or not
      isSent = self._level <= level
      return isSent
    finally:
      self._lockLevel.release()

  def showStack(self):
    """
    Display a debug message without any content.

    :return: boolean, True if the message is sent, else False
    """
    return self.debug('')

  def _generateBackendFormat(self):
    """
    Generate the Backends format according to the options
    """
    # lock to prevent the modification of the options during this code block
    # and to prevent a modification of the backendsList
    self._lockOptions.acquire()
    try:
      # give options and level to AbstractBackend to receive the new format for
      # the backends list
      datefmt, fmt = AbstractBackend.createFormat(self._options)

      for backend in self._backendsList:
        backend.setFormat(fmt, datefmt, self._options)
    finally:
      self._lockOptions.release()

  def _addFilter(self, backend, backendOptions):
    """Create a filter and add it to the handler of the backend."""
    for filterName in self.__getFilterList(backendOptions):
      options = self.__getFilterOptionsFromCFG(filterName)
      _class = self.__loadLogClass('Resources.LogFilters.%s' % options.get('Plugin'))
      if _class['OK']:
        # add the backend instance to the Logging
        backend.getHandler().addFilter(_class['Value'](options))
      else:
        self.warn("%r is not a valid Filter name." % filterName)

  def __getFilterList(self, backendOptions):
    """Return list of defined filters."""
    if not (isinstance(backendOptions, dict) and 'Filter' in backendOptions):
      return []
    return [fil.strip() for fil in backendOptions['Filter'].split(',') if fil.strip()]

  def __getFilterOptionsFromCFG(self, logFilter):
    """Get filter options from the configuration.

    :params logFilter: string representing a filter identifier: stdout, file, f04
    """
    # We have to put the import lines here to avoid a dependancy loop
    from DIRAC.ConfigurationSystem.Client.Helpers.Resources import getFilterConfig

    # Search filters config in the resources section
    retDictRessources = getFilterConfig(logFilter)
    if retDictRessources['OK']:
      return retDictRessources['Value']
    return {}

  def getSubLogger(self, subName, child=True):
    """
    Create a new Logging object, child of this Logging, if it does not exists.

    :params subName: the name of the child Logging
    """
    _ = child  # make pylint happy
    # lock to prevent that the method initializes two Logging for the same 'logging' logger
    # and to erase the existing _children[subName]
    self._lockInit.acquire()
    try:
      # Check if the object has a child with "subName".
      result = self._children.get(subName)
      if result is not None:
        return result
      # create a new child Logging
      childLogging = Logging(self, self._logger.name,
                             subName, self._customName)
      self._children[subName] = childLogging
      return childLogging
    finally:
      self._lockInit.release()

  def initialized(self):  # pylint: disable=no-self-use
    """
    initialized: Deleted method. Do not use it.
    """
    return True

  def processMessage(self, messageObject):  # pylint: disable=no-self-use
    """
    processMessage: Deleted method. Do not use it.
    """
    _ = messageObject  # make pylint happy
    return False

  def flushAllMessages(self, exitCode=0):
    """
    flushAllMessages: Deleted method. Do not use it.
    """
    pass
Example #31
0
 def __init__( self, msgBroker ):
   """
   Keep a reference to the message broker and initialize the forwarding
   tables plus the lock that guards them.
   """
   self.__msgBroker = msgBroker
   # client transport id -> server-end connection info
   self.__byClient = {}
   # server-end trid -> client transport id (reverse mapping)
   self.__srvToCliTrid = {}
   self.__inOutLock = LockRing().getLock()
Example #32
0
class MessageForwarder(object):
  """
  Forwards messages between client transports (identified by their trid) and
  server-side MessageClient connections, keeping a two-way trid mapping.
  """

  def __init__( self, msgBroker ):
    """Keep a reference to the broker and prepare the empty routing tables."""
    self.__inOutLock = LockRing().getLock()
    self.__msgBroker = msgBroker
    # cliTrid -> { 'srvEnd': MessageClient, 'srvTrid': str, 'srvName': str }
    self.__byClient = {}
    # reverse map: server-side trid -> client trid
    self.__srvToCliTrid = {}

  def addClient( self, cliTrid, destination, clientInitParams, connectParams ):
    """Create a MessageClient towards destination and register the trid mapping.

    :param cliTrid: transport id of the client
    :param destination: service the client wants to talk to
    :param clientInitParams: kwargs for the MessageClient constructor
    :param connectParams: ( uniqueName, connect kwargs ) tuple
    :return: result of MessageClient.connect, or None on duplicated trid
    """
    if cliTrid in self.__byClient:
      gLogger.fatal( "Trid is duplicated!! this shouldn't happen" )
      return
    msgClient = MessageClient( destination, **clientInitParams )
    msgClient.subscribeToDisconnect( self.__srvDisconnect )
    msgClient.subscribeToAllMessages( self.msgFromSrv )
    msgClient.setUniqueName( connectParams[0] )
    result = msgClient.connect( **connectParams[1] )
    if not result[ 'OK' ]:
      return result
    self.__inOutLock.acquire()
    try:
      self.__byClient[ cliTrid ] = { 'srvEnd' : msgClient,
                                     'srvTrid' : msgClient.getTrid(),
                                     'srvName' : destination }
      self.__srvToCliTrid[ msgClient.getTrid() ] = cliTrid
    finally:
      self.__inOutLock.release()
    return result

  def __srvDisconnect( self, srvEndCli ):
    """Server side dropped: tear down the matching client transport."""
    try:
      cliTrid = self.__srvToCliTrid[ srvEndCli.getTrid() ]
    except KeyError:
      # BUG FIX: a dict lookup raises KeyError, not IndexError, and without
      # this return cliTrid would be undefined in the lines below (NameError)
      gLogger.exception( "This shouldn't happen!" )
      return
    gLogger.info( "Service %s disconnected messaging connection" % self.__byClient[ cliTrid ][ 'srvName' ] )
    self.__msgBroker.removeTransport( cliTrid )
    self.__removeClient( cliTrid )

  def cliDisconnect( self, cliTrid ):
    """Client side dropped: disconnect the matching server MessageClient."""
    if cliTrid not in self.__byClient:
      gLogger.fatal( "This shouldn't happen!" )
      return
    gLogger.info( "Client to %s disconnected messaging connection" % self.__byClient[ cliTrid ][ 'srvName' ] )
    self.__byClient[ cliTrid ][ 'srvEnd' ].disconnect()
    self.__removeClient( cliTrid )

  def __removeClient( self, cliTrid ):
    """Drop both directions of the trid mapping under the lock."""
    self.__inOutLock.acquire()
    try:
      try:
        srvTrid = self.__byClient[ cliTrid ][ 'srvTrid' ]
        self.__byClient.pop( cliTrid )
        self.__srvToCliTrid.pop( srvTrid )
      except Exception:
        gLogger.exception( "This shouldn't happen!" )
    finally:
      self.__inOutLock.release()

  def msgFromClient( self, cliTrid, msgObj ):
    """Forward a message coming from the client to the server end."""
    gLogger.info( "Message %s to %s service" % ( msgObj.getName(), self.__byClient[ cliTrid ][ 'srvName' ] ) )
    result = self.__byClient[ cliTrid ][ 'srvEnd' ].sendMessage( msgObj )
    return result

  def msgFromSrv( self, srvEndCli, msgObj ):
    """Forward a message coming from the server to the client transport."""
    try:
      cliTrid = self.__srvToCliTrid[ srvEndCli.getTrid() ]
    except KeyError:
      # narrowed from a bare except: only the missing-trid case is expected here
      gLogger.exception( "This shouldn't happen" )
      return S_ERROR( "MsgFromSrv -> Mismatched srv2cli trid" )
    gLogger.info( "Message %s from %s service" % ( msgObj.getName(), self.__byClient[ cliTrid ][ 'srvName' ] ) )
    return self.__msgBroker.sendMessage( cliTrid, msgObj )
Example #33
0
class SocketInfo:
  """
  Holds the SSL context and the connection parameters (infoDict) for one
  socket, and extracts the peer credentials after the handshake.
  """

  # CA certificates / CRLs are loaded once and shared by all instances
  __cachedCAsCRLs = False
  __cachedCAsCRLsLastLoaded = 0
  __cachedCAsCRLsLoadLock = LockRing().getLock()


  def __init__( self, infoDict, sslContext = None ):
    """
    :param infoDict: dictionary of connection parameters ('clientMode',
                     'useCertificates', 'proxyString', ...)
    :param sslContext: reuse an existing SSL context instead of generating one
    :raise Exception: when the SSL context cannot be generated
    """
    self.infoDict = infoDict
    #HACK:DISABLE CRLS!!!!!
    self.infoDict[ 'IgnoreCRLs' ] = True
    if sslContext:
      self.sslContext = sslContext
    else:
      # pick the context generator matching the available credentials
      if self.infoDict[ 'clientMode' ]:
        if 'useCertificates' in self.infoDict and self.infoDict[ 'useCertificates' ]:
          retVal = self.__generateContextWithCerts()
        elif 'proxyString' in self.infoDict:
          retVal = self.__generateContextWithProxyString()
        else:
          retVal = self.__generateContextWithProxy()
      else:
        retVal = self.__generateServerContext()
      if not retVal[ 'OK' ]:
        raise Exception( retVal[ 'Message' ] )

  def __getValue( self, optName, default ):
    """Return infoDict[optName], or default when the option is absent."""
    if optName not in self.infoDict:
      return default
    return self.infoDict[ optName ]

  def setLocalCredentialsLocation( self, credTuple ):
    """Record where the local credentials (cert/key or proxy) come from."""
    self.infoDict[ 'localCredentialsLocation' ] = credTuple

  def getLocalCredentialsLocation( self ):
    """Return the previously recorded local credentials location."""
    return self.infoDict[ 'localCredentialsLocation' ]

  def gatherPeerCredentials( self ):
    """Build the credentials dictionary of the peer from its certificate chain.

    :return: dict with DN, CN, x509Chain, isProxy, isLimitedProxy and,
             when present in the chain, the DIRAC group
    """
    certList = self.sslSocket.get_peer_certificate_chain()
    #Servers don't receive the whole chain, the last cert comes alone
    if not self.infoDict[ 'clientMode' ]:
      certList.insert( 0, self.sslSocket.get_peer_certificate() )
    peerChain = X509Chain( certList = certList )
    isProxyChain = peerChain.isProxy()['Value']
    isLimitedProxyChain = peerChain.isLimitedProxy()['Value']
    # for proxies the identity is the issuer, not the (proxy) leaf certificate
    if isProxyChain:
      identitySubject = peerChain.getIssuerCert()['Value'].getSubjectNameObject()[ 'Value' ]
    else:
      identitySubject = peerChain.getCertInChain( 0 )['Value'].getSubjectNameObject()[ 'Value' ]
    credDict = { 'DN' : identitySubject.one_line(),
                 'CN' : identitySubject.commonName,
                 'x509Chain' : peerChain,
                 'isProxy' : isProxyChain,
                 'isLimitedProxy' : isLimitedProxyChain }
    diracGroup = peerChain.getDIRACGroup()
    if diracGroup[ 'OK' ] and diracGroup[ 'Value' ]:
      credDict[ 'group' ] = diracGroup[ 'Value' ]
    self.infoDict[ 'peerCredentials' ] = credDict
    return credDict

  def setSSLSocket( self, sslSocket ):
    """Attach the wrapped SSL socket to this info object."""
    self.sslSocket = sslSocket

  def getSSLSocket( self ):
    """Return the wrapped SSL socket."""
    return self.sslSocket

  def getSSLContext( self ):
    """Return the SSL context in use."""
    return self.sslContext

  def clone( self ):
    """Return S_OK with a copy of this SocketInfo sharing the same context."""
    try:
      return S_OK( SocketInfo( dict( self.infoDict ), self.sslContext ) )
    except Exception as e:  # BUG FIX: was py2-only "except Exception, e" syntax
      return S_ERROR( str( e ) )
Example #34
0
 def __init__(self, maxReads=10):
     """Prepare a writer lock plus a counting semaphore allowing maxReads readers."""
     # imported locally to avoid pulling DIRAC in at module import time
     from DIRAC.Core.Utilities.LockRing import LockRing
     self.__maxReads = maxReads
     self.__semaphore = threading.Semaphore(maxReads)
     self.__lr = LockRing()
     self.__lock = self.__lr.getLock()
Example #35
0
  def run( self ):
    """ task execution

    reads and executes ProcessTask :task: out of pending queue and then pushes it
    to the results queue for callback execution

    :param self: self reference

    NOTE(review): Python 2 module (print statements, Queue module); code left
    byte-identical, comments only.
    """
    ## start watchdog thread (daemonized so it dies with the process)
    print "PID:%s PPID:%s Started WorkingProcess...." % (os.getpid(), os.getppid())
    self.__watchdogThread = threading.Thread( target = self.__watchdog )
    self.__watchdogThread.daemon = True
    self.__watchdogThread.start()
    print "PID:%s PPID:%s started watchdogThread.." % (os.getpid(), os.getppid())
    ## http://cdn.memegenerator.net/instances/400x/19450565.jpg
    if LockRing:
      # Reset all locks inherited from the parent process after fork,
      # otherwise a lock held at fork time would deadlock this worker
      lr = LockRing()
      lr._openAll()
      lr._setAllEvents()

    ## zero processed task counter
    taskCounter = 0
    ## zero idle loop counter
    idleLoopCount = 0

    ## main loop
    while True:

      ## draining, stopEvent is set, exiting
      if self.__stopEvent.is_set():
        return

      ## clear task
      self.task = None

      ## read from queue
      try:
	print "PID:%s PPID:%s Will try to GET from Pending Queue..." % (os.getpid(), os.getppid())
        task = self.__pendingQueue.get( block = True, timeout = 5 )   # timeout changed from 10
        print "PID:%s PPID:%s GOT a task from pending queue.." % (os.getpid(), os.getppid())
      except Queue.Empty:
        ## idle loop?
        idleLoopCount += 1
        ## exit after 2 idle loops, nothing to do
        ## NOTE(review): the original comment said "10th idle loop" and the
        ## print below still says 10, but the code exits after 2 — confirm intent
        if idleLoopCount == 2: # changed from 10
          print "PID:%s PPID:%s Tried 10 times to get something, exiting..."% (os.getpid(), os.getppid())
          return
        continue

      ## toggle __working flag
      self.__working.value = 1
      ## save task
      self.task = task
      ## reset idle loop counter
      idleLoopCount = 0

      ## process task in a separate thread so it can be abandoned on timeout
      self.__processThread = threading.Thread( target = self.__processTask )
      self.__processThread.start()
      print "PID:%s PPID:%s started process Thread..." % (os.getpid(), os.getppid())
      ## join processThread with or without timeout (10 s of grace added)
      if self.task.getTimeOut():
	print "WILL WAIT FOR JOIN(timeout)"
        self.__processThread.join( self.task.getTimeOut()+10 )
      else:
	print "WILL WAIT FOR JOIN"
        self.__processThread.join()

      ## processThread is still alive? stop it!
      ## NOTE(review): _Thread__stop is a private CPython 2 API and does not
      ## exist in Python 3
      if self.__processThread.is_alive():
        self.__processThread._Thread__stop()
        print "MUST FORCE-STOP PROCESS THREAD"

      print "PID:%s PPID:%s Process thread done..."% (os.getpid(), os.getppid())
      ## check results and callbacks presence, put task to results queue
      if self.task.hasCallback() or self.task.hasPoolCallback():
        ## no result and no exception recorded: the join above must have timed out
        if not self.task.taskResults() and not self.task.taskException():
          self.task.setResult( S_ERROR("Timed out") )
          print ">>>TIMED OUT!!!!"
        self.__resultsQueue.put( task )
        print "ResultsQueue = %s " % self.__resultsQueue.qsize()
      ## increase task counter
      taskCounter += 1
      self.__taskCounter = taskCounter
      ## toggle __working flag
      self.__working.value = 0
Example #36
0
 def updateLock(self):
     """Return the shared FTS agent lock, creating it lazily on first use."""
     if self.__updateLock:
         return self.__updateLock
     self.__updateLock = LockRing().getLock("FTSAgentLock")
     return self.__updateLock
Example #37
0
class Logging(object):
    """
  - Logging is a wrapper of the logger object from the standard "logging" library which integrates some DIRAC concepts.
  - It aimed at seamlessly replacing the previous gLogger implementation and thus provides the same interface.
  - Logging is generally used to create log records, that are then sent to pre-determined backends.

  Each Logging embeds a logger of "logging". Logging can instanciate "children" logging objects and
  all Logging objects inherit from the configuration of LoggingRoot, the first Logging object to be instanciated.
  """

    # componentName is a class variable: the component name is the same for every Logging objects
    # its default value is "Framework" but it can be configured in initialize() in LoggingRoot
    # it can be composed by the system name and the component name. For instance: "Monitoring/Atom"
    _componentName = "Framework"
    # use the lockRing singleton to save the Logging object
    _lockRing = LockRing()
    # lock the configuration of the Logging
    _lockConfig = _lockRing.getLock("config")

    def __init__(self, father=None, fatherName='', name='', customName=''):
        """
    Initialization of the Logging object. By default, 'fatherName' and 'name' are empty,
    because getChild only accepts string and the first empty string corresponds to the root logger.
    Example:
    >>> logging.getLogger('') == logging.getLogger('root') # root logger
    >>> logging.getLogger('root').getChild('log') == logging.getLogger('log') # log child of root

    :param Logging father: father of this new Logging.
    :param str fatherName: name of the father logger in the chain.
    :param str name: name of the logger in the chain.
    :param str customName: name of the logger in the chain:
                            - "root" does not appear at the beginning of the chain
                            - hierarchy is separated with "/"
    """

        # Logging chain
        self._children = {}
        self._parent = father

        # initialize display options and level with the ones of the Logging parent
        if self._parent is not None:
            self._options = self._parent.getDisplayOptions()
        else:
            self._options = {
                'headerIsShown': True,
                'timeStampIsShown': True,
                'contextIsShown': True,
                'threadIDIsShown': False,
                'color': False
            }

        # dictionary of the options modifications: give the same behaviour that the "logging" level
        # - propagation from the parent to the children when their levels are not set by the developer
        # - stop the propagation when a developer set a level to a child
        self._optionsModified = {
            'headerIsShown': False,
            'timeStampIsShown': False,
            'contextIsShown': False,
            'threadIDIsShown': False
        }

        self._backendsList = []

        # name of the Logging
        self.name = str(name)
        self._logger = logging.getLogger(fatherName).getChild(self.name)

        # update the custom name of the Logging adding the new Logging name in the entire path
        self._customName = os.path.join("/", customName, self.name)

        # Locks to make Logging thread-safe
        # we use RLock to prevent blocking in the Logging
        # lockInit to protect the initialization of a sublogger
        self._lockInit = self._lockRing.getLock("init")
        # lockOptions to protect the option modifications and the backendsList
        self._lockOptions = self._lockRing.getLock("options", recursive=True)
        # lockLevel to protect the level
        self._lockLevel = self._lockRing.getLock("level", recursive=True)
        # lockObjectLoader to protect the ObjectLoader singleton
        self._lockObjectLoader = self._lockRing.getLock("objectLoader")

    def showHeaders(self, yesno=True):
        """
    Depending on the value, display or not the prefix of the message.

    :param bool yesno: determine the log record format
    """
        self._setOption('headerIsShown', yesno)

    def showThreadIDs(self, yesno=True):
        """
    Depending on the value, display or not the thread ID.
    Make sure to enable the headers: showHeaders(True) before

    :param bool yesno: determine the log record format
    """
        self._setOption('threadIDIsShown', yesno)

    def showTimeStamps(self, yesno=True):
        """
    Depending on the value, display or not the timestamp of the message.
    Make sure to enable the headers: showHeaders(True) before

    :param bool yesno: determine the log record format
    """
        self._setOption('timeStampIsShown', yesno)

    def showContexts(self, yesno=True):
        """
    Depending on the value, display or not the context of the message.
    Make sure to enable the headers: showHeaders(True) before

    :param bool yesno: determine the log record format
    """
        self._setOption('contextIsShown', yesno)

    def _setOption(self, optionName, value, directCall=True):
        """
    Depending on the value, modify the value of the option and propagate the option to the children.
    The options of the children will be updated if they were not modified before by a developer.

    :param str optionName: name of the option to modify
    :param bool value: value of the option to set
    :param bool directCall: indicate whether the call is performed by a developer
    """
        # lock to prevent that two threads change the options at the same time
        self._lockOptions.acquire()
        try:
            if self._optionsModified[optionName] and not directCall:
                return

            if directCall:
                self._optionsModified[optionName] = True

            # update option
            self._options[optionName] = value

            # propagate in the children
            for child in self._children.values():
                child._setOption(optionName, value, directCall=False)  # pylint: disable=protected-access
        finally:
            self._lockOptions.release()

    def registerBackends(self, desiredBackends, backendOptions=None):
        """
    Attach a list of backends to the Logging object.
    Convert backend names to backend class names to Backend objects and add them to the Logging object

    :param desiredBackends: list of different names attaching to differents backends.
                             list of the possible values: ['stdout', 'stderr', 'file']
    :param backendOptions: dictionary of different backend options. Example: FileName='/tmp/log.txt'
    """
        for backendName in desiredBackends:
            self.registerBackend(backendName, backendOptions)

    def registerBackend(self, desiredBackend, backendOptions=None):
        """
    Attach a backend to the Logging object.
    Convert backend name to backend class name to a Backend object and add it to the Logging object

    :param desiredBackend: a name attaching to a backend type. List of possible values: ['stdout', 'stderr', 'file']
    :param backendOptions: dictionary of different backend options. Example: FileName='/tmp/log.txt'
    """
        # Remove white space and capitalize the first letter
        desiredBackend = desiredBackend.strip()
        # BUG FIX: an empty/whitespace-only name used to raise IndexError on
        # the desiredBackend[0] access below
        if not desiredBackend:
            self.warn("%s is not a valid backend name." % desiredBackend)
            return
        desiredBackend = desiredBackend[0].upper() + desiredBackend[1:]
        _class = self.__loadLogClass('Resources.LogBackends.%sBackend' %
                                     desiredBackend)
        if _class['OK']:
            # add the backend instance to the Logging
            self._addBackend(_class['Value'], backendOptions)
        else:
            self.warn("%s is not a valid backend name." % desiredBackend)

    def _addBackend(self, backendType, backendOptions=None):
        """
    Attach a Backend object to the Logging object.

    :param Backend backend: Backend object that has to be added
    :param backendOptions: a dictionary of different backend options. Example: {'FileName': '/tmp/log.txt'}
    """
        # lock to prevent that the level change before adding the new backend in the backendsList
        # and to prevent a change of the backendsList during the reading of the
        # list
        self._lockLevel.acquire()
        self._lockOptions.acquire()
        try:
            backend = backendType(backendOptions)
            self._logger.addHandler(backend.getHandler())
            self._addFilter(backend, backendOptions)
            self._backendsList.append(backend)
        finally:
            self._lockLevel.release()
            self._lockOptions.release()

    def _addFilter(self, backend, backendOptions):
        """
    Create a filter and add it to the handler of the backend.
    """
        for filterName in self.__getFilterList(backendOptions):
            options = self.__getFilterOptionsFromCFG(filterName)
            _class = self.__loadLogClass('Resources.LogFilters.%s' %
                                         options.get('Plugin'))
            if _class['OK']:
                # add the filter instance to the backend handler
                backend.getHandler().addFilter(_class['Value'](options))
            else:
                self.warn("%r is not a valid Filter name." % filterName)

    def __getFilterList(self, backendOptions):
        """
    Return list of defined filters.
    """
        if not (isinstance(backendOptions, dict)
                and 'Filter' in backendOptions):
            return []
        return [
            fil.strip() for fil in backendOptions['Filter'].split(',')
            if fil.strip()
        ]

    def __getFilterOptionsFromCFG(self, logFilter):
        """Get filter options from the configuration..

    :param logFilter: filter identifier: stdout, file, f04
    """
        # We have to put the import lines here to avoid a dependancy loop
        from DIRAC.ConfigurationSystem.Client.Helpers.Resources import getFilterConfig

        # Search filters config in the resources section
        retDictRessources = getFilterConfig(logFilter)
        if retDictRessources['OK']:
            return retDictRessources['Value']
        return {}

    def setLevel(self, levelName):
        """
    Check if the level name exists and set it.

    :param levelName: string representing the level to give to the logger
    :return: boolean representing if the setting is done or not
    """
        result = False
        if levelName.upper() in LogLevels.getLevelNames():
            self._logger.setLevel(LogLevels.getLevelValue(levelName))
            result = True
        return result

    def getLevel(self):
        """
    :return: the name of the level
    """
        return LogLevels.getLevel(self._logger.getEffectiveLevel())

    def shown(self, levelName):
        """
    Determine whether messages with a certain level will be displayed.

    :param levelName: string representing the level to analyse

    :return: boolean which give the answer
    """
        # lock to prevent a level change
        self._lockLevel.acquire()
        try:
            result = False
            if levelName.upper() in LogLevels.getLevelNames():
                result = LogLevels.getLevelValue(
                    self.getLevel()) <= LogLevels.getLevelValue(levelName)
            return result
        finally:
            self._lockLevel.release()

    @classmethod
    def getName(cls):
        """
    :return: "system name/component name"
    """
        return cls._componentName

    def getSubName(self):
        """
    :return: the name of the logger
    """
        return self.name

    def getDisplayOptions(self):
        """
    :return: the dictionary of the display options and their values. Must not be redefined
    """
        # lock to save the options which can be modified
        self._lockOptions.acquire()
        try:
            # copy the dictionary to avoid that every Logging has the same
            options = self._options.copy()
            return options
        finally:
            self._lockOptions.release()

    def __loadLogClass(self, modulePath):
        """Load class thread-safe."""
        # import ObjectLoader here to avoid a dependancy loop
        from DIRAC.Core.Utilities.ObjectLoader import ObjectLoader
        objLoader = ObjectLoader()
        # lock to avoid problem in ObjectLoader which is a singleton not
        # thread-safe
        self._lockObjectLoader.acquire()
        try:
            # load the Backend class
            return objLoader.loadObject(modulePath)
        finally:
            self._lockObjectLoader.release()
        # dead "return S_ERROR()" removed: the try/finally above always returns

    @staticmethod
    def getAllPossibleLevels():
        """
    :return: a list of all levels available
    """
        return LogLevels.getLevelNames()

    def always(self, sMsg, sVarMsg=''):
        """
    Always level
    """
        return self._createLogRecord(LogLevels.ALWAYS, sMsg, sVarMsg)

    def notice(self, sMsg, sVarMsg=''):
        """
    Notice level
    """
        return self._createLogRecord(LogLevels.NOTICE, sMsg, sVarMsg)

    def info(self, sMsg, sVarMsg=''):
        """
    Info level
    """
        return self._createLogRecord(LogLevels.INFO, sMsg, sVarMsg)

    def verbose(self, sMsg, sVarMsg=''):
        """
    Verbose level
    """
        return self._createLogRecord(LogLevels.VERBOSE, sMsg, sVarMsg)

    def debug(self, sMsg, sVarMsg=''):
        """
    Debug level
    """
        return self._createLogRecord(LogLevels.DEBUG, sMsg, sVarMsg)

    def warn(self, sMsg, sVarMsg=''):
        """
    Warn
    """
        return self._createLogRecord(LogLevels.WARN, sMsg, sVarMsg)

    def error(self, sMsg, sVarMsg=''):
        """
    Error level
    """
        return self._createLogRecord(LogLevels.ERROR, sMsg, sVarMsg)

    def exception(self, sMsg="", sVarMsg='', lException=False, lExcInfo=False):
        """
    Exception level
    """
        _ = lException  # Make pylint happy
        _ = lExcInfo
        return self._createLogRecord(LogLevels.ERROR,
                                     sMsg,
                                     sVarMsg,
                                     exc_info=True)

    def fatal(self, sMsg, sVarMsg=''):
        """
    Fatal level
    """
        return self._createLogRecord(LogLevels.FATAL, sMsg, sVarMsg)

    def _createLogRecord(self, level, sMsg, sVarMsg, exc_info=False):
        """
    Create a log record according to the level of the message.

    - The log record is always sent to the different backends
    - Backends have their own levels and may manage the display of the log record

    :param int level: level of the log record
    :param str sMsg: message
    :param str sVarMsg: additional message
    :param bool exc_info: indicates whether the stacktrace has to appear in the log record

    :return: boolean representing the result of the log record creation
    """

        # lock to prevent a level change after that the log is sent.
        self._lockLevel.acquire()
        try:
            # exc_info is only for exception to add the stack trace

            # extra is a way to add extra attributes to the log record:
            # - 'componentname': the system/component name
            # - 'varmessage': the variable message
            # - 'customname' : the name of the logger for the DIRAC usage: without 'root' and separated with '/'
            # as log records, extras attributes are not camel case
            extra = {
                'componentname': self._componentName,
                'varmessage': str(sVarMsg),
                'spacer': '' if not sVarMsg else ' ',
                'customname': self._customName
            }

            # options such as headers and threadIDs also depend on the logger, we have to add them to extra
            extra.update(self._options)

            self._logger.log(level, "%s", sMsg, exc_info=exc_info, extra=extra)
            # check whether the message is displayed
            isSent = LogLevels.getLevelValue(self.getLevel()) <= level
            return isSent
        finally:
            self._lockLevel.release()

    def showStack(self):
        """
    Display a debug message without any content.

    :return: boolean, True if the message is sent, else False
    """
        return self.debug('')

    def getSubLogger(self, subName, child=True):
        """
    Create a new Logging object, child of this Logging, if it does not exists.

    :param str subName: name of the child Logging
    """
        _ = child  # make pylint happy
        # lock to prevent that the method initializes two Logging for the same 'logging' logger
        # and to erase the existing _children[subName]
        self._lockInit.acquire()
        try:
            # Check if the object has a child with "subName".
            result = self._children.get(subName)
            if result is not None:
                return result
            # create a new child Logging
            childLogging = Logging(self, self._logger.name, subName,
                                   self._customName)
            self._children[subName] = childLogging
            return childLogging
        finally:
            self._lockInit.release()
Example #38
0
    def __init__(self, lockName="", recursive=False):
        """Create (or fetch) a named lock from the process-wide LockRing.

        :param str lockName: identifier of the lock inside the ring
        :param bool recursive: request an RLock instead of a plain Lock
        """
        # imported locally to avoid pulling DIRAC in at module import time
        from DIRAC.Core.Utilities.LockRing import LockRing

        self.__lr = LockRing()
        self.__lockName = lockName
        self.__lock = self.__lr.getLock(lockName, recursive=recursive)
Example #39
0
class Cache(object):
    """
    Basic cache class built on top of DictCache.

    WARNING: None of its methods is thread safe. Acquire / Release lock when
    using them !
  """
    def __init__(self, lifeTime, updateFunc):
        """
    Constructor

    :Parameters:
      **lifeTime** - `int`
        Lifetime of the elements in the cache ( seconds ! )
      **updateFunc** - `function`
        This function MUST return a S_OK | S_ERROR object. In the case of the first,
        its value must be a dictionary.
    """

        # Spread expirations by up to 20% so that thousands of jobs starting
        # together do not all refresh their caches at the same moment.
        jitter = 1 + 0.2 * random.random()

        self.log = gLogger.getSubLogger(self.__class__.__name__)

        self.__lifeTime = int(lifeTime * jitter)
        self.__updateFunc = updateFunc
        # Records served from the cache must remain valid for at least 30 s.
        self.__validSeconds = 30

        # Cache storage and its named lock
        self.__cache = DictCache()
        self.__cacheLock = LockRing()
        self.__cacheLock.getLock(self.__class__.__name__)

    #.............................................................................
    # internal cache object getter

    def cacheKeys(self):
        """
    Cache keys getter

    :returns: list with valid keys on the cache
    """
        return self.__cache.getKeys(validSeconds=self.__validSeconds)

    #.............................................................................
    # acquire / release Locks

    def acquireLock(self):
        """
    Acquires Cache lock
    """
        self.__cacheLock.acquire(self.__class__.__name__)

    def releaseLock(self):
        """
    Releases Cache lock
    """
        self.__cacheLock.release(self.__class__.__name__)

    #.............................................................................
    # Cache getters

    def get(self, cacheKeys):
        """
    Gets values for cacheKeys given, if all are found ( present on the cache and
    valid ), returns S_OK with the results. If any is not neither present not
    valid, returns S_ERROR.

    :Parameters:
      **cacheKeys** - `list`
        list of keys to be extracted from the cache

    :return: S_OK | S_ERROR
    """
        collected = {}

        for key in cacheKeys:
            row = self.__cache.get(key, validSeconds=self.__validSeconds)
            # missing or expired entry: fail the whole request
            if not row:
                self.log.error(str(key))
                return S_ERROR('Cannot get %s' % str(key))
            collected[key] = row

        return S_OK(collected)

    #.............................................................................
    # Cache refreshers

    def refreshCache(self):
        """
    Purges the cache and gets fresh data from the update function.

    :return: S_OK | S_ERROR. If the first, its content is the new cache.
    """
        self.log.verbose('refreshing...')

        self.__cache.purgeAll()

        fresh = self.__updateFunc()
        if not fresh['OK']:
            self.log.error(fresh['Message'])
            return fresh

        result = self.__updateCache(fresh['Value'])

        self.log.verbose('refreshed')

        return result

    #.............................................................................
    # Private methods

    def __updateCache(self, newCache):
        """
    Given the new cache dictionary, updates the internal cache with it. It sets
    a duration to the entries of <self.__lifeTime> seconds.

    :Parameters:
      **newCache** - `dict`
        dictionary containing a new cache

    :return: dictionary. It is newCache argument.
    """
        for key, value in newCache.items():
            self.__cache.add(key, self.__lifeTime, value=value)

        # We are assuming nothing will fail while inserting in the cache. There is
        # no apparent reason to suspect from that piece of code.
        return S_OK(newCache)
Example #40
0
class DictCache:
  """
  Thread-safe in-memory cache mapping keys to values with a per-entry
  expiration time. All public methods serialize through a named recursive
  lock; an optional delete function is invoked on the stored value whenever
  an entry is removed.
  """

  def __init__( self, deleteFunction = False ):
    """
    Initialize the dict cache.
      If a delete function is specified it will be invoked when deleting a cached object

    :param deleteFunction: callable taking the cached value, or False for no callback
    """
    # Recursive lock: public methods call each other while already holding
    # it (e.g. exists()/get() call delete() on expired entries).
    self.__lock = LockRing()
    self.__lock.getLock( self.__class__.__name__, recursive = True )

    # cKey -> { 'expirationTime' : datetime, 'value' : object }
    self.__cache = {}
    self.__deleteFunction = deleteFunction

  def exists( self, cKey, validSeconds = 0 ):
    """
    Returns True/False if the key exists for the given number of seconds
      Arguments:
        - cKey : identification key of the record
        - validSeconds : The amount of seconds the key has to be valid for
    """
    self.__lock.acquire( self.__class__.__name__ )
    try:
      # Is the key in the cache?
      if cKey in self.__cache:
        expTime = self.__cache[ cKey ][ 'expirationTime' ]
        # Still valid for at least validSeconds from now?
        if expTime > datetime.datetime.now() + datetime.timedelta( seconds = validSeconds ):
          return True
        else:
          # Expired: drop it eagerly (delete() re-acquires the recursive lock).
          self.delete( cKey )
      return False
    finally:
      self.__lock.release( self.__class__.__name__ )

  def delete( self, cKey ):
    """
    Delete a key from the cache
      Arguments:
        - cKey : identification key of the record
    """
    self.__lock.acquire( self.__class__.__name__ )
    try:
      if cKey not in self.__cache:
        return
      if self.__deleteFunction:
        self.__deleteFunction( self.__cache[ cKey ][ 'value' ] )
      del self.__cache[ cKey ]
    finally:
      self.__lock.release( self.__class__.__name__ )

  def add( self, cKey, validSeconds, value = None ):
    """
    Add a record to the cache
      Arguments:
        - cKey : identification key of the record
        - validSeconds : valid seconds of this record
        - value : value of the record
    """
    # A non-positive lifetime would create an already-expired entry: skip it.
    if max( 0, validSeconds ) == 0:
      return
    self.__lock.acquire( self.__class__.__name__ )
    try:
      vD = { 'expirationTime' : datetime.datetime.now() + datetime.timedelta( seconds = validSeconds ),
             'value' : value }
      self.__cache[ cKey ] = vD
    finally:
      self.__lock.release( self.__class__.__name__ )

  def get( self, cKey, validSeconds = 0 ):
    """
    Get a record from the cache. Returns False on a miss or expired entry
    (note: a stored value of False is therefore indistinguishable from a miss).
      Arguments:
        - cKey : identification key of the record
        - validSeconds : The amount of seconds the key has to be valid for
    """
    self.__lock.acquire( self.__class__.__name__ )
    try:
      # Is the key in the cache?
      if cKey in self.__cache:
        expTime = self.__cache[ cKey ][ 'expirationTime' ]
        # Still valid for at least validSeconds from now?
        if expTime > datetime.datetime.now() + datetime.timedelta( seconds = validSeconds ):
          return self.__cache[ cKey ][ 'value' ]
        else:
          # Expired: drop it eagerly.
          self.delete( cKey )
      return False
    finally:
      self.__lock.release( self.__class__.__name__ )

  def showContentsInString( self ):
    """
    Return a human readable string to represent the contents
    """
    self.__lock.acquire( self.__class__.__name__ )
    try:
      data = []
      for cKey in self.__cache:
        data.append( "%s:" % str( cKey ) )
        data.append( "\tExp: %s" % self.__cache[ cKey ][ 'expirationTime' ] )
        if self.__cache[ cKey ][ 'value' ]:
          data.append( "\tVal: %s" % self.__cache[ cKey ][ 'value' ] )
      return "\n".join( data )
    finally:
      self.__lock.release( self.__class__.__name__ )

  def getKeys( self, validSeconds = 0 ):
    """
    Get keys of every entry still valid for at least <validSeconds>
    """
    self.__lock.acquire( self.__class__.__name__ )
    try:
      keys = []
      limitTime = datetime.datetime.now() + datetime.timedelta( seconds = validSeconds )
      for cKey in self.__cache:
        if self.__cache[ cKey ][ 'expirationTime' ] > limitTime:
          keys.append( cKey )
      return keys
    finally:
      self.__lock.release( self.__class__.__name__ )

  def purgeExpired( self, expiredInSeconds = 0 ):
    """
    Purge all entries that are expired or will be expired in <expiredInSeconds>
    """
    self.__lock.acquire( self.__class__.__name__ )
    try:
      # Collect first, delete afterwards: never mutate the dict while
      # iterating it.
      keys = []
      limitTime = datetime.datetime.now() + datetime.timedelta( seconds = expiredInSeconds )
      for cKey in self.__cache:
        if self.__cache[ cKey ][ 'expirationTime' ] < limitTime:
          keys.append( cKey )
      for cKey in keys:
        if self.__deleteFunction:
          self.__deleteFunction( self.__cache[ cKey ][ 'value' ] )
        del self.__cache[ cKey ]
    finally:
      self.__lock.release( self.__class__.__name__ )

  def purgeAll( self ):
    """
    Purge all entries
    """
    self.__lock.acquire( self.__class__.__name__ )
    try:
      # Snapshot the keys: deleting entries while iterating a live keys()
      # view raises RuntimeError on Python 3.
      for cKey in list( self.__cache ):
        if self.__deleteFunction:
          self.__deleteFunction( self.__cache[ cKey ][ 'value' ] )
        del self.__cache[ cKey ]
    finally:
      self.__lock.release( self.__class__.__name__ )
Example #41
0
 def graphLock( cls ):
   """ Return the class-wide FTS graph lock, creating it lazily on first use. """
   lock = cls.__graphLock
   if not lock:
     # First caller creates the shared lock.
     lock = LockRing().getLock( "FTSGraphLock" )
     cls.__graphLock = lock
   return lock
Example #42
0
class MessageForwarder(object):
    """
    Forwards messages between local clients and remote services.

    For every client transport id (trid) a MessageClient connected to the
    destination service is kept; messages are relayed in both directions and
    the trid mappings are maintained consistently under a lock.
    """

    def __init__(self, msgBroker):
        # Guards __byClient / __srvToCliTrid against concurrent modification.
        self.__inOutLock = LockRing().getLock()
        self.__msgBroker = msgBroker
        # cliTrid -> {"srvEnd": MessageClient, "srvTrid": ..., "srvName": ...}
        self.__byClient = {}
        # server-side trid -> client trid (reverse lookup)
        self.__srvToCliTrid = {}

    def addClient(self, cliTrid, destination, clientInitParams, connectParams):
        """
        Create a MessageClient towards *destination* and register the
        cliTrid <-> srvTrid mapping.

        :return: connect result (S_OK | S_ERROR), or None on duplicated trid
        """
        if cliTrid in self.__byClient:
            gLogger.fatal("Trid is duplicated!! this shouldn't happen")
            return
        msgClient = MessageClient(destination, **clientInitParams)
        msgClient.subscribeToDisconnect(self.__srvDisconnect)
        msgClient.subscribeToAllMessages(self.msgFromSrv)
        msgClient.setUniqueName(connectParams[0])
        result = msgClient.connect(**connectParams[1])
        if not result["OK"]:
            return result
        self.__inOutLock.acquire()
        try:
            self.__byClient[cliTrid] = {"srvEnd": msgClient, "srvTrid": msgClient.getTrid(), "srvName": destination}
            self.__srvToCliTrid[msgClient.getTrid()] = cliTrid
        finally:
            self.__inOutLock.release()
        return result

    def __srvDisconnect(self, srvEndCli):
        """
        Handle a service-side disconnect: drop the client transport and the
        trid mappings of the corresponding client.
        """
        try:
            cliTrid = self.__srvToCliTrid[srvEndCli.getTrid()]
        except KeyError:
            # Dict lookups raise KeyError (the previous code caught
            # IndexError, which never matched). Without a mapping there is
            # nothing to clean up, so bail out instead of continuing with an
            # unbound cliTrid.
            gLogger.exception("This shouldn't happen!")
            return
        gLogger.info("Service %s disconnected messaging connection" % self.__byClient[cliTrid]["srvName"])
        self.__msgBroker.removeTransport(cliTrid)
        self.__removeClient(cliTrid)

    def cliDisconnect(self, cliTrid):
        """
        Handle a client-side disconnect: close the service-side MessageClient
        and drop the trid mappings.
        """
        if cliTrid not in self.__byClient:
            gLogger.fatal("This shouldn't happen!")
            return
        gLogger.info("Client to %s disconnected messaging connection" % self.__byClient[cliTrid]["srvName"])
        self.__byClient[cliTrid]["srvEnd"].disconnect()
        self.__removeClient(cliTrid)

    def __removeClient(self, cliTrid):
        """Drop both trid mappings of *cliTrid* under the lock."""
        self.__inOutLock.acquire()
        try:
            try:
                srvTrid = self.__byClient[cliTrid]["srvTrid"]
                self.__byClient.pop(cliTrid)
                self.__srvToCliTrid.pop(srvTrid)
            except Exception as e:
                gLogger.exception("This shouldn't happen!", e)
        finally:
            self.__inOutLock.release()

    def msgFromClient(self, cliTrid, msgObj):
        """
        Relay a message coming from a client to its destination service.

        :return: sendMessage result (S_OK | S_ERROR)
        """
        gLogger.info("Message %s to %s service" % (msgObj.getName(), self.__byClient[cliTrid]["srvName"]))
        result = self.__byClient[cliTrid]["srvEnd"].sendMessage(msgObj)
        return result

    def msgFromSrv(self, srvEndCli, msgObj):
        """
        Relay a message coming from a service back to its client.

        :return: broker sendMessage result (S_OK | S_ERROR)
        """
        try:
            cliTrid = self.__srvToCliTrid[srvEndCli.getTrid()]
        except Exception:
            gLogger.exception("This shouldn't happen")
            return S_ERROR("MsgFromSrv -> Mismatched srv2cli trid")
        gLogger.info("Message %s from %s service" % (msgObj.getName(), self.__byClient[cliTrid]["srvName"]))
        return self.__msgBroker.sendMessage(cliTrid, msgObj)
Example #43
0
    def run(self):
        """
    Task execution loop.

    Pops ProcessTask objects out of the pending queue, executes each one in a
    helper thread (so a timeout can be enforced on the join), and pushes tasks
    that carry callbacks onto the results queue. Exits when the stop event is
    set, after 10 consecutive idle polls (when not in keep-running mode), or
    by killing its own process after a timed-out / result-less task.

    :param self: self reference
    """
        # Start the watchdog in a daemon thread so it cannot block shutdown.
        self.__watchdogThread = threading.Thread(target=self.__watchdog)
        self.__watchdogThread.daemon = True
        self.__watchdogThread.start()

        # LockRing may be unavailable in this environment; if present, reset
        # all locks/events inherited from the parent process.
        if LockRing:
            # Reset all locks
            lr = LockRing()
            lr._openAll()
            lr._setAllEvents()

        # Number of tasks processed so far by this worker.
        taskCounter = 0
        # Consecutive empty polls of the pending queue.
        idleLoopCount = 0

        # Main loop.
        while True:

            # Draining: stopEvent is set, exit.
            if self.__stopEvent.is_set():
                return

            # Clear the current task.
            self.task = None

            # Blocking read from the queue with a 10 s poll interval.
            # NOTE(review): Queue.Empty is the Python 2 module name — confirm
            # the target interpreter.
            try:
                task = self.__pendingQueue.get(block=True, timeout=10)
            except Queue.Empty:
                # Idle poll: nothing to do.
                idleLoopCount += 1
                # 10th consecutive idle poll: exit unless told to keep running.
                if idleLoopCount == 10 and not self.__keepRunning:
                    return
                continue

            # Mark the worker as busy.
            self.__working.value = 1
            # Save the task so the watchdog/introspection can see it.
            self.task = task
            # Reset idle loop counter.
            idleLoopCount = 0

            # Run the task in a separate thread so join() can take a timeout.
            self.__processThread = threading.Thread(target=self.__processTask)
            self.__processThread.start()

            timeout = False
            noResults = False
            # Join the process thread, with a 10 s grace period past the
            # task's own timeout.
            if self.task.getTimeOut():
                self.__processThread.join(self.task.getTimeOut() + 10)
            else:
                self.__processThread.join()

            # Process thread still alive after the join: treat as timed out.
            if self.__processThread.is_alive():
                # NOTE(review): _Thread__stop is a CPython-2 private API and
                # was removed in Python 3 — confirm the target interpreter.
                self.__processThread._Thread__stop()
                self.task.setResult(S_ERROR(errno.ETIME, "Timed out"))
                timeout = True
            # If the task finished with no results, something bad happened,
            # e.g. an undetected timeout.
            if not self.task.taskResults() and not self.task.taskException():
                self.task.setResult(S_ERROR("Task produced no results"))
                noResults = True

            # If the task carries callbacks, hand it over for execution.
            if self.task.hasCallback() or self.task.hasPoolCallback():
                self.__resultsQueue.put(task)
            if timeout or noResults:
                # The task execution timed out: kill this worker process to
                # prevent the runaway task from lingering in the background.
                time.sleep(1)
                os.kill(self.pid, signal.SIGKILL)
                return
            # Bump the processed-task counter.
            taskCounter += 1
            self.__taskCounter = taskCounter
            # Mark the worker as idle again.
            self.__working.value = 0
Example #44
0
class SSLTransport(BaseTransport):

    __readWriteLock = LockRing().getLock()

    def __init__(self, *args, **kwargs):
        self.__writesDone = 0
        self.__locked = False
        BaseTransport.__init__(self, *args, **kwargs)

    def __lock(self, timeout=1000):
        while self.__locked and timeout:
            time.sleep(0.005)
            timeout -= 0.005
        if not timeout:
            return False
        SSLTransport.__readWriteLock.acquire()
        if self.__locked:
            SSLTransport.__readWriteLock.release()
            return self.__lock(timeout)
        self.__locked = True
        SSLTransport.__readWriteLock.release()
        return True

    def __unlock(self):
        self.__locked = False

    def initAsClient(self):
        retVal = gSocketInfoFactory.getSocket(self.stServerAddress,
                                              **self.extraArgsDict)
        if not retVal['OK']:
            return retVal
        self.oSocketInfo = retVal['Value']
        self.oSocket = self.oSocketInfo.getSSLSocket()
        if not self.oSocket.session_reused():
            gLogger.debug("New session connecting to server at %s" %
                          str(self.stServerAddress))
        self.remoteAddress = self.oSocket.getpeername()
        return S_OK()

    def initAsServer(self):
        if not self.serverMode():
            raise RuntimeError("Must be initialized as server mode")
        retVal = gSocketInfoFactory.getListeningSocket(self.stServerAddress,
                                                       self.iListenQueueSize,
                                                       self.bAllowReuseAddress,
                                                       **self.extraArgsDict)
        if not retVal['OK']:
            return retVal
        self.oSocketInfo = retVal['Value']
        self.oSocket = self.oSocketInfo.getSSLSocket()
        return S_OK()

    def close(self):
        gLogger.debug("Closing socket")
        try:
            self.oSocket.shutdown()
            self.oSocket.close()
        except:
            pass

    def renewServerContext(self):
        BaseTransport.renewServerContext(self)
        result = gSocketInfoFactory.renewServerContext(self.oSocketInfo)
        if not result['OK']:
            return result
        self.oSocketInfo = result['Value']
        self.oSocket = self.oSocketInfo.getSSLSocket()
        return S_OK()

    def handshake(self):
        retVal = self.oSocketInfo.doServerHandshake()
        if not retVal['OK']:
            return retVal
        creds = retVal['Value']
        if not self.oSocket.session_reused():
            gLogger.debug("New session connecting from client at %s" %
                          str(self.getRemoteAddress()))
        for key in creds.keys():
            self.peerCredentials[key] = creds[key]
        return S_OK()

    def setClientSocket(self, oSocket):
        if self.serverMode():
            raise RuntimeError("Must be initialized as client mode")
        self.oSocketInfo.setSSLSocket(oSocket)
        self.oSocket = oSocket
        self.remoteAddress = self.oSocket.getpeername()
        self.oSocket.settimeout(self.oSocketInfo.infoDict['timeout'])

    def acceptConnection(self):
        oClientTransport = SSLTransport(self.stServerAddress)
        oClientSocket, stClientAddress = self.oSocket.accept()
        retVal = self.oSocketInfo.clone()
        if not retVal['OK']:
            return retVal
        oClientTransport.oSocketInfo = retVal['Value']
        oClientTransport.setClientSocket(oClientSocket)
        return S_OK(oClientTransport)

    def _read(self, bufSize=4096, skipReadyCheck=False):
        self.__lock()
        try:
            timeout = self.oSocketInfo.infoDict['timeout']
            if timeout:
                start = time.time()
            while True:
                if timeout:
                    if time.time() - start > timeout:
                        return S_ERROR("Socket read timeout exceeded")
                try:
                    return S_OK(self.oSocket.recv(bufSize))
                except GSI.SSL.WantReadError:
                    time.sleep(0.001)
                except GSI.SSL.WantWriteError:
                    time.sleep(0.001)
                except GSI.SSL.ZeroReturnError:
                    return S_OK("")
                except Exception, e:
                    return S_ERROR("Exception while reading from peer: %s" %
                                   str(e))
        finally:
            self.__unlock()

    def isLocked(self):
        return self.__locked

    def _write(self, buffer):
        self.__lock()
        try:
            #Renegotiation
            if not self.oSocketInfo.infoDict['clientMode']:
                #self.__writesDone += 1
                if self.__writesDone > 1000:

                    self.__writesDone = 0
                    ok = self.oSocket.renegotiate()
                    if ok:
                        try:
                            ok = self.oSocket.do_handshake()
                        except Exception, e:
                            return S_ERROR("Renegotiation failed: %s" % str(e))

            sentBytes = 0
            timeout = self.oSocketInfo.infoDict['timeout']
            if timeout:
                start = time.time()
            while sentBytes < len(buffer):
                try:
                    if timeout:
                        if time.time() - start > timeout:
                            return S_ERROR("Socket write timeout exceeded")
                    sent = self.oSocket.write(buffer[sentBytes:])
                    if sent == 0:
                        return S_ERROR("Connection closed by peer")
                    if sent > 0:
                        sentBytes += sent
                except GSI.SSL.WantWriteError:
                    time.sleep(0.001)
                except GSI.SSL.WantReadError:
                    time.sleep(0.001)
                except Exception, e:
                    return S_ERROR("Error while sending: %s" % str(e))
            return S_OK(sentBytes)
Example #45
0
  def run( self ):
    """
    Task execution loop.

    Pops ProcessTask objects out of the pending queue, executes each one in a
    helper thread (so a timeout can be enforced on the join) and pushes tasks
    that carry callbacks to the results queue. Exits on the stop event, after
    10 consecutive idle polls (when not in keep-running mode), or by killing
    its own process after a timed-out / result-less task.

    :param self: self reference
    """
    ## start the watchdog in a daemon thread so it cannot block shutdown
    self.__watchdogThread = threading.Thread( target = self.__watchdog )
    self.__watchdogThread.daemon = True
    self.__watchdogThread.start()

    ## LockRing may be unavailable here; if present, reset all locks/events
    ## inherited from the parent process
    if LockRing:
      # Reset all locks
      lr = LockRing()
      lr._openAll()
      lr._setAllEvents()

    ## number of tasks processed so far by this worker
    taskCounter = 0
    ## consecutive empty polls of the pending queue
    idleLoopCount = 0

    ## main loop
    while True:

      ## draining, stopEvent is set, exiting
      if self.__stopEvent.is_set():
        return

      ## clear current task
      self.task = None

      ## blocking read from the queue with a 10 s poll interval
      ## NOTE(review): Queue.Empty is the Python 2 module name - confirm
      ## the target interpreter
      try:
        task = self.__pendingQueue.get( block = True, timeout = 10 )
      except Queue.Empty:
        ## idle poll, nothing to do
        idleLoopCount += 1
        ## 10th consecutive idle poll - exit unless told to keep running
        if idleLoopCount == 10 and not self.__keepRunning:
          return
        continue

      ## mark the worker as busy
      self.__working.value = 1
      ## save the task so the watchdog/introspection can see it
      self.task = task
      ## reset idle loop counter
      idleLoopCount = 0

      ## run the task in a separate thread so join() can take a timeout
      self.__processThread = threading.Thread( target = self.__processTask )
      self.__processThread.start()

      timeout = False
      noResults = False
      ## join processThread, with a 10 s grace period past the task timeout
      if self.task.getTimeOut():
        self.__processThread.join( self.task.getTimeOut()+10 )
      else:
        self.__processThread.join()

      ## processThread still alive after the join? treat as timed out
      if self.__processThread.is_alive():
        # NOTE(review): _Thread__stop is a CPython-2 private API, removed in
        # Python 3 - confirm the target interpreter
        self.__processThread._Thread__stop()
        self.task.setResult( S_ERROR( errno.ETIME, "Timed out" ) )
        timeout = True
      # if the task finished with no results, something bad happened, e.g.
      # undetected timeout
      if not self.task.taskResults() and not self.task.taskException():
        self.task.setResult( S_ERROR("Task produced no results") )
        noResults = True

      ## if the task carries callbacks, hand it over for execution
      if self.task.hasCallback() or self.task.hasPoolCallback():
        self.__resultsQueue.put( task )
      if timeout or noResults:
        # The task execution timed out: kill this worker process to prevent
        # the runaway task from lingering in the background
        time.sleep( 1 )
        os.kill( self.pid, signal.SIGKILL )
        return
      ## bump the processed-task counter
      taskCounter += 1
      self.__taskCounter = taskCounter
      ## mark the worker as idle again
      self.__working.value = 0
Example #46
0
class WORM:
    """
  Write One - Read Many

  Readers/writer lock implemented with a counting semaphore: up to
  <maxReads> readers may run concurrently, while a writer drains every
  semaphore slot and therefore runs exclusively.
  """

    def __init__(self, maxReads=10):
        from DIRAC.Core.Utilities.LockRing import LockRing

        self.__lr = LockRing()
        # Mutex serializing writers while they drain the semaphore.
        self.__lock = self.__lr.getLock()
        self.__maxReads = maxReads
        # One slot per allowed concurrent reader.
        self.__semaphore = threading.Semaphore(maxReads)

    def write(self, funcToCall):
        """
    Write decorator: the wrapped callable runs with exclusive access.

    :param funcToCall: callable to protect
    :return: wrapper preserving funcToCall's metadata (name, docstring)
    """
        from functools import wraps

        # functools.wraps keeps __name__/__doc__ of the decorated function,
        # so stack traces and introspection stay meaningful.
        @wraps(funcToCall)
        def __doWriteLock(*args, **kwargs):
            try:
                self.__startWriteZone()
                return funcToCall(*args, **kwargs)
            finally:
                self.__endWriteZone()

        return __doWriteLock

    def read(self, funcToCall):
        """
    Read decorator: the wrapped callable shares access with up to
    <maxReads> - 1 other readers.

    :param funcToCall: callable to protect
    :return: wrapper preserving funcToCall's metadata (name, docstring)
    """
        from functools import wraps

        @wraps(funcToCall)
        def __doReadLock(*args, **kwargs):
            try:
                self.__startReadZone()
                return funcToCall(*args, **kwargs)
            finally:
                self.__endReadZone()

        return __doReadLock

    def __startWriteZone(self):
        """
    Acquire every semaphore slot so no reader (or other writer) can enter.
    Blocks until all current readers have left.
    PRIVATE USE
    """
        # The mutex prevents two writers from dead-locking while each holds
        # only part of the semaphore slots.
        self.__lock.acquire()
        for _ in range(self.__maxReads):
            self.__semaphore.acquire()
        self.__lock.release()

    def __endWriteZone(self):
        """
    Release all semaphore slots taken by __startWriteZone.
    PRIVATE USE
    """
        for _ in range(self.__maxReads):
            self.__semaphore.release()

    def __startReadZone(self):
        """
    Take one reader slot; blocks while a writer holds the semaphore.
    PRIVATE USE
    """
        self.__semaphore.acquire()

    def __endReadZone(self):
        """
    Release the reader slot taken by __startReadZone.
    PRIVATE USE
    """
        self.__semaphore.release()