def __refreshAndPublish(self):
    self.__lastUpdateTime = time.time()
    gLogger.info("Refreshing from master server")
    from DIRAC.Core.DISET.RPCClient import RPCClient

    sMasterServer = gConfigurationData.getMasterServer()
    if sMasterServer:
        oClient = RPCClient(
            sMasterServer,
            timeout=self.__timeout,
            useCertificates=gConfigurationData.useServerCertificate(),
            skipCACheck=gConfigurationData.skipCACheck(),
        )
        dRetVal = _updateFromRemoteLocation(oClient)
        if not dRetVal["OK"]:
            gLogger.error("Can't update from master server", dRetVal["Message"])
            return False
        if gConfigurationData.getAutoPublish():
            gLogger.info("Publishing to master server...")
            dRetVal = oClient.publishSlaveServer(self.__url)
            if not dRetVal["OK"]:
                gLogger.error("Can't publish to master server", dRetVal["Message"])
        return True
    else:
        gLogger.warn("No master server is specified in the configuration, trying to get data from other slaves")
        return self.__refresh()["OK"]
def __sockConnect(self, hostAddress, sockType, timeout, retries):
    try:
        osSocket = socket.socket(sockType, socket.SOCK_STREAM)
    except socket.error as e:
        gLogger.warn("Exception while creating a socket:", str(e))
        return S_ERROR("Exception while creating a socket:%s" % str(e))
    # osSocket.setblocking( 0 )
    if timeout:
        osSocket.settimeout(1)  # we try to connect 3 times with 1 second timeout
    try:
        osSocket.connect(hostAddress)
    except socket.error as e:
        if e.args[0] == "timed out":
            osSocket.close()
            if retries:
                return self.__sockConnect(hostAddress, sockType, timeout, retries - 1)
            else:
                return S_ERROR("Can't connect: %s" % str(e))
        if e.args[0] not in (114, 115):
            return S_ERROR("Can't connect: %s" % str(e))
        # Connect in progress
        oL = select.select([], [osSocket], [], timeout)[1]
        if len(oL) == 0:
            osSocket.close()
            return S_ERROR("Connection timeout")
        errno = osSocket.getsockopt(socket.SOL_SOCKET, socket.SO_ERROR)
        if errno != 0:
            return S_ERROR("Can't connect: %s" % str((errno, os.strerror(errno))))
def __refreshAndPublish(self):
    self.__lastUpdateTime = time.time()
    gLogger.info("Refreshing from master server")
    from DIRAC.Core.DISET.RPCClient import RPCClient
    sMasterServer = gConfigurationData.getMasterServer()
    if sMasterServer:
        oClient = RPCClient(sMasterServer,
                            timeout=self.__timeout,
                            useCertificates=gConfigurationData.useServerCertificate(),
                            skipCACheck=gConfigurationData.skipCACheck())
        dRetVal = _updateFromRemoteLocation(oClient)
        if not dRetVal['OK']:
            gLogger.error("Can't update from master server", dRetVal['Message'])
            return False
        if gConfigurationData.getAutoPublish():
            gLogger.info("Publishing to master server...")
            dRetVal = oClient.publishSlaveServer(self.__url)
            if not dRetVal['OK']:
                gLogger.error("Can't publish to master server", dRetVal['Message'])
        return True
    else:
        gLogger.warn("No master server is specified in the configuration, trying to get data from other slaves")
        return self.__refresh()['OK']
def __init__(self, loadDefaultCFG=True):
    envVar = os.environ.get("DIRAC_FEWER_CFG_LOCKS", "no").lower()
    self.__locksEnabled = envVar not in ("y", "yes", "t", "true", "on", "1")
    if self.__locksEnabled:
        lr = LockRing()
        self.threadingEvent = lr.getEvent()
        self.threadingEvent.set()
        self.threadingLock = lr.getLock()
        self.runningThreadsNumber = 0
    self.__compressedConfigurationData = None
    self.configurationPath = "/DIRAC/Configuration"
    self.backupsDir = os.path.join(DIRAC.rootPath, "etc", "csbackup")
    self._isService = False
    self.localCFG = CFG()
    self.remoteCFG = CFG()
    self.mergedCFG = CFG()
    self.remoteServerList = []
    if loadDefaultCFG:
        defaultCFGFile = os.path.join(DIRAC.rootPath, "etc", "dirac.cfg")
        gLogger.debug("dirac.cfg should be at", "%s" % defaultCFGFile)
        retVal = self.loadFile(defaultCFGFile)
        if not retVal["OK"]:
            gLogger.warn("Can't load %s file" % defaultCFGFile)
    self.sync()
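# A minimal, standalone sketch (not part of DIRAC) of how the DIRAC_FEWER_CFG_LOCKS toggle used in
# __init__ above is interpreted: any value other than y/yes/t/true/on/1 (case-insensitive) keeps the
# configuration locks enabled. The example value "True" is purely illustrative.
import os

os.environ["DIRAC_FEWER_CFG_LOCKS"] = "True"
envVar = os.environ.get("DIRAC_FEWER_CFG_LOCKS", "no").lower()
locksEnabled = envVar not in ("y", "yes", "t", "true", "on", "1")
assert locksEnabled is False  # "true" disables the extra locking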
def __sockConnect(self, hostAddress, sockType, timeout, retries):
    try:
        osSocket = socket.socket(sockType, socket.SOCK_STREAM)
    except socket.error as e:
        gLogger.warn("Exception while creating a socket:", str(e))
        return S_ERROR("Exception while creating a socket:%s" % str(e))
    # osSocket.setblocking( 0 )
    if timeout:
        tsocket = self.getSocketTimeout()
        gLogger.debug("Connection timeout set to: ", tsocket)
        osSocket.settimeout(tsocket)  # we try to connect 3 times with 1 second timeout
    try:
        osSocket.connect(hostAddress)
    except socket.error as e:
        if e.args[0] == "timed out":
            osSocket.close()
            if retries:
                return self.__sockConnect(hostAddress, sockType, timeout, retries - 1)
            else:
                return S_ERROR("Can't connect: %s" % str(e))
        if e.args[0] not in (114, 115):
            return S_ERROR("Can't connect: %s" % str(e))
        # Connect in progress
        oL = select.select([], [osSocket], [], timeout)[1]
        if len(oL) == 0:
            osSocket.close()
            return S_ERROR("Connection timeout")
        errno = osSocket.getsockopt(socket.SOL_SOCKET, socket.SO_ERROR)
        if errno != 0:
            return S_ERROR("Can't connect: %s" % str((errno, os.strerror(errno))))
def _refreshAndPublish(self):
    """Refresh configuration and publish local updates"""
    self._lastUpdateTime = time.time()
    gLogger.info("Refreshing from master server")
    sMasterServer = gConfigurationData.getMasterServer()
    if sMasterServer:
        from DIRAC.ConfigurationSystem.Client.ConfigurationClient import ConfigurationClient

        oClient = ConfigurationClient(
            url=sMasterServer,
            timeout=self._timeout,
            useCertificates=gConfigurationData.useServerCertificate(),
            skipCACheck=gConfigurationData.skipCACheck(),
        )
        dRetVal = _updateFromRemoteLocation(oClient)
        if not dRetVal["OK"]:
            gLogger.error("Can't update from master server", dRetVal["Message"])
            return False
        if gConfigurationData.getAutoPublish():
            gLogger.info("Publishing to master server...")
            dRetVal = oClient.publishSlaveServer(self._url)
            if not dRetVal["OK"]:
                gLogger.error("Can't publish to master server", dRetVal["Message"])
        return True
    else:
        gLogger.warn("No master server is specified in the configuration, trying to get data from other slaves")
        return self._refresh()["OK"]
def __backupCurrentConfiguration(self, backupName):
    configurationFilename = "%s.cfg" % self.getName()
    configurationFile = os.path.join(DIRAC.rootPath, "etc", configurationFilename)
    today = Time.date()
    backupPath = os.path.join(self.getBackupDir(), str(today.year), "%02d" % today.month)
    mkDir(backupPath)
    backupFile = os.path.join(backupPath, configurationFilename.replace(".cfg", ".%s.zip" % backupName))
    if os.path.isfile(configurationFile):
        gLogger.info("Making a backup of configuration in %s" % backupFile)
        try:
            with zipfile.ZipFile(backupFile, "w", zipfile.ZIP_DEFLATED) as zf:
                zf.write(configurationFile, "%s.backup.%s" % (os.path.split(configurationFile)[1], backupName))
        except Exception:
            gLogger.exception()
            gLogger.error("Cannot backup configuration data file", "file %s" % backupFile)
    else:
        gLogger.warn("CS data file does not exist", configurationFile)
def __refresh(self):
    self.__lastUpdateTime = time.time()
    gLogger.debug("Refreshing configuration...")
    gatewayList = getGatewayURLs("Configuration/Server")
    updatingErrorsList = []
    if gatewayList:
        lInitialListOfServers = gatewayList
        gLogger.debug("Using configuration gateway", str(lInitialListOfServers[0]))
    else:
        lInitialListOfServers = gConfigurationData.getServers()
        gLogger.debug("Refreshing from list %s" % str(lInitialListOfServers))
    lRandomListOfServers = List.randomize(lInitialListOfServers)
    gLogger.debug("Randomized server list is %s" % ", ".join(lRandomListOfServers))
    for sServer in lRandomListOfServers:
        from DIRAC.Core.DISET.RPCClient import RPCClient
        oClient = RPCClient(sServer,
                            useCertificates=gConfigurationData.useServerCertificate(),
                            skipCACheck=gConfigurationData.skipCACheck())
        dRetVal = _updateFromRemoteLocation(oClient)
        if dRetVal['OK']:
            return dRetVal
        else:
            updatingErrorsList.append(dRetVal['Message'])
            gLogger.warn("Can't update from server",
                         "Error while updating from %s: %s" % (sServer, dRetVal['Message']))
    return S_ERROR("Reason(s):\n\t%s" % "\n\t".join(List.uniqueElements(updatingErrorsList)))
def __refresh(self):
    self.__lastUpdateTime = time.time()
    gLogger.debug("Refreshing configuration...")
    gatewayList = getGatewayURLs("Configuration/Server")
    updatingErrorsList = []
    if gatewayList:
        lInitialListOfServers = gatewayList
        gLogger.debug("Using configuration gateway", str(lInitialListOfServers[0]))
    else:
        lInitialListOfServers = gConfigurationData.getServers()
        gLogger.debug("Refreshing from list %s" % str(lInitialListOfServers))
    lRandomListOfServers = List.randomize(lInitialListOfServers)
    gLogger.debug("Randomized server list is %s" % ", ".join(lRandomListOfServers))
    for sServer in lRandomListOfServers:
        from DIRAC.Core.DISET.RPCClient import RPCClient
        oClient = RPCClient(sServer,
                            useCertificates=gConfigurationData.useServerCertificate(),
                            skipCACheck=gConfigurationData.skipCACheck())
        dRetVal = _updateFromRemoteLocation(oClient)
        if dRetVal['OK']:
            return dRetVal
        else:
            updatingErrorsList.append(dRetVal['Message'])
            gLogger.warn("Can't update from server",
                         "Error while updating from %s: %s" % (sServer, dRetVal['Message']))
            if dRetVal['Message'].find("Insane environment") > -1:
                break
    return S_ERROR("Reason(s):\n\t%s" % "\n\t".join(List.uniqueElements(updatingErrorsList)))
def updateConfiguration(self, sBuffer, committer="", updateVersionOption=False):
    """Update the master configuration with the newly received changes

    :param str sBuffer: newly received configuration data
    :param str committer: the user name of the committer
    :param bool updateVersionOption: flag to update the current configuration version

    :return: S_OK/S_ERROR of the write-to-disk of the new configuration
    """
    if not gConfigurationData.isMaster():
        return S_ERROR("Configuration modification is not allowed in this server")
    # Load the data in a ConfigurationData object
    oRemoteConfData = ConfigurationData(False)
    oRemoteConfData.loadRemoteCFGFromCompressedMem(sBuffer)
    if updateVersionOption:
        oRemoteConfData.setVersion(gConfigurationData.getVersion())
    # Test that remote and new versions are the same
    sRemoteVersion = oRemoteConfData.getVersion()
    sLocalVersion = gConfigurationData.getVersion()
    gLogger.info("Checking versions\nremote: %s\nlocal: %s" % (sRemoteVersion, sLocalVersion))
    if sRemoteVersion != sLocalVersion:
        if not gConfigurationData.mergingEnabled():
            return S_ERROR("Local and remote versions differ (%s vs %s). Cannot commit." % (sLocalVersion, sRemoteVersion))
        else:
            gLogger.info("AutoMerging new data!")
            if updateVersionOption:
                return S_ERROR("Cannot AutoMerge! version was overwritten")
            result = self.__mergeIndependentUpdates(oRemoteConfData)
            if not result['OK']:
                gLogger.warn("Could not AutoMerge!", result['Message'])
                return S_ERROR("AutoMerge failed: %s" % result['Message'])
            requestedRemoteCFG = result['Value']
            gLogger.info("AutoMerge successful!")
            oRemoteConfData.setRemoteCFG(requestedRemoteCFG)
    # Test that configuration names are the same
    sRemoteName = oRemoteConfData.getName()
    sLocalName = gConfigurationData.getName()
    if sRemoteName != sLocalName:
        return S_ERROR("Names differ: Server is %s and remote is %s" % (sLocalName, sRemoteName))
    # Update and generate a new version
    gLogger.info("Committing new data...")
    gConfigurationData.lock()
    gLogger.info("Setting the new CFG")
    gConfigurationData.setRemoteCFG(oRemoteConfData.getRemoteCFG())
    gConfigurationData.unlock()
    gLogger.info("Generating new version")
    gConfigurationData.generateNewVersion()
    # self.__checkSlavesStatus( forceWriteConfiguration = True )
    gLogger.info("Writing new version to disk")
    retVal = gConfigurationData.writeRemoteConfigurationToDisk("%s@%s" % (committer, gConfigurationData.getVersion()))
    gLogger.info("New version", gConfigurationData.getVersion())
    # Attempt to update the configuration on currently registered slave services
    if gConfigurationData.getAutoSlaveSync():
        result = self.forceSlavesUpdate()
        if not result['OK']:
            gLogger.warn('Failed to update slave servers')
    return retVal
def _clientCallback(self, conn, cert, errnum, depth, ok):
    # This obviously has to be updated
    if depth == 0 and ok == 1:
        hostnameCN = cert.get_subject().commonName
        # if hostnameCN in ( self.infoDict[ 'hostname' ], "host/%s" % self.infoDict[ 'hostname' ] ):
        if self.__isSameHost(hostnameCN, self.infoDict['hostname']):
            return 1
        else:
            gLogger.warn("Server is not who it's supposed to be",
                         "Connecting to %s and it's %s" % (self.infoDict['hostname'], hostnameCN))
            return ok
    return ok
def updateConfiguration(self, sBuffer, commiterDN="", updateVersionOption=False):
    if not gConfigurationData.isMaster():
        return S_ERROR("Configuration modification is not allowed in this server")
    # Load the data in a ConfigurationData object
    oRemoteConfData = ConfigurationData(False)
    oRemoteConfData.loadRemoteCFGFromCompressedMem(sBuffer)
    if updateVersionOption:
        oRemoteConfData.setVersion(gConfigurationData.getVersion())
    # Test that remote and new versions are the same
    sRemoteVersion = oRemoteConfData.getVersion()
    sLocalVersion = gConfigurationData.getVersion()
    gLogger.info("Checking versions\nremote: %s\nlocal: %s" % (sRemoteVersion, sLocalVersion))
    if sRemoteVersion != sLocalVersion:
        if not gConfigurationData.mergingEnabled():
            return S_ERROR("Local and remote versions differ (%s vs %s). Cannot commit." % (sLocalVersion, sRemoteVersion))
        else:
            gLogger.info("AutoMerging new data!")
            if updateVersionOption:
                return S_ERROR("Cannot AutoMerge! version was overwritten")
            result = self.__mergeIndependentUpdates(oRemoteConfData)
            if not result['OK']:
                gLogger.warn("Could not AutoMerge!", result['Message'])
                return S_ERROR("AutoMerge failed: %s" % result['Message'])
            requestedRemoteCFG = result['Value']
            gLogger.info("AutoMerge successful!")
            oRemoteConfData.setRemoteCFG(requestedRemoteCFG)
    # Test that configuration names are the same
    sRemoteName = oRemoteConfData.getName()
    sLocalName = gConfigurationData.getName()
    if sRemoteName != sLocalName:
        return S_ERROR("Names differ: Server is %s and remote is %s" % (sLocalName, sRemoteName))
    # Update and generate a new version
    gLogger.info("Committing new data...")
    gConfigurationData.lock()
    gLogger.info("Setting the new CFG")
    gConfigurationData.setRemoteCFG(oRemoteConfData.getRemoteCFG())
    gConfigurationData.unlock()
    gLogger.info("Generating new version")
    gConfigurationData.generateNewVersion()
    # self.__checkSlavesStatus( forceWriteConfiguration = True )
    gLogger.info("Writing new version to disk!")
    retVal = gConfigurationData.writeRemoteConfigurationToDisk("%s@%s" % (commiterDN, gConfigurationData.getVersion()))
    gLogger.info("New version it is!")
    return retVal
def _clientCallback(self, conn, cert, errnum, depth, ok):
    # This obviously has to be updated
    if depth == 0 and ok == 1:
        hostnameCN = cert.get_subject().commonName
        # if hostnameCN in ( self.infoDict[ 'hostname' ], "host/%s" % self.infoDict[ 'hostname' ] ):
        if self.__isSameHost(hostnameCN, self.infoDict['hostname']):
            return 1
        else:
            gLogger.warn("Server is not who it's supposed to be",
                         "Connecting to %s and it's %s" % (self.infoDict['hostname'], hostnameCN))
            return ok
    return ok
def _refresh(self, fromMaster=False):
    """Refresh configuration"""
    self._lastUpdateTime = time.time()
    gLogger.debug("Refreshing configuration...")
    gatewayList = getGatewayURLs("Configuration/Server")
    updatingErrorsList = []
    if gatewayList:
        initialServerList = gatewayList
        gLogger.debug("Using configuration gateway", str(initialServerList[0]))
    elif fromMaster:
        masterServer = gConfigurationData.getMasterServer()
        initialServerList = [masterServer]
        gLogger.debug("Refreshing from master %s" % masterServer)
    else:
        initialServerList = gConfigurationData.getServers()
        gLogger.debug("Refreshing from list %s" % str(initialServerList))
    # If no servers in the initial list, we are supposed to use the local configuration only
    if not initialServerList:
        return S_OK()
    randomServerList = List.randomize(initialServerList)
    gLogger.debug("Randomized server list is %s" % ", ".join(randomServerList))
    for sServer in randomServerList:
        from DIRAC.ConfigurationSystem.Client.ConfigurationClient import ConfigurationClient

        oClient = ConfigurationClient(
            url=sServer,
            useCertificates=gConfigurationData.useServerCertificate(),
            skipCACheck=gConfigurationData.skipCACheck(),
        )
        dRetVal = _updateFromRemoteLocation(oClient)
        if dRetVal["OK"]:
            self._refreshTime = gConfigurationData.getRefreshTime()
            return dRetVal
        else:
            updatingErrorsList.append(dRetVal["Message"])
            gLogger.warn("Can't update from server",
                         "Error while updating from %s: %s" % (sServer, dRetVal["Message"]))
            if dRetVal["Message"].find("Insane environment") > -1:
                break
    return S_ERROR("Reason(s):\n\t%s" % "\n\t".join(List.uniqueElements(updatingErrorsList)))
def updateConfiguration(self, sBuffer, commiter="", updateVersionOption=False):
    if not gConfigurationData.isMaster():
        return S_ERROR("Configuration modification is not allowed in this server")
    # Load the data in a ConfigurationData object
    oRemoteConfData = ConfigurationData(False)
    oRemoteConfData.loadRemoteCFGFromCompressedMem(sBuffer)
    if updateVersionOption:
        oRemoteConfData.setVersion(gConfigurationData.getVersion())
    # Test that remote and new versions are the same
    sRemoteVersion = oRemoteConfData.getVersion()
    sLocalVersion = gConfigurationData.getVersion()
    gLogger.info("Checking versions\nremote: %s\nlocal: %s" % (sRemoteVersion, sLocalVersion))
    if sRemoteVersion != sLocalVersion:
        if not gConfigurationData.mergingEnabled():
            return S_ERROR(
                "Local and remote versions differ (%s vs %s). Cannot commit." % (sLocalVersion, sRemoteVersion)
            )
        else:
            gLogger.info("AutoMerging new data!")
            if updateVersionOption:
                return S_ERROR("Cannot AutoMerge! version was overwritten")
            result = self.__mergeIndependentUpdates(oRemoteConfData)
            if not result["OK"]:
                gLogger.warn("Could not AutoMerge!", result["Message"])
                return S_ERROR("AutoMerge failed: %s" % result["Message"])
            requestedRemoteCFG = result["Value"]
            gLogger.info("AutoMerge successful!")
            oRemoteConfData.setRemoteCFG(requestedRemoteCFG)
    # Test that configuration names are the same
    sRemoteName = oRemoteConfData.getName()
    sLocalName = gConfigurationData.getName()
    if sRemoteName != sLocalName:
        return S_ERROR("Names differ: Server is %s and remote is %s" % (sLocalName, sRemoteName))
    # Update and generate a new version
    gLogger.info("Committing new data...")
    gConfigurationData.lock()
    gLogger.info("Setting the new CFG")
    gConfigurationData.setRemoteCFG(oRemoteConfData.getRemoteCFG())
    gConfigurationData.unlock()
    gLogger.info("Generating new version")
    gConfigurationData.generateNewVersion()
    # self.__checkSlavesStatus( forceWriteConfiguration = True )
    gLogger.info("Writing new version to disk!")
    retVal = gConfigurationData.writeRemoteConfigurationToDisk(
        "%s@%s" % (commiter, gConfigurationData.getVersion())
    )
    gLogger.info("New version it is!")
    return retVal
def loadObjects(path, reFilter=None, parentClass=None):
    if not reFilter:
        reFilter = re.compile(r".*[a-z1-9]\.py$")
    pathList = List.fromChar(path, "/")
    parentModuleList = ["%sDIRAC" % ext for ext in CSGlobals.getCSExtensions()] + ['DIRAC']
    objectsToLoad = {}
    # Find which object files match
    for parentModule in parentModuleList:
        objDir = os.path.join(DIRAC.rootPath, parentModule, *pathList)
        if not os.path.isdir(objDir):
            continue
        for objFile in os.listdir(objDir):
            if reFilter.match(objFile):
                pythonClassName = objFile[:-3]
                if pythonClassName not in objectsToLoad:
                    gLogger.info("Adding to load queue %s/%s/%s" % (parentModule, path, pythonClassName))
                    objectsToLoad[pythonClassName] = parentModule
    # Load them!
    loadedObjects = {}
    for pythonClassName in objectsToLoad:
        parentModule = objectsToLoad[pythonClassName]
        try:
            # Where parentModule can be DIRAC, pathList is something like [ "AccountingSystem", "Client", "Types" ]
            # And the python class name is.. well, the python class name
            objPythonPath = "%s.%s.%s" % (parentModule, ".".join(pathList), pythonClassName)
            objModule = __import__(objPythonPath, globals(), locals(), pythonClassName)
            objClass = getattr(objModule, pythonClassName)
        except Exception as e:
            gLogger.error("Can't load type %s/%s: %s" % (parentModule, pythonClassName, str(e)))
            continue
        if parentClass == objClass:
            continue
        if parentClass and not issubclass(objClass, parentClass):
            gLogger.warn("%s is not a subclass of %s. Skipping" % (objClass, parentClass))
            continue
        gLogger.info("Loaded %s" % objPythonPath)
        loadedObjects[pythonClassName] = objClass
    return loadedObjects
def __backupCurrentConfiguration(self, backupName):
    configurationFilename = "%s.cfg" % self.getName()
    configurationFile = os.path.join(DIRAC.rootPath, "etc", configurationFilename)
    today = Time.date()
    backupPath = os.path.join(self.getBackupDir(), str(today.year), "%02d" % today.month)
    mkDir(backupPath)
    backupFile = os.path.join(backupPath, configurationFilename.replace(".cfg", ".%s.zip" % backupName))
    if os.path.isfile(configurationFile):
        gLogger.info("Making a backup of configuration in %s" % backupFile)
        try:
            with zipfile.ZipFile(backupFile, "w", zipfile.ZIP_DEFLATED) as zf:
                zf.write(configurationFile, "%s.backup.%s" % (os.path.split(configurationFile)[1], backupName))
        except Exception:
            gLogger.exception()
            gLogger.error("Cannot backup configuration data file", "file %s" % backupFile)
    else:
        gLogger.warn("CS data file does not exist", configurationFile)
def _checkSlavesStatus(self, forceWriteConfiguration=False):
    """Check if slave servers are still available

    :param forceWriteConfiguration: (default False) Force rewriting configuration after checking slaves
    """
    gLogger.info("Checking status of slave servers")
    iGraceTime = gConfigurationData.getSlavesGraceTime()
    bModifiedSlaveServers = False
    for sSlaveURL in list(self.dAliveSlaveServers):
        if time.time() - self.dAliveSlaveServers[sSlaveURL] > iGraceTime:
            gLogger.warn("Found dead slave", sSlaveURL)
            del self.dAliveSlaveServers[sSlaveURL]
            bModifiedSlaveServers = True
    if bModifiedSlaveServers or forceWriteConfiguration:
        gConfigurationData.setServers(", ".join(self.dAliveSlaveServers))
        self.__generateNewVersion()
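# A standalone sketch (not DIRAC code) of the grace-time pruning done by _checkSlavesStatus above:
# slaves whose last heartbeat is older than the grace period are dropped from the alive list.
# The 600-second grace time and the slave URLs are illustrative assumptions.
import time

graceTime = 600
aliveSlaves = {
    "dips://slave1:9135/Configuration/Server": time.time(),
    "dips://slave2:9135/Configuration/Server": time.time() - 2 * graceTime,
}
for slaveURL in list(aliveSlaves):
    if time.time() - aliveSlaves[slaveURL] > graceTime:
        del aliveSlaves[slaveURL]
print(sorted(aliveSlaves))  # only slave1 survives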
def loadObjects(path, reFilter=None, parentClass=None):
    if not reFilter:
        reFilter = re.compile(r".*[a-z1-9]\.py$")
    pathList = List.fromChar(path, "/")
    parentModuleList = ["%sDIRAC" % ext for ext in CSGlobals.getCSExtensions()] + ['DIRAC']
    objectsToLoad = {}
    # Find which object files match
    for parentModule in parentModuleList:
        objDir = os.path.join(DIRAC.rootPath, parentModule, *pathList)
        if not os.path.isdir(objDir):
            continue
        for objFile in os.listdir(objDir):
            if reFilter.match(objFile):
                pythonClassName = objFile[:-3]
                if pythonClassName not in objectsToLoad:
                    gLogger.info("Adding to message load queue %s/%s/%s" % (parentModule, path, pythonClassName))
                    objectsToLoad[pythonClassName] = parentModule
    # Load them!
    loadedObjects = {}
    for pythonClassName in objectsToLoad:
        parentModule = objectsToLoad[pythonClassName]
        try:
            # Where parentModule can be DIRAC, pathList is something like [ "AccountingSystem", "Client", "Types" ]
            # And the python class name is.. well, the python class name
            objPythonPath = "%s.%s.%s" % (parentModule, ".".join(pathList), pythonClassName)
            objModule = __import__(objPythonPath, globals(), locals(), pythonClassName)
            objClass = getattr(objModule, pythonClassName)
        except Exception as e:
            gLogger.exception("Can't load type %s/%s: %s" % (parentModule, pythonClassName, str(e)))
            continue
        if parentClass == objClass:
            continue
        if parentClass and not issubclass(objClass, parentClass):
            gLogger.warn("%s is not a subclass of %s. Skipping" % (objClass, parentClass))
            continue
        gLogger.info("Loaded %s" % objPythonPath)
        loadedObjects[pythonClassName] = objClass
    return loadedObjects
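# A minimal sketch (not DIRAC code) of the __import__/getattr pattern that loadObjects relies on:
# the module path is imported and the class of the same name is taken from the returned module.
# "json"/"JSONDecoder" stand in here for a DIRAC extension module and class.
objPythonPath = "json"
pythonClassName = "JSONDecoder"
objModule = __import__(objPythonPath, globals(), locals(), pythonClassName)
objClass = getattr(objModule, pythonClassName)
assert objClass.__name__ == "JSONDecoder"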
def getHostNickName(self, credDict):
    """
    Discover the host nickname associated to the DN.
    The nickname will be included in the credentials dictionary.

    @type credDict: dictionary
    @param credDict: Credentials to check
    @return: Boolean specifying whether the nickname was found
    """
    if not self.KW_DN in credDict:
        return True
    if not self.KW_GROUP in credDict:
        return False
    retVal = CS.getHostnameForDN(credDict[self.KW_DN])
    if not retVal['OK']:
        gLogger.warn("Cannot find hostname for DN %s: %s" % (credDict[self.KW_DN], retVal['Message']))
        return False
    credDict[self.KW_USERNAME] = retVal['Value']
    return True
def __init__(self, loadDefaultCFG=True):
    self.threadingEvent = threading.Event()
    self.threadingEvent.set()
    self.threadingLock = threading.Lock()
    self.runningThreadsNumber = 0
    self.compressedConfigurationData = ""
    self.configurationPath = "/DIRAC/Configuration"
    self.backupsDir = os.path.join(DIRAC.rootPath, "etc", "csbackup")
    self._isService = False
    self.localCFG = CFG()
    self.remoteCFG = CFG()
    self.mergedCFG = CFG()
    self.remoteServerList = []
    if loadDefaultCFG:
        defaultCFGFile = os.path.join(DIRAC.rootPath, "etc", "dirac.cfg")
        gLogger.debug("dirac.cfg should be at", "%s" % defaultCFGFile)
        retVal = self.loadFile(defaultCFGFile)
        if not retVal['OK']:
            gLogger.warn("Can't load %s file" % defaultCFGFile)
    self.sync()
def getHostNickName(self, credDict):
    """
    Discover the host nickname associated to the DN.
    The nickname will be included in the credentials dictionary.

    :type credDict: dictionary
    :param credDict: Credentials to check
    :return: Boolean specifying whether the nickname was found
    """
    if self.KW_DN not in credDict:
        return True
    if self.KW_GROUP not in credDict:
        return False
    retVal = CS.getHostnameForDN(credDict[self.KW_DN])
    if not retVal['OK']:
        gLogger.warn("Cannot find hostname for DN %s: %s" % (credDict[self.KW_DN], retVal['Message']))
        return False
    credDict[self.KW_USERNAME] = retVal['Value']
    credDict[self.KW_PROPERTIES] = CS.getPropertiesForHost(credDict[self.KW_USERNAME], [])
    return True
def __sslHandshake(self):
    start = time.time()
    timeout = self.infoDict['timeout']
    while True:
        if timeout:
            if time.time() - start > timeout:
                return S_ERROR("Handshake timeout exceeded")
        try:
            self.sslSocket.do_handshake()
            break
        except GSI.SSL.WantReadError:
            time.sleep(0.001)
        except GSI.SSL.WantWriteError:
            time.sleep(0.001)
        except GSI.SSL.Error as v:
            # gLogger.warn( "Error while handshaking", "\n".join( [ stError[2] for stError in v.args[0] ] ) )
            gLogger.warn("Error while handshaking", v)
            return S_ERROR("Error while handshaking")
        except Exception as v:
            gLogger.warn("Error while handshaking", v)
            return S_ERROR("Error while handshaking")
def __init__(self, loadDefaultCFG=True):
    lr = LockRing()
    self.threadingEvent = lr.getEvent()
    self.threadingEvent.set()
    self.threadingLock = lr.getLock()
    self.runningThreadsNumber = 0
    self.compressedConfigurationData = ""
    self.configurationPath = "/DIRAC/Configuration"
    self.backupsDir = os.path.join(DIRAC.rootPath, "etc", "csbackup")
    self._isService = False
    self.localCFG = CFG()
    self.remoteCFG = CFG()
    self.mergedCFG = CFG()
    self.remoteServerList = []
    if loadDefaultCFG:
        defaultCFGFile = os.path.join(DIRAC.rootPath, "etc", "dirac.cfg")
        gLogger.debug("dirac.cfg should be at", "%s" % defaultCFGFile)
        retVal = self.loadFile(defaultCFGFile)
        if not retVal['OK']:
            gLogger.warn("Can't load %s file" % defaultCFGFile)
    self.sync()
def __refresh(self):
    self.__lastUpdateTime = time.time()
    gLogger.debug("Refreshing configuration...")
    gatewayList = getGatewayURLs("Configuration/Server")
    updatingErrorsList = []
    if gatewayList:
        initialServerList = gatewayList
        gLogger.debug("Using configuration gateway", str(initialServerList[0]))
    else:
        initialServerList = gConfigurationData.getServers()
        gLogger.debug("Refreshing from list %s" % str(initialServerList))
    # If no servers in the initial list, we are supposed to use the local configuration only
    if not initialServerList:
        return S_OK()
    randomServerList = List.randomize(initialServerList)
    gLogger.debug("Randomized server list is %s" % ", ".join(randomServerList))
    for sServer in randomServerList:
        from DIRAC.Core.DISET.RPCClient import RPCClient
        oClient = RPCClient(sServer,
                            useCertificates=gConfigurationData.useServerCertificate(),
                            skipCACheck=gConfigurationData.skipCACheck())
        dRetVal = _updateFromRemoteLocation(oClient)
        if dRetVal['OK']:
            return dRetVal
        else:
            updatingErrorsList.append(dRetVal['Message'])
            gLogger.warn("Can't update from server",
                         "Error while updating from %s: %s" % (sServer, dRetVal['Message']))
            if dRetVal['Message'].find("Insane environment") > -1:
                break
    return S_ERROR("Reason(s):\n\t%s" % "\n\t".join(List.uniqueElements(updatingErrorsList)))
def __sslHandshake(self):
    """
    Do the SSL Handshake

    :return: S_ERROR / S_OK with dictionary of user credentials
    """
    start = time.time()
    timeout = self.infoDict['timeout']
    while True:
        if timeout:
            if time.time() - start > timeout:
                return S_ERROR("Handshake timeout exceeded")
        try:
            self.sslSocket.do_handshake()
            break
        except GSI.SSL.WantReadError:
            time.sleep(0.001)
        except GSI.SSL.WantWriteError:
            time.sleep(0.001)
        except GSI.SSL.Error as v:
            if self.__retry < 3:
                self.__retry += 1
                return self.__sslHandshake()
            else:
                # gLogger.warn( "Error while handshaking", "\n".join( [ stError[2] for stError in v.args[0] ] ) )
                gLogger.warn("Error while handshaking", v)
                return S_ERROR("Error while handshaking")
        except Exception as v:
            gLogger.warn("Error while handshaking", v)
            if self.__retry < 3:
                self.__retry += 1
                return self.__sslHandshake()
            else:
                # gLogger.warn( "Error while handshaking", "\n".join( [ stError[2] for stError in v.args[0] ] ) )
                gLogger.warn("Error while handshaking", v)
                return S_ERROR("Error while handshaking")
    credentialsDict = self.gatherPeerCredentials()
    if self.infoDict['clientMode']:
        hostnameCN = credentialsDict['CN']
        # if hostnameCN.split("/")[-1] != self.infoDict[ 'hostname' ]:
        if not self.__isSameHost(hostnameCN, self.infoDict['hostname']):
            gLogger.warn("Server is not who it's supposed to be",
                         "Connecting to %s and it's %s" % (self.infoDict['hostname'], hostnameCN))
    gLogger.debug("", "Authenticated peer (%s)" % credentialsDict['DN'])
    return S_OK(credentialsDict)
def __refresh(self):
    self.__lastUpdateTime = time.time()
    gLogger.debug("Refreshing configuration...")
    gatewayList = getGatewayURLs("Configuration/Server")
    updatingErrorsList = []
    if gatewayList:
        initialServerList = gatewayList
        gLogger.debug("Using configuration gateway", str(initialServerList[0]))
    else:
        initialServerList = gConfigurationData.getServers()
        gLogger.debug("Refreshing from list %s" % str(initialServerList))
    # If no servers in the initial list, we are supposed to use the local configuration only
    if not initialServerList:
        return S_OK()
    randomServerList = List.randomize(initialServerList)
    gLogger.debug("Randomized server list is %s" % ", ".join(randomServerList))
    for sServer in randomServerList:
        from DIRAC.Core.DISET.RPCClient import RPCClient
        oClient = RPCClient(
            sServer,
            useCertificates=gConfigurationData.useServerCertificate(),
            skipCACheck=gConfigurationData.skipCACheck(),
        )
        dRetVal = _updateFromRemoteLocation(oClient)
        if dRetVal["OK"]:
            return dRetVal
        else:
            updatingErrorsList.append(dRetVal["Message"])
            gLogger.warn("Can't update from server",
                         "Error while updating from %s: %s" % (sServer, dRetVal["Message"]))
            if dRetVal["Message"].find("Insane environment") > -1:
                break
    return S_ERROR("Reason(s):\n\t%s" % "\n\t".join(List.uniqueElements(updatingErrorsList)))
def __sslHandshake(self):
    """
    Do the SSL Handshake

    :return: S_ERROR / S_OK with dictionary of user credentials
    """
    start = time.time()
    timeout = self.infoDict['timeout']
    while True:
        if timeout:
            if time.time() - start > timeout:
                return S_ERROR("Handshake timeout exceeded")
        try:
            self.sslSocket.do_handshake()
            break
        except GSI.SSL.WantReadError:
            time.sleep(0.001)
        except GSI.SSL.WantWriteError:
            time.sleep(0.001)
        except GSI.SSL.Error as v:
            if self.__retry < 3:
                self.__retry += 1
                return self.__sslHandshake()
            else:
                # gLogger.warn( "Error while handshaking", "\n".join( [ stError[2] for stError in v.args[0] ] ) )
                gLogger.warn("Error while handshaking", v)
                return S_ERROR("Error while handshaking")
        except Exception as v:
            gLogger.warn("Error while handshaking", v)
            if self.__retry < 3:
                self.__retry += 1
                return self.__sslHandshake()
            else:
                # gLogger.warn( "Error while handshaking", "\n".join( [ stError[2] for stError in v.args[0] ] ) )
                gLogger.warn("Error while handshaking", v)
                return S_ERROR("Error while handshaking")
def __sslHandshake(self):
    """
    Do the SSL Handshake

    :return: S_ERROR / S_OK with dictionary of user credentials
    """
    start = time.time()
    timeout = self.infoDict['timeout']
    while True:
        if timeout:
            if time.time() - start > timeout:
                return S_ERROR("Handshake timeout exceeded")
        try:
            self.sslSocket.do_handshake()
            break
        except GSI.SSL.WantReadError:
            time.sleep(0.001)
        except GSI.SSL.WantWriteError:
            time.sleep(0.001)
        except GSI.SSL.Error as v:
            if self.__retry < 3:
                self.__retry += 1
                return self.__sslHandshake()
            else:
                # gLogger.warn( "Error while handshaking", "\n".join( [ stError[2] for stError in v.args[0] ] ) )
                gLogger.warn("Error while handshaking", v)
                return S_ERROR("Error while handshaking")
        except Exception as v:
            gLogger.warn("Error while handshaking", v)
            if self.__retry < 3:
                self.__retry += 1
                return self.__sslHandshake()
            else:
                # gLogger.warn( "Error while handshaking", "\n".join( [ stError[2] for stError in v.args[0] ] ) )
                gLogger.warn("Error while handshaking", v)
                return S_ERROR("Error while handshaking")
def _connect(self):
    """Establish the connection.
    It uses the URL discovered in __discoverURL.
    In case the connection cannot be established, __discoverURL
    is called again, and _connect calls itself.
    We stop after trying self.__nbOfRetry * self.__nbOfUrls

    :return: S_OK()/S_ERROR()
    """
    # Check if the useServerCertificate configuration changed
    # Note: I am not really sure that all this block makes
    # any sense at all since all these variables are
    # evaluated in __discoverCredentialsToUse
    if gConfig.useServerCertificate() != self.__useCertificates:
        if self.__forceUseCertificates is None:
            self.__useCertificates = gConfig.useServerCertificate()
            self.kwargs[self.KW_USE_CERTIFICATES] = self.__useCertificates
            # The server certificate use context changed, rechecking the transport sanity
            result = self.__checkTransportSanity()
            if not result['OK']:
                return result

    # Take all the extra credentials
    self.__discoverExtraCredentials()
    if not self.__initStatus['OK']:
        return self.__initStatus
    if self.__enableThreadCheck:
        self.__checkThreadID()

    gLogger.debug("Trying to connect to: %s" % self.serviceURL)
    try:
        # Calls the transport method of the appropriate protocol.
        # self.__URLTuple[1:3] = [server name, port, System/Component]
        transport = gProtocolDict[self.__URLTuple[0]]['transport'](self.__URLTuple[1:3], **self.kwargs)
        # the socket timeout is the default value which is 1.
        # later we increase to 5
        retVal = transport.initAsClient()
        if not retVal['OK']:
            gLogger.warn("Issue getting socket:", "%s : %s : %s" % (transport, self.__URLTuple, retVal['Message']))
            # We try each URL at most __nbOfRetry times
            if self.__retry < self.__nbOfRetry * self.__nbOfUrls - 1:
                # Recompose the URL (why not using self.serviceURL ? )
                url = "%s://%s:%d/%s" % (self.__URLTuple[0], self.__URLTuple[1],
                                         int(self.__URLTuple[2]), self.__URLTuple[3])
                # Add the url to the list of banned URLs if it is not already there. (Can it happen ? I don't think so)
                if url not in self.__bannedUrls:
                    gLogger.warn("Non-responding URL temporarily banned", "%s" % url)
                    self.__bannedUrls += [url]
                # Increment the retry counter
                self.__retry += 1
                # 16.07.20 CHRIS: I guess this setSocketTimeout does not behave as expected.
                # If the initAsClient did not work, we anyway re-enter the whole method,
                # so a new transport object is created.
                # However, it might be that this timeout value was propagated down to the
                # SocketInfoFactory singleton, and thus used, but that means that the timeout
                # specified in parameter was then void.
                # If it is our last attempt for each URL, we increase the timeout
                if self.__retryCounter == self.__nbOfRetry - 1:
                    transport.setSocketTimeout(5)  # we increase the socket timeout in case the network is not good
                gLogger.info("Retry connection", ": %d to %s" % (self.__retry, self.serviceURL))
                # If we tried all the URLs, we increase the global counter (__retryCounter), and sleep
                if len(self.__bannedUrls) == self.__nbOfUrls:
                    self.__retryCounter += 1
                    # we run only one service! In that case we increase the retry delay.
                    self.__retryDelay = 3. / self.__nbOfUrls if self.__nbOfUrls > 1 else 2
                    gLogger.info("Waiting %f seconds before retry all service(s)" % self.__retryDelay)
                    time.sleep(self.__retryDelay)
                # rediscover the URL
                self.__discoverURL()
                # try to reconnect
                return self._connect()
            else:
                return retVal
    except Exception as e:
        gLogger.exception(lException=True, lExcInfo=True)
        return S_ERROR("Can't connect to %s: %s" % (self.serviceURL, repr(e)))
    # We add the connection to the transport pool
    gLogger.debug("Connected to: %s" % self.serviceURL)
    trid = getGlobalTransportPool().add(transport)
    return S_OK((trid, transport))
            # Fragment: continuation of __sslHandshake(), starting inside the handshake retry loop
            time.sleep(0.001)
        except GSI.SSL.WantWriteError:
            time.sleep(0.001)
        except GSI.SSL.Error as v:
            if self.__retry < 3:
                self.__retry += 1
                return self.__sslHandshake()
            else:
                # gLogger.warn( "Error while handshaking", "\n".join( [ stError[2] for stError in v.args[0] ] ) )
                gLogger.warn("Error while handshaking", v)
                return S_ERROR("Error while handshaking")
        except Exception as v:
            gLogger.warn("Error while handshaking", v)
            if self.__retry < 3:
                self.__retry += 1
                return self.__sslHandshake()
            else:
                # gLogger.warn( "Error while handshaking", "\n".join( [ stError[2] for stError in v.args[0] ] ) )
                gLogger.warn("Error while handshaking", v)
                return S_ERROR("Error while handshaking")
    credentialsDict = self.gatherPeerCredentials()
    if self.infoDict['clientMode']:
        hostnameCN = credentialsDict['CN']
        # if hostnameCN.split("/")[-1] != self.infoDict[ 'hostname' ]:
        if not self.__isSameHost(hostnameCN, self.infoDict['hostname']):
            gLogger.warn("Server is not who it's supposed to be",
                         "Connecting to %s and it's %s" % (self.infoDict['hostname'], hostnameCN))
    gLogger.debug("", "Authenticated peer (%s)" % credentialsDict['DN'])
    return S_OK(credentialsDict)
def __sslHandshake(self):
    start = time.time()
    timeout = self.infoDict['timeout']
    while True:
        if timeout:
            if time.time() - start > timeout:
                return S_ERROR("Handshake timeout exceeded")
        try:
            self.sslSocket.do_handshake()
            break
        except GSI.SSL.WantReadError:
            time.sleep(0.001)
        except GSI.SSL.WantWriteError:
            time.sleep(0.001)
        except GSI.SSL.Error as v:
            # gLogger.warn( "Error while handshaking", "\n".join( [ stError[2] for stError in v.args[0] ] ) )
            gLogger.warn("Error while handshaking", v)
            return S_ERROR("Error while handshaking")
        except Exception as v:
            gLogger.warn("Error while handshaking", v)
            return S_ERROR("Error while handshaking")
    credentialsDict = self.gatherPeerCredentials()
    if self.infoDict['clientMode']:
        hostnameCN = credentialsDict['CN']
        # if hostnameCN.split("/")[-1] != self.infoDict[ 'hostname' ]:
        if not self.__isSameHost(hostnameCN, self.infoDict['hostname']):
            gLogger.warn("Server is not who it's supposed to be",
                         "Connecting to %s and it's %s" % (self.infoDict['hostname'], hostnameCN))
    gLogger.debug("", "Authenticated peer (%s)" % credentialsDict['DN'])
    return S_OK(credentialsDict)
def __processResults(self, id_, result):
    if result['OK']:
        self.__updateResultDict['Successful'][result['URL']] = True
    else:
        gLogger.warn("Failed to update configuration on", result['URL'] + ':' + result['Message'])
        self.__updateResultDict['Failed'][result['URL']] = result['Message']
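# An illustrative sketch (not DIRAC code) of the result dictionaries __processResults consumes:
# each result is an S_OK/S_ERROR-style dict that also carries the slave 'URL', and outcomes are
# accumulated into 'Successful'/'Failed' maps. The URLs and the error message are made up.
updateResultDict = {'Successful': {}, 'Failed': {}}
results = [
    {'OK': True, 'URL': 'dips://slave1:9135/Configuration/Server'},
    {'OK': False, 'URL': 'dips://slave2:9135/Configuration/Server', 'Message': 'Connection timeout'},
]
for result in results:
    if result['OK']:
        updateResultDict['Successful'][result['URL']] = True
    else:
        updateResultDict['Failed'][result['URL']] = result['Message']
print(updateResultDict)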
def initDIRAC(rootPath, enableDebug=False):
    # CONFIGURATION OPTIONS HERE (note: all config options will override
    # any Pylons config options)
    configDict = {'webConfig': {}}
    configDict['webConfig']['dirac.webroot'] = rootPath
    diracRootPath = os.path.realpath(os.path.dirname(os.path.dirname(rootPath)))
    configDict['webConfig']['dirac.root'] = diracRootPath
    if diracRootPath not in sys.path:
        sys.path.append(diracRootPath)
    from DIRAC.FrameworkSystem.Client.Logger import gLogger
    gLogger.registerBackends(['stderr'])
    from DIRAC.Core.Base import Script
    Script.registerSwitch("r", "reload", "Reload for pylons")
    Script.localCfg.addDefaultEntry("/DIRAC/Security/UseServerCertificate", "yes")
    Script.localCfg.addDefaultEntry("LogColor", True)
    Script.initialize(script="Website", ignoreErrors=True, initializeMonitor=False)
    gLogger._systemName = "Framework"
    gLogger.initialize("Web", "/Website")
    gLogger.setLevel("VERBOSE")
    from DIRAC import gMonitor, gConfig, rootPath as droot
    from DIRAC.Core.Utilities import CFG
    from DIRAC.ConfigurationSystem.Client.Helpers import getCSExtensions
    gMonitor.setComponentType(gMonitor.COMPONENT_WEB)
    gMonitor.initialize()
    gMonitor.registerActivity("pagesServed", "Pages served", "Framework", "pages", gMonitor.OP_SUM)
    gLogger.info("DIRAC Initialized")
    configDict['portalVersion'] = portalVersion(rootPath)
    gLogger.info("DIRAC portal version: %s" % configDict['portalVersion'])
    extModules = ['%sDIRAC' % module for module in getCSExtensions()]
    # Load web.cfg of modules
    cfgFilePaths = [os.path.join(droot, "etc", "web.cfg")]
    for extModule in extModules:
        gLogger.info("Adding web.cfg for %s extension" % extModule)
        extModulePath = os.path.join(diracRootPath, extModule)
        webCFGPath = os.path.join(extModulePath, "Web", "web.cfg")
        cfgFilePaths.append(webCFGPath)
        for systemDir in os.listdir(extModulePath):
            webCFGSystemPath = os.path.join(extModulePath, systemDir, "Web", "web.cfg")
            cfgFilePaths.append(webCFGSystemPath)
    webCFG = CFG.CFG()
    for webCFGPath in cfgFilePaths:
        if not os.path.isfile(webCFGPath):
            gLogger.warn("%s does not exist" % webCFGPath)
        else:
            gLogger.info("Loading %s" % webCFGPath)
            modCFG = CFG.CFG().loadFromFile(webCFGPath)
            if modCFG.getOption('Website/AbsoluteDefinition', False):
                gLogger.info("CFG %s is absolute" % webCFGPath)
                webCFG = modCFG
            else:
                webCFG = webCFG.mergeWith(modCFG)
    gConfig.loadCFG(webCFG)
    gLogger.showHeaders(True)
    gLogger._gLogger__initialized = False
    gLogger.initialize("Web", "/Website")
    # Define the controllers, templates and public directories
    for type in ('controllers', 'templates', 'public'):
        configDict[type] = []
        for extModule in extModules:
            extModulePath = os.path.join(diracRootPath, extModule)
            typePath = os.path.join(extModulePath, "Web", type)
            if os.path.isdir(typePath):
                gLogger.info("Adding %s path for module %s" % (type, extModule))
                configDict[type].append(typePath)
            for systemDir in os.listdir(extModulePath):
                systemTypePath = os.path.join(extModulePath, systemDir, "Web", type)
                if os.path.isdir(systemTypePath):
                    gLogger.info("Adding %s path for system %s in module %s" % (type, systemDir, extModule))
                    configDict[type].append(systemTypePath)
        # End of extensions
        configDict[type].append(os.path.join(rootPath, type))
    # Load debug.cfg?
    if enableDebug:
        debugCFGPath = os.path.join(rootPath, "debug.cfg")
        if os.path.isfile(debugCFGPath):
            gLogger.info("Loading debug cfg file at %s" % debugCFGPath)
            gConfig.loadFile(debugCFGPath)
    gLogger.info("Extension modules loaded")
    return configDict
def initDIRAC(rootPath, enableDebug=False):
    # CONFIGURATION OPTIONS HERE (note: all config options will override
    # any Pylons config options)
    configDict = {'webConfig': {}}
    configDict['webConfig']['dirac.webroot'] = rootPath
    diracRootPath = os.path.realpath(os.path.dirname(os.path.dirname(rootPath)))
    configDict['webConfig']['dirac.root'] = diracRootPath
    if diracRootPath not in sys.path:
        sys.path.append(diracRootPath)
    from DIRAC.FrameworkSystem.Client.Logger import gLogger
    gLogger.registerBackends(['stderr'])
    from DIRAC.Core.Base import Script
    Script.registerSwitch("r", "reload", "Reload for pylons")
    Script.localCfg.addDefaultEntry("/DIRAC/Security/UseServerCertificate", "yes")
    Script.localCfg.addDefaultEntry("LogColor", True)
    Script.initialize(script="Website", ignoreErrors=True, initializeMonitor=False)
    gLogger._systemName = "Framework"
    gLogger.initialize("Web", "/Website")
    gLogger.setLevel("VERBOSE")
    from DIRAC import gMonitor, gConfig, rootPath as droot
    from DIRAC.Core.Utilities import CFG
    from DIRAC.ConfigurationSystem.Client.Helpers import getCSExtensions
    gMonitor.setComponentType(gMonitor.COMPONENT_WEB)
    gMonitor.initialize()
    gMonitor.registerActivity("pagesServed", "Pages served", "Framework", "pages", gMonitor.OP_SUM)
    gLogger.info("DIRAC Initialized")
    configDict['portalVersion'] = portalVersion(rootPath)
    gLogger.info("DIRAC portal version: %s" % configDict['portalVersion'])
    extModules = ['%sDIRAC' % module for module in getCSExtensions()]
    # Load web.cfg of modules
    cfgFilePaths = [os.path.join(droot, "etc", "web.cfg")]
    for extModule in extModules:
        gLogger.info("Adding web.cfg for %s extension" % extModule)
        extModulePath = os.path.join(diracRootPath, extModule)
        webCFGPath = os.path.join(extModulePath, "Web", "web.cfg")
        cfgFilePaths.append(webCFGPath)
        for systemDir in os.listdir(extModulePath):
            webCFGSystemPath = os.path.join(extModulePath, systemDir, "Web", "web.cfg")
            cfgFilePaths.append(webCFGSystemPath)
    webCFG = CFG.CFG()
    for webCFGPath in cfgFilePaths:
        if not os.path.isfile(webCFGPath):
            gLogger.warn("%s does not exist" % webCFGPath)
        else:
            gLogger.info("Loading %s" % webCFGPath)
            modCFG = CFG.CFG().loadFromFile(webCFGPath)
            if modCFG.getOption('Website/AbsoluteDefinition', False):
                gLogger.info("CFG %s is absolute" % webCFGPath)
                webCFG = modCFG
            else:
                webCFG = webCFG.mergeWith(modCFG)
    gConfig.loadCFG(webCFG)
    gLogger.showHeaders(True)
    gLogger._gLogger__initialized = False
    gLogger.initialize("Web", "/Website")
    # Define the controllers, templates and public directories
    for type in ('controllers', 'templates', 'public'):
        configDict[type] = []
        for extModule in extModules:
            extModulePath = os.path.join(diracRootPath, extModule)
            typePath = os.path.join(extModulePath, "Web", type)
            if os.path.isdir(typePath):
                gLogger.info("Adding %s path for module %s" % (type, extModule))
                configDict[type].append(typePath)
            for systemDir in os.listdir(extModulePath):
                systemTypePath = os.path.join(extModulePath, systemDir, "Web", type)
                if os.path.isdir(systemTypePath):
                    gLogger.info("Adding %s path for system %s in module %s" % (type, systemDir, extModule))
                    configDict[type].append(systemTypePath)
        # End of extensions
        configDict[type].append(os.path.join(rootPath, type))
    # Load debug.cfg?
    if enableDebug:
        debugCFGPath = os.path.join(rootPath, "debug.cfg")
        if os.path.isfile(debugCFGPath):
            gLogger.info("Loading debug cfg file at %s" % debugCFGPath)
            gConfig.loadFile(debugCFGPath)
    gLogger.info("Extension modules loaded")
    return configDict
def _connect(self):
    """Establish the connection.
    It uses the URL discovered in __discoverURL.
    In case the connection cannot be established, __discoverURL
    is called again, and _connect calls itself.
    We stop after trying self.__nbOfRetry * self.__nbOfUrls
    """
    # Check if the useServerCertificate configuration changed
    # Note: I am not really sure that all this block makes
    # any sense at all since all these variables are
    # evaluated in __discoverCredentialsToUse
    if gConfig.useServerCertificate() != self.__useCertificates:
        if self.__forceUseCertificates is None:
            self.__useCertificates = gConfig.useServerCertificate()
            self.kwargs[self.KW_USE_CERTIFICATES] = self.__useCertificates
            # The server certificate use context changed, rechecking the transport sanity
            result = self.__checkTransportSanity()
            if not result['OK']:
                return result

    # Take all the extra credentials
    self.__discoverExtraCredentials()
    if not self.__initStatus['OK']:
        return self.__initStatus
    if self.__enableThreadCheck:
        self.__checkThreadID()

    gLogger.debug("Trying to connect to: %s" % self.serviceURL)
    try:
        # Calls the transport method of the appropriate protocol.
        # self.__URLTuple[1:3] = [server name, port, System/Component]
        transport = gProtocolDict[self.__URLTuple[0]]['transport'](self.__URLTuple[1:3], **self.kwargs)
        # the socket timeout is the default value which is 1.
        # later we increase to 5
        retVal = transport.initAsClient()
        # If we have an issue connecting
        if not retVal['OK']:
            gLogger.warn("Issue getting socket:", "%s : %s : %s" % (transport, self.__URLTuple, retVal['Message']))
            # We try each URL at most __nbOfRetry times
            if self.__retry < self.__nbOfRetry * self.__nbOfUrls - 1:
                # Recompose the URL (why not using self.serviceURL ? )
                url = "%s://%s:%d/%s" % (self.__URLTuple[0], self.__URLTuple[1],
                                         int(self.__URLTuple[2]), self.__URLTuple[3])
                # Add the url to the list of banned URLs if it is not already there. (Can it happen ? I don't think so)
                if url not in self.__bannedUrls:
                    gLogger.warn("Non-responding URL temporarily banned", "%s" % url)
                    self.__bannedUrls += [url]
                # Increment the retry counter
                self.__retry += 1
                # If it is our last attempt for each URL, we increase the timeout
                if self.__retryCounter == self.__nbOfRetry - 1:
                    transport.setSocketTimeout(5)  # we increase the socket timeout in case the network is not good
                gLogger.info("Retry connection", ": %d to %s" % (self.__retry, self.serviceURL))
                # If we tried all the URLs, we increase the global counter (__retryCounter), and sleep
                if len(self.__bannedUrls) == self.__nbOfUrls:
                    self.__retryCounter += 1
                    # we run only one service! In that case we increase the retry delay.
                    self.__retryDelay = 3. / self.__nbOfUrls if self.__nbOfUrls > 1 else 2
                    gLogger.info("Waiting %f seconds before retry all service(s)" % self.__retryDelay)
                    time.sleep(self.__retryDelay)
                # rediscover the URL
                self.__discoverURL()
                # try to reconnect
                return self._connect()
            else:
                return retVal
    except Exception as e:
        gLogger.exception(lException=True, lExcInfo=True)
        return S_ERROR("Can't connect to %s: %s" % (self.serviceURL, repr(e)))
    # We add the connection to the transport pool
    gLogger.debug("Connected to: %s" % self.serviceURL)
    trid = getGlobalTransportPool().add(transport)
    return S_OK((trid, transport))
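# A standalone sketch (not DIRAC code) of the retry bookkeeping used by _connect above: each failing
# URL is banned once, and only when every URL has been banned is a full round counted and a delay
# applied before the next round. The two URLs and the forced-failure loop are illustrative assumptions.
import time

nbOfUrls = 2
bannedUrls = []
retry = 0
retryCounter = 0
for url in ["dips://cs1:9135/Configuration/Server", "dips://cs2:9135/Configuration/Server"]:
    if url not in bannedUrls:
        bannedUrls.append(url)
    retry += 1
    if len(bannedUrls) == nbOfUrls:
        retryCounter += 1
        retryDelay = 3. / nbOfUrls if nbOfUrls > 1 else 2
        time.sleep(retryDelay)
print(retry, retryCounter, bannedUrls)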