def __RPCCallFunction(self, method, args):
    """Type-check *args* and invoke the matching export_<method> RPC handler.

    :param method: name of the remote method (without the export_ prefix)
    :param args: tuple of arguments received from the client
    :return: S_OK/S_ERROR (or whatever the handler returns)
    """
    realMethod = "export_%s" % method
    gLogger.debug("RPC to %s" % realMethod)
    try:
        oMethod = getattr(self, realMethod)
    except BaseException:  # was a bare except; explicit form, same semantics
        # No export_<method> defined by the handler
        return S_ERROR("Unknown method %s" % method)
    # Validate the client-supplied argument types before calling
    dRetVal = self.__checkExpectedArgumentTypes(method, args)
    if not dRetVal['OK']:
        return dRetVal
    # Serialize concurrent calls to the same RPC method
    self.__lockManager.lock("RPC/%s" % method)
    self.__msgBroker.addTransportId(self.__trid,
                                    self.serviceInfoDict['serviceName'],
                                    idleRead=True)
    try:
        try:
            uReturnValue = oMethod(*args)
            return uReturnValue
        finally:
            # Always release the lock and detach the transport
            self.__lockManager.unlock("RPC/%s" % method)
            self.__msgBroker.removeTransport(self.__trid, closeTransport=False)
    except Exception as v:  # fixed Python 2-only "except Exception, v" syntax
        gLogger.exception("Uncaught exception when serving RPC", "Function %s" % method)
        return S_ERROR("Server error while serving %s: %s" % (method, str(v)))
def _rh_executeConnectionCallback(self, methodName, args=False):
    """Run the handler's conn_<methodName> connection callback, if any.

    :param methodName: connection event name (must be a key of
        RequestHandler.__connectionCallbackTypes)
    :param args: optional tuple of arguments; when given they are type-checked
        and cached on the transport, otherwise the cached ones are reused
    :return: S_OK/S_ERROR (or whatever the callback returns)
    """
    self.__logRemoteQuery("Connection/%s" % methodName, args)
    if methodName not in RequestHandler.__connectionCallbackTypes:
        return S_ERROR("Invalid connection method %s" % methodName)
    cbTypes = RequestHandler.__connectionCallbackTypes[methodName]
    if args:
        if len(args) != len(cbTypes):
            return S_ERROR("Expected %s arguments" % len(cbTypes))
        for i in range(len(cbTypes)):
            if type(args[i]) != cbTypes[i]:
                return S_ERROR("Invalid type for argument %s" % i)
        # Remember the args so later events for this transport can reuse them
        self.__trPool.associateData(self.__trid, "connectData", args)
    if not args:
        args = self.__trPool.getAssociatedData(self.__trid, "connectData")
    realMethod = "conn_%s" % methodName
    gLogger.debug("Callback to %s" % realMethod)
    try:
        oMethod = getattr(self, realMethod)
    except BaseException:  # was a bare except; explicit form, same semantics
        # No callback defined by handler
        return S_OK()
    try:
        if args:
            uReturnValue = oMethod(self.__trid, *args)
        else:
            uReturnValue = oMethod(self.__trid)
        return uReturnValue
    except Exception as v:  # fixed Python 2-only "except Exception, v" syntax
        gLogger.exception("Uncaught exception when serving Connect", "Function %s" % realMethod)
        return S_ERROR("Server error while serving %s: %s" % (methodName, str(v)))
def _rh_executeMessageCallback(self, msgObj):
    """Dispatch an incoming message to the handler's msg_<name> method.

    The call is serialized with the lock manager and its response time logged.

    :param msgObj: message object (provides getName()/dumpAttrs())
    :return: S_OK/S_ERROR from the handler (coerced to S_ERROR if the handler
        does not return a result structure)
    """
    msgName = msgObj.getName()
    if not self.__msgBroker.getMsgFactory().messageExists(self.__svcName, msgName):
        return S_ERROR("Unknown message %s" % msgName)
    methodName = "msg_%s" % msgName
    self.__logRemoteQuery("Message/%s" % methodName, msgObj.dumpAttrs())
    startTime = time.time()
    try:
        oMethod = getattr(self, methodName)
    except BaseException:  # was a bare except; explicit form, same semantics
        return S_ERROR("Handler function for message %s does not exist!" % msgName)
    self.__lockManager.lock(methodName)
    try:
        try:
            uReturnValue = oMethod(msgObj)
        except Exception as v:  # fixed Python 2-only "except Exception, v" syntax
            gLogger.exception("Uncaught exception when serving message", methodName)
            return S_ERROR("Server error while serving %s: %s" % (msgName, str(v)))
    finally:
        self.__lockManager.unlock(methodName)
    if not isReturnStructure(uReturnValue):
        gLogger.error("Message does not return a S_OK/S_ERROR", msgName)
        uReturnValue = S_ERROR("Message %s does not return a S_OK/S_ERROR" % msgName)
    self.__logRemoteQueryResponse(uReturnValue, time.time() - startTime)
    return uReturnValue
def DataSourceToNetwork(self, dataSource):
    """Stream the full content of *dataSource* over the wire via sendData().

    :param dataSource: any object exposing a read(size) method
    :return: S_OK on complete (or aborted) transfer, S_ERROR otherwise
    """
    if "read" not in dir(dataSource):
        return S_ERROR("%s data source object does not have a read method" % str(dataSource))
    self.__oMD5 = hashlib.md5()
    chunkSize = self.packetSize
    self.__fileBytes = 0
    totalSent = 0
    try:
        chunk = dataSource.read(chunkSize)
        while chunk:
            sendResult = self.sendData(chunk)
            if not sendResult["OK"]:
                return sendResult
            if sendResult.get("AbortTransfer"):
                # Remote side asked to stop: not an error
                self.__log.verbose("Transfer aborted")
                return S_OK()
            totalSent += len(chunk)
            chunk = dataSource.read(chunkSize)
        self.sendEOF()
    except Exception as e:
        gLogger.exception("Error while sending file")
        return S_ERROR("Error while sending file: %s" % str(e))
    self.__fileBytes = totalSent
    return S_OK()
def process(self):
    """Execute the stored task.

    Supports a plain function, or a class whose instance is callable: the
    class is instantiated with the stored args and then called. Any raised
    exception is captured and, when callbacks are in use, converted to an
    S_ERROR stored in self.__taskException.
    """
    self.__done = True
    try:
        ## it's a function?
        if type(self.__taskFunction) is FunctionType:
            self.__taskResult = self.__taskFunction(*self.__taskArgs, **self.__taskKwArgs)
        ## or a class?
        # Python 2's types.TypeType/types.ClassType were removed in Python 3;
        # isinstance(..., type) covers (new-style) classes
        elif isinstance(self.__taskFunction, type):
            ## create new instance
            taskObj = self.__taskFunction(*self.__taskArgs, **self.__taskKwArgs)
            ### check if it is callable, raise TypeError if not
            if not callable(taskObj):
                raise TypeError("__call__ operator not defined not in %s class" % taskObj.__class__.__name__)
            ### call it at least
            self.__taskResult = taskObj()
    except Exception as x:  # fixed Python 2-only "except Exception, x" syntax
        self.__exceptionRaised = True
        if gLogger:
            gLogger.exception("Exception in process of pool")
        if self.__exceptionCallback or self.usePoolCallbacks():
            retDict = S_ERROR('Exception')
            retDict['Value'] = str(x)
            # NOTE(review): stores only the exception instance, not the full
            # (type, value, traceback) triple — confirm consumers expect this
            retDict['Exc_info'] = sys.exc_info()[1]
            self.__taskException = retDict
def __cbRecvMsg(self, trid, msgObj):
    """Route a received message through the special 'msg' callbacks and then
    the per-message-name callback registered in self.__callbacks.

    :param trid: transport id the message arrived on (unused here)
    :param msgObj: message object; gets this client attached via setMsgClient
    :return: S_OK/S_ERROR from a callback, or S_ERROR on missing/bad callback
    """
    msgName = msgObj.getName()
    msgObj.setMsgClient(self)
    # Special callbacks run first and may fully consume the message
    for cb in self.__specialCallbacks['msg']:
        try:
            result = cb(self, msgObj)
            if not isReturnStructure(result):
                gLogger.error("Callback for message does not return S_OK/S_ERROR", msgObj.getName())
                return S_ERROR("No response")
            if not result['OK']:
                return result
            # If no specific callback but a generic one, return the generic one
            # NOTE(review): this returns after the FIRST special callback when
            # no specific handler exists, skipping the rest — confirm intended
            if msgName not in self.__callbacks:
                return result
        except BaseException:
            # A failing special callback is logged and skipped
            gLogger.exception("Exception while processing callbacks", msgObj.getName())
    if msgName not in self.__callbacks:
        return S_ERROR("Unexpected message")
    # Specific per-message-name callback
    try:
        result = self.__callbacks[msgName](msgObj)
        if not isReturnStructure(result):
            gLogger.error("Callback for message does not return S_OK/S_ERROR", msgName)
            return S_ERROR("No response")
        return result
    except BaseException:
        gLogger.exception("Exception while processing callbacks", msgName)
        return S_ERROR("No response")
def __backupCurrentConfiguration(self, backupName):
    """Zip the current configuration file into the dated backup directory.

    :param backupName: tag inserted into the backup file name
    """
    cfgName = "%s.cfg" % self.getName()
    cfgPath = os.path.join(DIRAC.rootPath, "etc", cfgName)
    now = Time.date()
    # Backups are laid out as <backupDir>/<year>/<month>/
    destDir = os.path.join(self.getBackupDir(), str(now.year), "%02d" % now.month)
    mkDir(destDir)
    destFile = os.path.join(destDir, cfgName.replace(".cfg", ".%s.zip" % backupName))
    if not os.path.isfile(cfgPath):
        gLogger.warn("CS data file does not exist", cfgPath)
        return
    gLogger.info("Making a backup of configuration in %s" % destFile)
    try:
        with zipfile.ZipFile(destFile, "w", zipfile.ZIP_DEFLATED) as archive:
            archive.write(cfgPath, "%s.backup.%s" % (os.path.split(cfgPath)[1], backupName))
    except Exception:
        gLogger.exception()
        gLogger.error("Cannot backup configuration data file", "file %s" % destFile)
def generateContext(ftsServer, ucert, lifetime=25200):
    """Build an fts3 context and explicitly delegate the proxy to the server.

    :param ftsServer: address of the fts3 server
    :param ucert: the path to the certificate to be used
    :param lifetime: duration (in sec) of the delegation to the FTS3 server
                     (default is 7h, like FTS3 default)
    :returns: S_OK(context) or S_ERROR
    """
    try:
        context = fts3.Context(endpoint=ftsServer,
                               ucert=ucert,
                               request_class=ftsSSLRequest,
                               verify=False)
        # Delegate explicitly so we control the delegation lifetime: FTS3 only
        # re-delegates on its own when it judges the remaining time too short
        # (currently 1h), so without this an idle client can end up with no
        # valid proxy on the server. The cached proxy passed in may have less
        # than *lifetime* left; the FTS3Agent renews it often enough for that
        # not to matter. FTS3's own lifetime handling does not check the full
        # chain — see https://its.cern.ch/jira/browse/FTS-1575
        fts3.delegate(context, lifetime=datetime.timedelta(seconds=lifetime))
        return S_OK(context)
    except FTS3ClientException as e:
        gLogger.exception("Error generating context", repr(e))
        return S_ERROR(repr(e))
def _rh_executeMessageCallback(self, msgObj):
    """Dispatch an incoming message to the handler's msg_<name> method.

    :param msgObj: message object (provides getName()/dumpAttrs())
    :return: S_OK/S_ERROR from the handler (coerced to S_ERROR if the handler
        does not return a result structure)
    """
    msgName = msgObj.getName()
    if not self.__msgBroker.getMsgFactory().messageExists(self.__svcName, msgName):
        return S_ERROR("Unknown message %s" % msgName)
    methodName = "msg_%s" % msgName
    self.__logRemoteQuery("Message/%s" % methodName, msgObj.dumpAttrs())
    startTime = time.time()
    try:
        oMethod = getattr(self, methodName)
    except BaseException:  # was a bare except; explicit form, same semantics
        return S_ERROR("Handler function for message %s does not exist!" % msgName)
    self.__lockManager.lock(methodName)
    try:
        try:
            uReturnValue = oMethod(msgObj)
        except Exception as v:  # fixed Python 2-only "except Exception, v" syntax
            gLogger.exception("Uncaught exception when serving message", methodName)
            return S_ERROR("Server error while serving %s: %s" % (msgName, str(v)))
    finally:
        self.__lockManager.unlock(methodName)
    if not isReturnStructure(uReturnValue):
        gLogger.error("Message does not return a S_OK/S_ERROR", msgName)
        uReturnValue = S_ERROR("Message %s does not return a S_OK/S_ERROR" % msgName)
    self.__logRemoteQueryResponse(uReturnValue, time.time() - startTime)
    return uReturnValue
def process(self):
    """Execute the stored task.

    Supports a plain function, or a class whose instance is callable: the
    class is instantiated with the stored args and then called. Exceptions
    are captured and, when callbacks are in use, stored as an S_ERROR in
    self.__taskException.
    """
    self.__done = True
    try:
        ## it's a function?
        if type(self.__taskFunction) is FunctionType:
            self.__taskResult = self.__taskFunction(*self.__taskArgs, **self.__taskKwArgs)
        ## or a class?
        # Python 2's types.TypeType/types.ClassType were removed in Python 3;
        # isinstance(..., type) covers (new-style) classes
        elif isinstance(self.__taskFunction, type):
            ## create new instance
            taskObj = self.__taskFunction(*self.__taskArgs, **self.__taskKwArgs)
            ### check if it is callable, raise TypeError if not
            if not callable(taskObj):
                raise TypeError(
                    "__call__ operator not defined not in %s class" % taskObj.__class__.__name__)
            ### call it at least
            self.__taskResult = taskObj()
    except Exception as x:  # fixed Python 2-only "except Exception, x" syntax
        self.__exceptionRaised = True
        if gLogger:
            gLogger.exception("Exception in process of pool")
        if self.__exceptionCallback or self.usePoolCallbacks():
            retDict = S_ERROR('Exception')
            retDict['Value'] = str(x)
            # NOTE(review): stores only the exception instance, not the full
            # (type, value, traceback) triple — confirm consumers expect this
            retDict['Exc_info'] = sys.exc_info()[1]
            self.__taskException = retDict
def __realTrigger(self, eventName, params):
    """Fire every listener registered for *eventName* with *params*.

    Re-entrant triggers of the same event are suppressed (returns S_OK(0)).
    Listener exceptions and malformed results are logged and skipped; the
    first S_ERROR returned by a listener stops the chain and is propagated.

    :return: S_OK(number of listeners) on success, S_ERROR otherwise
    """
    gEventSync.lock()
    try:
        if eventName not in self.__events:
            return S_ERROR("Event %s is not registered" % eventName)
        if eventName in self.__processingEvents:
            # Already being triggered: avoid recursion
            return S_OK(0)
        # Snapshot the listener list while holding the lock
        eventFunctors = list(self.__events[eventName])
        self.__processingEvents.add(eventName)
    finally:
        gEventSync.unlock()
    finalResult = S_OK()
    for functor in eventFunctors:
        try:
            result = functor(eventName, params)
        except Exception:
            gLogger.exception("Listener %s for event %s raised an exception" % (functor.__name__, eventName))
            continue
        # types.DictType is Python 2-only; isinstance(result, dict) is the
        # Python 3 equivalent of the structure check
        if not isinstance(result, dict) or 'OK' not in result:
            gLogger.error("Listener for event did not return a S_OK/S_ERROR structure",
                          "%s %s" % (functor.__name__, eventName))
            continue
        if not result['OK']:
            finalResult = result
            break
    gEventSync.lock()
    try:
        self.__processingEvents.discard(eventName)
    finally:
        try:
            gEventSync.unlock()
        except BaseException:  # was a bare except; explicit form, same semantics
            pass
    if not finalResult['OK']:
        return finalResult
    return S_OK(len(eventFunctors))
def _rh_executeConnectionCallback(self, methodName, args=False):
    """Run the handler's conn_<methodName> connection callback, if any.

    :param methodName: connection event name (must be a key of
        RequestHandler.__connectionCallbackTypes)
    :param args: optional tuple of arguments; when given they are type-checked
        and cached on the transport, otherwise the cached ones are reused
    :return: S_OK/S_ERROR (or whatever the callback returns)
    """
    self.__logRemoteQuery("Connection/%s" % methodName, args)
    if methodName not in RequestHandler.__connectionCallbackTypes:
        return S_ERROR("Invalid connection method %s" % methodName)
    cbTypes = RequestHandler.__connectionCallbackTypes[methodName]
    if args:
        if len(args) != len(cbTypes):
            return S_ERROR("Expected %s arguments" % len(cbTypes))
        for i in range(len(cbTypes)):
            if type(args[i]) != cbTypes[i]:
                return S_ERROR("Invalid type for argument %s" % i)
        # Remember the args so later events for this transport can reuse them
        self.__trPool.associateData(self.__trid, "connectData", args)
    if not args:
        args = self.__trPool.getAssociatedData(self.__trid, "connectData")
    realMethod = "conn_%s" % methodName
    gLogger.debug("Callback to %s" % realMethod)
    try:
        oMethod = getattr(self, realMethod)
    except BaseException:  # was a bare except; explicit form, same semantics
        #No callback defined by handler
        return S_OK()
    try:
        if args:
            uReturnValue = oMethod(self.__trid, *args)
        else:
            uReturnValue = oMethod(self.__trid)
        return uReturnValue
    except Exception as v:  # fixed Python 2-only "except Exception, v" syntax
        gLogger.exception("Uncaught exception when serving Connect", "Function %s" % realMethod)
        return S_ERROR("Server error while serving %s: %s" % (methodName, str(v)))
def __doFileTransfer(self, sDirection):
    """ Execute a file transfer action

        :type sDirection: string
        :param sDirection: Direction of the transfer (fromClient, toClient,
            bulkFromClient, bulkToClient or listBulk; first letter is
            lower-cased below)
        :return: S_OK/S_ERROR, or None when the service cannot transfer
        :raises ConnectionError: if the file description cannot be received
    """
    retVal = self.__trPool.receive(self.__trid)
    if not retVal["OK"]:
        raise ConnectionError(
            "Error while receiving file description %s %s" %
            (self.srv_getFormattedRemoteCredentials(), retVal["Message"])
        )

    # Reconvert to tuple
    fileInfo = tuple(retVal["Value"])
    # Lower-case the first letter to match the transfer_* method names
    sDirection = "%s%s" % (sDirection[0].lower(), sDirection[1:])
    if "transfer_%s" % sDirection not in dir(self):
        self.__trPool.send(self.__trid, S_ERROR("Service can't transfer files %s" % sDirection))
        return
    # Tell the client we are ready to proceed
    retVal = self.__trPool.send(self.__trid, S_OK("Accepted"))
    if not retVal["OK"]:
        return retVal
    self.__logRemoteQuery("FileTransfer/%s" % sDirection, fileInfo)

    # Serialize transfers per direction
    self.__lockManager.lock("FileTransfer/%s" % sDirection)
    try:
        try:
            fileHelper = FileHelper(self.__trPool.get(self.__trid))
            # Dispatch to the handler's transfer_* implementation
            if sDirection == "fromClient":
                fileHelper.setDirection("fromClient")
                uRetVal = self.transfer_fromClient(fileInfo[0], fileInfo[1], fileInfo[2], fileHelper)
            elif sDirection == "toClient":
                fileHelper.setDirection("toClient")
                uRetVal = self.transfer_toClient(fileInfo[0], fileInfo[1], fileHelper)
            elif sDirection == "bulkFromClient":
                fileHelper.setDirection("fromClient")
                uRetVal = self.transfer_bulkFromClient(fileInfo[0], fileInfo[1], fileInfo[2], fileHelper)
            elif sDirection == "bulkToClient":
                fileHelper.setDirection("toClient")
                uRetVal = self.transfer_bulkToClient(fileInfo[0], fileInfo[1], fileHelper)
            elif sDirection == "listBulk":
                fileHelper.setDirection("toClient")
                uRetVal = self.transfer_listBulk(fileInfo[0], fileInfo[1], fileHelper)
            else:
                return S_ERROR("Direction %s does not exist!!!" % sDirection)
            # A successful handler must have consumed/produced the whole file
            if uRetVal["OK"] and not fileHelper.finishedTransmission():
                gLogger.error("You haven't finished receiving/sending the file", str(fileInfo))
                return S_ERROR("Incomplete transfer")
            del fileHelper
            return uRetVal
        finally:
            self.__lockManager.unlock("FileTransfer/%s" % sDirection)
    except Exception as e:  # pylint: disable=broad-except
        gLogger.exception("Uncaught exception when serving Transfer", "%s" % sDirection, lException=e)
        return S_ERROR("Server error while serving %s: %s" % (sDirection, repr(e)))
def abort(exitCode, *args, **kwargs):
    """ Abort execution: log a fatal message and terminate the process
        immediately with *exitCode* (bypassing cleanup via os._exit).
    """
    try:
        gLogger.fatal(*args, **kwargs)
        os._exit(exitCode)
    except BaseException:  # was a bare except; explicit form, same semantics
        gLogger.exception('Error while executing DIRAC.abort')
        os._exit(exitCode)
def abort(exitCode, *args, **kwargs):
    """ Abort execution: log a fatal message and terminate the process
        immediately with *exitCode* (bypassing cleanup via os._exit).
    """
    try:
        gLogger.fatal(*args, **kwargs)
        os._exit(exitCode)
    except BaseException:  # was a bare except; explicit form, same semantics
        gLogger.exception('Error while executing DIRAC.abort')
        os._exit(exitCode)
def __doFileTransfer(self, sDirection):
    """ Execute a file transfer action

        @type sDirection: string
        @param sDirection: Direction of the transfer
        @return: S_OK/S_ERROR (None when the service cannot transfer)
    """
    retVal = self.__trPool.receive(self.__trid)
    if not retVal["OK"]:
        raise RequestHandler.ConnectionError(
            "Error while receiving file description %s %s" %
            (self.srv_getFormattedRemoteCredentials(), retVal["Message"])
        )
    fileInfo = retVal["Value"]
    # Lower-case the first letter to match the transfer_* method names
    sDirection = "%s%s" % (sDirection[0].lower(), sDirection[1:])
    if "transfer_%s" % sDirection not in dir(self):
        self.__trPool.send(self.__trid, S_ERROR("Service can't transfer files %s" % sDirection))
        return
    retVal = self.__trPool.send(self.__trid, S_OK("Accepted"))
    if not retVal["OK"]:
        return retVal
    self.__logRemoteQuery("FileTransfer/%s" % sDirection, fileInfo)
    self.__lockManager.lock(sDirection)
    try:
        try:
            fileHelper = FileHelper(self.__trPool.get(self.__trid))
            # Dispatch to the handler's transfer_* implementation
            if sDirection == "fromClient":
                fileHelper.setDirection("fromClient")
                uRetVal = self.transfer_fromClient(fileInfo[0], fileInfo[1], fileInfo[2], fileHelper)
            elif sDirection == "toClient":
                fileHelper.setDirection("toClient")
                uRetVal = self.transfer_toClient(fileInfo[0], fileInfo[1], fileHelper)
            elif sDirection == "bulkFromClient":
                fileHelper.setDirection("fromClient")
                uRetVal = self.transfer_bulkFromClient(fileInfo[0], fileInfo[1], fileInfo[2], fileHelper)
            elif sDirection == "bulkToClient":
                fileHelper.setDirection("toClient")
                uRetVal = self.transfer_bulkToClient(fileInfo[0], fileInfo[1], fileHelper)
            elif sDirection == "listBulk":
                fileHelper.setDirection("toClient")
                uRetVal = self.transfer_listBulk(fileInfo[0], fileInfo[1], fileHelper)
            else:
                return S_ERROR("Direction %s does not exist!!!" % sDirection)
            # A successful handler must have consumed/produced the whole file
            if uRetVal["OK"] and not fileHelper.finishedTransmission():
                gLogger.error("You haven't finished receiving/sending the file", str(fileInfo))
                return S_ERROR("Incomplete transfer")
            return uRetVal
        finally:
            self.__lockManager.unlock(sDirection)
    except Exception as v:  # fixed Python 2-only "except Exception, v" syntax
        gLogger.exception("Uncaught exception when serving Transfer", "%s" % sDirection)
        return S_ERROR("Server error while serving %s: %s" % (sDirection, str(v)))
def process(self):
    """Run the queued job callable and record its outcome on the instance."""
    self.__done = True
    try:
        self.__jobResult = self.__jobFunction(*self.__jobArgs, **self.__jobKwArgs)
    except Exception:
        self.__exceptionRaised = True
        if self.__exceptionCallback:
            # A callback will consume the exception info later
            self.__jobException = sys.exc_info()
        elif gLogger:
            gLogger.exception("Exception in thread")
def __doFileTransfer(self, sDirection):
    """ Execute a file transfer action

        @type sDirection: string
        @param sDirection: Direction of the transfer
        @return: S_OK/S_ERROR (None when the service cannot transfer)
    """
    retVal = self.__trPool.receive(self.__trid)
    if not retVal['OK']:
        gLogger.error("Error while receiving file description",
                      "%s %s" % (self.srv_getFormattedRemoteCredentials(), retVal['Message']))
        return S_ERROR("Error while receiving file description: %s" % retVal['Message'])
    fileInfo = retVal['Value']
    # Lower-case the first letter to match the transfer_* method names
    sDirection = "%s%s" % (sDirection[0].lower(), sDirection[1:])
    if "transfer_%s" % sDirection not in dir(self):
        self.__trPool.send(self.__trid, S_ERROR("Service can't transfer files %s" % sDirection))
        return
    retVal = self.__trPool.send(self.__trid, S_OK("Accepted"))
    if not retVal['OK']:
        return retVal
    self.__logRemoteQuery("FileTransfer/%s" % sDirection, fileInfo)
    self.__lockManager.lock(sDirection)
    try:
        try:
            fileHelper = FileHelper(self.__trPool.get(self.__trid))
            # Dispatch to the handler's transfer_* implementation
            if sDirection == "fromClient":
                fileHelper.setDirection("fromClient")
                uRetVal = self.transfer_fromClient(fileInfo[0], fileInfo[1], fileInfo[2], fileHelper)
            elif sDirection == "toClient":
                fileHelper.setDirection("toClient")
                uRetVal = self.transfer_toClient(fileInfo[0], fileInfo[1], fileHelper)
            elif sDirection == "bulkFromClient":
                fileHelper.setDirection("fromClient")
                uRetVal = self.transfer_bulkFromClient(fileInfo[0], fileInfo[1], fileInfo[2], fileHelper)
            elif sDirection == "bulkToClient":
                fileHelper.setDirection("toClient")
                uRetVal = self.transfer_bulkToClient(fileInfo[0], fileInfo[1], fileHelper)
            elif sDirection == "listBulk":
                fileHelper.setDirection("toClient")
                uRetVal = self.transfer_listBulk(fileInfo[0], fileInfo[1], fileHelper)
            else:
                return S_ERROR("Direction %s does not exist!!!" % sDirection)
            # A successful handler must have consumed/produced the whole file
            if uRetVal['OK'] and not fileHelper.finishedTransmission():
                gLogger.error("You haven't finished receiving/sending the file", str(fileInfo))
                return S_ERROR("Incomplete transfer")
            return uRetVal
        finally:
            self.__lockManager.unlock(sDirection)
    except Exception as v:  # fixed Python 2-only "except Exception, v" syntax
        gLogger.exception("Uncaught exception when serving Transfer", "%s" % sDirection)
        return S_ERROR("Server error while serving %s: %s" % (sDirection, str(v)))
def process(self):
    """Run the queued job callable, storing its result or its exception info."""
    self.__done = True
    try:
        self.__jobResult = self.__jobFunction(*self.__jobArgs, **self.__jobKwArgs)
    except Exception as exc:
        self.__exceptionRaised = True
        if self.__exceptionCallback:
            # A callback will consume the exception info later
            self.__jobException = sys.exc_info()
        elif gLogger:
            gLogger.exception("Exception in thread", lException=exc)
def __cbDisconnect(self, trid):
    """Handle a transport disconnect: run 'drop' callbacks and clear the trid.

    :param trid: transport id reported as disconnected; must match our own
    :return: None normally, S_ERROR on trid mismatch
    """
    if not self.__trid:
        # Already disconnected
        return
    if self.__trid != trid:
        gLogger.error("OOps. trid's don't match. This shouldn't happen! (%s vs %s)" % (self.__trid, trid))
        return S_ERROR("OOOPS")
    for cb in self.__specialCallbacks['drop']:
        try:
            cb(self)
        except BaseException:  # was a bare except; explicit form, same semantics
            gLogger.exception("Exception while processing disconnect callbacks")
    self.__trid = False
def __RPCCallFunction(self, method, args):
    """ Check the arguments then call the RPC function

        :type method: string
        :param method: name of the remote method sent by the client
            (without the export_ prefix)
        :param args: tuple of arguments sent by the remote client
        :return: S_OK/S_ERROR (or whatever the export_* handler returns)
    """
    realMethod = "export_%s" % method
    gLogger.debug("RPC to %s" % realMethod)
    try:
        # Get the method we are trying to call
        oMethod = getattr(self, realMethod)
    except BaseException:
        return S_ERROR("Unknown method %s" % method)
    # Check if the client sends correct arguments
    dRetVal = self.__checkExpectedArgumentTypes(method, args)
    if not dRetVal['OK']:
        return dRetVal
    # Lock the method with Semaphore to avoid too many calls at the same time
    self.__lockManager.lock("RPC/%s" % method)
    # 18.02.19 WARNING CHRIS
    # The line bellow adds the current transportID to the message broker
    # First of all, I do not see why it is doing so.
    # Second, this affects only one every other socket, since the
    # message broker selects on that one, and in the meantime, many sockets
    # are added and removed, without even being seen by the message broker.
    # Finally, there seem to be a double read on the socket: from the message broker
    # and from the ServiceReactor, resulting in conflict.
    # This is warned in the man page of "select".
    # it has been exhibited when testing M2Crypto.
    # I will comment it out, and try to put it in a separate commit when merging
    # self.__msgBroker.addTransportId(self.__trid,
    #                                 self.serviceInfoDict['serviceName'],
    #                                 idleRead=True)
    try:
        try:
            # Trying to execute the method
            uReturnValue = oMethod(*args)
            return uReturnValue
        finally:
            # Unlock method
            self.__lockManager.unlock("RPC/%s" % method)
            # 18.02.19 WARNING CHRIS
            # See comment above
            # self.__msgBroker.removeTransport(self.__trid, closeTransport=False)
    except Exception as e:
        gLogger.exception("Uncaught exception when serving RPC", "Function %s" % method, lException=e)
        return S_ERROR("Server error while serving %s: %s" % (method, str(e)))
def execute(exitCode, frame): """ Executes the callback list """ #TODO: <Adri> Disable ExitCallback until I can debug it sys.stdout.flush() sys.stderr.flush() os._exit(exitCode) for callback in gCallbackList: try: callback(exitCode) except Exception: from DIRAC.FrameworkSystem.Client.Logger import gLogger gLogger.exception("Exception while calling callback") os._exit(exitCode)
def process(self):
    """Execute the stored task function, capturing any raised exception.

    On exception, when an exception callback is registered, an S_ERROR dict
    describing the failure is stored in self.__taskException.
    """
    self.__done = True
    try:
        self.__taskResult = self.__taskFunction(*self.__taskArgs, **self.__taskKwArgs)
    except Exception as x:  # fixed Python 2-only "except Exception, x" syntax
        self.__exceptionRaised = True
        if not self.__exceptionCallback and gLogger:
            gLogger.exception("Exception in process of pool ")
        if self.__exceptionCallback:
            retDict = S_ERROR('Exception')
            retDict['Value'] = str(x)
            # NOTE(review): stores only the exception instance, not the full
            # (type, value, traceback) triple — confirm consumers expect this
            retDict['Exc_info'] = sys.exc_info()[1]
            self.__taskException = retDict
def execute( exitCode, frame ): """ Executes the callback list """ #TODO: <Adri> Disable ExitCallback until I can debug it sys.stdout.flush() sys.stderr.flush() os._exit( exitCode ) for callback in gCallbackList: try: callback( exitCode ) except Exception: from DIRAC.FrameworkSystem.Client.Logger import gLogger gLogger.exception( "Exception while calling callback" ) os._exit( exitCode )
def process(self):
    """Execute the stored task function, capturing any raised exception.

    On exception, when an exception callback is registered, an S_ERROR dict
    describing the failure is stored in self.__taskException.
    """
    self.__done = True
    try:
        self.__taskResult = self.__taskFunction(*self.__taskArgs, **self.__taskKwArgs)
    except Exception as x:  # fixed Python 2-only "except Exception, x" syntax
        self.__exceptionRaised = True
        if not self.__exceptionCallback and gLogger:
            gLogger.exception("Exception in process of pool ")
        if self.__exceptionCallback:
            retDict = S_ERROR("Exception")
            retDict["Value"] = str(x)
            # NOTE(review): stores only the exception instance, not the full
            # (type, value, traceback) triple — confirm consumers expect this
            retDict["Exc_info"] = sys.exc_info()[1]
            self.__taskException = retDict
def __checkExpectedArgumentTypes(self, method, args):
    """ Validate *args* against the handler's types_<method> specification

        :type method: string
        :param method: Method to check against
        :type args: tuple
        :param args: Arguments to check
        :return: S_OK/S_ERROR
    """
    typesAttr = "types_%s" % method
    try:
        expectedTypes = getattr(self, typesAttr)
    except Exception:
        gLogger.error("There's no types info for method", "export_%s" % method)
        return S_ERROR(
            "Handler error for server %s while processing method %s" %
            (self.serviceInfoDict["serviceName"], method))
    try:
        for iIndex, (expected, received) in enumerate(zip(expectedTypes, args)):
            # None acts as a wildcard for this position
            if expected is None:
                continue
            # A list/tuple entry means "any of these types"
            allowed = tuple(expected) if isinstance(expected, (tuple, list)) else expected
            if not isinstance(received, allowed):
                return S_ERROR(
                    "Type mismatch in parameter %d (starting with param 0) Received %s, expected %s" % (
                        iIndex,
                        type(received),
                        str(expected),
                    ))
        if len(args) < len(expectedTypes):
            return S_ERROR("Function %s expects at least %s arguments" % (method, len(expectedTypes)))
    except Exception as v:
        sError = "Error in parameter check: %s" % str(v)
        gLogger.exception(sError)
        return S_ERROR(sError)
    return S_OK()
def loadObjects(path, reFilter=None, parentClass=None):
    """Import every matching python class found under *path* in DIRAC and its
    extensions.

    :param path: '/'-separated module path (e.g. "AccountingSystem/Client/Types")
    :param reFilter: compiled regex selecting the file names to load
        (defaults to lowercase-ending .py modules)
    :param parentClass: when given, only subclasses of it are kept
    :return: dict mapping class name -> loaded class
    """
    if not reFilter:
        # raw string: "\." in a non-raw literal is an invalid escape in py3
        reFilter = re.compile(r".*[a-z1-9]\.py$")
    pathList = List.fromChar(path, "/")
    parentModuleList = ["%sDIRAC" % ext for ext in CSGlobals.getCSExtensions()] + ['DIRAC']
    objectsToLoad = {}
    #Find which object files match
    for parentModule in parentModuleList:
        objDir = os.path.join(DIRAC.rootPath, parentModule, *pathList)
        if not os.path.isdir(objDir):
            continue
        for objFile in os.listdir(objDir):
            if reFilter.match(objFile):
                pythonClassName = objFile[:-3]
                if pythonClassName not in objectsToLoad:
                    gLogger.info("Adding to message load queue %s/%s/%s" % (parentModule, path, pythonClassName))
                    objectsToLoad[pythonClassName] = parentModule
    #Load them!
    loadedObjects = {}
    for pythonClassName in objectsToLoad:
        parentModule = objectsToLoad[pythonClassName]
        try:
            #Where parentModule can be DIRAC, pathList is something like [ "AccountingSystem", "Client", "Types" ]
            #And the python class name is.. well, the python class name
            objPythonPath = "%s.%s.%s" % (parentModule, ".".join(pathList), pythonClassName)
            objModule = __import__(objPythonPath, globals(), locals(), pythonClassName)
            objClass = getattr(objModule, pythonClassName)
        except Exception as e:  # fixed Python 2-only "except Exception, e" syntax
            gLogger.exception("Can't load type %s/%s: %s" % (parentModule, pythonClassName, str(e)))
            continue
        if parentClass == objClass:
            continue
        if parentClass and not issubclass(objClass, parentClass):
            gLogger.warn("%s is not a subclass of %s. Skipping" % (objClass, parentClass))
            continue
        gLogger.info("Loaded %s" % objPythonPath)
        loadedObjects[pythonClassName] = objClass
    # BUG FIX: the computed mapping was never returned (the function fell off
    # the end returning None); the sibling implementation returns it
    return loadedObjects
def __backupCurrentConfiguration(self, backupName):
    """Zip the current configuration file into the dated backup directory.

    :param backupName: tag inserted into the backup file name
    """
    cfgFileName = "%s.cfg" % self.getName()
    cfgFilePath = os.path.join(DIRAC.rootPath, "etc", cfgFileName)
    today = Time.date()
    # Backups are laid out as <backupDir>/<year>/<month>/
    monthDir = os.path.join(self.getBackupDir(), str(today.year), "%02d" % today.month)
    mkDir(monthDir)
    backupFile = os.path.join(monthDir, cfgFileName.replace(".cfg", ".%s.zip" % backupName))
    if not os.path.isfile(cfgFilePath):
        gLogger.warn("CS data file does not exist", cfgFilePath)
        return
    gLogger.info("Making a backup of configuration in %s" % backupFile)
    try:
        with zipfile.ZipFile(backupFile, "w", zipfile.ZIP_DEFLATED) as zipped:
            zipped.write(cfgFilePath, "%s.backup.%s" % (os.path.split(cfgFilePath)[1], backupName))
    except Exception:
        gLogger.exception()
        gLogger.error("Cannot backup configuration data file", "file %s" % backupFile)
def generateContext(ftsServer, ucert):
    """ Create an fts3 context for *ftsServer* with certificate *ucert*.

        :param ftsServer: address of the fts3 server
        :param ucert: the path to the certificate to be used
        :returns: S_OK(context) or S_ERROR
    """
    try:
        return S_OK(fts3.Context(endpoint=ftsServer,
                                 ucert=ucert,
                                 request_class=ftsSSLRequest,
                                 verify=False))
    except FTS3ClientException as e:
        gLogger.exception("Error generating context", repr(e))
        return S_ERROR(repr(e))
def __checkExpectedArgumentTypes(self, method, args):
    """ Check that the arguments received match the ones expected

        @type method: string
        @param method: Method to check against
        @type args: tuple
        @param args: Arguments to check
        @return: S_OK/S_ERROR
    """
    sListName = "types_%s" % method
    try:
        oTypesList = getattr(self, sListName)
    except BaseException:  # was a bare except; explicit form, same semantics
        gLogger.error("There's no types info for method export_%s" % method)
        return S_ERROR(
            "Handler error for server %s while processing method %s"
            % (self.serviceInfoDict["serviceName"], method)
        )
    try:
        mismatch = False
        for iIndex in range(min(len(oTypesList), len(args))):
            # If none skip a parameter (None is a wildcard)
            if oTypesList[iIndex] is None:  # "== None" -> identity check
                continue
            # If parameter is a list or a tuple check types inside
            # (py3 fix: types.TupleType/ListType no longer exist; use
            # isinstance, matching the modern sibling implementation)
            elif isinstance(oTypesList[iIndex], (tuple, list)):
                if not isinstance(args[iIndex], tuple(oTypesList[iIndex])):
                    mismatch = True
            # else check the parameter
            elif not isinstance(args[iIndex], oTypesList[iIndex]):
                mismatch = True
            # Has there been a mismatch?
            if mismatch:
                sError = "Type mismatch in parameter %d (starting with param 0) Received %s, expected %s" % (
                    iIndex,
                    type(args[iIndex]),
                    str(oTypesList[iIndex]),
                )
                return S_ERROR(sError)
        if len(args) < len(oTypesList):
            return S_ERROR("Function %s expects at least %s arguments" % (method, len(oTypesList)))
    except Exception as v:  # fixed Python 2-only "except Exception, v" syntax
        sError = "Error in parameter check: %s" % str(v)
        gLogger.exception(sError)
        return S_ERROR(sError)
    # BUG FIX: without this the function returned None on success, and the
    # caller's dRetVal['OK'] check would crash (sibling version returns S_OK())
    return S_OK()
def generateContext(ftsServer, ucert):
    """ Create an fts3 context for *ftsServer* with certificate *ucert*.

        :param ftsServer: address of the fts3 server
        :param ucert: the path to the certificate to be used
        :returns: S_OK(context) or S_ERROR
    """
    try:
        fts3Context = fts3.Context(endpoint=ftsServer,
                                   ucert=ucert,
                                   request_class=ftsSSLRequest,
                                   verify=False)
    except FTS3ClientException as e:
        gLogger.exception("Error generating context", repr(e))
        return S_ERROR(repr(e))
    return S_OK(fts3Context)
def __cbDisconnect(self, trid):
    """React to a transport drop: close the transport and run 'drop' callbacks.

    :param trid: transport id reported as disconnected; must match our own
    :return: None normally, S_ERROR on trid mismatch
    """
    if not self.__trid:
        # Already disconnected
        return
    if self.__trid != trid:
        gLogger.error("OOps. trid's don't match. This shouldn't happen!",
                      "(%s vs %s)" % (self.__trid, trid))
        return S_ERROR("OOOPS")
    self.__trid = False
    try:
        self.__transport.close()
    except BaseException:
        # Best effort: the transport may already be gone
        pass
    for dropCallback in self.__specialCallbacks['drop']:
        try:
            dropCallback(self)
        except SystemExit:
            raise
        except BaseException:
            gLogger.exception("Exception while processing disconnect callbacks")
def loadObjects(path, reFilter=None, parentClass=None):
    """Import every matching python class found under *path* in DIRAC and its
    extensions.

    :param path: '/'-separated module path (e.g. "AccountingSystem/Client/Types")
    :param reFilter: compiled regex selecting the file names to load
    :param parentClass: when given, only subclasses of it are kept
    :return: dict mapping class name -> loaded class
    """
    if not reFilter:
        reFilter = re.compile(r".*[a-z1-9]\.py$")
    pathList = List.fromChar(path, "/")
    moduleRoots = ["%sDIRAC" % ext for ext in CSGlobals.getCSExtensions()] + ['DIRAC']
    # Find which object files match
    candidates = {}
    for moduleRoot in moduleRoots:
        objDir = os.path.join(DIRAC.rootPath, moduleRoot, *pathList)
        if not os.path.isdir(objDir):
            continue
        for fileName in os.listdir(objDir):
            if not reFilter.match(fileName):
                continue
            className = fileName[:-3]
            if className in candidates:
                continue
            gLogger.info("Adding to message load queue %s/%s/%s" % (moduleRoot, path, className))
            candidates[className] = moduleRoot
    # Load them!
    loadedObjects = {}
    for className, moduleRoot in candidates.items():
        try:
            # moduleRoot can be DIRAC (or an extension), pathList something
            # like ["AccountingSystem", "Client", "Types"]
            classPath = "%s.%s.%s" % (moduleRoot, ".".join(pathList), className)
            module = __import__(classPath, globals(), locals(), className)
            loadedClass = getattr(module, className)
        except Exception as e:
            gLogger.exception("Can't load type %s/%s: %s" % (moduleRoot, className, str(e)))
            continue
        if parentClass == loadedClass:
            continue
        if parentClass and not issubclass(loadedClass, parentClass):
            gLogger.warn("%s is not a subclass of %s. Skipping" % (loadedClass, parentClass))
            continue
        gLogger.info("Loaded %s" % classPath)
        loadedObjects[className] = loadedClass
    return loadedObjects
def __checkExpectedArgumentTypes(self, method, args):
    """ Check that the arguments received match the ones expected

        @type method: string
        @param method: Method to check against
        @type args: tuple
        @param args: Arguments to check
        @return: S_OK/S_ERROR
    """
    sListName = "types_%s" % method
    try:
        oTypesList = getattr(self, sListName)
    except BaseException:  # was a bare except; explicit form, same semantics
        gLogger.error("There's no types info for method export_%s" % method)
        return S_ERROR(
            "Handler error for server %s while processing method %s"
            % (self.serviceInfoDict['serviceName'], method))
    try:
        mismatch = False
        for iIndex in range(min(len(oTypesList), len(args))):
            #If none skip a parameter (None is a wildcard)
            if oTypesList[iIndex] is None:  # "== None" -> identity check
                continue
            #If parameter is a list or a tuple check types inside
            # (py3 fix: types.TupleType/ListType no longer exist; use
            # isinstance, matching the modern sibling implementation)
            elif isinstance(oTypesList[iIndex], (tuple, list)):
                if not isinstance(args[iIndex], tuple(oTypesList[iIndex])):
                    mismatch = True
            #else check the parameter
            elif not isinstance(args[iIndex], oTypesList[iIndex]):
                mismatch = True
            #Has there been a mismatch?
            if mismatch:
                sError = "Type mismatch in parameter %d (starting with param 0)" % iIndex
                return S_ERROR(sError)
        if len(args) < len(oTypesList):
            return S_ERROR("Function %s expects at least %s arguments" % (method, len(oTypesList)))
    except Exception as v:  # fixed Python 2-only "except Exception, v" syntax
        sError = "Error in parameter check: %s" % str(v)
        gLogger.exception(sError)
        return S_ERROR(sError)
    # BUG FIX: without this the function returned None on success, and the
    # caller's dRetVal['OK'] check would crash (sibling version returns S_OK())
    return S_OK()
def DataSourceToNetwork( self, dataSource ):
  """
  Stream the whole contents of *dataSource* over the transport in packets.

  :param dataSource: any object exposing a read( size ) method
  :return: S_OK on complete or aborted transfer, S_ERROR on failure
  """
  if "read" not in dir( dataSource ):
    return S_ERROR( "%s data source object does not have a read method" % str( dataSource ) )
  #Fresh MD5 accumulator for this transfer
  self.__oMD5 = md5.md5()
  iPacketSize = self.packetSize
  try:
    sBuffer = dataSource.read( iPacketSize )
    while len( sBuffer ) > 0:
      dRetVal = self.sendData( sBuffer )
      if not dRetVal[ 'OK' ]:
        return dRetVal
      if 'AbortTransfer' in dRetVal and dRetVal[ 'AbortTransfer' ]:
        #Peer requested the transfer to stop: not an error
        self.__log.verbose( "Transfer aborted" )
        return S_OK()
      sBuffer = dataSource.read( iPacketSize )
    self.sendEOF()
  except Exception as e:
    gLogger.exception( "Error while sending file" )
    return S_ERROR( "Error while sending file: %s" % str( e ) )
  #Full transfer done; previously the function fell through and returned
  #None, which breaks callers that test result[ 'OK' ]
  return S_OK()
def FDToNetwork( self, iFD ):
  """
  Stream everything readable from file descriptor *iFD* over the transport.

  :param iFD: OS-level file descriptor to read from
  :return: S_OK( sentBytes ) on complete transfer, S_OK() on abort, S_ERROR on failure
  """
  #Fresh MD5 accumulator for this transfer
  self.__oMD5 = md5.md5()
  iPacketSize = self.packetSize
  self.__fileBytes = 0
  sentBytes = 0
  try:
    sBuffer = os.read( iFD, iPacketSize )
    while len( sBuffer ) > 0:
      dRetVal = self.sendData( sBuffer )
      if not dRetVal[ 'OK' ]:
        return dRetVal
      if 'AbortTransfer' in dRetVal and dRetVal[ 'AbortTransfer' ]:
        #Peer requested the transfer to stop: not an error
        self.__log.verbose( "Transfer aborted" )
        return S_OK()
      sentBytes += len( sBuffer )
      sBuffer = os.read( iFD, iPacketSize )
    self.sendEOF()
  except Exception as e:
    gLogger.exception( "Error while sending file" )
    return S_ERROR( "Error while sending file: %s" % str( e ) )
  #Full transfer done; previously the function fell through and returned
  #None, which breaks callers that test result[ 'OK' ].
  #TODO confirm callers expect the byte count as Value
  return S_OK( sentBytes )
def FDToNetwork(self, iFD):
    """Stream everything readable from file descriptor *iFD* over the transport.

    Data is read and sent in self.packetSize chunks after resetting the MD5
    accumulator and the byte counter. Returns S_ERROR on read/send failure and
    S_OK() when the peer aborts the transfer.
    """
    self.__oMD5 = md5.md5()
    chunkSize = self.packetSize
    self.__fileBytes = 0
    sentBytes = 0
    try:
        chunk = os.read(iFD, chunkSize)
        while chunk:
            sendResult = self.sendData(chunk)
            if not sendResult['OK']:
                return sendResult
            if sendResult.get('AbortTransfer'):
                # Remote side asked us to stop: treat as a clean end
                self.__log.verbose("Transfer aborted")
                return S_OK()
            sentBytes += len(chunk)
            chunk = os.read(iFD, chunkSize)
        self.sendEOF()
    except Exception as e:
        gLogger.exception("Error while sending file")
        return S_ERROR("Error while sending file: %s" % str(e))
def __RPCCallFunction(self, method, args):
    """
    Check the arguments then call the RPC function

    :type method: string
    :param method: name of the remote method to call (served by the handler's export_<method>)
    :type args: tuple
    :param args: positional arguments sent by the remote client

    :return: S_OK/S_ERROR
    """
    realMethod = "export_%s" % method
    gLogger.debug("RPC to %s" % realMethod)
    try:
        # Get the method we are trying to call
        oMethod = getattr(self, realMethod)
    except:
        # No export_<method> defined by the handler
        return S_ERROR("Unknown method %s" % method)
    # Check if the client sends correct arguments
    dRetVal = self.__checkExpectedArgumentTypes(method, args)
    if not dRetVal['OK']:
        return dRetVal
    # Lock the method with Semaphore to avoid too many calls at the same time
    self.__lockManager.lock("RPC/%s" % method)
    # Register the transport as idle-readable while the method runs
    self.__msgBroker.addTransportId(self.__trid,
                                    self.serviceInfoDict['serviceName'],
                                    idleRead=True)
    try:
        try:
            # Trying to execute the method
            uReturnValue = oMethod(*args)
            return uReturnValue
        finally:
            # Unlock method and detach the transport whatever happened
            self.__lockManager.unlock("RPC/%s" % method)
            self.__msgBroker.removeTransport(self.__trid, closeTransport=False)
    except Exception as e:
        gLogger.exception("Uncaught exception when serving RPC", "Function %s" % method, lException=e)
        return S_ERROR("Server error while serving %s: %s" % (method, str(e)))
def __realTrigger(self, eventName, params):
    """Dispatch *eventName* to every registered listener.

    Re-entrant triggers are collapsed: while the event is already being
    processed a nested trigger returns S_OK(0) immediately. Listener
    exceptions and malformed results are logged and skipped; the first
    non-OK listener result stops the dispatch and is returned. On success
    S_OK(<number of listeners>) is returned.
    """
    gEventSync.lock()
    try:
        if eventName not in self.__events:
            return S_ERROR("Event %s is not registered" % eventName)
        if eventName in self.__processingEvents:
            # Event already being served: do not recurse into the listeners
            return S_OK(0)
        # Snapshot the listener list so registrations during dispatch are safe
        listeners = list(self.__events[eventName])
        self.__processingEvents.add(eventName)
    finally:
        gEventSync.unlock()
    outcome = S_OK()
    for listener in listeners:
        try:
            listenerResult = listener(eventName, params)
        except Exception:
            gLogger.exception(
                "Listener %s for event %s raised an exception" % (listener.__name__, eventName))
            continue
        if not isinstance(listenerResult, dict) or "OK" not in listenerResult:
            gLogger.error(
                "Listener for event did not return a S_OK/S_ERROR structure",
                "%s %s" % (listener.__name__, eventName),
            )
            continue
        if not listenerResult["OK"]:
            # First failure wins and stops the dispatch
            outcome = listenerResult
            break
    gEventSync.lock()
    try:
        self.__processingEvents.discard(eventName)
    finally:
        try:
            gEventSync.unlock()
        except Exception:
            # A failing unlock must not mask the listeners' outcome
            pass
    if not outcome["OK"]:
        return outcome
    return S_OK(len(listeners))
def __RPCCallFunction(self, method, args):
  """
  Check the arguments then call the RPC function.

  :type method: string
  :param method: name of the remote method (served by the handler's export_<method>)
  :type args: tuple
  :param args: positional arguments sent by the remote client
  :return: S_OK/S_ERROR
  """
  realMethod = "export_%s" % method
  gLogger.debug("RPC to %s" % realMethod)
  try:
    oMethod = getattr(self, realMethod)
  except AttributeError:
    return S_ERROR("Unknown method %s" % method)
  dRetVal = self.__checkExpectedArgumentTypes(method, args)
  if not dRetVal["OK"]:
    return dRetVal
  #Use the same "RPC/<method>" lock key as the other RPC serving paths,
  #otherwise this call does not serialize with them
  self.__lockManager.lock("RPC/%s" % method)
  self.__msgBroker.addTransportId(self.__trid,
                                  self.serviceInfoDict["serviceName"],
                                  idleRead=True)
  try:
    try:
      uReturnValue = oMethod(*args)
      return uReturnValue
    finally:
      #Always release the lock and detach the transport
      self.__lockManager.unlock("RPC/%s" % method)
      self.__msgBroker.removeTransport(self.__trid, closeTransport=False)
  except Exception as v:
    gLogger.exception("Uncaught exception when serving RPC", "Function %s" % method)
    return S_ERROR("Server error while serving %s: %s" % (method, str(v)))
      #NOTE(review): fragment — tail of receiveData() plus a truncated
      #__processKeepAlive(); the enclosing def and the end of the loop are
      #outside this excerpt
      self.byteStream += retVal[ 'Value' ]
      #Abort if the accumulated buffer exceeds the allowed size
      if maxBufferSize and len( self.byteStream ) > maxBufferSize:
        return S_ERROR( "Read limit exceeded (%s chars)" % maxBufferSize )
      #Data is here! take it out from the bytestream, dencode and return
      data = self.byteStream[ :size ]
      self.byteStream = self.byteStream[ size: ]
      try:
        data = DEncode.decode( data )[0]
      except Exception, e:
        return S_ERROR( "Could not decode received data: %s" % str( e ) )
      if idleReceive:
        #Park the decoded message for a later explicit read
        self.receivedMessages.append( data )
        return S_OK()
      return data
    except Exception, e:
      gLogger.exception( "Network error while receiving data" )
      return S_ERROR( "Network error while receiving data: %s" % str( e ) )

  def __processKeepAlive( self, maxBufferSize, blockAfterKeepAlive = True ):
    """Consume and validate a keep-alive frame following its magic header."""
    gLogger.debug( "Received Keep Alive" )
    #Next message down the stream will be the ka data
    result = self.receiveData( maxBufferSize, blockAfterKeepAlive = False )
    if not result[ 'OK' ]:
      gLogger.debug( "Error while receiving keep alive: %s" % result[ 'Message' ] )
      return result
    #Is it a valid ka?
    kaData = result[ 'Value' ]
    for reqField in ( 'id', 'kaping' ):
      if reqField not in kaData:
        errMsg = "Invalid keep alive, missing %s" % reqField
        gLogger.debug( errMsg )
def __getCAStore( self ):
  """
  Build an X509 store with all known CA certificates and CRLs.

  The CA/CRL lists are cached at class level and refreshed at most every
  900 seconds; the cache is protected by __cachedCAsCRLsLoadLock.

  :return: S_OK( X509Store ) or S_ERROR if no CAs location is found
  """
  SocketInfo.__cachedCAsCRLsLoadLock.acquire()
  try:
    #Refresh the cache if empty or older than 15 minutes
    if not SocketInfo.__cachedCAsCRLs or time.time() - SocketInfo.__cachedCAsCRLsLastLoaded > 900:
      #Need to generate the CA Store
      casDict = {}
      crlsDict = {}
      casPath = Locations.getCAsLocation()
      if not casPath:
        return S_ERROR( "No valid CAs location found" )
      gLogger.debug( "CAs location is %s" % casPath )
      casFound = 0
      crlsFound = 0
      SocketInfo.__caStore = GSI.crypto.X509Store()
      for fileName in os.listdir( casPath ):
        filePath = os.path.join( casPath, fileName )
        if not os.path.isfile( filePath ):
          continue
        fObj = file( filePath, "rb" )
        pemData = fObj.read()
        fObj.close()
        #Try to load CA Cert
        try:
          caCert = GSI.crypto.load_certificate( GSI.crypto.FILETYPE_PEM, pemData )
          if caCert.has_expired():
            continue
          #Key on (subject, issuer); keep the copy with the latest expiration
          caID = ( caCert.get_subject().one_line(), caCert.get_issuer().one_line() )
          caNotAfter = caCert.get_not_after()
          if caID not in casDict:
            casDict[ caID ] = ( caNotAfter, caCert )
            casFound += 1
          else:
            if casDict[ caID ][0] < caNotAfter:
              casDict[ caID ] = ( caNotAfter, caCert )
          continue
        except:
          #Only complain for hashed CA files (*.0); other files may
          #legitimately not contain a certificate
          if fileName.find( ".0" ) == len( fileName ) - 2:
            gLogger.exception( "LOADING %s" % filePath )
        if 'IgnoreCRLs' not in self.infoDict or not self.infoDict[ 'IgnoreCRLs' ]:
          #Try to load CRL
          try:
            crl = GSI.crypto.load_crl( GSI.crypto.FILETYPE_PEM, pemData )
            if crl.has_expired():
              continue
            #Key on issuer; keep the CRL with the latest expiration
            crlID = crl.get_issuer().one_line()
            crlNotAfter = crl.get_not_after()
            if crlID not in crlsDict:
              crlsDict[ crlID ] = ( crlNotAfter, crl )
              crlsFound += 1
            else:
              if crlsDict[ crlID ][0] < crlNotAfter:
                crlsDict[ crlID ] = ( crlNotAfter, crl )
            continue
          except:
            #Only complain for hashed CRL files (*.r0)
            if fileName.find( ".r0" ) == len( fileName ) - 2:
              gLogger.exception( "LOADING %s" % filePath )
      gLogger.debug( "Loaded %s CAs [%s CRLs]" % ( casFound, crlsFound ) )
      SocketInfo.__cachedCAsCRLs = ( [ casDict[k][1] for k in casDict ], [ crlsDict[k][1] for k in crlsDict ] )
      SocketInfo.__cachedCAsCRLsLastLoaded = time.time()
  except:
    #NOTE(review): placeholder log message — consider a descriptive one
    gLogger.exception( "ASD" )
  finally:
    SocketInfo.__cachedCAsCRLsLoadLock.release()
  #Generate CA Store
  caStore = GSI.crypto.X509Store()
  caList = SocketInfo.__cachedCAsCRLs[0]
  for caCert in caList:
    caStore.add_cert( caCert )
  crlList = SocketInfo.__cachedCAsCRLs[1]
  for crl in crlList:
    caStore.add_crl( crl )
  return S_OK( caStore )
        #NOTE(review): fragment — tail of receiveData() plus a truncated
        #__processKeepAlive(); the enclosing def is outside this excerpt
        if maxBufferSize and len(self.byteStream) > maxBufferSize:
            return S_ERROR("Read limit exceeded (%s chars)" % maxBufferSize)
        #Data is here! take it out from the bytestream, dencode and return
        data = self.byteStream[:size]
        self.byteStream = self.byteStream[size:]
        try:
            data = DEncode.decode(data)[0]
        except Exception, e:
            return S_ERROR("Could not decode received data: %s" % str(e))
        if idleReceive:
            #Park the decoded message for a later explicit read
            self.receivedMessages.append(data)
            return S_OK()
        return data
    except Exception, e:
        gLogger.exception("Network error while receiving data")
        return S_ERROR("Network error while receiving data: %s" % str(e))

def __processKeepAlive(self, maxBufferSize, blockAfterKeepAlive=True):
    """Consume and validate a keep-alive frame following its magic header."""
    gLogger.debug("Received Keep Alive")
    #Next message down the stream will be the ka data
    result = self.receiveData(maxBufferSize, blockAfterKeepAlive=False)
    if not result['OK']:
        gLogger.debug("Error while receiving keep alive: %s" % result['Message'])
        return result
    #Is it a valid ka?
    kaData = result['Value']
    for reqField in ('id', 'kaping'):
        if reqField not in kaData:
            errMsg = "Invalid keep alive, missing %s" % reqField
def receiveData( self, maxBufferSize = 0, blockAfterKeepAlive = True, idleReceive = False ):
  """
  Receive and decode one message from the transport.

  Wire format is either "<size>:<payload>" or a keep-alive magic header.

  :param maxBufferSize: maximum accepted message size (0 = unlimited)
  :param blockAfterKeepAlive: forwarded to the keep-alive handler
  :param idleReceive: if True, queue the decoded message instead of returning it
  :return: the decoded message, or S_OK/S_ERROR for idle/error cases
  """
  self.__updateLastActionTimestamp()
  #Serve any message queued by a previous idle receive first
  if self.receivedMessages:
    return self.receivedMessages.pop( 0 )
  #Buffer size can't be less than 0
  maxBufferSize = max( maxBufferSize, 0 )
  try:
    #Look either for message length of keep alive magic string
    iSeparatorPosition = self.byteStream.find( ":", 0, 10 )
    keepAliveMagicLen = len( BaseTransport.keepAliveMagic )
    isKeepAlive = self.byteStream.find( BaseTransport.keepAliveMagic, 0, keepAliveMagicLen ) == 0
    #While not found the message length or the ka, keep receiving
    while iSeparatorPosition == -1 and not isKeepAlive:
      retVal = self._read( 16384 )
      #If error return
      if not retVal[ 'OK' ]:
        return retVal
      #If closed return error
      if not retVal[ 'Value' ]:
        return S_ERROR( "Peer closed connection" )
      #New data!
      self.byteStream += retVal[ 'Value' ]
      #Look again for either message length of ka magic string
      iSeparatorPosition = self.byteStream.find( ":", 0, 10 )
      isKeepAlive = self.byteStream.find( BaseTransport.keepAliveMagic, 0, keepAliveMagicLen ) == 0
      #Over the limit?
      if maxBufferSize and len( self.byteStream ) > maxBufferSize and iSeparatorPosition == -1 :
        return S_ERROR( "Read limit exceeded (%s chars)" % maxBufferSize )
    #Keep alive magic!
    if isKeepAlive:
      gLogger.debug( "Received keep alive header" )
      #Remove the ka magic from the buffer and process the keep alive
      self.byteStream = self.byteStream[ keepAliveMagicLen: ]
      return self.__processKeepAlive( maxBufferSize, blockAfterKeepAlive )
    #From here it must be a real message!
    #Process the size and remove the msg length from the bytestream
    pkgSize = int( self.byteStream[ :iSeparatorPosition ] )
    pkgData = self.byteStream[ iSeparatorPosition + 1: ]
    readSize = len( pkgData )
    if readSize >= pkgSize:
      #If we already have all the data we need
      data = pkgData[ :pkgSize ]
      self.byteStream = pkgData[ pkgSize: ]
    else:
      #If we still need to read stuff
      pkgMem = cStringIO.StringIO()
      pkgMem.write( pkgData )
      #Receive while there's still data to be received
      while readSize < pkgSize:
        retVal = self._read( pkgSize - readSize, skipReadyCheck = True )
        if not retVal[ 'OK' ]:
          return retVal
        if not retVal[ 'Value' ]:
          return S_ERROR( "Peer closed connection" )
        rcvData = retVal[ 'Value' ]
        readSize += len( rcvData )
        pkgMem.write( rcvData )
        if maxBufferSize and readSize > maxBufferSize:
          return S_ERROR( "Read limit exceeded (%s chars)" % maxBufferSize )
      #Data is here! take it out from the bytestream, dencode and return
      if readSize == pkgSize:
        data = pkgMem.getvalue()
        self.byteStream = ""
      else: #readSize > pkgSize:
        #Keep the extra bytes as the start of the next message
        pkgMem.seek( 0, 0 )
        data = pkgMem.read( pkgSize )
        self.byteStream = pkgMem.read()
    try:
      data = DEncode.decode( data )[0]
    except Exception as e:
      return S_ERROR( "Could not decode received data: %s" % str( e ) )
    if idleReceive:
      self.receivedMessages.append( data )
      return S_OK()
    return data
  except Exception as e:
    gLogger.exception( "Network error while receiving data" )
    return S_ERROR( "Network error while receiving data: %s" % str( e ) )
def __showException(self, threadedJob, exceptionInfo):
    """Report an exception raised by *threadedJob*, if a logger is available."""
    if not gLogger:
        # No logger configured: silently drop the report
        return
    gLogger.exception("Exception in thread", lExcInfo=exceptionInfo)
def receiveData(self, maxBufferSize=0, blockAfterKeepAlive=True, idleReceive=False):
    """
    Receive and decode one message from the transport.

    Wire format is either "<size>:<payload>" or a keep-alive magic header.

    :param maxBufferSize: maximum accepted message size (0 = unlimited)
    :param blockAfterKeepAlive: forwarded to the keep-alive handler
    :param idleReceive: if True, queue the decoded message instead of returning it
    :return: the decoded message, or S_OK/S_ERROR for idle/error cases
    """
    self.__updateLastActionTimestamp()
    #Serve any message queued by a previous idle receive first
    if self.receivedMessages:
        return self.receivedMessages.pop(0)
    #Buffer size can't be less than 0
    maxBufferSize = max(maxBufferSize, 0)
    try:
        #Look either for message length of keep alive magic string
        iSeparatorPosition = self.byteStream.find(":", 0, 10)
        keepAliveMagicLen = len(BaseTransport.keepAliveMagic)
        isKeepAlive = self.byteStream.find(BaseTransport.keepAliveMagic, 0, keepAliveMagicLen) == 0
        #While not found the message length or the ka, keep receiving
        while iSeparatorPosition == -1 and not isKeepAlive:
            retVal = self._read(16384)
            #If error return
            if not retVal['OK']:
                return retVal
            #If closed return error
            if not retVal['Value']:
                return S_ERROR("Peer closed connection")
            #New data!
            self.byteStream += retVal['Value']
            #Look again for either message length of ka magic string
            iSeparatorPosition = self.byteStream.find(":", 0, 10)
            isKeepAlive = self.byteStream.find(
                BaseTransport.keepAliveMagic, 0, keepAliveMagicLen) == 0
            #Over the limit?
            if maxBufferSize and len(
                    self.byteStream) > maxBufferSize and iSeparatorPosition == -1:
                return S_ERROR("Read limit exceeded (%s chars)" % maxBufferSize)
        #Keep alive magic!
        if isKeepAlive:
            gLogger.debug("Received keep alive header")
            #Remove the ka magic from the buffer and process the keep alive
            self.byteStream = self.byteStream[keepAliveMagicLen:]
            return self.__processKeepAlive(maxBufferSize, blockAfterKeepAlive)
        #From here it must be a real message!
        #Process the size and remove the msg length from the bytestream
        pkgSize = int(self.byteStream[:iSeparatorPosition])
        pkgData = self.byteStream[iSeparatorPosition + 1:]
        readSize = len(pkgData)
        if readSize >= pkgSize:
            #If we already have all the data we need
            data = pkgData[:pkgSize]
            self.byteStream = pkgData[pkgSize:]
        else:
            #If we still need to read stuff
            pkgMem = cStringIO.StringIO()
            pkgMem.write(pkgData)
            #Receive while there's still data to be received
            while readSize < pkgSize:
                retVal = self._read(pkgSize - readSize, skipReadyCheck=True)
                if not retVal['OK']:
                    return retVal
                if not retVal['Value']:
                    return S_ERROR("Peer closed connection")
                rcvData = retVal['Value']
                readSize += len(rcvData)
                pkgMem.write(rcvData)
                if maxBufferSize and readSize > maxBufferSize:
                    return S_ERROR("Read limit exceeded (%s chars)" % maxBufferSize)
            #Data is here! take it out from the bytestream, dencode and return
            if readSize == pkgSize:
                data = pkgMem.getvalue()
                self.byteStream = ""
            else: #readSize > pkgSize:
                #Keep the extra bytes as the start of the next message
                pkgMem.seek(0, 0)
                data = pkgMem.read(pkgSize)
                self.byteStream = pkgMem.read()
        try:
            data = DEncode.decode(data)[0]
        except Exception as e:
            return S_ERROR("Could not decode received data: %s" % str(e))
        if idleReceive:
            self.receivedMessages.append(data)
            return S_OK()
        return data
    except Exception as e:
        gLogger.exception("Network error while receiving data")
        return S_ERROR("Network error while receiving data: %s" % str(e))
def __showException( self, threadedJob, exceptionInfo ):
  """Report an exception raised by *threadedJob*, if a logger is available."""
  if not gLogger:
    #No logger configured: silently drop the report
    return
  gLogger.exception( "Exception in thread", lExcInfo = exceptionInfo )
def _connect(self): """ Establish the connection. It uses the URL discovered in __discoverURL. In case the connection cannot be established, __discoverURL is called again, and _connect calls itself. We stop after trying self.__nbOfRetry * self.__nbOfUrls """ # Check if the useServerCertificate configuration changed # Note: I am not really sure that all this block makes # any sense at all since all these variables are # evaluated in __discoverCredentialsToUse if gConfig.useServerCertificate() != self.__useCertificates: if self.__forceUseCertificates is None: self.__useCertificates = gConfig.useServerCertificate() self.kwargs[self.KW_USE_CERTIFICATES] = self.__useCertificates # The server certificate use context changed, rechecking the transport sanity result = self.__checkTransportSanity() if not result['OK']: return result # Take all the extra credentials self.__discoverExtraCredentials() if not self.__initStatus['OK']: return self.__initStatus if self.__enableThreadCheck: self.__checkThreadID() gLogger.debug("Connecting to: %s" % self.serviceURL) try: # Calls the transport method of the apropriate protocol. # self.__URLTuple[1:3] = [server name, port, System/Component] transport = gProtocolDict[self.__URLTuple[0]]['transport'](self.__URLTuple[1:3], **self.kwargs) # the socket timeout is the default value which is 1. # later we increase to 5 retVal = transport.initAsClient() # If we have an issue connecting if not retVal['OK']: # We try at most __nbOfRetry each URLs if self.__retry < self.__nbOfRetry * self.__nbOfUrls - 1: # Recompose the URL (why not using self.serviceURL ? ) url = "%s://%s:%d/%s" % (self.__URLTuple[0], self.__URLTuple[1], int(self.__URLTuple[2]), self.__URLTuple[3]) # Add the url to the list of banned URLs if it is not already there. (Can it happen ? I don't think so) if url not in self.__bannedUrls: self.__bannedUrls += [url] # Why only printing in this case ? 
if len(self.__bannedUrls) < self.__nbOfUrls: gLogger.notice("Non-responding URL temporarily banned", "%s" % url) # Increment the retry couunter self.__retry += 1 # If it is our last attempt for each URL, we increase the timeout if self.__retryCounter == self.__nbOfRetry - 1: transport.setSocketTimeout(5) # we increase the socket timeout in case the network is not good gLogger.info("Retry connection", ": %d to %s" % (self.__retry, self.serviceURL)) # If we tried all the URL, we increase the global counter (__retryCounter), and sleep if len(self.__bannedUrls) == self.__nbOfUrls: self.__retryCounter += 1 # we run only one service! In that case we increase the retry delay. self.__retryDelay = 3. / self.__nbOfUrls if self.__nbOfUrls > 1 else 2 gLogger.info("Waiting %f seconds before retry all service(s)" % self.__retryDelay) time.sleep(self.__retryDelay) # rediscover the URL self.__discoverURL() # try to reconnect return self._connect() else: return retVal except Exception as e: gLogger.exception(lException=True, lExcInfo=True) return S_ERROR("Can't connect to %s: %s" % (self.serviceURL, repr(e))) # We add the connection to the transport pool trid = getGlobalTransportPool().add(transport) return S_OK((trid, transport))
def __getCAStore(self):
    """
    Build an X509 store with all known CA certificates and CRLs.

    The CA/CRL lists are cached at class level and refreshed at most every
    900 seconds; the cache is protected by __cachedCAsCRLsLoadLock.

    :return: S_OK(X509Store) or S_ERROR if no CAs location is found
    """
    SocketInfo.__cachedCAsCRLsLoadLock.acquire()
    try:
        # Refresh the cache if empty or older than 15 minutes
        if not SocketInfo.__cachedCAsCRLs or time.time() - SocketInfo.__cachedCAsCRLsLastLoaded > 900:
            # Need to generate the CA Store
            casDict = {}
            crlsDict = {}
            casPath = Locations.getCAsLocation()
            if not casPath:
                return S_ERROR("No valid CAs location found")
            gLogger.debug("CAs location is %s" % casPath)
            casFound = 0
            crlsFound = 0
            SocketInfo.__caStore = GSI.crypto.X509Store()
            for fileName in os.listdir(casPath):
                filePath = os.path.join(casPath, fileName)
                if not os.path.isfile(filePath):
                    continue
                fObj = file(filePath, "rb")
                pemData = fObj.read()
                fObj.close()
                # Try to load CA Cert
                try:
                    caCert = GSI.crypto.load_certificate(GSI.crypto.FILETYPE_PEM, pemData)
                    if caCert.has_expired():
                        continue
                    # Key on (subject, issuer); keep the copy with the latest expiration
                    caID = (caCert.get_subject().one_line(), caCert.get_issuer().one_line())
                    caNotAfter = caCert.get_not_after()
                    if caID not in casDict:
                        casDict[caID] = (caNotAfter, caCert)
                        casFound += 1
                    else:
                        if casDict[caID][0] < caNotAfter:
                            casDict[caID] = (caNotAfter, caCert)
                    continue
                except BaseException:
                    # Only complain for hashed CA files (*.0); other files may
                    # legitimately not contain a certificate
                    if fileName.find(".0") == len(fileName) - 2:
                        gLogger.exception("LOADING %s" % filePath)
                if 'IgnoreCRLs' not in self.infoDict or not self.infoDict['IgnoreCRLs']:
                    # Try to load CRL
                    try:
                        crl = GSI.crypto.load_crl(GSI.crypto.FILETYPE_PEM, pemData)
                        if crl.has_expired():
                            continue
                        crlID = crl.get_issuer().one_line()
                        # NOTE(review): unlike the CA branch, a later CRL for the same
                        # issuer unconditionally replaces the earlier one (no not-after
                        # comparison) and crlsFound counts every load — confirm intended
                        crlsDict[crlID] = crl
                        crlsFound += 1
                        continue
                    except Exception as e:
                        # Only complain for hashed CRL files (*.r0)
                        if fileName.find(".r0") == len(fileName) - 2:
                            gLogger.exception("LOADING %s ,Exception: %s" % (filePath, str(e)))
            gLogger.debug("Loaded %s CAs [%s CRLs]" % (casFound, crlsFound))
            SocketInfo.__cachedCAsCRLs = ([casDict[k][1] for k in casDict],
                                          [crlsDict[k] for k in crlsDict])
            SocketInfo.__cachedCAsCRLsLastLoaded = time.time()
    except BaseException:
        gLogger.exception("Failed to init CA store")
    finally:
        SocketInfo.__cachedCAsCRLsLoadLock.release()
    # Generate CA Store
    caStore = GSI.crypto.X509Store()
    caList = SocketInfo.__cachedCAsCRLs[0]
    for caCert in caList:
        caStore.add_cert(caCert)
    crlList = SocketInfo.__cachedCAsCRLs[1]
    for crl in crlList:
        caStore.add_crl(crl)
    return S_OK(caStore)
def _connect(self):
    """ Establish the connection.
        It uses the URL discovered in __discoverURL.
        In case the connection cannot be established, __discoverURL
        is called again, and _connect calls itself.
        We stop after trying self.__nbOfRetry * self.__nbOfUrls

        :return: S_OK()/S_ERROR()
    """
    # Check if the useServerCertificate configuration changed
    # Note: I am not really sure that all this block makes
    # any sense at all since all these variables are
    # evaluated in __discoverCredentialsToUse
    if gConfig.useServerCertificate() != self.__useCertificates:
        if self.__forceUseCertificates is None:
            self.__useCertificates = gConfig.useServerCertificate()
            self.kwargs[self.KW_USE_CERTIFICATES] = self.__useCertificates
            # The server certificate use context changed, rechecking the transport sanity
            result = self.__checkTransportSanity()
            if not result['OK']:
                return result

    # Take all the extra credentials
    self.__discoverExtraCredentials()
    if not self.__initStatus['OK']:
        return self.__initStatus
    if self.__enableThreadCheck:
        self.__checkThreadID()

    gLogger.debug("Trying to connect to: %s" % self.serviceURL)
    try:
        # Calls the transport method of the appropriate protocol.
        # self.__URLTuple[1:3] = [server name, port, System/Component]
        transport = gProtocolDict[self.__URLTuple[0]]['transport'](self.__URLTuple[1:3], **self.kwargs)
        # the socket timeout is the default value which is 1.
        # later we increase to 5
        retVal = transport.initAsClient()
        # We try at most __nbOfRetry each URLs
        if not retVal['OK']:
            gLogger.warn("Issue getting socket:",
                         "%s : %s : %s" % (transport, self.__URLTuple, retVal['Message']))
            # We try at most __nbOfRetry each URLs
            if self.__retry < self.__nbOfRetry * self.__nbOfUrls - 1:
                # Recompose the URL (why not using self.serviceURL ? )
                url = "%s://%s:%d/%s" % (self.__URLTuple[0], self.__URLTuple[1],
                                         int(self.__URLTuple[2]), self.__URLTuple[3])
                # Add the url to the list of banned URLs if it is not already there.
                # (Can it happen ? I don't think so)
                if url not in self.__bannedUrls:
                    gLogger.warn("Non-responding URL temporarily banned", "%s" % url)
                    self.__bannedUrls += [url]
                # Increment the retry counter
                self.__retry += 1
                # 16.07.20 CHRIS: I guess this setSocketTimeout does not behave as expected.
                # If the initasClient did not work, we anyway re-enter the whole method,
                # so a new transport object is created.
                # However, it migh be that this timeout value was propagated down to the
                # SocketInfoFactory singleton, and thus used, but that means that the timeout
                # specified in parameter was then void.
                # If it is our last attempt for each URL, we increase the timeout
                if self.__retryCounter == self.__nbOfRetry - 1:
                    transport.setSocketTimeout(5)  # we increase the socket timeout in case the network is not good
                gLogger.info("Retry connection", ": %d to %s" % (self.__retry, self.serviceURL))
                # If we tried all the URL, we increase the global counter (__retryCounter), and sleep
                if len(self.__bannedUrls) == self.__nbOfUrls:
                    self.__retryCounter += 1
                    # we run only one service! In that case we increase the retry delay.
                    self.__retryDelay = 3. / self.__nbOfUrls if self.__nbOfUrls > 1 else 2
                    gLogger.info("Waiting %f seconds before retry all service(s)" % self.__retryDelay)
                    time.sleep(self.__retryDelay)
                # rediscover the URL
                self.__discoverURL()
                # try to reconnect
                return self._connect()
            else:
                return retVal
    except Exception as e:
        # NOTE(review): lException/lExcInfo are passed as booleans here — confirm
        # the logger expects the exception object / exc_info tuple instead
        gLogger.exception(lException=True, lExcInfo=True)
        return S_ERROR("Can't connect to %s: %s" % (self.serviceURL, repr(e)))
    # We add the connection to the transport pool
    gLogger.debug("Connected to: %s" % self.serviceURL)
    trid = getGlobalTransportPool().add(transport)
    return S_OK((trid, transport))