def init_logging(log_level):
    """
    Initialise the logging by adding an observer to the global log publisher.

    :param str log_level: The minimum log level to log messages for.
    """
    # Filter everything below the requested level; additionally cap the
    # chatty HTTP/1.1 client factory namespace at warnings and above.
    predicate = LogLevelFilterPredicate(LogLevel.levelWithName(log_level))
    predicate.setLogLevelForNamespace(
        'twisted.web.client._HTTP11ClientFactory', LogLevel.warn)
    observer = FilteringLogObserver(
        textFileLogObserver(sys.stdout), [predicate])
    globalLogPublisher.addObserver(observer)
def init_logging(log_level):
    """
    Initialise the logging by adding an observer to the global log publisher.

    :param str log_level: The minimum log level to log messages for.
    """
    # NOTE(review): this function is defined twice in this file; this later
    # definition shadows the earlier identical one — confirm and drop a copy.
    minimum_level = LogLevel.levelWithName(log_level)
    level_predicate = LogLevelFilterPredicate(minimum_level)
    # The HTTP/1.1 client factory logs connection churn below warning level;
    # raise the floor for just that namespace.
    level_predicate.setLogLevelForNamespace(
        'twisted.web.client._HTTP11ClientFactory', LogLevel.warn)
    stdout_observer = textFileLogObserver(sys.stdout)
    globalLogPublisher.addObserver(
        FilteringLogObserver(stdout_observer, [level_predicate]))
def getLogger(level):
    """
    Attach a level-filtered stdout observer to the global log publisher.

    :param str level: Attribute name of the minimum ``LogLevel``
        (e.g. ``"info"``).
    :return: A no-op callable that discards the event it is given.
    """
    minimum = getattr(LogLevel, level)
    predicate = LogLevelFilterPredicate(defaultLogLevel=minimum)
    if minimum > LogLevel.debug:
        # Unless debugging, quieten the "stdout" namespace to warnings.
        predicate.setLogLevelForNamespace('stdout', LogLevel.warn)
    globalLogPublisher.addObserver(
        FilteringLogObserver(stdoutFileLogObserver(), [predicate]))
    return lambda event: None
def setup_logging(log_level, log_name, log_directory=""): """ Configure the logger to use the specified log file and log level """ log_filter = LogLevelFilterPredicate() log_filter.setLogLevelForNamespace( "orscanner", LogLevel.levelWithName(log_level.lower())) # Set up logging log_file = DailyLogFile(log_name, log_directory) file_observer = FileLogObserver(log_file, log_event_format) console_observer = FileLogObserver(sys.stdout, log_event_format) file_filter_observer = FilteringLogObserver(file_observer, (log_filter, )) console_filter_observer = FilteringLogObserver(console_observer, (log_filter, )) globalLogPublisher.addObserver(file_filter_observer) globalLogPublisher.addObserver(console_filter_observer)
def configure(verbose, quiet, shutup):
    """
    Configure stdlib logging — and, when verbose, Twisted's log stream.

    :param verbose: Enable DEBUG output and begin Twisted logging to stdout.
    :param quiet: Restrict stdlib logging to ERROR and above.
    :param shutup: Restrict stdlib logging to FATAL only (takes precedence
        over ``quiet`` and ``verbose``).
    """
    client._HTTP11ClientFactory.noisy = False
    root = logging.getLogger()
    if not root.handlers:
        stream_handler = logging.StreamHandler(sys.stdout)
        stream_handler.setFormatter(logging.Formatter(LOG_FORMAT))
        root.addHandler(stream_handler)
    if verbose:
        from twisted.logger import (
            LogLevel, globalLogBeginner, textFileLogObserver,
            FilteringLogObserver, LogLevelFilterPredicate)
        predicate = LogLevelFilterPredicate(defaultLogLevel=LogLevel.warn)
        # Per-namespace floors: silence forwarded stdout almost entirely,
        # keep twisted/mqtt at warnings.
        for namespace, floor in (("stdout", LogLevel.critical),
                                 ("twisted", LogLevel.warn),
                                 ("mqtt", LogLevel.warn)):
            predicate.setLogLevelForNamespace(namespace=namespace, level=floor)
        globalLogBeginner.beginLoggingTo(
            [FilteringLogObserver(observer=textFileLogObserver(sys.stdout),
                                  predicates=[predicate])],
            redirectStandardIO=False)
    # Precedence: shutup > quiet > verbose > default INFO.
    if shutup:
        root.setLevel(logging.FATAL)
    elif quiet:
        root.setLevel(logging.ERROR)
    elif verbose:
        root.setLevel(logging.DEBUG)
    else:
        root.setLevel(logging.INFO)
class IRCd(Service): def __init__(self, configFileName): self.config = Config(self, configFileName) self.boundPorts = {} self.loadedModules = {} self._loadedModuleData = {} self._unloadingModules = {} self.commonModules = set() self.userCommands = {} self.serverCommands = {} self.channelModes = ({}, {}, {}, {}) self.channelStatuses = {} self.channelStatusSymbols = {} self.channelStatusOrder = [] self.channelModeTypes = {} self.userModes = ({}, {}, {}, {}) self.userModeTypes = {} self.actions = {} self.storage = None self.storageSyncer = None self.dataCache = {} self.functionCache = {} self.serverID = None self.name = None self.isupport_tokens = { "CASEMAPPING": "strict-rfc1459", "CHANTYPES": "#", } self._uid = self._genUID() self.users = {} self.userNicks = CaseInsensitiveDictionary() self.channels = CaseInsensitiveDictionary(WeakValueDictionary) self.servers = {} self.serverNames = CaseInsensitiveDictionary() self.recentlyQuitUsers = {} self.recentlyQuitServers = {} self.recentlyDestroyedChannels = CaseInsensitiveDictionary() self.pruneRecentlyQuit = None self.pruneRecentChannels = None self._logFilter = LogLevelFilterPredicate() filterObserver = FilteringLogObserver(globalLogPublisher, (self._logFilter,)) self.log = Logger("txircd", observer=filterObserver) self.startupTime = None def startService(self): self.log.info("Starting up...") self.startupTime = now() self.log.info("Loading configuration...") self.config.reload() self.name = self.config["server_name"] self.serverID = self.config["server_id"] self.log.info("Loading storage...") self.storage = shelve.open(self.config["datastore_path"], writeback=True) self.storageSyncer = LoopingCall(self.storage.sync) self.storageSyncer.start(self.config.get("storage_sync_interval", 5), now=False) self.log.info("Starting processes...") self.pruneRecentlyQuit = LoopingCall(self.pruneQuit) self.pruneRecentlyQuit.start(10, now=False) self.pruneRecentChannels = LoopingCall(self.pruneChannels) 
self.pruneRecentChannels.start(15, now=False) self.log.info("Loading modules...") self._loadModules() self.log.info("Binding ports...") self._bindPorts() self.log.info("txircd started!") try: self._logFilter.setLogLevelForNamespace("txircd", LogLevel.levelWithName(self.config["log_level"])) except (KeyError, InvalidLogLevelError): self._logFilter.setLogLevelForNamespace("txircd", LogLevel.warn) self.runActionStandard("startup") def stopService(self): stopDeferreds = [] self.log.info("Disconnecting servers...") serverList = self.servers.values() # Take the list of server objects self.servers = {} # And then destroy the server dict to inhibit server objects generating lots of noise for server in serverList: if server.nextClosest == self.serverID: stopDeferreds.append(server.disconnectedDeferred) allUsers = self.users.keys() for user in allUsers: if user[:3] == server.serverID: del self.users[user] server.transport.loseConnection() self.log.info("Disconnecting users...") userList = self.users.values() # Basically do the same thing I just did with the servers self.users = {} for user in userList: if user.transport: stopDeferreds.append(user.disconnectedDeferred) user.transport.loseConnection() self.log.info("Unloading modules...") moduleList = self.loadedModules.keys() for module in moduleList: self._unloadModule(module, False) # Incomplete unload is done to save time and because side effects are destroyed anyway self.log.info("Stopping processes...") if self.pruneRecentlyQuit.running: self.pruneRecentlyQuit.stop() if self.pruneRecentChannels.running: self.pruneRecentChannels.stop() self.log.info("Closing data storage...") if self.storageSyncer.running: self.storageSyncer.stop() self.storage.close() # a close() will sync() also self.log.info("Releasing ports...") stopDeferreds.extend(self._unbindPorts()) return DeferredList(stopDeferreds) def _loadModules(self): for module in getPlugins(IModuleData, txircd.modules): if module.name in self.loadedModules: continue if 
module.core or module.name in self.config["modules"]: self._loadModuleData(module) for moduleName in self.config["modules"]: if moduleName not in self.loadedModules: self.log.warn("The module {module} failed to load.", module=moduleName) def loadModule(self, moduleName): """ Loads a module of the specified name. Raises ModuleLoadError if the module cannot be loaded. If the specified module is currently being unloaded, returns the DeferredList specified by the module when it was unloading with a callback to try to load the module again once it succeeds. """ if moduleName in self._unloadingModules: deferList = self._unloadingModules[moduleName] deferList.addCallback(self._tryLoadAgain, moduleName) return deferList for module in getPlugins(IModuleData, txircd.modules): if module.name == moduleName: rebuild(importlib.import_module(module.__module__)) # getPlugins doesn't recompile modules, so let's do that ourselves. self._loadModuleData(module) self.log.info("Loaded module {module}.", module=moduleName) break def _tryLoadAgain(self, _, moduleName): self.loadModule(moduleName) def _loadModuleData(self, module): if not IModuleData.providedBy(module): raise ModuleLoadError ("???", "Module does not implement module interface") if not module.name: raise ModuleLoadError ("???", "Module did not provide a name") if module.name in self.loadedModules: self.log.debug("Not loading {module.name} because it's already loaded", module=module) return self.log.debug("Beginning to load {module.name}...", module=module) module.hookIRCd(self) try: module.verifyConfig(self.config) except ConfigError as e: raise ModuleLoadError(module.name, e) self.log.debug("Loading hooks from {module.name}...", module=module) moduleData = { "channelmodes": module.channelModes(), "usermodes": module.userModes(), "actions": module.actions(), "usercommands": module.userCommands(), "servercommands": module.serverCommands() } newChannelModes = ({}, {}, {}, {}) newChannelStatuses = {} newUserModes = ({}, {}, 
{}, {}) newActions = {} newUserCommands = {} newServerCommands = {} common = False self.log.debug("Processing hook data from {module.name}...", module=module) for mode in moduleData["channelmodes"]: if mode[0] in self.channelModeTypes: raise ModuleLoadError (module.name, "Tries to implement channel mode +{} when that mode is already implemented.".format(mode[0])) if not IMode.providedBy(mode[2]): raise ModuleLoadError (module.name, "Returns a channel mode object (+{}) that doesn't implement IMode.".format(mode[0])) if mode[1] == ModeType.Status: if mode[4] in self.channelStatusSymbols: raise ModuleLoadError (module.name, "Tries to create a channel rank with symbol {} when that symbol is already in use.".format(mode[4])) try: newChannelStatuses[mode[0]] = (mode[4], mode[3], mode[2]) except IndexError: raise ModuleLoadError (module.name, "Specifies channel status mode {} without a rank or symbol".format(mode[0])) else: newChannelModes[mode[1]][mode[0]] = mode[2] common = True for mode in moduleData["usermodes"]: if mode[0] in self.userModeTypes: raise ModuleLoadError (module.name, "Tries to implement user mode +{} when that mode is already implemented.".format(mode[0])) if not IMode.providedBy(mode[2]): raise ModuleLoadError (module.name, "Returns a user mode object (+{}) that doesn't implement IMode.".format(mode[0])) newUserModes[mode[1]][mode[0]] = mode[2] common = True for action in moduleData["actions"]: if action[0] not in newActions: newActions[action[0]] = [(action[2], action[1])] else: newActions[action[0]].append((action[2], action[1])) for command in moduleData["usercommands"]: if not ICommand.providedBy(command[2]): raise ModuleLoadError (module.name, "Returns a user command object ({}) that doesn't implement ICommand.".format(command[0])) if command[0] not in newUserCommands: newUserCommands[command[0]] = [] newUserCommands[command[0]].append((command[2], command[1])) for command in moduleData["servercommands"]: if not ICommand.providedBy(command[2]): 
raise ModuleLoadError (module.name, "Returns a server command object ({}) that doesnt implement ICommand.".format(command[0])) if command[0] not in newServerCommands: newServerCommands[command[0]] = [] newServerCommands[command[0]].append((command[2], command[1])) common = True if not common: common = module.requiredOnAllServers self.log.debug("Loaded data from {module.name}; committing data and calling hooks...", module=module) module.load() self.loadedModules[module.name] = module self._loadedModuleData[module.name] = moduleData if common: self.commonModules.add(module.name) self.runActionStandard("moduleload", module.name) for modeType, typeSet in enumerate(newChannelModes): for mode, implementation in typeSet.iteritems(): self.channelModeTypes[mode] = modeType self.channelModes[modeType][mode] = implementation for mode, data in newChannelStatuses.iteritems(): self.channelModeTypes[mode] = ModeType.Status self.channelStatuses[mode] = data self.channelStatusSymbols[data[0]] = mode for index, status in enumerate(self.channelStatusOrder): if self.channelStatuses[status][1] < data[1]: self.channelStatusOrder.insert(index, mode) break else: self.channelStatusOrder.append(mode) for modeType, typeSet in enumerate(newUserModes): for mode, implementation in typeSet.iteritems(): self.userModeTypes[mode] = modeType self.userModes[modeType][mode] = implementation for action, actionList in newActions.iteritems(): if action not in self.actions: self.actions[action] = [] for actionData in actionList: for index, handlerData in enumerate(self.actions[action]): if handlerData[1] < actionData[1]: self.actions[action].insert(index, actionData) break else: self.actions[action].append(actionData) for command, dataList in newUserCommands.iteritems(): if command not in self.userCommands: self.userCommands[command] = [] for data in dataList: for index, cmd in enumerate(self.userCommands[command]): if cmd[1] < data[1]: self.userCommands[command].insert(index, data) break else: 
self.userCommands[command].append(data) for command, dataList in newServerCommands.iteritems(): if command not in self.serverCommands: self.serverCommands[command] = [] for data in dataList: for index, cmd in enumerate(self.serverCommands[command]): if cmd[1] < data[1]: self.serverCommands[command].insert(index, data) break else: self.serverCommands[command].append(data) self.log.debug("Module {module.name} is now fully loaded.", module=module) def unloadModule(self, moduleName): """ Unloads the loaded module with the given name. Raises ValueError if the module cannot be unloaded because it's a core module. """ self._unloadModule(moduleName, True) self.log.info("Unloaded module {module}.", module=moduleName) def _unloadModule(self, moduleName, fullUnload): unloadDeferreds = [] if moduleName not in self.loadedModules: return module = self.loadedModules[moduleName] if fullUnload and module.core: raise ValueError ("The module you're trying to unload is a core module.") moduleData = self._loadedModuleData[moduleName] d = module.unload() if d is not None: unloadDeferreds.append(d) if fullUnload: d = module.fullUnload() if d is not None: unloadDeferreds.append(d) for modeData in moduleData["channelmodes"]: if fullUnload: # Unset modes on full unload if modeData[1] == ModeType.Status: for channel in self.channels.itervalues(): removeFromChannel = [] for user, userData in channel.user.iteritems(): if modeData[0] in userData["status"]: removeFromChannel.append((False, modeData[0], user.uuid)) channel.setModes(removeFromChannel, self.serverID) elif modeData[1] == ModeType.List: for channel in self.channels.itervalues(): if modeData[0] in channel.modes: removeFromChannel = [] for paramData in channel.modes[modeData[0]]: removeFromChannel.append((False, modeData[0], paramData[0])) channel.setModes(removeFromChannel, self.serverID) else: for channel in self.channels.itervalues(): if modeData[0] in channel.modes: channel.setModes([(False, modeData[0], 
channel.modes[modeData[0]])], self.serverID) if modeData[1] == ModeType.Status: del self.channelStatuses[modeData[0]] del self.channelStatusSymbols[modeData[4]] self.channelStatusOrder.remove(modeData[0]) else: del self.channelModes[modeData[1]][modeData[0]] del self.channelModeTypes[modeData[0]] for modeData in moduleData["usermodes"]: if fullUnload: # Unset modes on full unload if modeData[1] == ModeType.List: for user in self.users.itervalues(): if modeData[0] in user.modes: removeFromUser = [] for paramData in user.modes[modeData[0]]: removeFromUser.append((False, modeData[0], paramData[0])) user.setModes(removeFromUser, self.serverID) else: for user in self.users.itervalues(): if modeData[0] in user.modes: user.setModes([(False, modeData[0], user.modes[modeData[0]])], self.serverID) del self.userModes[modeData[1]][modeData[0]] del self.userModeTypes[modeData[0]] for actionData in moduleData["actions"]: self.actions[actionData[0]].remove((actionData[2], actionData[1])) if not self.actions[actionData[0]]: del self.actions[actionData[0]] for commandData in moduleData["usercommands"]: self.userCommands[commandData[0]].remove((commandData[2], commandData[1])) if not self.userCommands[commandData[0]]: del self.userCommands[commandData[0]] for commandData in moduleData["servercommands"]: self.serverCommands[commandData[0]].remove((commandData[2], commandData[1])) if not self.serverCommands[commandData[0]]: del self.serverCommands[commandData[0]] del self.loadedModules[moduleName] del self._loadedModuleData[moduleName] if fullUnload: self.runActionStandard("moduleunload", module.name) if unloadDeferreds: deferList = DeferredList(unloadDeferreds) self._unloadingModules[moduleName] = deferList deferList.addCallback(self._removeFromUnloadingList, moduleName) return deferList def _removeFromUnloadingList(self, _, moduleName): del self._unloadingModules[moduleName] def reloadModule(self, moduleName): """ Reloads the module with the given name. 
Returns a DeferredList if the module unloads with one or more Deferreds. May raise ModuleLoadError if the module cannot be loaded. """ deferList = self._unloadModule(moduleName, False) if deferList is None: deferList = self.loadModule(moduleName) else: deferList.addCallback(lambda result: self.loadModule(moduleName)) return deferList def verifyConfig(self, config): # IRCd if "server_name" not in config: raise ConfigValidationError("server_name", "required item not found in configuration file.") if not isinstance(config["server_name"], basestring): raise ConfigValidationError("server_name", "value must be a string") if len(config["server_name"]) > 64: config["server_name"] = config["server_name"][:64] self.logConfigValidationWarning("server_name", "value is too long and has been truncated", config["server_name"]) if not re.match(r"^[a-zA-Z0-9.-]+\.[a-zA-Z0-9.-]+$", config["server_name"]): raise ConfigValidationError("server_name", "server name must look like a valid hostname.") if "server_id" in config: if not isinstance(config["server_id"], basestring): raise ConfigValidationError("server_id", "value must be a string") else: config["server_id"] = config["server_id"].upper() else: randFromName = random.Random(config["server_name"]) serverID = randFromName.choice(string.digits) + randFromName.choice(string.digits + string.ascii_uppercase) + randFromName.choice(string.digits + string.ascii_uppercase) config["server_id"] = serverID if len(config["server_id"]) != 3 or not config["server_id"].isalnum() or not config["server_id"][0].isdigit(): raise ConfigValidationError("server_id", "value must be a 3-character alphanumeric string starting with a number.") if "server_description" not in config: raise ConfigValidationError("server_description", "required item not found in configuration file.") if not isinstance(config["server_description"], basestring): raise ConfigValidationError("server_description", "value must be a string") if not config["server_description"]: raise 
ConfigValidationError("server_description", "value must not be an empty string") if len(config["server_description"]) > 255: config["server_description"] = config["server_description"][:255] self.logConfigValidationWarning("server_description", "value is too long and has been truncated", config["server_description"]) if "network_name" not in config: raise ConfigValidationError("network_name", "required item not found in configuration file.") if not isinstance(config["network_name"], basestring): raise ConfigValidationError("network_name", "value must be a string") if not config["network_name"]: raise ConfigValidationError("network_name", "value must not be an empty string") if " " in config["network_name"]: raise ConfigValidationError("network_name", "value cannot have spaces") if len(config["network_name"]) > 32: config["network_name"] = config["network_name"][:32] self.logConfigValidationWarning("network_name", "value is too long", config["network_name"]) if "bind_client" not in config: config["bind_client"] = [ "tcp:6667:interface={::}" ] self.logConfigValidationWarning("bind_client", "no default client binding specified", "[ \"tcp:6667:interface={::}\" ]") if not isinstance(config["bind_client"], list): raise ConfigValidationError("bind_client", "value must be a list") for bindDesc in config["bind_client"]: if not isinstance(bindDesc, basestring): raise ConfigValidationError("bind_client", "every entry must be a string") if "bind_server" not in config: config["bind_server"] = [] if not isinstance(config["bind_server"], list): raise ConfigValidationError("bind_server", "value must be a list") for bindDesc in config["bind_server"]: if not isinstance(bindDesc, basestring): raise ConfigValidationError("bind_server", "every entry must be a string") if "modules" not in config: config["modules"] = [] if not isinstance(config["modules"], list): raise ConfigValidationError("modules", "value must be a list") for module in config["modules"]: if not isinstance(module, 
basestring): raise ConfigValidationError("modules", "every entry must be a string") if "links" in config: if not isinstance(config["links"], dict): raise ConfigValidationError("links", "value must be a dictionary") for desc, server in config["links"].iteritems(): if not isinstance(desc, basestring): raise ConfigValidationError("links", "\"{}\" is an invalid server description".format(desc)) if not isinstance(server, dict): raise ConfigValidationError("links", "values for \"{}\" must be a dictionary".format(desc)) if "connect_descriptor" not in server: raise ConfigValidationError("links", "server \"{}\" must contain a \"connect_descriptor\" value".format(desc)) if "in_password" in server: if not isinstance(server["in_password"], basestring): config["links"][desc]["in_password"] = str(server["in_password"]) if "out_password" in server: if not isinstance(server["out_password"], basestring): config["links"][desc]["out_password"] = str(server["out_password"]) if "datastore_path" not in config: config["datastore_path"] = "data.db" if "storage_sync_interval" in config and not isinstance(config["storage_sync_interval"], int): raise ConfigValidationError(config["storage_sync_interval"], "invalid number") # Channels if "channel_name_length" in config: if not isinstance(config["channel_name_length"], int) or config["channel_name_length"] < 0: raise ConfigValidationError("channel_name_length", "invalid number") elif config["channel_name_length"] > 64: config["channel_name_length"] = 64 self.logConfigValidationWarning("channel_name_length", "value is too large", 64) if "modes_per_line" in config: if not isinstance(config["modes_per_line"], int) or config["modes_per_line"] < 0: raise ConfigValidationError("modes_per_line", "invalid number") elif config["modes_per_line"] > 20: config["modes_per_line"] = 20 self.logConfigValidationWarning("modes_per_line", "value is too large", 20) if "channel_listmode_limit" in config: if not isinstance(config["channel_listmode_limit"], int) or 
config["channel_listmode_limit"] < 0: raise ConfigValidationError("channel_listmode_limit", "invalid number") if config["channel_listmode_limit"] > 256: config["channel_listmode_limit"] = 256 self.logConfigValidationWarning("channel_listmode_limit", "value is too large", 256) # Users if "user_registration_timeout" in config: if not isinstance(config["user_registration_timeout"], int) or config["user_registration_timeout"] < 0: raise ConfigValidationError("user_registration_timeout", "invalid number") elif config["user_registration_timeout"] < 10: config["user_registration_timeout"] = 10 self.logConfigValidationWarning("user_registration_timeout", "timeout could be too short for clients to register in time", 10) if "user_ping_frequency" in config and (not isinstance(config["user_ping_frequency"], int) or config["user_ping_frequency"] < 0): raise ConfigValidationError("user_ping_frequency", "invalid number") if "hostname_length" in config: if not isinstance(config["hostname_length"], int) or config["hostname_length"] < 0: raise ConfigValidationError("hostname_length", "invalid number") elif config["hostname_length"] > 64: config["hostname_length"] = 64 self.logConfigValidationWarning("hostname_length", "value is too large", 64) elif config["hostname_length"] < 4: config["hostname_length"] = 4 self.logConfigValidationWarning("hostname_length", "value is too small", 4) if "ident_length" in config: if not isinstance(config["ident_length"], int) or config["ident_length"] < 0: raise ConfigValidationError("ident_length", "invalid number") elif config["ident_length"] > 12: config["ident_length"] = 12 self.logConfigValidationWarning("ident_length", "value is too large", 12) elif config["ident_length"] < 1: config["ident_length"] = 1 self.logConfigValidationWarning("ident_length", "value is too small", 1) if "gecos_length" in config: if not isinstance(config["gecos_length"], int) or config["gecos_length"] < 0: raise ConfigValidationError("gecos_length", "invalid number") elif 
config["gecos_length"] > 128: config["gecos_length"] = 128 self.logConfigValidationWarning("gecos_length", "value is too large", 128) elif config["gecos_length"] < 1: config["gecos_length"] = 1 self.logConfigValidationWarning("gecos_length", "value is too small", 1) if "user_listmode_limit" in config: if not isinstance(config["user_listmode_limit"], int) or config["user_listmode_limit"] < 0: raise ConfigValidationError("user_listmode_limit", "invalid number") if config["user_listmode_limit"] > 256: config["user_listmode_limit"] = 256 self.logConfigValidationWarning("user_listmode_limit", "value is too large", 256) # Servers if "server_registration_timeout" in config: if not isinstance(config["server_registration_timeout"], int) or config["server_registration_timeout"] < 0: raise ConfigValidationError("server_registration_timeout", "invalid number") elif config["server_registration_timeout"] < 10: config["server_registration_timeout"] = 10 self.logConfigValidationWarning("server_registration_timeout", "timeout could be too short for servers to register in time", 10) if "server_ping_frequency" in config and (not isinstance(config["server_ping_frequency"], int) or config["server_ping_frequency"] < 0): raise ConfigValidationError("server_ping_frequency", "invalid number") for module in self.loadedModules.itervalues(): module.verifyConfig(config) def logConfigValidationWarning(self, key, message, default): self.log.warn("Config value \"{configKey}\" is invalid ({message}); the value has been set to a default of \"{default}\".", configKey=key, message=message, default=default) def rehash(self): """ Reloads the configuration file and applies changes. 
""" self.log.info("Rehashing...") self.config.reload() d = self._unbindPorts() # Unbind the ports that are bound if d: # And then bind the new ones DeferredList(d).addCallback(lambda result: self._bindPorts()) else: self._bindPorts() try: self._logFilter.setLogLevelForNamespace("txircd", LogLevel.levelWithName(self.config["log_level"])) except (KeyError, InvalidLogLevelError): pass # If we can't set a new log level, we'll keep the old one for module in self.loadedModules.itervalues(): module.rehash() def _bindPorts(self): for bindDesc in self.config["bind_client"]: try: endpoint = serverFromString(reactor, unescapeEndpointDescription(bindDesc)) except ValueError as e: self.log.error(e) continue listenDeferred = endpoint.listen(UserFactory(self)) listenDeferred.addCallback(self._savePort, bindDesc, "client") listenDeferred.addErrback(self._logNotBound, bindDesc) for bindDesc in self.config["bind_server"]: try: endpoint = serverFromString(reactor, unescapeEndpointDescription(bindDesc)) except ValueError as e: self.log.error(e) continue listenDeferred = endpoint.listen(ServerListenFactory(self)) listenDeferred.addCallback(self._savePort, bindDesc, "server") listenDeferred.addErrback(self._logNotBound, bindDesc) def _unbindPorts(self): deferreds = [] for port in self.boundPorts.itervalues(): d = port.stopListening() if d: deferreds.append(d) return deferreds def _savePort(self, port, desc, portType): self.boundPorts[desc] = port self.log.debug("Bound endpoint '{endpointDescription}' for {portType} connections.", endpointDescription=desc, portType=portType) def _logNotBound(self, err, desc): self.log.error("Could not bind '{endpointDescription}': {errorMsg}", endpointDescription=desc, errorMsg=err) def createUUID(self): """ Gets the next UUID for a new client. 
""" newUUID = self.serverID + self._uid.next() while newUUID in self.users: # It'll take over 1.5 billion connections to loop around, but we still newUUID = self.serverID + self._uid.next() # want to be extra safe and avoid collisions self.log.debug("Generated new UUID {uuid}", uuid=newUUID) return newUUID def _genUID(self): uid = "AAAAAA" while True: yield uid uid = self._incrementUID(uid) def _incrementUID(self, uid): if uid == "Z": # The first character must be a letter return "A" # So wrap that around if uid[-1] == "9": return self._incrementUID(uid[:-1]) + "A" if uid[-1] == "Z": return uid[:-1] + "0" return uid[:-1] + chr(ord(uid[-1]) + 1) def pruneQuit(self): compareTime = now() - timedelta(seconds=10) remove = [] for uuid, timeQuit in self.recentlyQuitUsers.iteritems(): if timeQuit < compareTime: remove.append(uuid) for uuid in remove: del self.recentlyQuitUsers[uuid] remove = [] for serverID, timeQuit in self.recentlyQuitServers.iteritems(): if timeQuit < compareTime: remove.append(serverID) for serverID in remove: del self.recentlyQuitServers[serverID] def pruneChannels(self): removeChannels = [] for channel, remove in self.recentlyDestroyedChannels.iteritems(): if remove: removeChannels.append(channel) elif channel not in self.channels: self.recentlyDestroyedChannels[channel] = True for channel in removeChannels: del self.recentlyDestroyedChannels[channel] def generateISupportList(self): isupport = self.isupport_tokens.copy() statusSymbolOrder = "".join([self.channelStatuses[status][0] for status in self.channelStatusOrder]) isupport["CHANMODES"] = ",".join(["".join(modes) for modes in self.channelModes]) isupport["CHANNELLEN"] = self.config.get("channel_name_length", 64) isupport["NETWORK"] = self.config["network_name"] isupport["PREFIX"] = "({}){}".format("".join(self.channelStatusOrder), statusSymbolOrder) isupport["STATUSMSG"] = statusSymbolOrder isupport["USERMODES"] = ",".join(["".join(modes) for modes in self.userModes]) 
self.runActionStandard("buildisupport", isupport) isupportList = [] for key, val in isupport.iteritems(): if val is None: isupportList.append(key) else: isupportList.append("{}={}".format(key, val)) return isupportList def connectServer(self, name): """ Connect a server with the given name in the configuration. Returns a Deferred for the connection when we can successfully connect or None if the server is already connected or if we're unable to find information for that server in the configuration. """ if name in self.serverNames: return None if name not in self.config.get("links", {}): return None serverConfig = self.config["links"][name] endpoint = clientFromString(reactor, unescapeEndpointDescription(serverConfig["connect_descriptor"])) d = endpoint.connect(ServerConnectFactory(self)) d.addCallback(self._completeServerConnection, name) return d def _completeServerConnection(self, result, name): self.log.info("Connected to server {serverName}", serverName=name) self.runActionStandard("initiateserverconnection", result) def broadcastToServers(self, fromServer, command, *params, **kw): """ Broadcasts a message to all connected servers. The fromServer parameter should be the server from which the message came; if this server is the originating server, specify None for fromServer. 
""" for server in self.servers.itervalues(): if server.nextClosest == self.serverID and server != fromServer: server.sendMessage(command, *params, **kw) def _getActionModes(self, actionName, *params, **kw): users = [] channels = [] if "users" in kw: users = kw["users"] if "channels" in kw: channels = kw["channels"] functionList = [] if users: genericUserActionName = "modeactioncheck-user-{}".format(actionName) genericUserActionNameWithChannel = "modeactioncheck-user-withchannel-{}".format(actionName) for modeType in self.userModes: for mode, modeObj in modeType.iteritems(): if actionName not in modeObj.affectedActions: continue priority = modeObj.affectedActions[actionName] actionList = [] # Because Python doesn't properly capture variables in lambdas, we have to force static capture # by wrapping lambdas in more lambdas. # I wish Python wasn't this gross. for action in self.actions.get("modeactioncheck-user", []): actionList.append(((lambda action, actionName, mode: lambda user, *params: action[0](actionName, mode, user, *params))(action, actionName, mode), action[1])) for action in self.actions.get("modeactioncheck-user-withchannel", []): for channel in channels: actionList.append(((lambda action, actionName, mode, channel: lambda user, *params: action[0](actionName, mode, user, channel, *params))(action, actionName, mode, channel), action[1])) for action in self.actions.get(genericUserActionName, []): actionList.append(((lambda action, mode: lambda user, *params: action[0](mode, user, *params))(action, mode), action[1])) for action in self.actions.get(genericUserActionNameWithChannel, []): for channel in channels: actionList.append(((lambda action, mode, channel: lambda user, *params: action[0](mode, user, channel, *params))(action, mode, channel), action[1])) modeUserActionName = "modeactioncheck-user-{}-{}".format(mode, actionName) modeUserActionNameWithChannel = "modeactioncheck-user-withchannel-{}-{}".format(mode, actionName) for action in 
self.actions.get(modeUserActionNameWithChannel, []): for channel in channels: actionList.append(((lambda action, channel: lambda user, *params: action[0](user, channel, *params))(action, channel), action[1])) actionList = sorted(self.actions.get(modeUserActionName, []) + actionList, key=lambda action: action[1], reverse=True) applyUsers = [] for user in users: for action in actionList: param = action[0](user, *params) if param is not None: if param is not False: applyUsers.append((user, param)) break for user, param in applyUsers: functionList.append(((lambda modeObj, actionName, user, param: lambda *params: modeObj.apply(actionName, user, param, *params))(modeObj, actionName, user, param), priority)) if channels: genericChannelActionName = "modeactioncheck-channel-{}".format(actionName) genericChannelActionNameWithUser = "******".format(actionName) for modeType in self.channelModes: for mode, modeObj in modeType.iteritems(): if actionName not in modeObj.affectedActions: continue priority = modeObj.affectedActions[actionName] actionList = [] for action in self.actions.get("modeactioncheck-channel", []): actionList.append(((lambda action, actionName, mode: lambda channel, *params: action[0](actionName, mode, channel, *params))(action, actionName, mode), action[1])) for action in self.actions.get("modeactioncheck-channel-withuser", []): for user in users: actionList.append(((lambda action, actionName, mode, user: lambda channel, *params: action[0](actionName, mode, channel, user, *params))(action, actionName, mode, user), action[1])) for action in self.actions.get(genericChannelActionName, []): actionList.append(((lambda action, mode: lambda channel, *params: action[0](mode, channel, *params))(action, mode), action[1])) for action in self.actions.get(genericChannelActionNameWithUser, []): for user in users: actionList.append(((lambda action, mode, user: lambda channel, *params: action[0](mode, channel, user, *params))(action, mode, user), action[1])) 
modeChannelActionName = "modeactioncheck-channel-{}-{}".format(mode, actionName) modeChannelActionNameWithUser = "******".format(mode, actionName) for action in self.actions.get(modeChannelActionNameWithUser, []): for user in users: actionList.append(((lambda action, user: lambda channel, *params: action[0](channel, user, *params))(action, user), action[1])) actionList = sorted(self.actions.get(modeChannelActionName, []) + actionList, key=lambda action: action[1], reverse=True) applyChannels = [] for channel in channels: for action in actionList: param = action[0](channel, *params) if param is not None: if param is not False: applyChannels.append((channel, param)) break for channel, param in applyChannels: functionList.append(((lambda modeObj, actionName, channel, param: lambda *params: modeObj.apply(actionName, channel, param, *params))(modeObj, actionName, channel, param), priority)) return functionList def _getActionFunctionList(self, actionName, *params, **kw): functionList = self.actions.get(actionName, []) functionList = functionList + self._getActionModes(actionName, *params, **kw) return sorted(functionList, key=lambda action: action[1], reverse=True) def _combineActionFunctionLists(self, actionLists): """ Combines multiple lists of action functions into one. Assumes all lists are sorted. Takes a dict mapping action names to their action function lists. Returns a list in priority order (highest to lowest) of (actionName, function) tuples. """ fullActionList = [] for actionName, actionList in actionLists.iteritems(): insertPos = 0 for action in actionList: try: while fullActionList[insertPos][1] > action[1]: insertPos += 1 fullActionList.insert(insertPos, (actionName, action[0])) except IndexError: fullActionList.append((actionName, action[0])) insertPos += 1 return fullActionList def runActionStandard(self, actionName, *params, **kw): """ Calls all functions for a given action with the given parameters in priority order. 
Accepts the 'users' and 'channels' keyword arguments to determine which mode handlers should be included. """ actionList = self._getActionFunctionList(actionName, *params, **kw) for action in actionList: action[0](*params) def runActionUntilTrue(self, actionName, *params, **kw): """ Calls functions for a given action with the given parameters in priority order until one of them returns a true value. Returns True when one of the functions returned True. Accepts the 'users' and 'channels' keyword arguments to determine which mode handlers should be included. """ actionList = self._getActionFunctionList(actionName, *params, **kw) for action in actionList: if action[0](*params): return True return False def runActionUntilFalse(self, actionName, *params, **kw): """ Calls functions for a given action with the given parameters in priority order until one of them returns a false value. Returns True when one of the functions returned False. Accepts the 'users' and 'channels' keyword arguments to determine which mode handlers should be included. """ actionList = self._getActionFunctionList(actionName, *params, **kw) for action in actionList: if not action[0](*params): return True return False def runActionUntilValue(self, actionName, *params, **kw): """ Calls functions for a given action with the given parameters in priority order until one of them returns a non-None value. Returns the value returned by the function that returned a non-None value. Accepts the 'users' and 'channels' keyword arguments to determine which mode handlers should be included. """ actionList = self._getActionFunctionList(actionName, *params, **kw) for action in actionList: value = action[0](*params) if value is not None: return value return None def runActionFlagTrue(self, actionName, *params, **kw): """ Calls all functions for a given action with the given parameters in priority order. Returns True when one of the functions returns a true value. 
Accepts the 'users' and 'channels' keyword arguments to determine which mode handlers should be included. """ oneIsTrue = False actionList = self._getActionFunctionList(actionName, *params, **kw) for action in actionList: if action[0](*params): oneIsTrue = True return oneIsTrue def runActionFlagFalse(self, actionName, *params, **kw): """ Calls all functions for a given action with the given parameters in priority order. Returns True when one of the functions returns a false value. Accepts the 'users' and 'channels' keyword arguments to determine which mode handlers should be included. """ oneIsFalse = False actionList = self._getActionFunctionList(actionName, *params, **kw) for action in actionList: if action[0](*params): oneIsFalse = True return oneIsFalse def runActionProcessing(self, actionName, data, *params, **kw): """ Calls functions for a given action with the given parameters in priority order until the provided data is all processed (the data parameter becomes empty). Accepts 'users' and 'channels' keyword arguments to determine which mode handlers should be included. """ actionList = self._getActionFunctionList(actionName, data, *params, **kw) for action in actionList: action[0](data, *params) if not data: return def runActionProcessingMultiple(self, actionName, dataList, *params, **kw): """ Calls functions for a given action with the given parameters in priority order until the provided data is all processed (all of the data structures in the dataList parameter become empty). Accepts 'users' and 'channels' keyword arguments to determine which mode handlers should be included. """ paramList = dataList + params actionList = self._getActionFunctionList(actionName, *paramList, **kw) for action in actionList: action[0](*paramList) for data in dataList: if data: break else: return def runComboActionStandard(self, actionList, **kw): """ Calls all functions for the given actions with the given parameters in priority order. 
Actions are specifed as a list of tuples: [ ("action1", param1, param2, ...), ("action2", param1, param2, ...) ] Accepts 'users' and 'channels' keyword arguments to determine which mode handlers should be included. """ actionFuncLists = {} actionParameters = {} for action in actionList: parameters = action[1:] actionParameters[action[0]] = parameters actionFuncLists[action[0]] = self._getActionFunctionList(action[0], *parameters, **kw) funcList = self._combineActionFunctionLists(actionFuncLists) for actionName, actionFunc in funcList: actionFunc(*actionParameters[actionName]) def runComboActionUntilTrue(self, actionList, **kw): """ Calls functions for the given actions with the given parameters in priority order until one of the functions returns a true value. Actions are specified as a list of tuples: [ ("action1", param1, param2, ...), ("action2", param1, param2, ...) ] Returns True if one of the functions returned a true value. Accepts 'users' and 'channels' keyword arguments to determine which mode handlers should be included. """ actionFuncLists = {} actionParameters = {} for action in actionList: parameters = action[1:] actionParameters[action[0]] = parameters actionFuncLists[action[0]] = self._getActionFunctionList(action[0], *parameters, **kw) funcList = self._combineActionFunctionLists(actionFuncLists) for actionName, actionFunc in funcList: if actionFunc(*actionParameters[actionName]): return True return False def runComboActionUntilFalse(self, actionList, **kw): """ Calls functions for the given actions with the given parameters in priority order until one of the functions returns a false value. Actions are specified as a list of tuples: [ ("action1", param1, param2, ...), ("action2", param1, param2, ...) ] Returns True if one of the functions returned a false value. Accepts 'users' and 'channels' keyword arguments to determine which mode handlers should be included. 
""" actionFuncLists = {} actionParameters = {} for action in actionList: parameters = action[1:] actionParameters[action[0]] = parameters actionFuncLists[action[0]] = self._getActionFunctionList(action[0], *parameters, **kw) funcList = self._combineActionFunctionLists(actionFuncLists) for actionName, actionFunc in funcList: if not actionFunc(*actionParameters[actionName]): return True return False def runComboActionUntilValue(self, actionList, **kw): """ Calls functions for the given actions with the given parameters in priority order until one of the functions returns a non-None value. Actions are specified as a list of tuples: [ ("action1", param1, param2, ...), ("action2", param1, param2, ...) ] Returns the value returned by the function that returned a non-None value. Accepts 'users' and 'channels' keyword arguments to determine which mode handlers should be included. """ actionFuncLists = {} actionParameters = {} for action in actionList: parameters = action[1:] actionParameters[action[0]] = parameters actionFuncLists[action[0]] = self._getActionFunctionList(action[0], *parameters, **kw) funcList = self._combineActionFunctionLists(actionFuncLists) for actionName, actionFunc in funcList: value = actionFunc(*actionParameters[actionName]) if value is not None: return value return None def runComboActionFlagTrue(self, actionList, **kw): """ Calls all functions for the given actions with the given parameters in priority order. Actions are specified as a list of tuples: [ ("action1", param1, param2, ...), ("action2", param1, param2, ...) ] Returns True if any of the functions called returned a true value. Accepts 'users' and 'channels' keyword arguments to determine which mode handlers should be included. 
""" actionFuncLists = {} actionParameters = {} for action in actionList: parameters = action[1:] actionParameters[action[0]] = parameters actionFuncLists[action[0]] = self._getActionFunctionList(action[0], *parameters, **kw) funcList = self._combineActionFunctionLists(actionFuncLists) oneIsTrue = False for actionName, actionFunc in funcList: if actionFunc(*actionParameters[actionName]): oneIsTrue = True return oneIsTrue def runComboActionFlagFalse(self, actionList, **kw): """ Calls all functions for the given actions with the given parameters in priority order. Actions are specified as a list of tuples: [ ("action1", param1, param2, ...), ("action2", param1, param2, ...) ] Returns True if any of the functions called returned a false value. Accepts 'users' and 'channels' keyword arguments to determine which mode handlers should be included. """ actionFuncLists = {} actionParameters = {} for action in actionList: parameters = action[1:] actionParameters[action[0]] = parameters actionFuncLists[action[0]] = self._getActionFunctionList(action[0], *parameters, **kw) funcList = self._combineActionFunctionLists(actionFuncLists) oneIsFalse = False for actionName, actionFunc in funcList: if not actionFunc(*actionParameters[actionName]): oneIsFalse = True return oneIsFalse def runComboActionProcessing(self, data, actionList, **kw): """ Calls functions for the given actions with the given parameters in priority order until the data given has been processed (the data parameter becomes empty). Actions are specified as a list of tuples: [ ("action1", param1, param2, ...), ("action2", param1, param2, ...) ] Accepts 'users' and 'channels' keyword arguments to determine which mode handlers should be included. 
""" actionFuncLists = {} actionParameters = {} for action in actionList: parameters = [data] + action[1:] actionParameters[action[0]] = parameters actionFuncLists[action[0]] = self._getActionFunctionList(action[0], *parameters, **kw) funcList = self._combineActionFunctionLists(actionFuncLists) for actionName, actionFunc in funcList: actionFunc(*actionParameters[actionName]) if not data: break def runComboActionProcessingMultiple(self, dataList, actionList, **kw): """ Calls functions for the given actions with the given parameters in priority order until the data given has been processed (all the data items in the dataList parameter become empty). Actions are specified as a list of tuples: [ ("action1", param1, param2, ...), ("action2", param1, param2, ...) ] Accepts 'users' and 'channels' keyword arguments to determine which mode handlers should be included. """ actionFuncLists = {} actionParameters = {} for action in actionList: parameters = dataList + action[1:] actionParameters[action[0]] = parameters actionFuncLists[action[0]] = self._getActionFunctionList(action[0], *parameters, **kw) funcList = self._combineActionFunctionLists(actionFuncLists) for actionName, actionFunc in funcList: actionFunc(*actionParameters[actionName]) for data in dataList: if data: break else: return
def setup_structured_logging(
    hs,
    config,
    log_config: dict,
    logBeginner: LogBeginner,
    redirect_stdlib_logging: bool = True,
) -> LogPublisher:
    """
    Set up Twisted's structured logging system.

    Builds one observer per configured drain, wraps them all in a single
    stoppable publisher, applies per-namespace level filtering, and begins
    logging through the supplied LogBeginner.

    Args:
        hs: The homeserver to use.
        config (HomeserverConfig): The configuration of the Synapse homeserver.
        log_config (dict): The log configuration to use.
        logBeginner: the twisted.logger LogBeginner used to start logging.
        redirect_stdlib_logging: if True, also route stdlib `logging` records
            into this pipeline.

    Raises:
        ConfigError: if no_redirect_stdio is set, if no drains are configured,
            or if a drain has an unknown type.
    """
    if config.no_redirect_stdio:
        raise ConfigError(
            "no_redirect_stdio cannot be defined using structured logging.")

    logger = Logger()

    if "drains" not in log_config:
        raise ConfigError(
            "The logging configuration requires a list of drains.")

    observers = []  # type: List[ILogObserver]

    # One observer per drain; dispatch on the drain type.
    for observer in parse_drain_configs(log_config["drains"]):
        # Pipe drains
        if observer.type == DrainType.CONSOLE:
            logger.debug("Starting up the {name} console logger drain",
                         name=observer.name)
            observers.append(SynapseFileLogObserver(observer.location))
        elif observer.type == DrainType.CONSOLE_JSON:
            logger.debug("Starting up the {name} JSON console logger drain",
                         name=observer.name)
            observers.append(jsonFileLogObserver(observer.location))
        elif observer.type == DrainType.CONSOLE_JSON_TERSE:
            logger.debug(
                "Starting up the {name} terse JSON console logger drain",
                name=observer.name,
            )
            observers.append(
                TerseJSONToConsoleLogObserver(observer.location, metadata={}))

        # File drains
        elif observer.type == DrainType.FILE:
            logger.debug("Starting up the {name} file logger drain",
                         name=observer.name)
            # Line-buffered append so log lines reach disk promptly.
            log_file = open(observer.location, "at", buffering=1,
                            encoding="utf8")
            observers.append(SynapseFileLogObserver(log_file))
        elif observer.type == DrainType.FILE_JSON:
            logger.debug("Starting up the {name} JSON file logger drain",
                         name=observer.name)
            log_file = open(observer.location, "at", buffering=1,
                            encoding="utf8")
            observers.append(jsonFileLogObserver(log_file))
        elif observer.type == DrainType.NETWORK_JSON_TERSE:
            # Network drain: location is a (host, port) pair.
            metadata = {"server_name": hs.config.server_name}
            log_observer = TerseJSONToTCPLogObserver(
                hs=hs,
                host=observer.location[0],
                port=observer.location[1],
                metadata=metadata,
                maximum_buffer=observer.options.maximum_buffer,
            )
            log_observer.start()
            observers.append(log_observer)
        else:
            # We should never get here, but, just in case, throw an error.
            raise ConfigError("%s drain type cannot be configured" %
                              (observer.type, ))

    publisher = StoppableLogPublisher(*observers)
    log_filter = LogLevelFilterPredicate()

    for namespace, namespace_config in log_config.get(
            "loggers", DEFAULT_LOGGERS).items():
        # Set the log level for twisted.logger.Logger namespaces
        log_filter.setLogLevelForNamespace(
            namespace,
            stdlib_log_level_to_twisted(namespace_config.get("level", "INFO")),
        )

        # Also set the log levels for the stdlib logger namespaces, to prevent
        # them getting to PythonStdlibToTwistedLogger and having to be formatted
        if "level" in namespace_config:
            logging.getLogger(namespace).setLevel(
                namespace_config.get("level"))

    f = FilteringLogObserver(publisher, [log_filter])
    lco = LogContextObserver(f)

    if redirect_stdlib_logging:
        stuff_into_twisted = PythonStdlibToTwistedLogger(lco)
        stdliblogger = logging.getLogger()
        stdliblogger.addHandler(stuff_into_twisted)

    # Always redirect standard I/O, otherwise other logging outputs might miss
    # it.
    logBeginner.beginLoggingTo([lco], redirectStandardIO=True)

    return publisher
if __name__ == "__main__": # Parse the command line arguments parser = argparse.ArgumentParser(description="A modular Twisted IRC bot.") parser.add_argument("-c", "--config", help="The configuration file to use", type=str, default="heufybot.yaml") parser.add_argument("-f", "--logfile", help="The file the log will be written to", type=str, default="heufybot.log") parser.add_argument("-l", "--loglevel", help="The logging level the bot will use", type=str, default="INFO") options = parser.parse_args() # Start the bot heufybot = HeufyBot(options.config) # Determine the logging level logFilter = LogLevelFilterPredicate() try: logFilter.setLogLevelForNamespace("heufybot", LogLevel.levelWithName(options.loglevel.lower())) invalidLogLevel = False except InvalidLogLevelError: logFilter.setLogLevelForNamespace("heufybot", LogLevel.info) invalidLogLevel = True # Set up logging logFile = DailyLogFile("heufybot.log", "") fileObserver = FileLogObserver(logFile, logFormat) fileFilterObserver = FilteringLogObserver(fileObserver, (logFilter,)) consoleFilterObserver = FilteringLogObserver(consoleLogObserver, (logFilter,)) heufybot.log = Logger("heufybot") globalLogPublisher.addObserver(fileFilterObserver) globalLogPublisher.addObserver(consoleFilterObserver) heufybot.log.info("Starting bot...")
class AmcrestRealm: def requestAvatar(self, avatarId, mind, *interfaces): if IFTPShell in interfaces: return (IFTPShell, FTPShell(filepath.FilePath(VIDEOROOT)), lambda: None) if IMessageDelivery in interfaces: return (IMessageDelivery, ConsoleMessageDelivery(), lambda: None) raise NotImplementedError( "Unable to provide avatar for interfaces provided ({})".format( interfaces)) log = Logger("amcrest") predicate = LogLevelFilterPredicate(LogLevel.warn) predicate.setLogLevelForNamespace("amcrest", LogLevel.debug) globalLogPublisher.addObserver( FilteringLogObserver(textFileLogObserver(sys.stderr), (predicate, ))) portal = Portal(AmcrestRealm(), [ FilePasswordDB( os.path.join(os.path.dirname(os.path.abspath(__file__)), "passwords.txt")) ]) ftpfactory = FTPFactory(portal) ftpfactory.allowAnonymous = False ftpfactory.protocol = MonitoredFTP hafactory = HAFactory()
def startLogging(settings, stream=None, level=LogLevel.debug): global predicate fileObserver = logObserver(stream) predicate = LogLevelFilterPredicate(defaultLogLevel=level) if settings.options.debug_mqtt: predicate.setLogLevelForNamespace('kotori.daq.services.mig', LogLevel.debug) predicate.setLogLevelForNamespace('kotori.daq.application.mqttkit', LogLevel.debug) if settings.options.debug_mqtt_driver: predicate.setLogLevelForNamespace('kotori.daq.intercom.mqtt', LogLevel.debug) predicate.setLogLevelForNamespace('mqtt', LogLevel.debug) predicate.setLogLevelForNamespace('paho.mqtt', LogLevel.debug) else: predicate.setLogLevelForNamespace('kotori.daq.intercom.mqtt', LogLevel.info) predicate.setLogLevelForNamespace('mqtt', LogLevel.info) predicate.setLogLevelForNamespace('paho.mqtt', LogLevel.info) if settings.options.debug_influx: predicate.setLogLevelForNamespace('kotori.daq.storage.influx', LogLevel.debug) if settings.options.debug_io: predicate.setLogLevelForNamespace('kotori.io', LogLevel.debug) observers = [ FilteringLogObserver(observer=fileObserver, predicates=[predicate]) ] globalLogBeginner.beginLoggingTo(observers)
def get_logger(logname='yombolog', **kwargs):
    """
    Returns a logger object that allows logging of error messages.

    **Usage**:

    .. code-block:: python

       from yombo.core.log import get_logger

       logger = get_logger("module.ModuleName")
       logger.debug("Some status line, debug level items.")
       logger.info("ModuleName has finished starting is ready.")
       logger.warn("A warning!!")
       logger.error("Something really bad happened! I should quit.")

    :param logname: Name of the module or library.
    :type logname: string
    :return: logger object
    """
    global loggers
    # A simple cache of existing loggers...
    if logname in loggers:
        return loggers[logname]

    global configCache
    loglevel = None
    source = kwargs.get('source', logname)
    # BUG FIX: the original read kwargs.get('source', False) here, so the
    # json flag could never actually be supplied by callers.
    json = kwargs.get('json', False)

    # Determine the logging level: read the [logging] section of yombo.ini
    # once, on the first logger created.
    if len(loggers) == 0:
        config_parser = ConfigParser.SafeConfigParser()
        try:
            fp = open('yombo.ini')
            config_parser.readfp(fp)
            ini = config_parser
            for option in ini.options('logging'):
                value = ini.get('logging', option)
                configCache[option] = value
            fp.close()
        except IOError:
            pass
        except ConfigParser.NoSectionError:
            pass

    logFilter = LogLevelFilterPredicate()
    try:
        if logname in configCache:
            iniLogLevel = configCache[logname].lower()
            logFilter.setLogLevelForNamespace(logname, LogLevel.levelWithName(iniLogLevel))
        invalidLogLevel = False
    except InvalidLogLevelError:
        logFilter.setLogLevelForNamespace(logname, LogLevel.info)
        invalidLogLevel = True

    # Set up logging
    consoleFilterObserver = FilteringLogObserver(consoleLogObserver, (logFilter,))
    logger = Logger(namespace=logname, source=source, observer=consoleFilterObserver)
    loggers[logname] = logger

    # BUG FIX: warn *after* the logger is created and cached; the original
    # dereferenced loggers[logname] before assignment, raising KeyError.
    if invalidLogLevel:
        logger.warn(
            "yombo.ini file contained invalid log level {invalidLevel}, level has been set to INFO instead.",
            invalidLevel=configCache[logname].lower())

    global logFirstRun
    if logFirstRun is True:
        logFirstRun = False
        # This doesn't appear to be working yet...
        # globalLogPublisher.addObserver(jsonFileLogObserver(io.open("yombo.log.json", "a")))

    return loggers[logname]
def get_logger(logname="yombolog", **kwargs): """ Returns a logger object that allows logging of error messages. **Usage**: .. code-block:: python from yombo.core.log import get_logger logger = get_logger("module.ModuleName") logger.debug("Some status line, debug level items.") logger.info("ModuleName has finished starting is ready.") logger.warn("A warning!!") logger.error("Something really bad happened! I should quit.") :param logname: Name of the module or library. :type logname: string :return: logger object """ global loggers global observers global log_levels global open_files # A simple cache or existing loggers... if logname in loggers: return loggers[logname] source = kwargs.get("source", logname) # Determine the logging level if len(loggers) == 0: try: from yombo.core.settings import logger_settings log_levels = logger_settings except ImportError: log_levels = [] ini_log_level = "info" log_filter = LogLevelFilterPredicate() logname_search = copy(logname).lower() try: while len(logname_search) > 0 and len(log_levels): try: ini_log_level = log_levels[logname_search] break except KeyError: pass # This crazy line removes the last element in the string. logname_search = ".".join(logname_search.rsplit(".")[:-1]) except InvalidLogLevelError: # Yell at the user if they specified an invalid log level loggers[logname].warn( "yombo.toml file contained invalid log level {invalidLevel}, " "level has been set to INFO instead.", invalidLevel=log_levels[logname].lower()) if isinstance(ini_log_level, str) is False: ini_log_level = "info" log_filter.setLogLevelForNamespace(logname, LogLevel.levelWithName(ini_log_level)) # Set up logging consoleFilterObserver = FilteringLogObserver(consoleLogObserver, (log_filter, )) logger = Logger(namespace=logname, source=source, observer=consoleFilterObserver) loggers[logname] = logger return loggers[logname]
class _LogManager(object):
    """
    Twisted logger manager: tracks filtering predicates to support dynamic
    per-namespace log level changes at runtime and supports adding/removing
    observers at runtime.
    """

    def __init__(self):
        # Shared LogLevelFilterPredicate; created by setup().
        self._predicate = None
        # Track dynamically added/removed observers:
        # - keys: observers
        # - values: FilteringLogObserver instances
        self._observers = {}

    def setup(self, level='warn', namespace_levels=None, text_file=sys.stderr,
              time_format='%H:%M:%S.%f', handle_stdlib=True,
              stdlib_level='notset', stdlib_prefix='stdlib.'):
        """
        Initiates the twisted.logger system:
        - level: default log level as a string (ie: 'warn', 'info', ....).
        - namespace_levels: a dict of namespaces/log level names.
        - text_file: where to write the log to.
        - time_format: as supported by datetime.strftime.
        - handle_stdlib: True/False.
        - stdlib_level: level name, above which stdlib logging is handled.
        - stdlib_prefix: added to stdlib logger name, used as namespace.
        """
        file_observer = textFileLogObserver(text_file, timeFormat=time_format)
        self._predicate = LogLevelFilterPredicate(
            defaultLogLevel=LogLevel.levelWithName(level),
        )
        if namespace_levels:
            # FIX: use a dedicated loop variable; the original re-bound the
            # `level` parameter inside this loop, shadowing the argument.
            for namespace, level_name in namespace_levels.items():
                namespace_level = LogLevel.levelWithName(level_name)
                self._predicate.setLogLevelForNamespace(namespace,
                                                        namespace_level)
        globalLogBeginner.beginLoggingTo(
            [self._filtered_observer(file_observer)])
        if handle_stdlib:
            self._handle_stdlib(stdlib_level, stdlib_prefix)

    def _filtered_observer(self, observer):
        # Wraps `observer` in a t.l.FilteringLogObserver using the
        # shared t.l.LogLevelFilterPredicate in `self._predicate`
        return FilteringLogObserver(observer, [self._predicate])

    @staticmethod
    def _handle_stdlib(level_name, prefix):
        """
        Directs standard library logging records to twisted.logger.

        Standard library log records will be handled at `level_name` or above
        and logged to a namespace prefixed by `prefix`.
        """
        stdlib_root_logger = logging.getLogger()
        try:
            level = getattr(logging, level_name.upper())
        except AttributeError:
            # Surface an unknown stdlib level name as a ValueError.
            raise ValueError(level_name)
        stdlib_root_logger.setLevel(level)
        handler = _TwistedLoggerHandler(prefix)
        stdlib_root_logger.addHandler(handler)

    def set_level(self, namespace=None, level_name=None):
        """
        Change the logging level of namespace to level.
        If namespace is None, sets all namespaces to level_name.
        If level_name is None, uses the default log level.
        """
        if level_name:
            level = LogLevel.levelWithName(level_name)
        else:
            level = self._predicate.defaultLogLevel
        if namespace:
            self._predicate.setLogLevelForNamespace(namespace, level)
        else:
            self._predicate.defaultLogLevel = level
            self._predicate.clearLogLevels()

    def add_observer(self, observer):
        """
        Wraps `observer` in a FilteringLogObserver and adds it to the global
        log publisher (filtering agrees with `setup` and `set_level` calls).
        """
        filtered_observer = self._filtered_observer(observer)
        globalLogPublisher.addObserver(filtered_observer)
        self._observers[observer] = filtered_observer

    def remove_observer(self, observer):
        """
        Locates the FilteringLogObserver wrapping `observer` and removes it
        from the global log publisher.
        """
        # BUG FIX: also drop the tracking entry; the original left it in
        # self._observers forever, leaking removed observers (pop still raises
        # KeyError for unknown observers, matching the old behaviour).
        filtered_observer = self._observers.pop(observer)
        globalLogPublisher.removeObserver(filtered_observer)
def get_logger(logname="yombolog", **kwargs):
    """
    Returns a logger object that allows logging of error messages.

    **Usage**:

    .. code-block:: python

       from yombo.core.log import get_logger

       logger = get_logger("module.ModuleName")
       logger.debug("Some status line, debug level items.")
       logger.info("ModuleName has finished starting is ready.")
       logger.warn("A warning!!")
       logger.error("Something really bad happened! I should quit.")

    :param logname: Name of the module or library.
    :type logname: string
    :return: logger object
    """
    global loggers
    global observers
    global log_levels
    global open_files

    # A simple cache of existing loggers...
    if logname in loggers:
        return loggers[logname]

    source = kwargs.get("source", logname)

    # Determine the logging level: load the [logging] config on first use.
    if len(loggers) == 0:
        import yombo.core.settings as settings
        if settings.yombo_ini is not False and "logging" in settings.yombo_ini:
            log_levels = settings.yombo_ini["logging"]

    log_filter = LogLevelFilterPredicate()
    log_name_search = copy(logname)
    invalid_log_level = False
    invalid_level_name = "info"
    try:
        ini_log_level = "info"
        # Walk the dotted namespace from most to least specific until a
        # configured level is found.
        while len(log_name_search) > 0:
            if log_name_search in log_levels:
                ini_log_level = log_levels[log_name_search].lower()
                break
            # This crazy line removes the last element in the string.
            log_name_search = ".".join(log_name_search.rsplit(".")[:-1])
        log_filter.setLogLevelForNamespace(
            logname, LogLevel.levelWithName(ini_log_level))
    except InvalidLogLevelError:
        log_filter.setLogLevelForNamespace(logname, LogLevel.info)
        invalid_log_level = True
        invalid_level_name = ini_log_level

    # Set up logging
    consoleFilterObserver = FilteringLogObserver(consoleLogObserver, (log_filter, ))
    logger = Logger(namespace=logname, source=source, observer=consoleFilterObserver)
    loggers[logname] = logger

    # BUG FIX: warn through the newly created logger; the original accessed
    # loggers[logname] before it was assigned (KeyError) and indexed
    # log_levels[logname], which may not contain the exact logname either.
    if invalid_log_level:
        logger.warn(
            "yombo.ini file contained invalid log level {invalidLevel}, level has been set to INFO instead.",
            invalidLevel=invalid_level_name)

    # NOTE(review): the original carried a large commented-out experiment here
    # (json/text file observers plus a log-rotation LoopingCall) that "doesn't
    # appear to be working yet"; removed as dead code.
    return loggers[logname]
def startLogging(settings, stream=None, level=LogLevel.debug): global predicate fileObserver = logObserver(stream) predicate = LogLevelFilterPredicate(defaultLogLevel=level) if settings.options.debug_mqtt: predicate.setLogLevelForNamespace('kotori.daq.services.mig', LogLevel.debug) predicate.setLogLevelForNamespace('kotori.daq.application.mqttkit', LogLevel.debug) if settings.options.debug_mqtt_driver: predicate.setLogLevelForNamespace('kotori.daq.intercom.mqtt', LogLevel.debug) predicate.setLogLevelForNamespace('mqtt', LogLevel.debug) predicate.setLogLevelForNamespace('paho.mqtt', LogLevel.debug) else: predicate.setLogLevelForNamespace('kotori.daq.intercom.mqtt', LogLevel.info) predicate.setLogLevelForNamespace('mqtt', LogLevel.info) predicate.setLogLevelForNamespace('paho.mqtt', LogLevel.info) if settings.options.debug_influx: predicate.setLogLevelForNamespace('kotori.daq.storage.influx', LogLevel.debug) if settings.options.debug_io: predicate.setLogLevelForNamespace('kotori.io', LogLevel.debug) if globalLogBeginner._temporaryObserver is not None: observers = [ FilteringLogObserver(observer=fileObserver, predicates=[predicate]) ] globalLogBeginner.beginLoggingTo(observers)
def get_logger(logname='yombolog', **kwargs):
    """
    Returns a logger object that allows logging of error messages.

    **Usage**:

    .. code-block:: python

       from yombo.core.log import get_logger
       logger = get_logger("module.ModuleName")
       logger.debug("Some status line, debug level items.")
       logger.info("ModuleName has finished starting is ready.")
       logger.warn("A warning!!")
       logger.error("Something really bad happened! I should quit.")

    :param logname: Name of the module or library.
    :type logname: string
    :return: logger object
    """
    global loggers
    global observers
    global configCache
    global open_files

    # A simple cache of existing loggers...
    if logname in loggers:
        return loggers[logname]

    source = kwargs.get('source', logname)
    # NOTE: the original also bound unused locals `loglevel = None` and
    # `json = kwargs.get('source', False)` (the latter reading the wrong
    # key); both were dead and have been removed.

    # On first use, read the per-namespace log levels from the [logging]
    # section of yombo.ini into the shared configCache.
    if len(loggers) == 0:
        # ConfigParser/read_file replace the deprecated SafeConfigParser/
        # readfp (removed in Python 3.12); `with` guarantees the file is
        # closed even when options()/get() raise.
        config_parser = configparser.ConfigParser()
        try:
            with open('yombo.ini') as fp:
                config_parser.read_file(fp)
            for option in config_parser.options('logging'):
                configCache[option] = config_parser.get('logging', option)
        except IOError:
            pass
        except configparser.NoSectionError:
            pass

    logFilter = LogLevelFilterPredicate()
    invalidLogLevel = False
    try:
        if logname in configCache:
            iniLogLevel = configCache[logname].lower()
            logFilter.setLogLevelForNamespace(
                logname, LogLevel.levelWithName(iniLogLevel))
    except InvalidLogLevelError:
        # Bad level string in yombo.ini - fall back to INFO; warn below.
        logFilter.setLogLevelForNamespace(logname, LogLevel.info)
        invalidLogLevel = True

    # Set up logging
    consoleFilterObserver = FilteringLogObserver(consoleLogObserver,
                                                 (logFilter, ))
    logger = Logger(namespace=logname, source=source,
                    observer=consoleFilterObserver)
    loggers[logname] = logger

    if invalidLogLevel:
        # Yell at the user if they specified an invalid log level. This
        # must happen *after* the logger is stored: the original looked up
        # loggers[logname] before the assignment, raising KeyError.
        logger.warn(
            "yombo.ini file contained invalid log level {invalidLevel}, "
            "level has been set to INFO instead.",
            invalidLevel=configCache[logname].lower())

    return loggers[logname]
type=str, default="desertbot.log") parser.add_argument("-l", "--loglevel", help="The logging level the bot will use", type=str, default="INFO") options = parser.parse_args() # Start the bot desertbot = DesertBot(options.config) # Determine the logging level logFilter = LogLevelFilterPredicate() try: logFilter.setLogLevelForNamespace( "desertbot", LogLevel.levelWithName(options.loglevel.lower())) invalidLogLevel = False except InvalidLogLevelError: logFilter.setLogLevelForNamespace("desertbot", LogLevel.info) invalidLogLevel = True # Set up logging logFile = DailyLogFile("desertbot.log", "") fileObserver = FileLogObserver(logFile, logFormat) fileFilterObserver = FilteringLogObserver(fileObserver, (logFilter, )) consoleFilterObserver = FilteringLogObserver(consoleLogObserver, (logFilter, )) desertbot.log = Logger("desertbot") globalLogPublisher.addObserver(fileFilterObserver) globalLogPublisher.addObserver(consoleFilterObserver)