def _load_hooks(self):
    """Instantiate and initialise the hook classes listed in config["hooks"].

    Each entry must supply "module", a dotted path with at least 4 components;
    the final component is the class name, the rest the module path.  Entries
    that fail validation, resolution or init() are skipped with a message.
    Populates self._hooks with the successfully initialised hook classes.
    """
    hook_packages = self.get_config_option('hooks')
    itemNo = -1
    self._hooks = []
    if isinstance(hook_packages, list):
        for hook_config in hook_packages:
            # BUGFIX: itemNo was never incremented, so every diagnostic
            # reported config.hooks[-1] regardless of the failing entry
            itemNo += 1
            try:
                module = hook_config["module"].split(".")
                if len(module) < 4:
                    print(_("config.hooks[{}].module should have at least 4 packages {}").format(itemNo, module))
                    continue
                module_name = ".".join(module[0:-1])
                class_name = ".".join(module[-1:])
                if not module_name or not class_name:
                    print(_("config.hooks[{}].module must be a valid package name").format(itemNo))
                    continue
            except KeyError as e:
                print(_("config.hooks[{}] missing keyword").format(itemNo), e)
                continue
            try:
                # BUGFIX: guard class resolution so one unresolvable hook
                # entry cannot abort loading of the remaining hooks
                theClass = class_from_name(module_name, class_name)
            except (AttributeError, ImportError):
                print(_("config.hooks[{}] not found: {} {}").format(itemNo, module_name, class_name))
                continue
            theClass._bot = self
            if "config" in hook_config:
                # allow separate configuration file to be loaded
                theClass._config = hook_config["config"]
            if theClass.init():
                print(_("_load_hooks(): {}").format(module))
                self._hooks.append(theClass)
            else:
                print(_("_load_hooks(): hook failed to initialise"))
    message = _("_load_hooks(): {} hook(s) loaded").format(len(self._hooks))
    logging.info(message)
def _load_hooks(self):
    """Instantiate and initialise the hook classes listed in config["hooks"].

    Each entry must supply "module", a dotted path with at least 4 components;
    the final component is the class name, the rest the module path.  Entries
    that fail validation, resolution or init() are skipped with a message.
    Populates self._hooks with the successfully initialised hook classes.
    """
    hook_packages = self.get_config_option('hooks')
    itemNo = -1
    self._hooks = []
    if isinstance(hook_packages, list):
        for hook_config in hook_packages:
            # BUGFIX: itemNo was never incremented, so every diagnostic
            # reported config.hooks[-1] regardless of the failing entry
            itemNo += 1
            try:
                module = hook_config["module"].split(".")
                if len(module) < 4:
                    print("config.hooks[{}].module should have at least 4 packages {}".format(itemNo, module))
                    continue
                module_name = ".".join(module[0:-1])
                class_name = ".".join(module[-1:])
                if not module_name or not class_name:
                    print("config.hooks[{}].module must be a valid package name".format(itemNo))
                    continue
            except KeyError as e:
                print("config.hooks[{}] missing keyword".format(itemNo), e)
                continue
            try:
                # BUGFIX: guard class resolution so one unresolvable hook
                # entry cannot abort loading of the remaining hooks
                theClass = class_from_name(module_name, class_name)
            except (AttributeError, ImportError):
                print("config.hooks[{}] not found: {} {}".format(itemNo, module_name, class_name))
                continue
            theClass._bot = self
            if "config" in hook_config:
                # allow separate configuration file to be loaded
                theClass._config = hook_config["config"]
            if theClass.init():
                print("_load_hooks(): {}".format(module))
                self._hooks.append(theClass)
            else:
                print("_load_hooks(): hook failed to initialise")
    message = "_load_hooks(): {} hook(s) loaded".format(len(self._hooks))
    logging.info(message)
def start(bot):
    """Start every JSON-RPC sink configured under config["jsonrpc"].

    Each entry needs "module" (dotted path, >= 3 components, last component is
    the handler class), "certfile", "name" and "port".  Valid sinks are run
    on their own listener thread via threadmanager; bad entries are logged
    and skipped.
    """
    shared_loop = asyncio.get_event_loop()

    jsonrpc_sinks = bot.get_config_option('jsonrpc')
    itemNo = -1
    threadcount = 0

    if isinstance(jsonrpc_sinks, list):
        for sinkConfig in jsonrpc_sinks:
            itemNo += 1
            try:
                module = sinkConfig["module"].split(".")
                if len(module) < 3:
                    logger.error("config.jsonrpc[{}].module should have at least 3 packages {}".format(itemNo, module))
                    continue
                module_name = ".".join(module[0:-1])
                class_name = ".".join(module[-1:])
                if not module_name or not class_name:
                    logger.error("config.jsonrpc[{}].module must be a valid package name".format(itemNo))
                    continue
                certfile = sinkConfig["certfile"]
                if not certfile:
                    logger.error("config.jsonrpc[{}].certfile must be configured".format(itemNo))
                    continue
                name = sinkConfig["name"]
                port = sinkConfig["port"]
            except KeyError as e:
                # BUGFIX: `e` was passed as an extra positional argument to
                # logger.error() with no %s placeholder in the message, which
                # raises an internal logging formatting error instead of
                # recording the missing key — use lazy %-formatting instead
                logger.error("config.jsonrpc[%s] missing keyword: %s", itemNo, e)
                continue

            try:
                handler_class = class_from_name(module_name, class_name)
            except (AttributeError, ImportError):
                logger.error("not found: {} {}".format(module_name, class_name))
                continue

            # start up rpc listener in a separate thread
            logger.debug("starting sink: {}".format(module))
            threadmanager.start_thread(start_listening, args=(
                bot,
                shared_loop,
                name,
                port,
                certfile,
                handler_class,
                module_name))
            threadcount = threadcount + 1

    if threadcount:
        logger.info("{} sink(s) from config.jsonrpc".format(threadcount))
def start(bot, shared_loop):
    """Start every JSON-RPC sink configured under config["jsonrpc"].

    Each entry needs "module" (dotted path, >= 3 components, last component is
    the handler class), "certfile", "name" and "port".  Every valid sink runs
    start_listening on its own daemon thread against shared_loop; bad entries
    are reported and skipped.
    """
    jsonrpc_sinks = bot.get_config_option('jsonrpc')
    itemNo = -1
    threads = []
    if isinstance(jsonrpc_sinks, list):
        for sinkConfig in jsonrpc_sinks:
            itemNo += 1
            try:
                module = sinkConfig["module"].split(".")
                if len(module) < 3:
                    print("config.jsonrpc[{}].module should have at least 3 packages {}".format(itemNo, module))
                    continue
                module_name = ".".join(module[0:-1])
                class_name = ".".join(module[-1:])
                if not module_name or not class_name:
                    print(_("config.jsonrpc[{}].module must be a valid package name").format(itemNo))
                    continue
                certfile = sinkConfig["certfile"]
                if not certfile:
                    print(_("config.jsonrpc[{}].certfile must be configured").format(itemNo))
                    continue
                name = sinkConfig["name"]
                port = sinkConfig["port"]
            except KeyError as e:
                print(_("config.jsonrpc[{}] missing keyword").format(itemNo), e)
                continue
            try:
                handler_class = class_from_name(module_name, class_name)
            except (AttributeError, ImportError) as e:
                # BUGFIX: class_from_name imports the module, so ImportError
                # must be handled too — previously a misconfigured module path
                # aborted the whole sink startup instead of skipping the entry
                logging.exception(e)
                print("could not identify sink: {} {}".format(module_name, class_name))
                continue
            # start up rpc listener in a separate thread
            print(_("_start_sinks(): {}").format(module))
            t = Thread(target=start_listening, args=(
                bot,
                shared_loop,
                name,
                port,
                certfile,
                handler_class,
                module_name))
            t.daemon = True
            t.start()
            threads.append(t)
    message = _("_start_sinks(): {} sink thread(s) started").format(len(threads))
    logging.info(message)
def _start_sinks(self, shared_loop):
    """Start every JSON-RPC sink configured under config["jsonrpc"].

    Each entry needs "module" (dotted path, >= 4 components, last component is
    the handler class), "certfile", "name" and "port".  Every valid sink runs
    start_listening on its own daemon thread against shared_loop; bad entries
    are reported and skipped.
    """
    jsonrpc_sinks = self.get_config_option('jsonrpc')
    itemNo = -1
    threads = []
    if isinstance(jsonrpc_sinks, list):
        for sinkConfig in jsonrpc_sinks:
            itemNo += 1
            try:
                module = sinkConfig["module"].split(".")
                if len(module) < 4:
                    print(
                        "config.jsonrpc[{}].module should have at least 4 packages {}"
                        .format(itemNo, module))
                    continue
                module_name = ".".join(module[0:-1])
                class_name = ".".join(module[-1:])
                if not module_name or not class_name:
                    print(
                        "config.jsonrpc[{}].module must be a valid package name"
                        .format(itemNo))
                    continue
                certfile = sinkConfig["certfile"]
                if not certfile:
                    print("config.jsonrpc[{}].certfile must be configured".
                          format(itemNo))
                    continue
                name = sinkConfig["name"]
                port = sinkConfig["port"]
            except KeyError as e:
                print("config.jsonrpc[{}] missing keyword".format(itemNo), e)
                continue
            try:
                # BUGFIX: resolve the handler class up-front and guarded —
                # previously class_from_name() was called unprotected inside
                # the Thread(...) argument list, so one unresolvable entry
                # crashed the entire startup loop instead of being skipped
                handler_class = class_from_name(module_name, class_name)
            except (AttributeError, ImportError):
                print("could not identify sink: {} {}".format(module_name, class_name))
                continue
            # start up rpc listener in a separate thread
            print("thread starting: {}".format(module))
            t = Thread(target=start_listening,
                       args=(self, shared_loop, name, port, certfile,
                             handler_class))
            t.daemon = True
            t.start()
            threads.append(t)
    message = "{} sink thread(s) started".format(len(threads))
    logging.info(message)
    print(message)
def load(bot):
    """[DEPRECATED] Instantiate and initialise hook classes from config["hooks"].

    Each entry must supply "module", a dotted path with at least 4 components;
    the final component is the class name, the rest the module path.  Entries
    that fail validation, resolution or init() are skipped with a log message.
    Populates bot._hooks with the successfully initialised hook classes.
    """
    hook_packages = bot.get_config_option('hooks')
    itemNo = -1
    bot._hooks = []
    if isinstance(hook_packages, list):
        for hook_config in hook_packages:
            # BUGFIX: itemNo was never incremented, so every diagnostic
            # reported config.hooks[-1] regardless of the failing entry
            itemNo += 1
            try:
                module = hook_config["module"].split(".")
                if len(module) < 4:
                    logger.error("[DEPRECATED] config.hooks[{}].module should have at least 4 packages {}".format(
                        itemNo, module))
                    continue
                module_name = ".".join(module[0:-1])
                class_name = ".".join(module[-1:])
                if not module_name or not class_name:
                    logger.error(
                        "[DEPRECATED] config.hooks[{}].module must be a valid package name".format(itemNo))
                    continue
            except KeyError as e:
                # BUGFIX: `e` was passed as an extra positional argument to
                # logger.error() with no %s placeholder in the message, which
                # raises an internal logging formatting error instead of
                # recording the missing key — use lazy %-formatting instead
                logger.error(
                    "[DEPRECATED] config.hooks[%s] missing keyword: %s", itemNo, e)
                continue
            try:
                theClass = class_from_name(module_name, class_name)
            except (AttributeError, ImportError):
                logger.error("[DEPRECATED] not found: {} {}".format(
                    module_name, class_name))
                continue
            theClass._bot = bot
            if "config" in hook_config:
                # allow separate configuration file to be loaded
                theClass._config = hook_config["config"]
            if theClass.init():
                logger.warning("[DEPRECATED] adding hooks: {}".format(module))
                bot._hooks.append(theClass)
    if bot._hooks:
        logger.warning(
            "[DEPRECATED] {} hook(s) from hooks".format(len(bot._hooks)))
def configure_logging(args):
    """Configure Logging

    If the user specified a logging config file, open it, and
    fail if unable to open. If not, attempt to open the default
    logging config file. If that fails, move on to basic
    log configuration.
    """
    log_level = 'DEBUG' if args.debug else 'INFO'

    # assemble the fallback dictConfig piecewise, then combine below
    formatters = {
        'console': {
            'format': '%(asctime)s %(levelname)s %(name)s: %(message)s',
            'datefmt': '%H:%M:%S'
        },
        'default': {
            'format': '%(asctime)s %(levelname)s %(name)s: %(message)s',
            'datefmt': '%Y-%m-%d %H:%M:%S'
        }
    }
    handlers = {
        'console': {
            'class': 'logging.StreamHandler',
            'stream': 'ext://sys.stdout',
            'level': 'INFO',
            'formatter': 'console'
        },
        'file': {
            'class': 'logging.FileHandler',
            'filename': args.log,
            'level': log_level,
            'formatter': 'default',
        }
    }
    loggers = {
        # root logger
        '': {'handlers': ['file', 'console'], 'level': log_level},
        # requests is freakishly noisy
        'requests': {'level': 'INFO'},
        # XXX: suppress erroneous WARNINGs until resolution of
        # https://github.com/tdryer/hangups/issues/142
        'hangups': {'level': 'ERROR'},
        # asyncio's debugging logs are VERY noisy, so adjust the log level
        'asyncio': {'level': 'WARNING'},
        # hangups log is verbose too, suppress so we can debug the bot
        'hangups.conversation': {'level': 'ERROR'}
    }
    logging_config = {
        'version': 1,
        'disable_existing_loggers': False,
        'formatters': formatters,
        'handlers': handlers,
        'loggers': loggers
    }

    # Temporarily bring in the configuration file, just so we can configure
    # logging before bringing anything else up. There is no race internally,
    # if logging() is called before configured, it outputs to stderr, and
    # we will configure it soon enough
    bootcfg = config.Config(args.config)
    if bootcfg.exists(["logging.system"]):
        logging_config = bootcfg["logging.system"]

    if "extras.setattr" in logging_config:
        # optional "extras.setattr" maps "<module>.<class>.<attribute>" keys
        # onto values to be patched in before logging is configured
        for class_attr, value in logging_config["extras.setattr"].items():
            try:
                modulepath, classname, attribute = class_attr.rsplit(
                    ".", maxsplit=2)
                try:
                    target = class_from_name(modulepath, classname)
                    setattr(target, attribute, value)
                except ImportError:
                    logging.error("module {} not found".format(modulepath))
                except AttributeError:
                    logging.error("{} in {} not found".format(
                        classname, modulepath))
            except ValueError:
                logging.error("format should be <module>.<class>.<attribute>")

    logging.config.dictConfig(logging_config)

    logger = logging.getLogger()
    if args.debug:
        logger.setLevel(logging.DEBUG)
def configure_logging(args):
    """Configure Logging

    If the user specified a logging config file, open it, and
    fail if unable to open. If not, attempt to open the default
    logging config file. If that fails, move on to basic
    log configuration.
    """
    log_level = 'DEBUG' if args.debug else 'INFO'

    # both formatters share the same layout; only the date format differs
    message_format = '%(asctime)s %(levelname)s %(name)s: %(message)s'

    default_config = {
        'version': 1,
        'disable_existing_loggers': False,
        'formatters': {
            'console': {
                'format': message_format,
                'datefmt': '%H:%M:%S'
            },
            'default': {
                'format': message_format,
                'datefmt': '%Y-%m-%d %H:%M:%S'
            }
        },
        'handlers': {
            'console': {
                'class': 'logging.StreamHandler',
                'stream': 'ext://sys.stdout',
                'level': 'INFO',
                'formatter': 'console'
            },
            'file': {
                'class': 'logging.FileHandler',
                'filename': args.log,
                'level': log_level,
                'formatter': 'default',
            }
        },
        'loggers': {
            # root logger
            '': {
                'handlers': ['file', 'console'],
                'level': log_level
            },
            # requests is freakishly noisy
            'requests': {'level': 'INFO'},
            # XXX: suppress erroneous WARNINGs until resolution of
            # https://github.com/tdryer/hangups/issues/142
            'hangups': {'level': 'ERROR'},
            # asyncio's debugging logs are VERY noisy, so adjust the log level
            'asyncio': {'level': 'WARNING'},
            # hangups log is verbose too, suppress so we can debug the bot
            'hangups.conversation': {'level': 'ERROR'}
        }
    }

    logging_config = default_config

    # Temporarily bring in the configuration file, just so we can configure
    # logging before bringing anything else up. There is no race internally,
    # if logging() is called before configured, it outputs to stderr, and
    # we will configure it soon enough
    boot_config = config.Config(args.config)
    if boot_config.exists(["logging.system"]):
        logging_config = boot_config["logging.system"]

    if "extras.setattr" in logging_config:
        # optional "extras.setattr" maps "<module>.<class>.<attribute>" keys
        # onto values to be patched in before logging is configured
        for class_attr, value in logging_config["extras.setattr"].items():
            try:
                modulepath, classname, attribute = class_attr.rsplit(
                    ".", maxsplit=2)
                try:
                    target = class_from_name(modulepath, classname)
                    setattr(target, attribute, value)
                except ImportError:
                    logging.error("module {} not found".format(modulepath))
                except AttributeError:
                    logging.error("{} in {} not found".format(
                        classname, modulepath))
            except ValueError:
                logging.error("format should be <module>.<class>.<attribute>")

    logging.config.dictConfig(logging_config)

    if args.debug:
        logging.getLogger().setLevel(logging.DEBUG)
def start(bot):
    """Start every JSON-RPC sink configured under config["jsonrpc"].

    Each entry needs "module" (dotted path, >= 3 components, last component is
    the handler class), "name" and "port"; "certfile" is optional but must
    point at an existing file when given.  Handlers derived from
    AsyncRequestHandler are served via aiohttp; everything else gets its own
    listener thread via threadmanager.  Bad entries are logged and skipped.
    """
    shared_loop = asyncio.get_event_loop()

    jsonrpc_sinks = bot.get_config_option('jsonrpc')
    itemNo = -1
    threadcount = 0
    aiohttpcount = 0

    if isinstance(jsonrpc_sinks, list):
        for sinkConfig in jsonrpc_sinks:
            itemNo += 1
            try:
                module = sinkConfig["module"].split(".")
                if len(module) < 3:
                    logger.error(
                        "config.jsonrpc[{}].module should have at least 3 packages {}"
                        .format(itemNo, module))
                    continue
                module_name = ".".join(module[0:-1])
                class_name = ".".join(module[-1:])
                if not module_name or not class_name:
                    logger.error(
                        "config.jsonrpc[{}].module must be a valid package name"
                        .format(itemNo))
                    continue
                certfile = sinkConfig.get("certfile")
                if certfile and not os.path.isfile(certfile):
                    logger.error(
                        "config.jsonrpc[{}].certfile not available at {}".
                        format(itemNo, certfile))
                    continue
                name = sinkConfig["name"]
                port = sinkConfig["port"]
            except KeyError as e:
                # BUGFIX: `e` was passed as an extra positional argument to
                # logger.error() with no %s placeholder in the message, which
                # raises an internal logging formatting error instead of
                # recording the missing key — use lazy %-formatting instead
                logger.error("config.jsonrpc[%s] missing keyword: %s", itemNo, e)
                continue

            try:
                handler_class = class_from_name(module_name, class_name)
            except (AttributeError, ImportError):
                logger.error("not found: {} {}".format(module_name, class_name))
                continue

            # start up rpc listener in a separate thread
            logger.debug("starting sink: {}".format(module))

            if issubclass(handler_class, AsyncRequestHandler):
                aiohttp_start(bot, name, port, certfile, handler_class, "json-rpc")
                aiohttpcount = aiohttpcount + 1
            else:
                threadmanager.start_thread(start_listening,
                                           args=(bot, shared_loop, name, port,
                                                 certfile, handler_class,
                                                 module_name))
                threadcount = threadcount + 1

    if threadcount:
        logger.info("{} threaded listener(s)".format(threadcount))
    if aiohttpcount:
        logger.info("{} aiohttp web listener(s)".format(aiohttpcount))
def start(bot):
    """Start every JSON-RPC sink configured under config["jsonrpc"].

    Each entry needs "module" (dotted path, >= 3 components, last component is
    the handler class), "name" and "port"; "certfile" is optional but must
    point at an existing file when given.  Handlers derived from
    AsyncRequestHandler are served via aiohttp; everything else gets its own
    listener thread via threadmanager.  Bad entries are logged and skipped.
    """
    shared_loop = asyncio.get_event_loop()
    jsonrpc_sinks = bot.get_config_option("jsonrpc")
    itemNo = -1
    threadcount = 0
    aiohttpcount = 0
    if isinstance(jsonrpc_sinks, list):
        for sinkConfig in jsonrpc_sinks:
            itemNo += 1
            try:
                module = sinkConfig["module"].split(".")
                if len(module) < 3:
                    logger.error("config.jsonrpc[{}].module should have at least 3 packages {}".format(itemNo, module))
                    continue
                module_name = ".".join(module[0:-1])
                class_name = ".".join(module[-1:])
                if not module_name or not class_name:
                    logger.error("config.jsonrpc[{}].module must be a valid package name".format(itemNo))
                    continue
                certfile = sinkConfig.get("certfile")
                if certfile and not os.path.isfile(certfile):
                    logger.error("config.jsonrpc[{}].certfile not available at {}".format(itemNo, certfile))
                    continue
                name = sinkConfig["name"]
                port = sinkConfig["port"]
            except KeyError as e:
                # BUGFIX: `e` was passed as an extra positional argument to
                # logger.error() with no %s placeholder in the message, which
                # raises an internal logging formatting error instead of
                # recording the missing key — use lazy %-formatting instead
                logger.error("config.jsonrpc[%s] missing keyword: %s", itemNo, e)
                continue
            try:
                handler_class = class_from_name(module_name, class_name)
            except (AttributeError, ImportError):
                logger.error("not found: {} {}".format(module_name, class_name))
                continue
            # start up rpc listener in a separate thread
            logger.debug("starting sink: {}".format(module))
            if issubclass(handler_class, AsyncRequestHandler):
                aiohttp_start(bot, name, port, certfile, handler_class, "json-rpc")
                aiohttpcount = aiohttpcount + 1
            else:
                threadmanager.start_thread(
                    start_listening,
                    args=(bot, shared_loop, name, port, certfile,
                          handler_class, module_name)
                )
                threadcount = threadcount + 1
    if threadcount:
        logger.info("{} threaded listener(s)".format(threadcount))
    if aiohttpcount:
        logger.info("{} aiohttp web listener(s)".format(aiohttpcount))
def configure_logging(args):
    """Configure Logging

    If the user specified a logging config file, open it, and
    fail if unable to open. If not, attempt to open the default
    logging config file. If that fails, move on to basic
    log configuration.
    """
    log_level = "DEBUG" if args.debug else "INFO"

    # default handler set: console, full debug file, warnings-only file
    handlers = {
        "console": {
            "class": "logging.StreamHandler",
            "stream": "ext://sys.stdout",
            "level": "DEBUG" if args.debug else "WARNING",
            "formatter": "default"
        },
        "file": {
            "class": "logging.FileHandler",
            "filename": args.log,
            "level": "DEBUG",
            "formatter": "default",
        },
        "file_warnings": {
            "class": "logging.FileHandler",
            "filename": args.log.rsplit(".", 1)[0] + "_warnings.log",
            "level": "WARNING",
            "formatter": "default",
        }
    }
    loggers = {
        # root logger
        "": {
            "handlers": ["file", "console", "file_warnings"],
            "level": log_level
        },
        # requests is freakishly noisy
        "requests": {"level": "INFO"},
        "hangups": {"level": "WARNING"},
        # ignore the addition of fallback users
        "hangups.user": {"level": "ERROR"},
        # do not log disconnects twice, we already attach a logger to
        # ._client.on_disconnect
        "hangups.channel": {"level": "ERROR"},
        # asyncio's debugging logs are VERY noisy, so adjust the log level
        "asyncio": {"level": "WARNING"},
    }
    logging_config = {
        "version": 1,
        "disable_existing_loggers": False,
        "formatters": {
            "default": {
                "format": "%(asctime)s %(levelname)s %(name)s: %(message)s",
            }
        },
        "handlers": handlers,
        "loggers": loggers
    }

    # Temporarily bring in the configuration file, just so we can configure
    # logging before bringing anything else up. There is no race internally,
    # if logging() is called before configured, it outputs to stderr, and
    # we will configure it soon enough
    bootcfg = config.Config(args.config)
    bootcfg.load()
    if bootcfg.exists(["logging.system"]):
        logging_config = bootcfg["logging.system"]

    if "extras.setattr" in logging_config:
        # optional "extras.setattr" maps "<module>.<class>.<attribute>" keys
        # onto values to be patched in before logging is configured
        for class_attr, value in logging_config["extras.setattr"].items():
            try:
                modulepath, classname, attribute = class_attr.rsplit(".", 2)
                try:
                    target = utils.class_from_name(modulepath, classname)
                    setattr(target, attribute, value)
                except ImportError:
                    logging.error("module %s not found", modulepath)
                except AttributeError:
                    logging.error("%s in %s not found", classname, modulepath)
            except ValueError:
                logging.error("format should be <module>.<class>.<attribute>")

    logging.config.dictConfig(logging_config)

    if args.debug:
        logging.getLogger().setLevel(logging.DEBUG)