def init(outFile):
    """Install a level-filtered text log observer on the global publisher.

    The minimum level is taken from ``config.LOG_LEVEL`` via the module's
    ``levels`` mapping; events at or above it are written to *outFile*.
    """
    min_level = levels[config.LOG_LEVEL]
    level_filter = LogLevelFilterPredicate(defaultLogLevel=min_level)
    filtering = FilteringLogObserver(
        textFileLogObserver(outFile=outFile), [level_filter])
    # NOTE(review): pokes a private attribute of the observer, presumably to
    # force UTF-8 output — confirm the installed Twisted version honours it.
    filtering._encoding = "utf-8"
    globalLogPublisher.addObserver(filtering)
    log.info("Start logging with {l}", l=min_level)
def init_logging(context):
    """Begin Twisted logging to both stdout and a per-session log file,
    then print the ASCII-art client banner."""
    session_files = context["session_files"]

    # Per-session log file lives inside the session directory.
    log_filename = session_files.session_dir / "singt.log"
    logfile = open(log_filename, 'w')

    # One filtering observer per destination; "debug" is the lowest level,
    # so these predicates pass every event through.
    logtargets = [
        FilteringLogObserver(
            textFileLogObserver(stream),
            predicates=[LogLevelFilterPredicate(LogLevel.debug)],
        )
        for stream in (sys.stdout, logfile)
    ]

    # Direct the Twisted Logger to log to both of our observers.
    globalLogBeginner.beginLoggingTo(logtargets)

    # ASCII-art title
    title = art.text2art("Singt Client")
    log.info("\n" + title)
def __init__(self, observer, predicates, negativeObserver=lambda event: None):
    """Record the constructor arguments on the class (so tests can inspect
    what the most recent instance received), then delegate to the real
    FilteringLogObserver initialiser."""
    cls = MockFilteringLogObserver
    cls.observer = observer
    cls.predicates = predicates
    FilteringLogObserver.__init__(self, observer, predicates, negativeObserver)
def __init__(self, observer, predicates, negativeObserver=lambda event: None):
    # Stash the arguments on the *class* so tests can examine what the
    # last-constructed instance was given.
    MockFilteringLogObserver.observer = observer
    MockFilteringLogObserver.predicates = predicates
    # Initialise the underlying FilteringLogObserver normally.
    FilteringLogObserver.__init__(
        self, observer, predicates, negativeObserver)
def __init__(
    self,
    observer: ILogObserver,
    predicates: Iterable[LogLevelFilterPredicate],
    negativeObserver: ILogObserver = cast(ILogObserver, lambda event: None),
):
    """Capture the constructor arguments on the class for test inspection,
    then initialise the underlying FilteringLogObserver.

    Fix: ``predicates`` may be a one-shot iterator; the original called
    ``list(predicates)`` and then passed the (now exhausted) iterator on to
    ``FilteringLogObserver.__init__``. Materialize it exactly once and use
    the same list for both purposes.
    """
    predicate_list = list(predicates)
    # Stored on the *class* so tests can inspect the latest arguments.
    MockFilteringLogObserver.observer = observer
    MockFilteringLogObserver.predicates = predicate_list
    FilteringLogObserver.__init__(
        self, observer, predicate_list, negativeObserver)
def run_client(address, username):
    """Start the Singt client: set up logging, connect over TCP and UDP,
    and run the reactor until shutdown."""
    print(art.text2art("Singt"))

    # Logging: everything at "debug" and above goes both to stdout and a
    # per-user log file.
    logfile = open(f"client-{username}.log", 'w')
    logtargets = [
        FilteringLogObserver(
            textFileLogObserver(stream),
            predicates=[LogLevelFilterPredicate(LogLevel.debug)],
        )
        for stream in (sys.stdout, logfile)
    ]
    globalLogBeginner.beginLoggingTo(logtargets)

    # Namespaced logger for the client subsystem.
    log = Logger("client")

    # TCP
    # ===
    point = TCP4ClientEndpoint(reactor, address, 1234)
    client = TCPClient(username)
    d = connectProtocol(point, client)

    def err(failure):
        print("An error occurred:", failure)

    d.addErrback(err)

    # UDP
    # ===
    # 0 means any port, we don't care in this case
    udp_client = UDPClient(address, 12345)
    reactor.listenUDP(0, udp_client)

    # Reactor
    # =======
    print("Running reactor")
    reactor.run()
    print("Finished.")
def startLogging(console=True, filepath=None):
    '''
    Start the global Twisted logger subsystem, optionally writing to
    stdout and/or to the file named in the config file.
    '''
    global logLevelFilterPredicate

    # Collect the output streams first, then wrap each in a filtering
    # observer driven by the shared module-level predicate.
    streams = []
    if console:
        streams.append(sys.stdout)
    if filepath is not None and filepath != "":
        streams.append(open(filepath, 'a'))

    observers = [
        FilteringLogObserver(
            observer=textFileLogObserver(stream),
            predicates=[logLevelFilterPredicate],
        )
        for stream in streams
    ]
    globalLogBeginner.beginLoggingTo(observers)
def test_doubleEncodingError(self):
    """
    If it is not possible to encode a response to the request (for
    example, because L{xmlrpclib.dumps} raises an exception when encoding
    a L{Fault}) the exception which prevents the response from being
    generated is logged and the request object is finished anyway.
    """
    # Capture only critical-and-above events so the observer records just
    # the encoding failure, not routine log noise.
    logObserver = EventLoggingObserver()
    filtered = FilteringLogObserver(
        logObserver,
        [LogLevelFilterPredicate(defaultLogLevel=LogLevel.critical)])
    globalLogPublisher.addObserver(filtered)
    self.addCleanup(lambda: globalLogPublisher.removeObserver(filtered))

    # Issue the request while dumps still works on the client side.
    d = self.proxy().callRemote("echo", "")

    # *Now* break xmlrpclib.dumps. Hopefully the client already used it.
    def fakeDumps(*args, **kwargs):
        raise RuntimeError("Cannot encode anything at all!")
    self.patch(xmlrpclib, "dumps", fakeDumps)

    # It doesn't matter how it fails, so long as it does. Also, it happens
    # to fail with an implementation detail exception right now, not
    # something suitable as part of a public interface.
    d = self.assertFailure(d, Exception)

    def cbFailed(ignored):
        # The fakeDumps exception should have been logged.
        self.assertEquals(1, len(logObserver))
        self.assertIsInstance(logObserver[0]["log_failure"].value,
                              RuntimeError)
        self.assertEqual(len(self.flushLoggedErrors(RuntimeError)), 1)
    d.addCallback(cbFailed)
    return d
def _log_summary_for_results(self, config_results, connectivity_results, logger):
    """
    Logs the summaries of the config and connectivity results for the
    tested sections.

    Args:
        config_results (dict): map of section name to section result for
            the config validation
        connectivity_results (dict): map of section name to section result
            for the connectivity validation
        logger (Logger): the Twisted logger to write to
    """
    # Wrap the logger's observer inside a FilteringLogObserver so we can
    # control the minimum log level that gets printed via a predicate.
    filtering_predicate = LogLevelFilterPredicate()
    original_observer = logger.observer
    logger.observer = FilteringLogObserver(original_observer,
                                           [filtering_predicate])
    try:
        # Print the cross config results first.
        cross_config_result = config_results.get(self.CROSS_CONFIG_KEY)
        if cross_config_result and self._has_summary_to_print(
                cross_config_result):
            self._log_section_summary([cross_config_result], logger,
                                      filtering_predicate,
                                      "Cross-section results")

        # Then log the summaries for the real config sections
        self._log_section_summaries(config_results, connectivity_results,
                                    logger, filtering_predicate)
    finally:
        # Always restore the logger's original observer — without the
        # try/finally, an exception while logging summaries would leave the
        # logger permanently wrapped in the filtering observer.
        logger.observer = original_observer
def get_json_log_observer():
    """Build a level-filtered JSON log observer that writes to a rotating
    log file under the module-level ``log_dir``."""
    rotating_log = logfile.LogFile(
        "carbon_forwarder.log",
        log_dir,
        rotateLength=log_rotate_length,
        maxRotatedFiles=max_rotated_log_files,
    )
    threshold = LogLevelFilterPredicate(LogLevel.levelWithName(log_level))
    return FilteringLogObserver(jsonFileLogObserver(rotating_log), [threshold])
def make_wrapped_observer(observer, log_level_name):
    """Adapt a legacy observer and restrict it to events at or above the
    named log level."""
    minimum = LogLevel.lookupByName(log_level_name.lower())
    # Legacy observers expose .emit; bridge that into the new-style API
    # and apply the project's standard wrapping.
    wrapped = wrap_observer(LegacyLogObserverWrapper(observer.emit))
    return FilteringLogObserver(
        wrapped, [LogLevelFilterPredicate(defaultLogLevel=minimum)])
def startLogging(settings, stream=None, level=LogLevel.debug):
    """Configure per-namespace log levels from ``settings.options`` and,
    if logging has not begun yet, begin global logging to ``stream``."""
    global predicate

    fileObserver = logObserver(stream)
    predicate = LogLevelFilterPredicate(defaultLogLevel=level)

    def set_levels(namespaces, namespace_level):
        # Apply one level to a group of related namespaces.
        for namespace in namespaces:
            predicate.setLogLevelForNamespace(namespace, namespace_level)

    if settings.options.debug_mqtt:
        set_levels(['kotori.daq.services.mig',
                    'kotori.daq.application.mqttkit'], LogLevel.debug)

    # MQTT driver namespaces are chatty: debug only on request, info otherwise.
    mqtt_driver_namespaces = ['kotori.daq.intercom.mqtt', 'mqtt', 'paho.mqtt']
    if settings.options.debug_mqtt_driver:
        set_levels(mqtt_driver_namespaces, LogLevel.debug)
    else:
        set_levels(mqtt_driver_namespaces, LogLevel.info)

    if settings.options.debug_influx:
        set_levels(['kotori.daq.storage.influx'], LogLevel.debug)

    if settings.options.debug_io:
        set_levels(['kotori.io'], LogLevel.debug)

    # NOTE(review): checks a private Twisted attribute — apparently to avoid
    # calling beginLoggingTo twice; confirm against the installed version.
    if globalLogBeginner._temporaryObserver is not None:
        observers = [
            FilteringLogObserver(observer=fileObserver, predicates=[predicate])
        ]
        globalLogBeginner.beginLoggingTo(observers)
def __init__(self, config):
    """Set up the game server: logging, configuration, game state, the
    main loop, and the (initially empty) shutdown hook."""
    # Dictionary to hold active clients.
    self.clients = {}

    # Set up logging.
    # TODO: Use config parameters here.
    self.log = Logger()

    # Logging target.
    log_observer = textFileLogObserver(sys.stdout)

    # Filter out levels to the specified severity.
    logging_level_predicate = [LogLevelFilterPredicate(LogLevel.debug)]

    # Set up an Observer to actually perform the filtering.  Fix: reuse the
    # observer created above — the original built a second, identical
    # textFileLogObserver(sys.stdout) and left log_observer unused.
    log_filter = FilteringLogObserver(log_observer,
                                      predicates=logging_level_predicate)

    # And register global logging for the filtering observer.
    globalLogBeginner.beginLoggingTo([log_filter])

    # Passed-in game configuration.
    self.configuration = config

    # Game data.
    self.game = game.GameData()

    # Init main game loop: call GameLoop with a 30-second interval.
    self.game_loop = task.LoopingCall(self.GameLoop)
    self.game_loop.start(30)

    # Holds a (cancelable! - just "self.shutdown.cancel()") callback for
    # shutting down the server as needed.
    self.shutdown = None
def setup_logging(log_level, log_name, log_directory=""):
    """
    Configure the logger to use the specified log file and log level
    """
    # A single predicate sets the minimum level for the "orscanner"
    # namespace; it is shared by both destinations.
    log_filter = LogLevelFilterPredicate()
    log_filter.setLogLevelForNamespace(
        "orscanner", LogLevel.levelWithName(log_level.lower()))

    # Register a filtered observer for the daily log file, then stdout.
    log_file = DailyLogFile(log_name, log_directory)
    for sink in (log_file, sys.stdout):
        observer = FileLogObserver(sink, log_event_format)
        globalLogPublisher.addObserver(
            FilteringLogObserver(observer, (log_filter,)))
def start_logging(session_files):
    """Begin Twisted logging to stdout and the session's server.log."""
    logfile = open(session_files.session_dir / "server.log", 'w')
    # "debug" is the lowest severity, so these predicates let every
    # event through to both destinations.
    logtargets = [
        FilteringLogObserver(
            textFileLogObserver(stream),
            predicates=[LogLevelFilterPredicate(LogLevel.debug)],
        )
        for stream in (sys.stdout, logfile)
    ]
    # Direct the Twisted Logger to log to both of our observers.
    globalLogBeginner.beginLoggingTo(logtargets)
def beginLoggingTo(self, observers, discardBuffer=False, redirectStandardIO=True):
    """Wrap every observer with the class-wide filter predicate before
    delegating to the underlying log beginner."""
    filtered = [
        FilteringLogObserver(observer, [Logger.filterPredicate])
        for observer in observers
    ]
    self.beginner.beginLoggingTo(filtered, discardBuffer, redirectStandardIO)
def __init__(self, logger, url, header=None, **kwargs): """X-Ray WebSocket client base class Arguments: url: The URI of the endpoint where the device is connected """ # if necessary, convert serial to a unicode string u = urlparse(url) self.host = u.hostname if u.port: self.port = u.port else: if u.scheme == "ws": self.port = 80 else: self.port = 443 self.ws_factory = None self._logger = logger self._is_shutdown = False predicate = LogLevelFilterPredicate(LogLevel.error) try: if logger.isEnabledFor(logging.DEBUG): setDebugging(True) predicate = LogLevelFilterPredicate(LogLevel.debug) if logger.isEnabledFor(LOG_PROTOCOL_TRACE): txaio.set_global_log_level('trace') else: txaio.set_global_log_level('debug') else: txaio.set_global_log_level('info') except Exception as exc: logger.error(exc) globalLogPublisher.addObserver( FilteringLogObserver(STDLibLogObserver(name=logger.name), predicates=[predicate])) self.ws_factory = self.get_factory(url, header) self.ws_factory.d.addErrback(self._eb) if self.ws_factory.isSecure: contextFactory = ssl.ClientContextFactory() else: contextFactory = None def cleanup(): self.ws_factory.d.cancel() reactor.addSystemEventTrigger('after', 'shutdown', cleanup) connectWS(self.ws_factory, contextFactory)
def divert_logger():
    """Send Twisted's critical-level events to stdlib logging while leaving
    Python's warning hook untouched."""
    # noinspection PyPackageRequirements
    from twisted.logger import (FilteringLogObserver, LogLevel,
                                LogLevelFilterPredicate, STDLibLogObserver,
                                globalLogBeginner)

    # twisted's beginLoggingTo() will divert python warnings to its own
    # logging system; save the hook beforehand so we can undo that.
    showwarning = warnings.showwarning

    critical_only = LogLevelFilterPredicate(defaultLogLevel=LogLevel.critical)
    globalLogBeginner.beginLoggingTo(
        [FilteringLogObserver(STDLibLogObserver(), [critical_only])],
        redirectStandardIO=False)

    warnings.showwarning = showwarning
def startLogging(cls, logOutput, levelStr='debug'):
    """Install a global filtering log observer.

    Args:
        logOutput: a writable file object, or a path string — the file is
            opened for appending and missing parent directories are created.
        levelStr: name of the minimum log level to emit.
    """
    if isinstance(logOutput, str):
        # Fix: the original bound this to `dir`, shadowing the builtin, and
        # used an exists()-then-makedirs() pair that races with concurrent
        # creation; exist_ok=True makes it safe and idempotent.
        log_dir = os.path.dirname(logOutput)
        if log_dir:
            os.makedirs(log_dir, exist_ok=True)
        logOutput = open(logOutput, 'a+')
    level = LogLevel.levelWithName(levelStr)
    predicate = LogLevelFilterPredicate(defaultLogLevel=level)
    observer = FilteringLogObserver(
        textFileLogObserver(outFile=logOutput), [predicate])
    globalLogPublisher.addObserver(observer)
def startLogging(file: TextIO = sys.stdout) -> None:
    """
    Start Twisted logging system.
    """
    # Filter the text observer through the module-level predicate and
    # begin logging without touching stdout/stderr redirection.
    observer = FilteringLogObserver(
        textFileLogObserver(file),
        (globalLogLevelPredicate,),
    )
    globalLogBeginner.beginLoggingTo([observer], redirectStandardIO=False)
def init_logging(log_level):
    """
    Initialise the logging by adding an observer to the global log
    publisher.

    :param str log_level: The minimum log level to log messages for.
    """
    level_filter = LogLevelFilterPredicate(LogLevel.levelWithName(log_level))
    # Quieten Twisted's HTTP client factory below warn regardless of the
    # requested global level.
    level_filter.setLogLevelForNamespace(
        'twisted.web.client._HTTP11ClientFactory', LogLevel.warn)
    globalLogPublisher.addObserver(
        FilteringLogObserver(textFileLogObserver(sys.stdout), [level_filter]))
def getLogger(level):
    """Install a level-filtered stdout observer on the global publisher
    and return a no-op observer callable."""
    loglevel = getattr(LogLevel, level)
    threshold = LogLevelFilterPredicate(defaultLogLevel=loglevel)
    if loglevel > LogLevel.debug:
        # Keep the redirected-stdout namespace quiet unless debugging.
        threshold.setLogLevelForNamespace('stdout', LogLevel.warn)
    globalLogPublisher.addObserver(
        FilteringLogObserver(stdoutFileLogObserver(), [threshold]))
    return lambda event: None
def __init__(self, udp_registry, prometheus_port=8000, gateway_port=8888,
             log_level='INFO'):
    # TODO: add available log levels
    # TODO: change name to collector?
    self._metrics = udp_registry.get_normal_metrics()
    self._prometheus_port = prometheus_port
    self._gateway_port = gateway_port
    self._log_level = log_level

    # Private logger writing to stdout, filtered to the requested level.
    threshold = LogLevelFilterPredicate(
        LogLevel.lookupByName(self._log_level.lower()))
    stdout_observer = FilteringLogObserver(
        textFileLogObserver(sys.stdout), predicates=[threshold])
    self.log = Logger(observer=stdout_observer)
def run(self): """ Run kernel application """ # separated from other options to give subclasses a chance to override self.parser.add_argument('-c', '--connection-file', help="Path to existing connection file") cli_args = vars(self.parser.parse_args()) # wow twisted log api sucks bigtime # all this mess just to set global log level filter_level = self._NAME_TO_LEVEL[cli_args.pop("log_level")] log_filter =\ lambda e: PredicateResult.yes if e['log_level'] >= filter_level\ else PredicateResult.no observer = FilteringLogObserver(textFileLogObserver(sys.stdout), [log_filter]) globalLogBeginner.beginLoggingTo([observer], redirectStandardIO=False) if cli_args.get("connection_file"): connection_file =\ ConnectionFile.from_existing(cli_args.pop("connection_file")) write_connection_file = False else: connection_file = ConnectionFile.generate() write_connection_file = True self.extra_kernel_kwargs.update(cli_args) self.kernel = self.kernel_cls(connection_file.connection_props, *self.extra_kernel_args, **self.extra_kernel_kwargs) if write_connection_file: # Fix socket ports props = connection_file.connection_props props["shell_port"] = self._get_socket_port(self.kernel.shell_sock) props["control_port"] = self._get_socket_port(self.kernel.ctrl_sock) props["iopub_port"] = self._get_socket_port(self.kernel.iopub_sock) props["stdin_port"] = self._get_socket_port(self.kernel.stdin_sock) props["hb_port"] = self._get_socket_port(self.kernel.hb_sock) connection_file_path = connection_file.write_file() hint = """To connect another client to this kernel, use: --existing {}""".format(path.basename(connection_file_path)) print(hint) return task.react(lambda r: self.kernel.run())
def test_errors(self):
    """
    Verify that for each way a method exposed via XML-RPC can fail, the
    correct 'Content-type' header is set in the response and that the
    client-side Deferred is errbacked with an appropriate C{Fault}
    instance.
    """
    # Observe only critical events so the expected server-side failures
    # are the only things captured.
    logObserver = EventLoggingObserver()
    filtered = FilteringLogObserver(
        logObserver,
        [LogLevelFilterPredicate(defaultLogLevel=LogLevel.critical)])
    globalLogPublisher.addObserver(filtered)
    self.addCleanup(lambda: globalLogPublisher.removeObserver(filtered))

    dl = []
    # (expected fault code, remote method name) pairs.
    for code, methodName in [
        (666, "fail"),
        (666, "deferFail"),
        (12, "fault"),
        (23, "noSuchMethod"),
        (17, "deferFault"),
        (42, "SESSION_TEST"),
    ]:
        d = self.proxy().callRemote(methodName)
        d = self.assertFailure(d, xmlrpc.Fault)
        # Bind `code` as a default argument so each callback checks the
        # code from its own loop iteration (avoids late-binding closures).
        d.addCallback(
            lambda exc, code=code: self.assertEqual(exc.faultCode, code))
        dl.append(d)
    d = defer.DeferredList(dl, fireOnOneErrback=True)

    def cb(ign):
        for factory in self.factories:
            self.assertEqual(factory.headers[b"content-type"],
                             b"text/xml; charset=utf-8")
        # Exactly two failures reach the log; their ordering is not
        # deterministic, so accept either order.
        self.assertEquals(2, len(logObserver))
        f1 = logObserver[0]["log_failure"].value
        f2 = logObserver[1]["log_failure"].value
        if isinstance(f1, TestValueError):
            self.assertIsInstance(f2, TestRuntimeError)
        else:
            self.assertIsInstance(f1, TestRuntimeError)
            self.assertIsInstance(f2, TestValueError)
        self.flushLoggedErrors(TestRuntimeError, TestValueError)
    d.addCallback(cb)
    return d
def configure(verbose, quiet, shutup):
    """Set up stdlib and (when verbose) Twisted logging per the flags."""
    client._HTTP11ClientFactory.noisy = False

    root = logging.getLogger()
    if not root.handlers:
        handler = logging.StreamHandler(sys.stdout)
        handler.setFormatter(logging.Formatter(LOG_FORMAT))
        root.addHandler(handler)

    if verbose:
        from twisted.logger import (
            LogLevel, globalLogBeginner, textFileLogObserver,
            FilteringLogObserver, LogLevelFilterPredicate)
        # Per-namespace thresholds: silence redirected stdout entirely,
        # keep twisted/mqtt at warn.
        twisted_filter = LogLevelFilterPredicate(defaultLogLevel=LogLevel.warn)
        twisted_filter.setLogLevelForNamespace(namespace="stdout",
                                               level=LogLevel.critical)
        twisted_filter.setLogLevelForNamespace(namespace="twisted",
                                               level=LogLevel.warn)
        twisted_filter.setLogLevelForNamespace(namespace="mqtt",
                                               level=LogLevel.warn)
        globalLogBeginner.beginLoggingTo(
            [FilteringLogObserver(observer=textFileLogObserver(sys.stdout),
                                  predicates=[twisted_filter])],
            redirectStandardIO=False)

    # shutup wins over quiet, which wins over verbose.
    if shutup:
        level = logging.FATAL
    elif quiet:
        level = logging.ERROR
    elif verbose:
        level = logging.DEBUG
    else:
        level = logging.INFO
    root.setLevel(level)
def startLogging(self):
    """
    Start the L{twisted.logger} logging system.
    """
    # Build the file observer from the configured factory, filter it at
    # the configured default level, then begin global logging.
    file_observer = self.fileLogObserverFactory(self.logFile)
    level_filter = LogLevelFilterPredicate(
        defaultLogLevel=self.defaultLogLevel)
    globalLogBeginner.beginLoggingTo(
        [FilteringLogObserver(file_observer, [level_filter])])
def startLogging(self):
    """Begin logging to the configured file at the configured level,
    defaulting to stderr, textFileLogObserver and info respectively."""
    log_file = self.options.get("logFile", sys.stderr)
    observer_factory = self.options.get(
        "fileLogObserverFactory", textFileLogObserver)
    default_level = self.options.get("logLevel", LogLevel.info)

    filtering = FilteringLogObserver(
        observer_factory(log_file),
        [LogLevelFilterPredicate(defaultLogLevel=default_level)],
    )
    globalLogBeginner.beginLoggingTo([filtering])
def run():
    from twisted.internet import reactor

    # Route stdlib logging through Twisted's logger at the level implied
    # by settings.DEBUG.
    root = logging.getLogger()
    logging.getLogger("django").setLevel(logging.INFO)
    logging.raiseExceptions = settings.DEBUG
    logging._srcfile = None  # Disable expensive collection of location information.
    root.setLevel(logging.DEBUG if settings.DEBUG else logging.INFO)
    root.addHandler(TwistedLoggerLogHandler())
    observer = FilteringLogObserver(
        FileLogObserver(sys.stdout, formatForSystemd),
        [dropUnhandledHTTP2Shutdown],
    )
    globalLogBeginner.beginLoggingTo([observer], redirectStandardIO=False)
    log.info("Yarrharr {version} starting", version=__version__)

    # HTTP server: listen on the configured endpoint once the reactor starts.
    factory = Site(Root(reactor, reactor.getThreadPool()), logPath=None)
    endpoint = serverFromString(reactor, settings.SERVER_ENDPOINT)
    reactor.addSystemEventTrigger("before", "startup", endpoint.listen, factory)

    # Feed-polling loop; a broken loop is logged rather than crashing.
    updateLoop = AdaptiveLoopingCall(reactor, lambda: updateFeeds(reactor))
    loopEndD = updateLoop.start()
    loopEndD.addErrback(lambda f: log.failure("Polling loop broke", f))

    @receiver(schedule_changed)
    def threadPollNow(sender, **kwargs):
        """
        When the `schedule_changed` signal is sent poke the polling loop.
        If it is sleeping this will cause it to poll immediately.
        Otherwise this will cause it to run the poll function immediately
        once it returns (running it again protects against races).
        """
        log.debug("Immediate poll triggered by {sender}", sender=sender)
        reactor.callFromThread(updateLoop.poke)

    def stopUpdateLoop():
        # Stop polling; returning the loop's deferred delays reactor
        # shutdown until the loop has actually finished.
        updateLoop.stop()
        return loopEndD
    reactor.addSystemEventTrigger("before", "shutdown", stopUpdateLoop)

    reactor.run()
def startLogging(self):
    """
    Start the L{twisted.logger} logging system.
    """
    # Resolve the three configurable pieces with their defaults, then
    # wire them together and begin global logging.
    log_file = self.options.get(RunnerOptions.logFile, stderr)
    observer_factory = self.options.get(
        RunnerOptions.fileLogObserverFactory, textFileLogObserver)
    default_level = self.options.get(
        RunnerOptions.defaultLogLevel, LogLevel.info)

    filtering = FilteringLogObserver(
        observer_factory(log_file),
        [LogLevelFilterPredicate(defaultLogLevel=default_level)],
    )
    globalLogBeginner.beginLoggingTo([filtering])
def reset_log_file():
    """(Re)install the global filtering log observer, writing to
    ``mlog_file_path`` when configured, otherwise to stderr.

    Fix: the original removed the old ``log_observer`` but bound the new
    observer to a local name (``mlog_observer``), so the module-level
    ``log_observer`` was never updated and observers stacked up on every
    call. Assign the new observer to the global.
    """
    global log_observer
    if log_observer:
        print('removing log observer')
        globalLogPublisher.removeObserver(log_observer)

    # CLI argument overrides the config-file log level.
    log_level = parsed_args.log_level or config['log_level']
    info_predicate = LogLevelFilterPredicate(
        LogLevel.levelWithName(log_level.lower()))
    if mlog_file_path:
        mlog_file = open(mlog_file_path, 'a+')
    else:
        mlog_file = sys.stderr
    log_observer = FilteringLogObserver(textFileLogObserver(mlog_file),
                                        predicates=[info_predicate])
    globalLogPublisher.addObserver(log_observer)
    return