def start_upload_server():
    """Run a Twisted web server exposing the CheesePi upload endpoint."""
    import argparse
    from twisted.internet import reactor
    from twisted.logger import Logger, globalLogPublisher, STDLibLogObserver
    from twisted.web.server import Site
    from twisted.web.resource import Resource
    from cheesepi.server.upload import UploadHandler

    # Command-line arguments: only the listening port is configurable.
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument('--port', type=int, default=18090,
                            help='Port to listen on')
    parsed_args = arg_parser.parse_args()

    init_logging()

    # Forward Twisted's native log events into the stdlib logging framework.
    globalLogPublisher.addObserver(
        STDLibLogObserver(name="cheesepi.server.upload"))
    # Inside Twisted code we log through Twisted's own Logger.
    log = Logger()

    # Resource tree: /upload is served by UploadHandler.
    site_root = Resource()
    site_root.putChild("upload", UploadHandler())
    reactor.listenTCP(parsed_args.port, Site(site_root))
    log.info("Starting upload server on port %d..." % parsed_args.port)
    reactor.run()
def setup_logging(log_config=None, log_file=None, verbosity=None):
    """Set up python logging and route Twisted's logging into it.

    Args:
        log_config: path to a YAML dict-config file; when given it takes
            precedence over the other arguments.
        log_file: optional log file path; a size-rotated file handler is
            installed instead of a stream handler.
        verbosity: falsy for INFO, truthy for DEBUG; > 1 additionally
            enables DEBUG for the storage logger.
    """
    log_format = (
        "%(asctime)s - %(name)s - %(lineno)d - %(levelname)s - %(request)s"
        " - %(message)s")
    if log_config is None:
        level = logging.INFO
        level_for_storage = logging.INFO
        if verbosity:
            level = logging.DEBUG
            if verbosity > 1:
                level_for_storage = logging.DEBUG

        # FIXME: we need a logging.WARN for a -q quiet option
        logger = logging.getLogger('')
        logger.setLevel(level)
        logging.getLogger('synapse.storage').setLevel(level_for_storage)
        formatter = logging.Formatter(log_format)
        if log_file:
            # TODO: Customisable file size / backup count
            handler = logging.handlers.RotatingFileHandler(
                log_file, maxBytes=(1000 * 1000 * 100), backupCount=3)

            def sighup(signum, stack):
                # roll the log file over on SIGHUP (e.g. from logrotate)
                logger.info("Closing log file due to SIGHUP")
                handler.doRollover()
                logger.info("Opened new log file due to SIGHUP")

            # TODO(paul): obviously this is a terrible mechanism for
            #   stealing SIGHUP, because it means no other part of synapse
            #   can use it instead. If we want to catch SIGHUP anywhere
            #   else as well, I'd suggest we find a nicer way to broadcast
            #   it around.
            # Supply a default so platforms without SIGHUP (e.g. Windows)
            # don't raise AttributeError here.
            if getattr(signal, "SIGHUP", None):
                signal.signal(signal.SIGHUP, sighup)
        else:
            handler = logging.StreamHandler()
        handler.setFormatter(formatter)
        handler.addFilter(LoggingContextFilter(request=""))
        logger.addHandler(handler)
    else:
        with open(log_config, 'r') as f:
            # safe_load: the config file is plain data; yaml.load would allow
            # arbitrary Python object construction
            logging.config.dictConfig(yaml.safe_load(f))

    # It's critical to point twisted's internal logging somewhere, otherwise it
    # stacks up and leaks up to 64K objects;
    # see: https://twistedmatrix.com/trac/ticket/8164
    #
    # Routing to the python logging framework could be a performance problem if
    # the handlers blocked for a long time as python.logging is a blocking API
    # see https://twistedmatrix.com/documents/current/core/howto/logger.html
    # filed as https://github.com/matrix-org/synapse/issues/1727
    #
    # However this may not be too much of a problem if we are just writing to a
    # file.
    observer = STDLibLogObserver()
    globalLogBeginner.beginLoggingTo([observer])
def run(self):
    """Build the HTTP factory, wire up Twisted logging and the channel-layer
    readers, bind every configured endpoint and run the reactor."""
    self.factory = HTTPFactory(
        self.channel_layer,
        self.action_logger,
        timeout=self.http_timeout,
        websocket_timeout=self.websocket_timeout,
        ping_interval=self.ping_interval,
        ping_timeout=self.ping_timeout,
        ws_protocols=self.ws_protocols,
        root_path=self.root_path,
        proxy_forwarded_address_header=self.proxy_forwarded_address_header,
        proxy_forwarded_port_header=self.proxy_forwarded_port_header
    )

    # Quiet mode discards Twisted's log stream entirely; otherwise route it
    # into the stdlib logging framework under this module's name.
    if self.verbosity <= 1:
        globalLogBeginner.beginLoggingTo([lambda _: None],
                                         redirectStandardIO=False,
                                         discardBuffer=True)
    else:
        globalLogBeginner.beginLoggingTo([STDLibLogObserver(__name__)])

    # Native Twisted channel-layer mode is deliberately disabled ("and False")
    # because it currently performs worse than the busy-loop reader.
    use_twisted_reader = "twisted" in self.channel_layer.extensions and False
    if use_twisted_reader:
        logger.info("Using native Twisted mode on channel layer")
        reactor.callLater(0, self.backend_reader_twisted)
    else:
        logger.info("Using busy-loop synchronous mode on channel layer")
        reactor.callLater(0, self.backend_reader_sync)
    reactor.callLater(2, self.timeout_checker)

    for endpoint_description in self.endpoints:
        logger.info("Listening on endpoint %s" % endpoint_description)
        # Twisted requires str on python2 (not unicode) and str on python3 (not bytes)
        endpoint = serverFromString(reactor, str(endpoint_description))
        endpoint.listen(self.factory)

    reactor.run(installSignalHandlers=self.signal_handlers)
def run(self):
    """Bridge Twisted logging into stdlib logging, apply the dict config,
    capture warnings, then run the main loop under twisted's react()."""
    # Route Twisted's native log events to the stdlib logging framework.
    globalLogBeginner.beginLoggingTo([STDLibLogObserver()],
                                     redirectStandardIO=False)
    # Configure stdlib logging from the application config.
    logging.config.dictConfig(self.config['logging'])
    logging.captureWarnings(True)
    logger.info('Logging configured!')
    return react(self.main_loop)
def app_main(withgui):
    """Initialise the Twisted/RTP/GMITM subsystems and run the reactor.

    Args:
        withgui: when true, also bring up the GUI and connect it to GMITM.

    Raises:
        ZsiposCfgException: if a required address parameter is still missing
            from the configuration after autodiscovery.
    """
    try:
        infomsg('loading TWISTED subsystem')
        from twisted.internet import reactor
        from twisted.logger import STDLibLogObserver, globalLogBeginner

        # redirect twisted logging to python logging
        globalLogBeginner.beginLoggingTo([STDLibLogObserver()])
        infomsg('starting application.')

        # DHCP, DNS: find missing addresses
        setExternalPhoneAddress()
        setExternalGateway()
        setExternalProxyAddress()
        try:
            # force exception if not found
            config.get(consts.SECTION, consts.EXTPHONEADDR)
            config.get(consts.SECTION, consts.LOCPROXYADDR)
            config.get(consts.SECTION, consts.EXTGATEWAY)
            config.get(consts.SECTION, consts.EXTPROXYADDR)
        except Exception as exc:
            # narrowed from bare `except:` so KeyboardInterrupt/SystemExit
            # are not converted into a config error; chain the original
            # lookup failure for diagnosis
            raise ZsiposCfgException("wrong or missing parameter") from exc

        import rtp
        rtp.init()  # @UndefinedVariable
        if withgui:
            def thread_init_cb():
                # tell the RTP layer which thread runs the GUI
                rtp.register_gui_thread()  # @UndefinedVariable
            import gui
            gui.init(thread_init_cb)  # @UndefinedVariable

        log.info("loading SIP subsystem")
        import GMITM
        gmitm = GMITM.GMITM()
        if withgui:
            gmitm.setEventSink(
                gui.GUI_GMITMEventListener())  # @UndefinedVariable
        log.info("GMITM created.")

        # postinit must run once the reactor loop is alive
        reactor.callLater(1, rtp.postinit)  # @UndefinedVariable
        reactor.run(installSignalHandlers=True)  # @UndefinedVariable
    finally:
        # Best-effort cleanup: gui/rtp may never have been imported, so a
        # NameError here is expected and deliberately swallowed. Narrowed
        # from bare `except:` to let KeyboardInterrupt/SystemExit escape.
        try:
            gui.close()  # @UndefinedVariable
        except Exception:
            pass
        try:
            rtp.close()  # @UndefinedVariable
        except Exception:
            pass
def run(self):
    """Start the server: build HTTP/WebSocket factories, configure Twisted
    logging, bind the configured endpoints and run the reactor until
    shutdown."""
    # A dict of protocol: {"application_instance":, "connected":, "disconnected":} dicts
    self.connections = {}
    # Make the factory
    self.http_factory = HTTPFactory(self)
    self.ws_factory = WebSocketFactory(self, server="Daphne")
    self.ws_factory.setProtocolOptions(
        autoPingTimeout=self.ping_timeout,
        allowNullOrigin=True,
        openHandshakeTimeout=self.websocket_handshake_timeout)
    if self.verbosity <= 1:
        # Redirect the Twisted log to nowhere
        globalLogBeginner.beginLoggingTo([lambda _: None],
                                         redirectStandardIO=False,
                                         discardBuffer=True)
    else:
        # Route Twisted's log events into stdlib logging under this module
        globalLogBeginner.beginLoggingTo([STDLibLogObserver(__name__)])
    # Detect what Twisted features are enabled
    if http.H2_ENABLED:
        logger.info("HTTP/2 support enabled")
    else:
        logger.info(
            "HTTP/2 support not enabled (install the http2 and tls Twisted extras)"
        )
    # Kick off the timeout loop
    reactor.callLater(1, self.application_checker)
    reactor.callLater(2, self.timeout_checker)
    reactor.callLater(10, self.monitoring)
    # NOTE(review): pympler is a third-party memory-profiling dependency and
    # SummaryTracker looks like debugging leftover — confirm it is intended
    # to ship in production.
    from pympler import tracker
    self.tr = tracker.SummaryTracker()
    for socket_description in self.endpoints:
        logger.info("Configuring endpoint %s", socket_description)
        # serverFromString wants a plain str endpoint description
        ep = serverFromString(reactor, str(socket_description))
        listener = ep.listen(self.http_factory)
        listener.addCallback(self.listen_success)
        listener.addErrback(self.listen_error)
        self.listeners.append(listener)
    # Set the asyncio reactor's event loop as global
    # TODO: Should we instead pass the global one into the reactor?
    asyncio.set_event_loop(reactor._asyncioEventloop)
    # Verbosity 3 turns on asyncio debug to find those blocking yields
    if self.verbosity >= 3:
        asyncio.get_event_loop().set_debug(True)
    reactor.addSystemEventTrigger("before", "shutdown",
                                  self.kill_all_applications)
    if not self.abort_start:
        # Trigger the ready flag if we had one
        if self.ready_callable:
            self.ready_callable()
        # Run the reactor
        reactor.run(installSignalHandlers=self.signal_handlers)
def __init__(self, logger, url, header=None, **kwargs):
    """X-Ray WebSocket client base class

    Arguments:
        url: The URI of the endpoint where the device is connected
    """
    parsed = urlparse(url)
    self.host = parsed.hostname
    # Fall back to the scheme's default port when the URL carries none.
    self.port = parsed.port or (80 if parsed.scheme == "ws" else 443)

    self.ws_factory = None
    self._logger = logger
    self._is_shutdown = False

    # Mirror the stdlib logger's verbosity onto Twisted/txaio logging.
    level_predicate = LogLevelFilterPredicate(LogLevel.error)
    try:
        if not logger.isEnabledFor(logging.DEBUG):
            txaio.set_global_log_level('info')
        else:
            setDebugging(True)
            level_predicate = LogLevelFilterPredicate(LogLevel.debug)
            if logger.isEnabledFor(LOG_PROTOCOL_TRACE):
                txaio.set_global_log_level('trace')
            else:
                txaio.set_global_log_level('debug')
    except Exception as exc:
        logger.error(exc)
    globalLogPublisher.addObserver(
        FilteringLogObserver(STDLibLogObserver(name=logger.name),
                             predicates=[level_predicate]))

    self.ws_factory = self.get_factory(url, header)
    self.ws_factory.d.addErrback(self._eb)
    context_factory = (ssl.ClientContextFactory()
                       if self.ws_factory.isSecure else None)

    def cleanup():
        # Abandon the pending connection deferred when the reactor stops.
        self.ws_factory.d.cancel()

    reactor.addSystemEventTrigger('after', 'shutdown', cleanup)
    connectWS(self.ws_factory, context_factory)
def divert_logger():
    """Route Twisted's log stream into stdlib logging, keeping only critical
    events, without letting Twisted take over Python warnings."""
    # noinspection PyPackageRequirements
    from twisted.logger import FilteringLogObserver, LogLevel, LogLevelFilterPredicate, STDLibLogObserver, globalLogBeginner

    # beginLoggingTo() replaces warnings.showwarning so that Python warnings
    # flow into Twisted's logging; remember the original so we can undo that.
    original_showwarning = warnings.showwarning

    critical_only = LogLevelFilterPredicate(defaultLogLevel=LogLevel.critical)
    stdlib_observer = FilteringLogObserver(STDLibLogObserver(),
                                           [critical_only])
    globalLogBeginner.beginLoggingTo([stdlib_observer],
                                     redirectStandardIO=False)

    # restore the warning hook that beginLoggingTo() diverted
    warnings.showwarning = original_showwarning
def run(self):
    """Start the Daphne server against the channel layer: create the HTTP
    factory, configure Twisted logging, start the backend readers and listen
    on every configured endpoint until the reactor exits."""
    # Create process-local channel prefixes
    # TODO: Can we guarantee non-collision better?
    process_id = "".join(
        random.choice(string.ascii_letters) for i in range(10))
    self.send_channel = "daphne.response.%s!" % process_id
    # Make the factory
    self.factory = HTTPFactory(
        self.channel_layer,
        action_logger=self.action_logger,
        send_channel=self.send_channel,
        timeout=self.http_timeout,
        websocket_timeout=self.websocket_timeout,
        websocket_connect_timeout=self.websocket_connect_timeout,
        ping_interval=self.ping_interval,
        ping_timeout=self.ping_timeout,
        ws_protocols=self.ws_protocols,
        root_path=self.root_path,
        proxy_forwarded_address_header=self.proxy_forwarded_address_header,
        proxy_forwarded_port_header=self.proxy_forwarded_port_header,
        websocket_handshake_timeout=self.websocket_handshake_timeout)
    if self.verbosity <= 1:
        # Redirect the Twisted log to nowhere
        globalLogBeginner.beginLoggingTo([lambda _: None],
                                         redirectStandardIO=False,
                                         discardBuffer=True)
    else:
        # Route Twisted's log events into stdlib logging under this module
        globalLogBeginner.beginLoggingTo([STDLibLogObserver(__name__)])
    # Detect what Twisted features are enabled
    if http.H2_ENABLED:
        logger.info("HTTP/2 support enabled")
    else:
        logger.info(
            "HTTP/2 support not enabled (install the http2 and tls Twisted extras)"
        )
    # Native mode needs the channel layer's "twisted" extension and can be
    # overridden with force_sync.
    if "twisted" in self.channel_layer.extensions and not self.force_sync:
        logger.info("Using native Twisted mode on channel layer")
        reactor.callLater(0, self.backend_reader_twisted)
    else:
        logger.info("Using busy-loop synchronous mode on channel layer")
        reactor.callLater(0, self.backend_reader_sync)
    reactor.callLater(2, self.timeout_checker)
    for socket_description in self.endpoints:
        logger.info("Listening on endpoint %s" % socket_description)
        # Twisted requires str on python2 (not unicode) and str on python3 (not bytes)
        ep = serverFromString(reactor, str(socket_description))
        listener = ep.listen(self.factory)
        listener.addErrback(self.on_listener_error)
        self.listeners.append(listener)
    reactor.run(installSignalHandlers=self.signal_handlers)
def install(default_path='logging.json',
            default_level=logging.INFO,
            env_key='LOG_CFG',
            kill_on=None):
    """
    Setup logging configuration

    If the path given by ``default_path`` (or the ``env_key`` environment
    variable override) doesn't exist, ``default_level`` is used and the root
    handler is set to the formattable stream handler.

    Args:
        default_path: JSON logging-config file to load if present.
        default_level: fallback root log level when no config file exists.
        env_key: environment variable that may override ``default_path``.
        kill_on: optional level at which a KillProcessHandler forces exit.
    """
    import os
    import sys
    path = os.getenv(env_key, default_path)
    if os.path.exists(path):
        with open(path, 'rt') as f:
            import ujson as json
            content = json.load(f)
        logging.config.dictConfig(content)
    else:
        from .formatters import ConcatFormatter
        handler = logging.StreamHandler(sys.stdout)
        fmt = "%(asctime)s - %(name)s - %(levelname)s - %(uber_message)s"
        concatf = ConcatFormatter(fmt=fmt, delimiter="; ", operator="= ",
                                  log_in_color=True,
                                  include_format_keywords=False,
                                  parse_text=True)
        handler.setFormatter(concatf)
        logging.root.addHandler(handler)
        logging.root.setLevel(default_level)
        logging.basicConfig(level=default_level)

    # this handler is the last one, and will force exit
    # once a critical message has been received
    if kill_on is not None:
        from .handlers import KillProcessHandler
        logging.root.addHandler(KillProcessHandler(level=kill_on))

    def log_unhandled(exctype, value, tb):
        # funnel uncaught exceptions into the logging system
        getLogger("unhandled").critical("Unhandled Error",
                                        exc_info=(exctype, value, tb))

    sys.excepthook = log_unhandled

    if rewire_twisted_log:
        # clear all observers. NOTE: the previous `map(removeObserver, ...)`
        # was a lazy iterator on Python 3 and never actually removed
        # anything; iterate explicitly over a copy of the observer list.
        for observer in list(globalLogPublisher._observers):
            globalLogPublisher.removeObserver(observer)
        globalLogPublisher.addObserver(STDLibLogObserver())
def main(argv=sys.argv, stderr=sys.stderr):
    """Entry point for the DKIM tool: parse options and dispatch to key
    generation, request signing, or verification.

    NOTE(review): ``argv=sys.argv`` is bound once at import time, and the
    ``stderr`` parameter is unused in this body — confirm both are
    intentional.
    """
    options = DKIMToolOptions()
    options.parseOptions(argv[1:])

    #
    # Send logging output to stdout
    #
    observer = STDLibLogObserver()
    # NOTE(review): twisted.logger's STDLibLogObserver has no start() method
    # (that API belongs to the legacy twisted.python.log observers) — verify
    # which observer class is actually imported in this module.
    observer.start()
    if options["verbose"]:
        # raise the DKIM namespace to debug when -v is given
        log.levels().setLogLevelForNamespace(
            "txdav.caldav.datastore.scheduling.ischedule.dkim",
            LogLevel.debug)

    # Exactly one mode is expected: key generation runs synchronously; the
    # request/verify modes run inside the reactor.
    if options["key-gen"]:
        _doKeyGeneration(options)
    elif options["request"]:
        reactor.callLater(0, _runInReactor, _doRequest, options)
        reactor.run()
    elif options["verify"]:
        reactor.callLater(0, _runInReactor, _doVerify, options)
        reactor.run()
    else:
        usage("Invalid options")
def _setup_stdlib_logging(config, log_config, logBeginner: LogBeginner):
    """
    Set up Python stdlib logging.

    When no config dict is supplied, a default INFO-level stream handler is
    installed on the root logger; otherwise the dict is applied via
    logging.config.dictConfig. Twisted's own logging is then routed into the
    stdlib framework, dropping a few known-noisy messages.
    """
    if log_config is not None:
        logging.config.dictConfig(log_config)
    else:
        log_format = (
            "%(asctime)s - %(name)s - %(lineno)d - %(levelname)s - %(request)s"
            " - %(message)s")

        root = logging.getLogger("")
        root.setLevel(logging.INFO)
        logging.getLogger("synapse.storage.SQL").setLevel(logging.INFO)

        stream_handler = logging.StreamHandler()
        stream_handler.setFormatter(logging.Formatter(log_format))
        stream_handler.addFilter(LoggingContextFilter(request=""))
        root.addHandler(stream_handler)

    # Route Twisted's native logging through to the standard library logging
    # system, suppressing a handful of spammy messages.
    observer = STDLibLogObserver()

    _noisy_prefixes = (
        "DNSDatagramProtocol starting on ",
        "(UDP Port ",
        "Timing out client",
    )

    def _log(event):
        if "log_text" in event and event["log_text"].startswith(_noisy_prefixes):
            return
        return observer(event)

    logBeginner.beginLoggingTo([_log],
                               redirectStandardIO=not config.no_redirect_stdio)
    if not config.no_redirect_stdio:
        print("Redirected stdout/stderr to logs")

    return observer
def start_control_server():
    """Run the CheesePi RPC control server backed by MongoDB."""
    import argparse
    from twisted.internet import reactor
    from twisted.logger import Logger, globalLogPublisher, STDLibLogObserver
    from cheesepi.server.control import (CheeseRPCServerFactory,
                                         CheeseRPCServer)
    from cheesepi.server.storage.mongo import MongoDAO

    # Command-line arguments: only the listening port is configurable.
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument('--port', type=int, default=18080,
                            help='Port to listen on')
    parsed_args = arg_parser.parse_args()

    init_logging()

    # Forward Twisted's native log events into the stdlib logging framework.
    globalLogPublisher.addObserver(
        STDLibLogObserver(name="cheesepi.server.control"))
    # Inside Twisted code we log through Twisted's own Logger.
    log = Logger()

    # Storage backend; the RPC server serves queries out of this DAO.
    dao = MongoDAO('localhost', 27017)
    control_server = CheeseRPCServer(dao).getStreamFactory(
        CheeseRPCServerFactory)

    reactor.listenTCP(parsed_args.port, control_server)
    log.info("Starting control server on port %d..." % parsed_args.port)
    reactor.run()
def run(self):
    """Create the HTTP factory, set up Twisted logging, bind the configured
    transport (unix socket, inherited fd, or TCP) and run the reactor."""
    self.factory = HTTPFactory(
        self.channel_layer,
        self.action_logger,
        timeout=self.http_timeout,
        websocket_timeout=self.websocket_timeout,
        ping_interval=self.ping_interval,
        ping_timeout=self.ping_timeout,
        ws_protocols=self.ws_protocols,
        root_path=self.root_path,
        proxy_forwarded_address_header=self.proxy_forwarded_address_header,
        proxy_forwarded_port_header=self.proxy_forwarded_port_header)

    # Quiet mode discards the Twisted log stream; otherwise forward it to
    # the stdlib logging framework.
    if self.verbosity <= 1:
        globalLogBeginner.beginLoggingTo([lambda _: None],
                                         redirectStandardIO=False,
                                         discardBuffer=True)
    else:
        globalLogBeginner.beginLoggingTo([STDLibLogObserver(__name__)])

    # Listen on a socket
    if self.unix_socket:
        reactor.listenUNIX(self.unix_socket, self.factory)
    elif self.file_descriptor:
        # socket returns the same socket if supplied with a fileno
        inherited = socket.socket(fileno=self.file_descriptor)
        reactor.adoptStreamPort(self.file_descriptor, inherited.family,
                                self.factory)
    else:
        reactor.listenTCP(self.port, self.factory, interface=self.host)

    # Native Twisted mode is deliberately disabled ("and False") pending
    # performance work; fall through to the busy-loop reader.
    if "twisted" in self.channel_layer.extensions and False:
        logger.info("Using native Twisted mode on channel layer")
        reactor.callLater(0, self.backend_reader_twisted)
    else:
        logger.info("Using busy-loop synchronous mode on channel layer")
        reactor.callLater(0, self.backend_reader_sync)
    reactor.callLater(2, self.timeout_checker)

    reactor.run(installSignalHandlers=self.signal_handlers)
def serve(
    application: "WSGIApplication",
    port: int = 8000,
    metrics_port: int = 9000,
    access_log_formatter: LogFormatter = proxiedLogFormatter,
    health_check_path: str = "/healthz",
):
    """Serve a WSGI application under Twisted alongside a metrics endpoint.

    Blocks in reactor.run() until shutdown, then stops the thread pool.
    """
    # Silence per-connection factory start/stop noise.
    Factory.noisy = False

    # stdlib logging first, then route Twisted's log stream into it.
    logging.basicConfig(level=logging.INFO)
    logger.globalLogBeginner.beginLoggingTo([STDLibLogObserver()])

    # WSGI requests run on this thread pool; it starts with the reactor.
    thread_pool = threadpool.ThreadPool()
    reactor.callWhenRunning(thread_pool.start)
    _listen_wsgi(
        reactor,
        thread_pool,
        application,
        port,
        access_log_formatter,
        health_check_path,
    )
    _listen_metrics(reactor, metrics_port)

    # Export thread-pool statistics to Prometheus.
    REGISTRY.register(TwistedThreadPoolCollector(thread_pool))

    # Run until shutdown, then release the pool's worker threads.
    reactor.run()
    thread_pool.stop()
def _setup_stdlib_logging(config, log_config_path,
                          logBeginner: LogBeginner) -> None:
    """
    Set up Python standard library logging.

    Args:
        config: server config; only `server_name` and `no_redirect_stdio`
            are read here.
        log_config_path: optional path to a logging config file; when None a
            default INFO-level stream handler is installed instead.
        logBeginner: the Twisted log beginner our observer is attached to.
    """
    if log_config_path is None:
        log_format = (
            "%(asctime)s - %(name)s - %(lineno)d - %(levelname)s - %(request)s"
            " - %(message)s")

        logger = logging.getLogger("")
        logger.setLevel(logging.INFO)
        logging.getLogger("synapse.storage.SQL").setLevel(logging.INFO)

        formatter = logging.Formatter(log_format)

        handler = logging.StreamHandler()
        handler.setFormatter(formatter)
        logger.addHandler(handler)
    else:
        # Load the logging configuration.
        _load_logging_config(log_config_path)

    # We add a log record factory that runs all messages through the
    # LoggingContextFilter so that we get the context *at the time we log*
    # rather than when we write to a handler. This can be done in config using
    # filter options, but care must when using e.g. MemoryHandler to buffer
    # writes.
    log_context_filter = LoggingContextFilter(request="")
    log_metadata_filter = MetadataFilter({"server_name": config.server_name})
    old_factory = logging.getLogRecordFactory()

    def factory(*args, **kwargs):
        record = old_factory(*args, **kwargs)
        log_context_filter.filter(record)
        log_metadata_filter.filter(record)
        return record

    logging.setLogRecordFactory(factory)

    # Route Twisted's native logging through to the standard library logging
    # system.
    observer = STDLibLogObserver()

    # per-thread re-entrancy flag for the guard inside _log below
    threadlocal = threading.local()

    def _log(event):
        if "log_text" in event:
            if event["log_text"].startswith("DNSDatagramProtocol starting on "):
                return
            if event["log_text"].startswith("(UDP Port "):
                return
            if event["log_text"].startswith("Timing out client"):
                return
        # this is a workaround to make sure we don't get stack overflows when the
        # logging system raises an error which is written to stderr which is redirected
        # to the logging system, etc.
        if getattr(threadlocal, "active", False):
            # write the text of the event, if any, to the *real* stderr (which may
            # be redirected to /dev/null, but there's not much we can do)
            try:
                event_text = eventAsText(event)
                print("logging during logging: %s" % event_text,
                      file=sys.__stderr__)
            except Exception:
                # gah.
                pass
            return
        try:
            # mark this thread as "inside the logging system" so re-entrant
            # events hit the guard above instead of recursing
            threadlocal.active = True
            return observer(event)
        finally:
            threadlocal.active = False

    logBeginner.beginLoggingTo([_log],
                               redirectStandardIO=not config.no_redirect_stdio)
    if not config.no_redirect_stdio:
        print("Redirected stdout/stderr to logs")
def _setup_stdlib_logging(config, log_config, logBeginner: LogBeginner):
    """
    Set up Python stdlib logging.

    Installs either a default INFO stream handler or the supplied dict
    config, wraps the log record factory so every record is stamped with the
    current logging context at log time, and routes Twisted's log events
    into stdlib logging (minus a few known-noisy messages).
    """
    if log_config is not None:
        logging.config.dictConfig(log_config)
    else:
        log_format = (
            "%(asctime)s - %(name)s - %(lineno)d - %(levelname)s - %(request)s"
            " - %(message)s")

        root = logging.getLogger("")
        root.setLevel(logging.INFO)
        logging.getLogger("synapse.storage.SQL").setLevel(logging.INFO)

        stream_handler = logging.StreamHandler()
        stream_handler.setFormatter(logging.Formatter(log_format))
        root.addHandler(stream_handler)

    # Run every record through the LoggingContextFilter *at log time* rather
    # than at handler time, by wrapping the log record factory. Doing this in
    # config via filter options is possible but fragile with e.g.
    # MemoryHandler buffering.
    log_filter = LoggingContextFilter(request="")
    previous_factory = logging.getLogRecordFactory()

    def _context_factory(*args, **kwargs):
        record = previous_factory(*args, **kwargs)
        log_filter.filter(record)
        return record

    logging.setLogRecordFactory(_context_factory)

    # Route Twisted's native logging through to the standard library logging
    # system, dropping a few spammy messages.
    observer = STDLibLogObserver()

    _noisy_prefixes = (
        "DNSDatagramProtocol starting on ",
        "(UDP Port ",
        "Timing out client",
    )

    def _log(event):
        if "log_text" in event and event["log_text"].startswith(_noisy_prefixes):
            return
        return observer(event)

    logBeginner.beginLoggingTo([_log],
                               redirectStandardIO=not config.no_redirect_stdio)
    if not config.no_redirect_stdio:
        print("Redirected stdout/stderr to logs")

    return observer
def setup_logging(config, use_worker_options=False):
    """ Set up python logging

    Args:
        config (LoggingConfig | synapse.config.workers.WorkerConfig):
            configuration data

        use_worker_options (bool): True to use 'worker_log_config' and
            'worker_log_file' options instead of 'log_config' and 'log_file'.
    """
    log_config = (config.worker_log_config if use_worker_options
                  else config.log_config)
    log_file = (config.worker_log_file if use_worker_options
                else config.log_file)

    log_format = (
        "%(asctime)s - %(name)s - %(lineno)d - %(levelname)s - %(request)s"
        " - %(message)s")

    if log_config is None:
        # We don't have a logfile, so fall back to the 'verbosity' param from
        # the config or cmdline. (Note that we generate a log config for new
        # installs, so this will be an unusual case)
        level = logging.INFO
        level_for_storage = logging.INFO
        if config.verbosity:
            level = logging.DEBUG
            if config.verbosity > 1:
                level_for_storage = logging.DEBUG

        logger = logging.getLogger('')
        logger.setLevel(level)

        logging.getLogger('synapse.storage.SQL').setLevel(level_for_storage)

        formatter = logging.Formatter(log_format)
        if log_file:
            # TODO: Customisable file size / backup count
            handler = logging.handlers.RotatingFileHandler(
                log_file, maxBytes=(1000 * 1000 * 100), backupCount=3)

            def sighup(signum, stack):
                # roll the log file over on SIGHUP (e.g. from logrotate)
                logger.info("Closing log file due to SIGHUP")
                handler.doRollover()
                logger.info("Opened new log file due to SIGHUP")
        else:
            handler = logging.StreamHandler()

            def sighup(signum, stack):
                pass

        handler.setFormatter(formatter)
        handler.addFilter(LoggingContextFilter(request=""))
        logger.addHandler(handler)
    else:
        def load_log_config():
            with open(log_config, 'r') as f:
                # safe_load: the log config is plain data; yaml.load would
                # allow arbitrary Python object construction
                logging.config.dictConfig(yaml.safe_load(f))

        def sighup(signum, stack):
            # it might be better to use a file watcher or something for this.
            logging.info("Reloading log config from %s due to SIGHUP",
                         log_config)
            load_log_config()

        load_log_config()

    # TODO(paul): obviously this is a terrible mechanism for
    #   stealing SIGHUP, because it means no other part of synapse
    #   can use it instead. If we want to catch SIGHUP anywhere
    #   else as well, I'd suggest we find a nicer way to broadcast
    #   it around.
    # Supply a default so platforms without SIGHUP (e.g. Windows) don't
    # raise AttributeError here.
    if getattr(signal, "SIGHUP", None):
        signal.signal(signal.SIGHUP, sighup)

    # make sure that the first thing we log is a thing we can grep backwards
    # for
    logging.warning("***** STARTING SERVER *****")
    logging.warning(
        "Server %s version %s",
        sys.argv[0],
        get_version_string(synapse),
    )
    logging.info("Server hostname: %s", config.server_name)

    # It's critical to point twisted's internal logging somewhere, otherwise it
    # stacks up and leaks up to 64K objects;
    # see: https://twistedmatrix.com/trac/ticket/8164
    #
    # Routing to the python logging framework could be a performance problem if
    # the handlers blocked for a long time as python.logging is a blocking API
    # see https://twistedmatrix.com/documents/current/core/howto/logger.html
    # filed as https://github.com/matrix-org/synapse/issues/1727
    #
    # However this may not be too much of a problem if we are just writing to a
    # file.
    observer = STDLibLogObserver()
    globalLogBeginner.beginLoggingTo(
        [observer],
        redirectStandardIO=not config.no_redirect_stdio,
    )
def setup_logging(config, use_worker_options=False):
    """ Set up python logging

    Args:
        config (LoggingConfig | synapse.config.workers.WorkerConfig):
            configuration data

        use_worker_options (bool): True to use 'worker_log_config' and
            'worker_log_file' options instead of 'log_config' and 'log_file'.
    """
    log_config = (config.worker_log_config if use_worker_options
                  else config.log_config)
    log_file = (config.worker_log_file if use_worker_options
                else config.log_file)

    log_format = (
        "%(asctime)s - %(name)s - %(lineno)d - %(levelname)s - %(request)s"
        " - %(message)s")

    if log_config is None:
        # We don't have a logfile, so fall back to the 'verbosity' param from
        # the config or cmdline. (Note that we generate a log config for new
        # installs, so this will be an unusual case)
        level = logging.INFO
        level_for_storage = logging.INFO
        if config.verbosity:
            level = logging.DEBUG
            if config.verbosity > 1:
                level_for_storage = logging.DEBUG

        logger = logging.getLogger('')
        logger.setLevel(level)

        logging.getLogger('synapse.storage.SQL').setLevel(level_for_storage)

        formatter = logging.Formatter(log_format)
        if log_file:
            # TODO: Customisable file size / backup count
            handler = logging.handlers.RotatingFileHandler(
                log_file, maxBytes=(1000 * 1000 * 100), backupCount=3,
                encoding='utf8')

            def sighup(signum, stack):
                # roll the log file over on SIGHUP (e.g. from logrotate)
                logger.info("Closing log file due to SIGHUP")
                handler.doRollover()
                logger.info("Opened new log file due to SIGHUP")
        else:
            handler = logging.StreamHandler()

            def sighup(*args):
                pass

        handler.setFormatter(formatter)
        handler.addFilter(LoggingContextFilter(request=""))
        logger.addHandler(handler)
    else:
        def load_log_config():
            with open(log_config, 'r') as f:
                logging.config.dictConfig(yaml.safe_load(f))

        def sighup(*args):
            # it might be better to use a file watcher or something for this.
            load_log_config()
            logging.info("Reloaded log config from %s due to SIGHUP",
                         log_config)

        load_log_config()

    # reload the log config (or roll the log file over) on SIGHUP
    appbase.register_sighup(sighup)

    # make sure that the first thing we log is a thing we can grep backwards
    # for
    logging.warning("***** STARTING SERVER *****")
    logging.warning(
        "Server %s version %s",
        sys.argv[0],
        get_version_string(synapse),
    )
    logging.info("Server hostname: %s", config.server_name)

    # It's critical to point twisted's internal logging somewhere, otherwise it
    # stacks up and leaks up to 64K objects;
    # see: https://twistedmatrix.com/trac/ticket/8164
    #
    # Routing to the python logging framework could be a performance problem if
    # the handlers blocked for a long time as python.logging is a blocking API
    # see https://twistedmatrix.com/documents/current/core/howto/logger.html
    # filed as https://github.com/matrix-org/synapse/issues/1727
    #
    # However this may not be too much of a problem if we are just writing to a
    # file.
    observer = STDLibLogObserver()

    def _log(event):
        # drop a few known-noisy twisted messages before they reach stdlib
        # logging
        if "log_text" in event:
            if event["log_text"].startswith("DNSDatagramProtocol starting on "):
                return
            if event["log_text"].startswith("(UDP Port "):
                return
            if event["log_text"].startswith("Timing out client"):
                return
        return observer(event)

    globalLogBeginner.beginLoggingTo(
        [_log],
        redirectStandardIO=not config.no_redirect_stdio,
    )
    if not config.no_redirect_stdio:
        print("Redirected stdout/stderr to logs")
    # Tail of the success callback: inspect the identity-lookup response.
    res = json.loads(response)
    # NOTE(review): this condition looks inverted — when "result" is missing
    # from res, the `and` still evaluates res["result"] and raises KeyError
    # (and a response missing only "address" skips this branch entirely);
    # `or` was probably intended. Confirm against the caller.
    if "result" not in res and "address" not in res["result"]:
        logger.info("Identity Not Found!")
        return
    logger.info("Identity found {}: {}".format(res["result"]["state"],
                                               res["result"]["address"]))
    work.load_today()


def _identify_fail(failure):
    """ On failure, print error and return """
    logger.warning(failure.getErrorMessage())
    logger.warning("Failed to setup & obtain identity")
    return


if __name__ == "__main__":
    logging.basicConfig(level=logging.INFO)
    # Forward Twisted's log events (warn level and above) to stdlib logging.
    filtering = FilteringLogObserver(
        STDLibLogObserver(),
        [LogLevelFilterPredicate(defaultLogLevel=LogLevel.warn)])
    globalLogBeginner.beginLoggingTo([filtering])
    ctx = context.init_context(HOME, reactor)
    work = worker.Worker(ctx)
    d = work.setup()
    # success callback receives (response, work); errback gets the failure
    d.addCallbacks(_identify_pass, _identify_fail, callbackArgs=(work, ))
    reactor.run()
from twisted.internet.protocol import DatagramProtocol, ProcessProtocol
from twisted.internet import reactor, utils
from twisted.logger import Logger, STDLibLogObserver

import logging
import socket

logging.basicConfig(level=logging.DEBUG)
# Twisted Logger that emits straight into stdlib logging.
log = Logger(observer=STDLibLogObserver())

# IP of this host, resolved once at import time.
LOCAL_IP_ADDRESS = socket.gethostbyname(socket.gethostname())


class BroadcastRouteProtocol(DatagramProtocol):
    """Listens for UDP broadcasts announcing which client is reachable via
    the sending node, and records a route for each announcement."""

    def startProtocol(self):
        # Allow this socket to receive broadcast datagrams.
        self.transport.socket.setsockopt(socket.SOL_SOCKET,
                                         socket.SO_BROADCAST, True)

    def datagramReceived(self, datagram, addr):
        # Payload is the client name, newline-terminated.
        destination = datagram.decode().strip('\n')
        log.debug(f'Received {destination} from {addr}.')
        if addr[0] != LOCAL_IP_ADDRESS:
            log.debug('Appending route.')
            # Broadcast came from addr, so we know that client 'destination'
            # is connected to that node.
            append_route(destination, addr[0])
        else:
            # Ignore our own broadcasts.
            log.debug('Doing nothing because broadcast came from us.')


def append_route(destination: str, gateway: str):
# test configuration from config import get_config # # Set up nose testing # from nose.tools import with_setup, assert_raises from nose.twistedtools import deferred, stop_reactor, threaded_reactor, reactor logging.basicConfig() logging.root.setLevel(logging.DEBUG) # make Twisted log to Python std logging too globalLogPublisher.addObserver(STDLibLogObserver()) # # Common test setup and teardown # def setup(): "start up beanstalkd" global process config = get_config("ServerConn") logger = logging.getLogger("setup") cmd = os.path.join(config.BPATH, config.BEANSTALKD) host = config.BEANSTALKD_HOST port = config.BEANSTALKD_PORT try:
def main():
    """Parse CLI options, configure logging/network, and run the JSON-RPC
    and/or REST API servers under the Twisted reactor.

    Blocks in reactor.run() until shutdown, then disposes the databases.
    """
    parser = argparse.ArgumentParser()

    # Network options (mutually exclusive; exactly one must be chosen)
    group_network_container = parser.add_argument_group(
        title="Network options")
    group_network = group_network_container.add_mutually_exclusive_group(
        required=True)
    group_network.add_argument("--mainnet", action="store_true",
                               default=False, help="Use MainNet")
    group_network.add_argument("--testnet", action="store_true",
                               default=False, help="Use TestNet")
    group_network.add_argument("--privnet", action="store_true",
                               default=False, help="Use PrivNet")
    group_network.add_argument("--coznet", action="store_true",
                               default=False, help="Use CozNet")
    group_network.add_argument("--config", action="store",
                               help="Use a specific config file")

    # Ports for RPC and REST api
    group_modes = parser.add_argument_group(title="Mode(s)")
    group_modes.add_argument(
        "--port-rpc", type=int,
        help="port to use for the json-rpc api (eg. 10332)")
    group_modes.add_argument("--port-rest", type=int,
                             help="port to use for the rest api (eg. 80)")

    # Advanced logging setup
    group_logging = parser.add_argument_group(title="Logging options")
    group_logging.add_argument("--logfile", action="store", type=str,
                               help="Logfile")
    group_logging.add_argument(
        "--syslog", action="store_true",
        help=
        "Log to syslog instead of to log file ('user' is the default facility)"
    )
    group_logging.add_argument(
        "--syslog-local", action="store", type=int,
        # FIX: was range(0, 7), which rejected 7 even though the help text
        # and metavar promise 0-7 (local0..local7 are all valid facilities).
        choices=range(0, 8), metavar="[0-7]",
        help=
        "Log to a local syslog facility instead of 'user'. Value must be between 0 and 7 (e.g. 0 for 'local0')."
    )
    group_logging.add_argument("--disable-stderr", action="store_true",
                               help="Disable stderr logger")

    # Where to store stuff
    parser.add_argument("--datadir", action="store",
                        help="Absolute path to use for database directories")

    # peers
    parser.add_argument("--maxpeers", action="store", default=5,
                        help="Max peers to use for P2P Joining")

    # host
    parser.add_argument("--host", action="store", type=str,
                        help="Hostname ( for example 127.0.0.1)",
                        default="0.0.0.0")

    # Now parse
    args = parser.parse_args()

    if not args.port_rpc and not args.port_rest:
        print("Error: specify at least one of --port-rpc / --port-rest")
        parser.print_help()
        return

    if args.port_rpc == args.port_rest:
        print("Error: --port-rpc and --port-rest cannot be the same")
        parser.print_help()
        return

    # FIX: must test `is not None` — `--syslog-local 0` (local0) is a valid
    # choice but 0 is falsy, so the old truthiness test let it combine with
    # --logfile. Also reworded the previously garbled error message.
    if args.logfile and (args.syslog or args.syslog_local is not None):
        print("Error: Cannot use both logfile and syslog at once")
        parser.print_help()
        return

    # Setting the datadir must come before setting the network, else the
    # wrong path is checked at net setup.
    if args.datadir:
        settings.set_data_dir(args.datadir)

    # Network configuration depending on command line arguments. By default,
    # the testnet settings are already loaded.
    if args.config:
        settings.setup(args.config)
    elif args.mainnet:
        settings.setup_mainnet()
    elif args.testnet:
        settings.setup_testnet()
    elif args.privnet:
        settings.setup_privnet()
    elif args.coznet:
        settings.setup_coznet()

    if args.maxpeers:
        settings.set_max_peers(args.maxpeers)

    if args.syslog or args.syslog_local is not None:
        # Setup the syslog facility
        if args.syslog_local is not None:
            print("Logging to syslog local%s facility" % args.syslog_local)
            # LOG_LOCAL0..LOG_LOCAL7 are contiguous constants, so offsetting
            # from LOG_LOCAL0 selects the requested local facility.
            syslog_facility = SysLogHandler.LOG_LOCAL0 + args.syslog_local
        else:
            print("Logging to syslog user facility")
            syslog_facility = SysLogHandler.LOG_USER

        # Setup logzero to only use the syslog handler
        logzero.syslog(facility=syslog_facility)
    else:
        # Setup file logging
        if args.logfile:
            logfile = os.path.abspath(args.logfile)
            if args.disable_stderr:
                print("Logging to logfile: %s" % logfile)
            else:
                print("Logging to stderr and logfile: %s" % logfile)

            logzero.logfile(logfile, maxBytes=LOGFILE_MAX_BYTES,
                            backupCount=LOGFILE_BACKUP_COUNT,
                            disableStderrLogger=args.disable_stderr)
        else:
            print("Logging to stdout and stderr")

    # Disable logging smart contract events
    settings.set_log_smart_contract_events(False)

    # Write a PID file to easily quit the service
    write_pid_file()

    # Setup Twisted and Klein logging to use the logzero setup
    observer = STDLibLogObserver(name=logzero.LOGZERO_DEFAULT_LOGGER)
    globalLogPublisher.addObserver(observer)

    # Instantiate the blockchain and subscribe to notifications
    blockchain = LevelDBBlockchain(settings.chain_leveldb_path)
    Blockchain.RegisterBlockchain(blockchain)
    dbloop = task.LoopingCall(Blockchain.Default().PersistBlocks)
    dbloop.start(.1)

    # Setup twisted reactor, NodeLeader and start the NotificationDB
    reactor.suggestThreadPoolSize(15)
    NodeLeader.Instance().Start()
    NotificationDB.instance().start()

    # Start a thread with custom code; daemonizing the thread will kill it
    # when the main thread is quit. (Was d.setDaemon(True) — deprecated.)
    d = threading.Thread(target=custom_background_code)
    d.daemon = True
    d.start()

    if args.port_rpc:
        logger.info("Starting json-rpc api server on http://%s:%s" %
                    (args.host, args.port_rpc))
        api_server_rpc = JsonRpcApi(args.port_rpc)
        endpoint_rpc = "tcp:port={0}:interface={1}".format(
            args.port_rpc, args.host)
        endpoints.serverFromString(reactor, endpoint_rpc).listen(
            Site(api_server_rpc.app.resource()))

    if args.port_rest:
        logger.info("Starting REST api server on http://%s:%s" %
                    (args.host, args.port_rest))
        api_server_rest = RestApi()
        endpoint_rest = "tcp:port={0}:interface={1}".format(
            args.port_rest, args.host)
        endpoints.serverFromString(reactor, endpoint_rest).listen(
            Site(api_server_rest.app.resource()))

    reactor.run()

    # After the reactor is stopped, gracefully shutdown the database.
    logger.info("Closing databases...")
    NotificationDB.close()
    Blockchain.Default().Dispose()
    NodeLeader.Instance().Shutdown()
def main():
    """Parse CLI options, configure logging/network, optionally open a
    wallet, and run the JSON-RPC and/or REST API servers under Twisted.

    Blocks in reactor.run() until shutdown, then closes databases and the
    wallet (if one was opened).
    """
    parser = argparse.ArgumentParser()

    # Network options (mutually exclusive; exactly one must be chosen)
    group_network_container = parser.add_argument_group(title="Network options")
    group_network = group_network_container.add_mutually_exclusive_group(required=True)
    group_network.add_argument("--mainnet", action="store_true", default=False, help="Use MainNet")
    group_network.add_argument("--testnet", action="store_true", default=False, help="Use TestNet")
    group_network.add_argument("--privnet", action="store_true", default=False, help="Use PrivNet")
    group_network.add_argument("--coznet", action="store_true", default=False, help="Use CozNet")
    group_network.add_argument("--config", action="store", help="Use a specific config file")

    # Ports for RPC and REST api
    group_modes = parser.add_argument_group(title="Mode(s)")
    group_modes.add_argument("--port-rpc", type=int, help="port to use for the json-rpc api (eg. 10332)")
    group_modes.add_argument("--port-rest", type=int, help="port to use for the rest api (eg. 80)")

    # Advanced logging setup
    group_logging = parser.add_argument_group(title="Logging options")
    group_logging.add_argument("--logfile", action="store", type=str, help="Logfile")
    group_logging.add_argument("--syslog", action="store_true", help="Log to syslog instead of to log file ('user' is the default facility)")
    # FIX: was range(0, 7), which rejected 7 even though the help text and
    # metavar promise 0-7 (local0..local7 are all valid facilities).
    group_logging.add_argument("--syslog-local", action="store", type=int, choices=range(0, 8), metavar="[0-7]",
                               help="Log to a local syslog facility instead of 'user'. Value must be between 0 and 7 (e.g. 0 for 'local0').")
    group_logging.add_argument("--disable-stderr", action="store_true", help="Disable stderr logger")

    # Where to store stuff
    parser.add_argument("--datadir", action="store", help="Absolute path to use for database directories")

    # peers
    parser.add_argument("--maxpeers", action="store", default=5, help="Max peers to use for P2P Joining")

    # If a wallet should be opened
    parser.add_argument("--wallet", action="store", help="Open wallet. Will allow you to use methods that require an open wallet")

    # host
    parser.add_argument("--host", action="store", type=str, help="Hostname ( for example 127.0.0.1)", default="0.0.0.0")

    # Now parse
    args = parser.parse_args()

    if not args.port_rpc and not args.port_rest:
        print("Error: specify at least one of --port-rpc / --port-rest")
        parser.print_help()
        return

    if args.port_rpc == args.port_rest:
        print("Error: --port-rpc and --port-rest cannot be the same")
        parser.print_help()
        return

    # FIX: must test `is not None` — `--syslog-local 0` (local0) is a valid
    # choice but 0 is falsy, so the old truthiness test let it combine with
    # --logfile. Also reworded the previously garbled error message.
    if args.logfile and (args.syslog or args.syslog_local is not None):
        print("Error: Cannot use both logfile and syslog at once")
        parser.print_help()
        return

    # Setting the datadir must come before setting the network, else the
    # wrong path is checked at net setup.
    if args.datadir:
        settings.set_data_dir(args.datadir)

    # Network configuration depending on command line arguments. By default,
    # the testnet settings are already loaded.
    if args.config:
        settings.setup(args.config)
    elif args.mainnet:
        settings.setup_mainnet()
    elif args.testnet:
        settings.setup_testnet()
    elif args.privnet:
        settings.setup_privnet()
    elif args.coznet:
        settings.setup_coznet()

    if args.maxpeers:
        try:
            settings.set_max_peers(args.maxpeers)
            print("Maxpeers set to ", args.maxpeers)
        except ValueError:
            print("Please supply a positive integer for maxpeers")
            return

    if args.syslog or args.syslog_local is not None:
        # Setup the syslog facility
        if args.syslog_local is not None:
            print("Logging to syslog local%s facility" % args.syslog_local)
            # LOG_LOCAL0..LOG_LOCAL7 are contiguous, so offsetting from
            # LOG_LOCAL0 selects the requested local facility.
            syslog_facility = SysLogHandler.LOG_LOCAL0 + args.syslog_local
        else:
            print("Logging to syslog user facility")
            syslog_facility = SysLogHandler.LOG_USER

        # Setup logzero to only use the syslog handler
        logzero.syslog(facility=syslog_facility)
    else:
        # Setup file logging
        if args.logfile:
            logfile = os.path.abspath(args.logfile)
            if args.disable_stderr:
                print("Logging to logfile: %s" % logfile)
            else:
                print("Logging to stderr and logfile: %s" % logfile)

            logzero.logfile(logfile, maxBytes=LOGFILE_MAX_BYTES,
                            backupCount=LOGFILE_BACKUP_COUNT,
                            disableStderrLogger=args.disable_stderr)
        else:
            print("Logging to stdout and stderr")

    if args.wallet:
        if not os.path.exists(args.wallet):
            print("Wallet file not found")
            return

        # Allow non-interactive startup via environment variable; fall back
        # to an interactive password prompt.
        passwd = os.environ.get('NEO_PYTHON_JSONRPC_WALLET_PASSWORD', None)
        if not passwd:
            passwd = prompt("[password]> ", is_password=True)

        password_key = to_aes_key(passwd)
        try:
            wallet = UserWallet.Open(args.wallet, password_key)
        except Exception as e:
            print(f"Could not open wallet {e}")
            return
    else:
        wallet = None

    # Disable logging smart contract events
    settings.set_log_smart_contract_events(False)

    # Write a PID file to easily quit the service
    write_pid_file()

    # Setup Twisted and Klein logging to use the logzero setup
    observer = STDLibLogObserver(name=logzero.LOGZERO_DEFAULT_LOGGER)
    globalLogPublisher.addObserver(observer)

    def loopingCallErrorHandler(error):
        # Errback for the wallet-processing loop below.
        logger.info("Error in loop: %s " % error)

    # Instantiate the blockchain and subscribe to notifications
    blockchain = LevelDBBlockchain(settings.chain_leveldb_path)
    Blockchain.RegisterBlockchain(blockchain)
    start_block_persisting()

    # If a wallet is open, make sure it processes blocks
    if wallet:
        walletdb_loop = task.LoopingCall(wallet.ProcessBlocks)
        wallet_loop_deferred = walletdb_loop.start(1)
        wallet_loop_deferred.addErrback(loopingCallErrorHandler)

    # Setup twisted reactor, NodeLeader and start the NotificationDB
    reactor.suggestThreadPoolSize(15)
    NodeLeader.Instance().Start()
    NotificationDB.instance().start()

    # Start a thread with custom code; daemonizing the thread will kill it
    # when the main thread is quit. (Was d.setDaemon(True) — deprecated.)
    d = threading.Thread(target=custom_background_code)
    d.daemon = True
    d.start()

    if args.port_rpc:
        logger.info("Starting json-rpc api server on http://%s:%s" % (args.host, args.port_rpc))
        try:
            rpc_class = load_class_from_path(settings.RPC_SERVER)
        except ValueError as err:
            logger.error(err)
            sys.exit()
        api_server_rpc = rpc_class(args.port_rpc, wallet=wallet)

        endpoint_rpc = "tcp:port={0}:interface={1}".format(args.port_rpc, args.host)
        endpoints.serverFromString(reactor, endpoint_rpc).listen(Site(api_server_rpc.app.resource()))

    if args.port_rest:
        logger.info("Starting REST api server on http://%s:%s" % (args.host, args.port_rest))
        try:
            rest_api = load_class_from_path(settings.REST_SERVER)
        except ValueError as err:
            logger.error(err)
            sys.exit()
        api_server_rest = rest_api()
        endpoint_rest = "tcp:port={0}:interface={1}".format(args.port_rest, args.host)
        endpoints.serverFromString(reactor, endpoint_rest).listen(Site(api_server_rest.app.resource()))

    # Make sure block persisting stops cleanly before the reactor exits.
    reactor.addSystemEventTrigger('before', 'shutdown', stop_block_persisting)

    reactor.run()

    # After the reactor is stopped, gracefully shutdown the database.
    logger.info("Closing databases...")
    NotificationDB.close()
    Blockchain.Default().Dispose()
    NodeLeader.Instance().Shutdown()
    if wallet:
        wallet.Close()
def configure_logging():
    """Initialise stdlib logging at INFO and route Twisted's log events
    through it under the 'shinysdr' logger name."""
    logging.basicConfig(level=logging.INFO)
    observer = STDLibLogObserver(name='shinysdr')
    globalLogBeginner.beginLoggingTo([observer])
def setup_logging(config, use_worker_options=False):
    """
    Set up python logging

    Args:
        config (LoggingConfig | synapse.config.workers.WorkerConfig):
            configuration data

        use_worker_options (bool): True to use the 'worker_log_config' option
            instead of 'log_config'.
    """
    # (Docstring fix: the previous version documented a `register_sighup`
    # argument that is not part of this signature — the sighup handler is
    # registered via appbase.register_sighup below.)
    log_config = config.worker_log_config if use_worker_options else config.log_config

    if log_config is None:
        # No YAML log config supplied: build a default stderr handler.
        log_format = (
            "%(asctime)s - %(name)s - %(lineno)d - %(levelname)s - %(request)s"
            " - %(message)s"
        )

        logger = logging.getLogger("")
        logger.setLevel(logging.INFO)
        logging.getLogger("synapse.storage.SQL").setLevel(logging.INFO)

        formatter = logging.Formatter(log_format)

        handler = logging.StreamHandler()
        handler.setFormatter(formatter)
        # Injects the per-request context into every record's %(request)s.
        handler.addFilter(LoggingContextFilter(request=""))
        logger.addHandler(handler)
    else:

        def load_log_config():
            with open(log_config, "r") as f:
                logging.config.dictConfig(yaml.safe_load(f))

        def sighup(*args):
            # it might be better to use a file watcher or something for this.
            load_log_config()
            logging.info("Reloaded log config from %s due to SIGHUP", log_config)

        load_log_config()
        appbase.register_sighup(sighup)

    # make sure that the first thing we log is a thing we can grep backwards
    # for. (FIX: logging.warn is a deprecated alias of logging.warning.)
    logging.warning("***** STARTING SERVER *****")
    logging.warning("Server %s version %s", sys.argv[0], get_version_string(synapse))
    logging.info("Server hostname: %s", config.server_name)

    # It's critical to point twisted's internal logging somewhere, otherwise it
    # stacks up and leaks kup to 64K object;
    # see: https://twistedmatrix.com/trac/ticket/8164
    #
    # Routing to the python logging framework could be a performance problem if
    # the handlers blocked for a long time as python.logging is a blocking API
    # see https://twistedmatrix.com/documents/current/core/howto/logger.html
    # filed as https://github.com/matrix-org/synapse/issues/1727
    #
    # However this may not be too much of a problem if we are just writing to a file.
    observer = STDLibLogObserver()

    def _log(event):
        # Drop a few known-noisy Twisted messages before forwarding the
        # event into stdlib logging.
        if "log_text" in event:
            if event["log_text"].startswith("DNSDatagramProtocol starting on "):
                return

            if event["log_text"].startswith("(UDP Port "):
                return

            if event["log_text"].startswith("Timing out client"):
                return

        return observer(event)

    globalLogBeginner.beginLoggingTo(
        [_log], redirectStandardIO=not config.no_redirect_stdio
    )
    if not config.no_redirect_stdio:
        print("Redirected stdout/stderr to logs")