def init_logging(self):
    """Configure root logging to both stdout and a rotating log file.

    Sets the root logger to DEBUG, attaches a stdout StreamHandler and a
    RotatingFileHandler (10 MB per file, 10 backups), both formatted with
    ``self._log_format``, then bridges Twisted's log system into stdlib
    logging via PythonLoggingObserver.
    """
    root = logging.getLogger()
    root.setLevel(logging.DEBUG)
    formatter = logging.Formatter(self._log_format)
    #: Log to stdout
    stream = logging.StreamHandler(sys.stdout)
    stream.setLevel(logging.DEBUG)
    stream.setFormatter(formatter)
    #: Log to rotating handler
    disk = RotatingFileHandler(
        self._log_filename,
        maxBytes=1024 * 1024 * 10,  # 10 MB
        backupCount=10,
    )
    disk.setLevel(logging.DEBUG)
    disk.setFormatter(formatter)
    root.addHandler(disk)
    root.addHandler(stream)
    #: Start twisted logger (forward Twisted events into stdlib logging)
    from twisted.python.log import PythonLoggingObserver
    observer = PythonLoggingObserver()
    observer.start()
def connect(server, password=None):
    """ Connect to a VNCServer and return a Client instance that is usable
    in the main thread of non-Twisted Python Applications, EXPERIMENTAL.

    >>> from vncdotool import threaded
    >>> client = threaded.connect('host')
    >>> client.keyPress('c')
    >>> client.join()

    You may then call any regular VNCDoToolClient method on client from your
    application code.

    If you are using a GUI toolkit or other major async library please read
    http://twistedmatrix.com/documents/13.0.0/core/howto/choosing-reactor.html
    for a better method of integrating vncdotool.

    :param server: host string understood by ``command.parse_host``
        (e.g. "host" or "host:display").
    :param password: optional VNC password, stored on the factory.
    :returns: a connected, started ``ThreadedVNCClientProxy``.
    """
    # Bridge Twisted log events into the stdlib logging module.
    observer = PythonLoggingObserver()
    observer.start()

    factory = VNCDoToolFactory()
    if password is not None:
        factory.password = password

    client = ThreadedVNCClientProxy(factory)
    host, port = command.parse_host(server)
    client.connect(host, port)
    client.start()

    return client
def setup_logs(application=None): """ Configure logging for the bot. :param application: an application object, if using twistd :type application: service.Application """ # todo arbitrary logging file obs = PythonLoggingObserver() if not application: obs.start() else: application.setComponent(ILogObserver, obs.emit) root_logger = logging.getLogger() root_logger.setLevel(logging.DEBUG) logging.getLogger('twistedpusher.client').setLevel(logging.INFO) file_lf = logging.Formatter("%(asctime)s %(levelname)-8s [%(name)s] %(message)s") file_lf.converter = time.gmtime file_h = logging.FileHandler('bot.log') file_h.setFormatter(file_lf) root_logger.addHandler(file_h) console_lf = logging.Formatter("%(levelname)-8s [%(name)s] %(message)s") console_h = logging.StreamHandler() console_h.setFormatter(console_lf) root_logger.addHandler(console_h)
def main(application=None):
    """
    SMTP daemon for receiving DSN (Delivery Status Notification)

    Opens the SMTP port first (so a privileged port can be bound), then
    optionally drops privileges, configures logging, and connects to the
    master database.

    :param application: optional Application instance (if used inside twistd)
    :type application: twisted.application.service.Application
    """
    parser = argparse.ArgumentParser(
        description='Start the SMTPD process for CloudMailing.')
    parser.add_argument('-p', '--port', type=int, default=25,
                        help='port number for SMTP (default: 25)')
    parser.add_argument('-u', '--uid', help='Change the UID of this process')
    parser.add_argument('-g', '--gid', help='Change the GID of this process')

    args = parser.parse_args()

    # Need to open TCP port early, before to switch user and configure log
    portal = Portal(SimpleRealm())
    portal.registerChecker(AllowAnonymousAccess())
    factory = ReturnPathSMTPFactory(portal)
    if application:
        # Running under twistd: attach as a child service.
        smtpd = internet.TCPServer(args.port, factory)
        smtpd.setServiceParent(application)
    else:
        smtpd = reactor.listenTCP(args.port, factory)

    if args.uid or args.gid:
        # Resolve symbolic user/group names to numeric ids before dropping
        # privileges; None leaves the corresponding id unchanged.
        uid = args.uid and pwd.getpwnam(args.uid).pw_uid or None
        gid = args.gid and grp.getgrnam(args.gid).gr_gid or None
        # for fname in os.listdir(settings.LOG_PATH):
        #     fullname = os.path.join(settings.LOG_PATH, fname)
        #     print fullname
        #     if args.uid:
        #         os.chown(fullname, args.uid, args.gid)
        switchUID(uid, gid)

    configure_logging("smtpd", settings.CONFIG_PATH, settings.LOG_PATH,
                      settings.DEFAULT_LOG_FORMAT, False)

    ##Twisted logs
    observer = PythonLoggingObserver()
    observer.start()

    log = logging.getLogger("smtpd")

    log.info("****************************************************************")
    log.info("Starting CloudMailing SMTPD version %s" % VERSION)
    log.info("Serial: %s" % settings.SERIAL)
    log.info("Twisted version %s", twisted.version.short())
    log.info("****************************************************************")

    Db.getInstance(settings.MASTER_DATABASE)

    log.info("CM SMTPD started on port %d", args.port)
def initializeLogging(configFile):
    """Apply the logging configuration in *configFile* and route Twisted
    log events into the stdlib :mod:`logging` module."""
    import logging.config
    from twisted.python.log import PythonLoggingObserver

    logging.config.fileConfig(configFile)
    PythonLoggingObserver().start()
def postOptions(self):
    """Finish command-line processing: load (or bootstrap) the application
    configuration, set up logging with the Twisted bridge, then show help
    if no sub-command was given."""
    from baca.application import app
    if self.opts['config'] == "~/.ilog":
        # Expand/normalise the default config path.
        self.opt_config(self.opts['config'])
    if not isfile(join(app.config.dir, app.config.file)):
        # First run: write a default configuration to disk.
        app.config_initial_populate()
        app.config_save()
    app.config_load()
    # Setup logging (only once; the custom Logging class marks it done)
    from baca.utils.logger import Logging
    if logging.getLoggerClass() is not Logging:
        logging.config.fileConfig(
            usefull_path(str(app.config.logging_config_file))
        )
        logging.setLoggerClass(Logging)
        # Forward Twisted log events into the "twisted" stdlib logger.
        twisted_logging = PythonLoggingObserver('twisted')
        twisted_logging.start()
    # self._setup_database()
    app.setup_log()
    if not self.subCommand:
        self.opt_help()
def setup_logging(self):
    """Configure logging for this process.

    With no log config file: sets the root logger level (and a separate
    level for ``synapse.storage``) from the verbosity flags, logging to
    ``self.log_file`` if set, else to stderr, and tags every record with
    the current request context.  With a config file: applies it via
    ``fileConfig``.  Finally bridges Twisted logging into stdlib logging.
    """
    log_format = (
        "%(asctime)s - %(name)s - %(lineno)d - %(levelname)s - %(request)s"
        " - %(message)s")
    if self.log_config is None:
        level = logging.INFO
        level_for_storage = logging.INFO
        if self.verbosity:
            level = logging.DEBUG
            if self.verbosity > 1:
                level_for_storage = logging.DEBUG

        # FIXME: we need a logging.WARN for a -q quiet option
        logger = logging.getLogger('')
        logger.setLevel(level)
        logging.getLogger('synapse.storage').setLevel(level_for_storage)

        formatter = logging.Formatter(log_format)
        if self.log_file:
            handler = logging.FileHandler(self.log_file)
        else:
            handler = logging.StreamHandler()
        handler.setFormatter(formatter)
        handler.addFilter(LoggingContextFilter(request=""))
        logger.addHandler(handler)
        # BUG FIX: removed leftover debug statement ``logger.info("Test")``
        # which emitted a spurious "Test" line on every startup.
    else:
        logging.config.fileConfig(self.log_config)

    # Forward Twisted log events into stdlib logging.
    observer = PythonLoggingObserver()
    observer.start()
def emit(self, event_dict):
    """Forward a Twisted log event into stdlib logging.

    Events carrying ``log_failure`` are logged at the event's own level
    with the namespace and the failure; everything else is delegated to
    ``PythonLoggingObserver.emit``.
    """
    log = logging.getLogger(__name__)
    if 'log_failure' in event_dict:
        fmt = '%(log_namespace)s \n%(log_failure)s'
        # Look up the level-named method (e.g. ``error``) on the logger
        # class and call it unbound with our module logger instance.
        getattr(LoggingLoggerClass, event_dict['log_level'].name)(log, fmt % (event_dict))
    else:
        PythonLoggingObserver.emit(self, event_dict)
def main(application=None):
    """
    Startup sequence for CM Satellite

    Configures logging (with the Twisted bridge), connects to the
    satellite database, and starts the satellite service pointed at the
    configured master.

    :param application: optional Application instance (if used inside twistd)
    :type application: twisted.application.service.Application
    """
    configure_logging("satellite", settings.CONFIG_PATH, settings.LOG_PATH,
                      settings.DEFAULT_LOG_FORMAT, False)

    ##Twisted logs
    observer = PythonLoggingObserver()
    observer.start()

    log = logging.getLogger("cm")

    log.info("****************************************************************")
    log.info("Starting CloudMailing SATELLITE version %s" % VERSION )
    log.info("Serial: %s" % settings.SERIAL)
    log.info("Twisted version %s", twisted.version.short())
    log.info("****************************************************************")

    # Client-side SSL context for the connection to the master.
    ssl_context_factory = ssl.ClientContextFactory()

    db_conn = connect(settings.SATELLITE_DATABASE)
    Db.getInstance(settings.SATELLITE_DATABASE)
    init_db(db_conn[settings.SATELLITE_DATABASE])

    # attach the service to its parent application
    start_satellite_service(application=application,
                            master_ip=settings.MASTER_IP,
                            master_port=settings.MASTER_PORT,
                            ssl_context_factory=ssl_context_factory)
def twisted_logging(logger, level=None):
    """Bridge Twisted log messages into *logger* under a ``.twisted`` child.

    :param logger: parent :class:`logging.Logger`.
    :param level: optional level applied to the ``twisted`` child logger
        before the observer starts.
    """
    from twisted.python.log import PythonLoggingObserver
    try:
        if level is not None:
            logger.getChild('twisted').setLevel(level)
        observer = PythonLoggingObserver(logger.name + '.twisted')
        observer.start()
    except Exception:
        # BUG FIX: was a bare ``except:`` which would also swallow
        # SystemExit/KeyboardInterrupt.  Setup stays best-effort: log and
        # continue rather than take the process down.
        logger.critical("Could not add twisted observer!", exc_info=True)
def makeService(config):
    """Build the twistd service: configure logging (from a file config when
    given, else ``basicConfig`` at the requested level), bridge Twisted
    logs into stdlib logging, load the runtime config and hand it to the
    runner.

    :param config: twistd options object with 'logging', 'config' keys and
        a ``loglevel`` attribute.
    :returns: the service created by ``runner.getService``.
    """
    if config['logging']:
        logging.config.fileConfig(config['logging'])
    else:
        logging.basicConfig(level=config.loglevel)
    # Forward Twisted log events into stdlib logging.
    obs = PythonLoggingObserver()
    obs.start()
    # Note: rebinds ``config`` from the options object to the loaded
    # application configuration.
    config = loader.load_config(config['config'])
    loader.setup_environment(config)
    return runner.getService(config, reactor)
def main(application=None):
    """
    Startup sequence for CM Master

    :param application: optional Application instance (if used inside twistd)
    :type application: twisted.application.service.Application
    """
    parser = argparse.ArgumentParser(description='Start the Master process for CloudMailing.')
    parser.add_argument('-p', '--port', type=int, default=33620,
                        help='port number for Master MailingManager (default: 33620)')
    parser.add_argument('--api-interface', default='',
                        help='network interface (IP address) on which API should listen (default: <empty> = all)')
    parser.add_argument('--api-port', type=int, default=33610,
                        help='port number for API (default: 33610)')
    parser.add_argument('--api-dont-use-ssl', action='store_true', default=False,
                        help='ask API to not use secure port (SSL)')

    args = parser.parse_args()

    configure_logging("master", settings.CONFIG_PATH, settings.LOG_PATH,
                      settings.DEFAULT_LOG_FORMAT, False)

    ##Twisted logs
    observer = PythonLoggingObserver()
    observer.start()

    log = logging.getLogger("cm")

    log.info("****************************************************************")
    log.info("Starting CloudMailing version %s" % VERSION )
    log.info("Serial: %s" % settings.SERIAL)
    log.info("Twisted version %s", twisted.version.short())
    log.info("****************************************************************")

    ssl_context_factory = make_SSL_context()

    # Retry until the database is reachable; on the MailFountain appliance
    # a stale lock file triggers an automatic mongod repair and restart.
    db_conn = None
    while not db_conn:
        try:
            db_conn = connect(settings.MASTER_DATABASE)
            init_master_db(db_conn[settings.MASTER_DATABASE])
            log.info("Connected to database '%s'", settings.MASTER_DATABASE)
        except (pymongo.errors.ConnectionFailure, pymongo.errors.ServerSelectionTimeoutError):
            log.error("Failed to connect to database server!")
            # special case for MailFountain hardward only
            if os.path.exists("/data/mongodb/mongod.lock"):
                os.remove("/data/mongodb/mongod.lock")
                os.system('su -m mongodb -c "mongod --config /usr/local/etc/mongodb.conf --dbpath /data/mongodb/ --repair"')
                os.system("service mongod start")
            else:
                log.info(" Trying again in 5 seconds...")
                time.sleep(5)

    Db.getInstance(settings.MASTER_DATABASE, pool_size=10, watchdog_timeout=60)

    # attach the service to its parent application
    apiService = get_api_service(application, port=args.api_port,
                                 interface=args.api_interface,
                                 ssl_context_factory=not args.api_dont_use_ssl and ssl_context_factory or None)
    start_master_service(application, master_port=args.port,
                         ssl_context_factory=ssl_context_factory)
def setup_logging(self):
    """Configure logging for this server.

    With no log config file: sets the root logger (and a separate level
    for ``synapse.storage``) from verbosity, logging either to a rotating
    file (100 MB, 3 backups) or to stderr, and installs a SIGHUP handler
    that rolls the log file over.  With a config file: applies it via
    ``dictConfig``.  Finally bridges Twisted logging into stdlib logging.
    """
    log_format = (
        "%(asctime)s - %(name)s - %(lineno)d - %(levelname)s - %(request)s"
        " - %(message)s"
    )
    if self.log_config is None:
        level = logging.INFO
        level_for_storage = logging.INFO
        if self.verbosity:
            level = logging.DEBUG
            if self.verbosity > 1:
                level_for_storage = logging.DEBUG

        # FIXME: we need a logging.WARN for a -q quiet option
        logger = logging.getLogger('')
        logger.setLevel(level)
        logging.getLogger('synapse.storage').setLevel(level_for_storage)

        formatter = logging.Formatter(log_format)
        if self.log_file:
            # TODO: Customisable file size / backup count
            handler = logging.handlers.RotatingFileHandler(
                self.log_file, maxBytes=(1000 * 1000 * 100), backupCount=3
            )

            def sighup(signum, stack):
                logger.info("Closing log file due to SIGHUP")
                handler.doRollover()
                logger.info("Opened new log file due to SIGHUP")

            # TODO(paul): obviously this is a terrible mechanism for
            # stealing SIGHUP, because it means no other part of synapse
            # can use it instead. If we want to catch SIGHUP anywhere
            # else as well, I'd suggest we find a nicer way to broadcast
            # it around.
            # BUG FIX: getattr without a default raised AttributeError on
            # platforms without SIGHUP (e.g. Windows) instead of skipping
            # the registration as intended.
            if getattr(signal, "SIGHUP", None):
                signal.signal(signal.SIGHUP, sighup)
        else:
            handler = logging.StreamHandler()
        handler.setFormatter(formatter)

        handler.addFilter(LoggingContextFilter(request=""))

        logger.addHandler(handler)
    else:
        with open(self.log_config, 'r') as f:
            # NOTE(review): yaml.load without an explicit Loader is unsafe
            # for untrusted input; consider yaml.safe_load if the config
            # file needs no custom tags.
            logging.config.dictConfig(yaml.load(f))

    # Forward Twisted log events into stdlib logging.
    observer = PythonLoggingObserver()
    observer.start()
def emit(self, event_dict):
    """Forward a Twisted log event into stdlib logging.

    Failure events (``log_failure``) are logged at the event's level with
    the namespace and failure text; other events are delegated to
    ``PythonLoggingObserver.emit``, ignoring a known TypeError raised on
    Python 3.8 with Twisted <= 19.
    """
    log = logging.getLogger(__name__)
    if 'log_failure' in event_dict:
        fmt = '%(log_namespace)s \n%(log_failure)s'
        # Call the level-named Logger method (e.g. ``error``) unbound with
        # our module logger instance.
        getattr(LoggingLoggerClass, event_dict['log_level'].name)(log, fmt % (event_dict))
        return
    try:
        PythonLoggingObserver.emit(self, event_dict)
    except TypeError:
        # Ignore logging args problem with Python 3.8 and Twisted <= 19
        pass
def setupLogger(level="error", filename=None, filemode="w"):
    """
    Sets up the basic logger and if `:param:filename` is set, then it will
    log to that file instead of stdout.

    :param level: str, the level to log
    :param filename: str, the file to log to
    :param filemode: 'a' appends via a rotating handler; 'w' uses a
        watched (or plain) file handler
    """
    import logging
    if logging.getLoggerClass() is not Logging:
        # Install the project's logger class and custom level names once.
        logging.setLoggerClass(Logging)
        logging.addLevelName(5, 'TRACE')
        logging.addLevelName(1, 'GARBAGE')
    level = levels.get(level, logging.ERROR)
    rootLogger = logging.getLogger()
    if filename and filemode == 'a':
        import logging.handlers
        handler = logging.handlers.RotatingFileHandler(
            filename, filemode,
            maxBytes=50 * 1024 * 1024,  # 50 Mb
            backupCount=3,
            encoding='utf-8',
            delay=0)
    elif filename and filemode == 'w':
        import logging.handlers
        # Prefer WatchedFileHandler (copes with external log rotation)
        # when available, falling back to a plain FileHandler.
        handler = getattr(logging.handlers, 'WatchedFileHandler',
                          logging.FileHandler)(filename, filemode, 'utf-8',
                                               delay=0)
    else:
        handler = logging.StreamHandler()
    handler.setLevel(level)
    formatter = logging.Formatter(DEFAULT_LOGGING_FORMAT %
                                  MAX_LOGGER_NAME_LENGTH,
                                  datefmt="%H:%M:%S")
    handler.setFormatter(formatter)
    rootLogger.addHandler(handler)
    rootLogger.setLevel(level)
    # Route Twisted log events into the "twisted" stdlib logger.
    twisted_logging = PythonLoggingObserver('twisted')
    twisted_logging.start()
    logging.getLogger("twisted").setLevel(level)
def setupLogger(level="error", filename=None, filemode="w"):
    """
    Sets up the basic logger and if `:param:filename` is set, then it will
    log to that file instead of stdout.

    :param level: str, the level to log
    :param filename: str, the file to log to
    :param filemode: 'a' appends via a rotating handler; 'w' uses a
        watched (or plain) file handler
    """
    import logging

    if logging.getLoggerClass() is not Logging:
        # One-time install of the project logger class and level names.
        logging.setLoggerClass(Logging)
        logging.addLevelName(5, 'TRACE')
        logging.addLevelName(1, 'GARBAGE')

    log_level = levels.get(level, logging.ERROR)

    if filename and filemode == 'a':
        import logging.handlers
        log_handler = logging.handlers.RotatingFileHandler(
            filename, filemode,
            maxBytes=50 * 1024 * 1024,  # 50 Mb
            backupCount=3,
            encoding='utf-8',
            delay=0,
        )
    elif filename and filemode == 'w':
        import logging.handlers
        # WatchedFileHandler when available, else a plain FileHandler.
        handler_cls = getattr(logging.handlers, 'WatchedFileHandler',
                              logging.FileHandler)
        log_handler = handler_cls(filename, filemode, 'utf-8', delay=0)
    else:
        log_handler = logging.StreamHandler()

    log_handler.setLevel(log_level)
    log_handler.setFormatter(logging.Formatter(
        DEFAULT_LOGGING_FORMAT % MAX_LOGGER_NAME_LENGTH,
        datefmt="%H:%M:%S",
    ))

    root = logging.getLogger()
    root.addHandler(log_handler)
    root.setLevel(log_level)

    # Route Twisted log events into the "twisted" stdlib logger.
    PythonLoggingObserver('twisted').start()
    logging.getLogger("twisted").setLevel(log_level)
def setup_logfile(self, spider):
    """Attach a per-spider INFO log file under ``DIR_SCRAPY_LOG`` and
    bridge Twisted log events into the 'twisted' stdlib logger.

    :param spider: spider object; its ``name`` becomes the log file name.
    """
    observer = PythonLoggingObserver('twisted')
    observer.start()
    if not os.path.exists(DIR_SCRAPY_LOG):
        os.makedirs(DIR_SCRAPY_LOG)
    # mode='w' truncates any previous run's log for this spider.
    handler = logging.FileHandler('%s/%s.log' % (DIR_SCRAPY_LOG, spider.name), mode='w')
    handler.setLevel('INFO')
    # logger = logging.getLogger(spider.name)
    # logger.addHandler(handler)
    # logger.setLevel(logging.INFO)
    logging.root.addHandler(handler)
    # Quiet the noisy requests library.
    logging.getLogger("requests").setLevel(logging.WARNING)
def initialize_logger(self, to_stdout=False):
    """Set the root logger to DEBUG, logging either to stdout or into an
    in-memory deque.

    :param to_stdout: when True, log to stdout and also start Twisted's
        own stdout logging; otherwise buffer records in ``self.log_deque``
        via DequeHandler.
    """
    if to_stdout:
        handler = logging.StreamHandler(stream=sys.stdout)
        # Send Twisted's native log output to stdout as well.
        startLogging(sys.stdout)
    else:
        handler = DequeHandler(self.log_deque)
    # Bridge Twisted log events into stdlib logging.
    observer = PythonLoggingObserver()
    observer.start()
    fmt = "%(asctime)s %(levelname)s %(funcName)s %(message)s"
    handler.setFormatter(logging.Formatter(fmt))
    logger = logging.getLogger()
    logger.addHandler(handler)
    logger.setLevel(logging.DEBUG)
    logging.debug("Hello World!")
def setup_logging(log_config=None, log_file=None, verbosity=None):
    """Configure logging.

    With no config file: sets the root logger (and a separate level for
    ``synapse.storage``) from *verbosity*, logging either to a rotating
    *log_file* (100 MB, 3 backups) or to stderr, and installs a SIGHUP
    handler that rolls the file over.  With *log_config*: applies it via
    ``dictConfig``.  Finally bridges Twisted logging into stdlib logging.

    :param log_config: optional path to a YAML dictConfig file.
    :param log_file: optional path of the rotating log file.
    :param verbosity: 0/None = INFO, 1 = DEBUG, >1 = DEBUG incl. storage.
    """
    log_format = (
        "%(asctime)s - %(name)s - %(lineno)d - %(levelname)s - %(request)s"
        " - %(message)s")
    if log_config is None:
        level = logging.INFO
        level_for_storage = logging.INFO
        if verbosity:
            level = logging.DEBUG
            if verbosity > 1:
                level_for_storage = logging.DEBUG

        # FIXME: we need a logging.WARN for a -q quiet option
        logger = logging.getLogger('')
        logger.setLevel(level)
        logging.getLogger('synapse.storage').setLevel(level_for_storage)

        formatter = logging.Formatter(log_format)
        if log_file:
            # TODO: Customisable file size / backup count
            handler = logging.handlers.RotatingFileHandler(
                log_file, maxBytes=(1000 * 1000 * 100), backupCount=3)

            def sighup(signum, stack):
                logger.info("Closing log file due to SIGHUP")
                handler.doRollover()
                logger.info("Opened new log file due to SIGHUP")

            # TODO(paul): obviously this is a terrible mechanism for
            # stealing SIGHUP, because it means no other part of synapse
            # can use it instead. If we want to catch SIGHUP anywhere
            # else as well, I'd suggest we find a nicer way to broadcast
            # it around.
            # BUG FIX: getattr without a default raised AttributeError on
            # platforms without SIGHUP (e.g. Windows) instead of skipping
            # the registration as intended.
            if getattr(signal, "SIGHUP", None):
                signal.signal(signal.SIGHUP, sighup)
        else:
            handler = logging.StreamHandler()
        handler.setFormatter(formatter)

        handler.addFilter(LoggingContextFilter(request=""))

        logger.addHandler(handler)
    else:
        with open(log_config, 'r') as f:
            # NOTE(review): yaml.load without an explicit Loader is unsafe
            # for untrusted input; consider yaml.safe_load if the config
            # file needs no custom tags.
            logging.config.dictConfig(yaml.load(f))

    # Forward Twisted log events into stdlib logging.
    observer = PythonLoggingObserver()
    observer.start()
def run(self):
    """Serve the local key resource over SSL and run the reactor."""
    root = Resource()
    # NOTE(review): on Python 3 twisted.web expects a bytes path segment
    # (b"key") — confirm which Python/Twisted version this targets.
    root.putChild("key", LocalKey(self))
    site = server.Site(root)
    reactor.listenSSL(self.bind_port, site, self.ssl_context_factory,
                      interface=self.bind_host)
    logging.basicConfig(level=logging.DEBUG)
    # Bridge Twisted log events into stdlib logging.
    observer = PythonLoggingObserver()
    observer.start()
    reactor.run()
def run_main_view_wx(config): """ Runs main UI view based on wx framework. """ # imports import wx import socket # might be some cross platform (windows) issues reported with wxReactor from twisted.internet import wxreactor # add twisted / wx interaction support wxreactor.install() # add logging observer from twisted.python.log import PythonLoggingObserver observer = PythonLoggingObserver() observer.start() # then can do normal reactor imports from twisted.internet import reactor # and wx specific implementations from ui.view_model_wx import MainViewController from ui.view_model_wx import MainViewModel from ui.main_view_wx import MainWindow # ip address *much* faster than by device name ipaddr = socket.gethostbyname(config.server_name) logging.debug("RPC:\tServer name %s resolved to IP address %s" % (config.server_name, ipaddr)) # create rpc client from web.webclient import RPCClient, RPCClientFactory rpc_client = RPCClient() # create view model view_model = MainViewModel() # create view controller controller = MainViewController(rpc_client, view_model, config) # create wxApp and main window wxApp = wx.App(False) frame = MainWindow(None, "fishpi - Proof Of Concept Vehicle control", controller, ipaddr, config.rpc_port, config.camera_port) frame.Show() # run reactor rather than usual 'wxApp.MainLoop()' reactor.registerWxApp(wxApp) logging.debug("RPC:\tconnecting to %s (%s) on port %s" % (config.server_name, ipaddr, config.rpc_port)) reactor.connectTCP(ipaddr, config.rpc_port, RPCClientFactory(controller)) #reactor.callLater(5, update_callback) reactor.run()
def main():
    """Demo entry point: connect to a local SSH server, run two commands
    concurrently, print their captured output, then stop the reactor."""
    from twisted.internet import reactor
    logging.basicConfig(level=logging.DEBUG)
    from twisted.python.log import PythonLoggingObserver
    obs = PythonLoggingObserver()
    # BUG FIX: the observer was instantiated but never started, so Twisted
    # log events were never forwarded to stdlib logging.
    obs.start()
    server = SSHServer(reactor, "localhost", 22)
    d = server.connect()

    def runCommands(server):
        # Run both commands concurrently; each protocol buffers its output.
        p1 = server.runCommand("ls /root", RunCommandProtocol)
        p2 = server.runCommand("whoami", RunCommandProtocol)
        c1, c2 = p1.finished, p2.finished
        c1.addErrback(log_err, log, "ssh command/copy to stdout failed")
        c2.addErrback(log_err, log, "ssh command/copy to stdout failed")
        dl = DeferredList([c1, c2])

        def printResults(reslist):
            # print() call form works on both Python 2 and Python 3
            # (the original used py2-only print statements).
            print("p1 out: %s" % p1.out.getvalue())
            print("p1 err: %s" % p1.err.getvalue())
            print("p2 out: %s" % p2.out.getvalue())
            print("p2 err: %s" % p2.err.getvalue())
            reactor.stop()

        dl.addCallback(printResults)

    d.addCallback(runCommands)
    reactor.run()
def run(self):
    """Serve the local key resource over SSL and run the reactor."""
    resource_tree = Resource()
    resource_tree.putChild("key", LocalKey(self))
    web_site = server.Site(resource_tree)
    reactor.listenSSL(
        self.bind_port,
        web_site,
        self.ssl_context_factory,
        interface=self.bind_host,
    )
    logging.basicConfig(level=logging.DEBUG)
    # Bridge Twisted log events into stdlib logging.
    PythonLoggingObserver().start()
    reactor.run()
def setup(self):
    """
    Initialize the crochet library.

    This starts the reactor in a thread, and connect's Twisted's logs to
    Python's standard library logging module.

    This must be called at least once before the library can be used, and
    can be called multiple times.
    """
    if self._started:
        # Idempotent: later calls are no-ops.
        return
    self._common_setup()
    if platform.type == "posix":
        # Reap child processes from the reactor thread.
        self._reactor.callFromThread(self._startReapingProcesses)
    if self._startLoggingWithObserver:
        # Route Twisted log events to stdlib logging on a dedicated
        # thread so logging calls never block the reactor.
        observer = ThreadLogObserver(PythonLoggingObserver().emit)
        self._reactor.callFromThread(self._startLoggingWithObserver,
                                     observer, False)
        # We only want to stop the logging thread once the reactor has
        # shut down:
        self._reactor.addSystemEventTrigger("after", "shutdown",
                                            observer.stop)
    t = threading.Thread(
        target=lambda: self._reactor.run(installSignalHandlers=False))
    t.start()
    # Stop the reactor (and flush stored errors) at interpreter exit.
    self._atexit_register(self._reactor.callFromThread, self._reactor.stop)
    self._atexit_register(_store.log_errors)
    if self._watchdog_thread is not None:
        self._watchdog_thread.start()
def _getLogObserver(self):
    """Build and return a Twisted log observer callable.

    Opens the configured log file ("-" means stdout), attaches a stdlib
    handler for it to the root logger, points zookeeper's log stream at
    the same file, quiets all ``twisted.*`` loggers to ERROR, and returns
    ``PythonLoggingObserver.emit`` so Twisted events flow into stdlib
    logging.
    """
    if self._logfilename == "-":
        log_file = sys.stdout
    else:
        log_file = open(self._logfilename, "a")

    # Setup file logger
    log_handler = logging.StreamHandler(log_file)
    formatter = logging.Formatter(
        "%(asctime)s: %(name)s@%(levelname)s: %(message)s")
    log_handler.setFormatter(formatter)

    # Also capture zookeeper logs (XXX not compatible with rotation)
    zookeeper.set_log_stream(log_file)

    # Configure logging.
    root = logging.getLogger()
    root.addHandler(log_handler)
    root.setLevel(logging.getLevelName(self._loglevel))

    # Twisted logging is painfully verbose on twisted.web, and
    # there isn't a good way to distinguish different channels
    # within twisted, so just utilize error level logging only for
    # all of twisted.
    twisted_log = logging.getLogger("twisted")
    twisted_log.setLevel(logging.ERROR)

    observer = PythonLoggingObserver()
    return observer.emit
def app_main(self, config, options, args):
    """Application entry point: wire Twisted logging, core services, OAuth
    keys, AMQP and the MySQL connection pool, then process twitter users
    and run the reactor.

    :param config: nested configuration mapping ('db', 'oauth', 'feed',
        'amqp' sections).
    """
    # TODO: deal with new user registrations by listening to amqp and schedule the rest
    observer = PythonLoggingObserver()
    observer.start()

    # initialize some important maps
    db.configure_session(db.create_url_from_config(config['db']))
    service.initialize()
    post_type.initialize()

    # Grab twitter consumer keys
    self.consumer_key = config['oauth']['twitter']['key']
    self.consumer_secret = config['oauth']['twitter']['secret']
    self.default_token_key = config['oauth']['twitter']['default_access_token']
    self.default_token_secret = config['oauth']['twitter']['default_access_token_secret']

    # Grab feed configuration
    self.wait_on_collector_query_delay = float(config['feed']['wait_on_collector_query_delay'])

    # Configure amqp
    amqp_host = config['amqp']['broker']['host']
    amqp_port = int(config['amqp']['broker']['port'])
    amqp_spec = message_queue.create_spec_path(config['amqp']['broker']['spec'])
    self.amqp_exchange = config['amqp']['exchange']['name']
    self.amqp = AmqpFactory(host=amqp_host, port=amqp_port, spec_file=amqp_spec)

    db_host = config['db']['host']
    db_user = config['db']['user']
    db_passwd = config['db']['password']
    db_database = config['db']['database']
    db_unicode = to_bool(config['db']['unicode'])
    self.db_pool = adbapi.ConnectionPool(
        'MySQLdb',
        host=db_host,
        user=db_user,
        passwd=db_passwd,
        db=db_database,
        use_unicode=db_unicode,
        cp_noisy=True)

    self._process_twitter_users()
    reactor.run()
def run_backend(bypass_checks=False, flags_dict=None, frontend_pid=None): """ Run the backend for the application. :param bypass_checks: whether we should bypass the checks or not :type bypass_checks: bool :param flags_dict: a dict containing the flag values set on app start. :type flags_dict: dict """ # In the backend, we want all the components to log into logbook # that is: logging handlers and twisted logs from logbook.compat import redirect_logging from twisted.python.log import PythonLoggingObserver redirect_logging() observer = PythonLoggingObserver() observer.start() # NOTE: this needs to be used here, within the call since this function is # executed in a different process and it seems that the process/thread # identification isn't working 100% logger = get_logger() # noqa # The backend is the one who always creates the certificates. Either if it # is run separately or in a process in the same app as the frontend. if flags.ZMQ_HAS_CURVE: generate_zmq_certificates() # ignore SIGINT since app.py takes care of signaling SIGTERM to us. signal.signal(signal.SIGINT, signal.SIG_IGN) signal.signal(signal.SIGTERM, signal_handler) if flags_dict is not None: dict_to_flags(flags_dict) # HACK we should be able to run the ensure_server anyway but right now it # breaks if we run it twice. if not flags.STANDALONE: # start the events server # This is not needed for the standalone bundle since the launcher takes # care of it. event_server.ensure_server() backend = LeapBackend(bypass_checks=bypass_checks, frontend_pid=frontend_pid) backend.run()
def setup_logger(name):
    """Configure process logging and patch daphne's Server access logging.

    :param name: prefix prepended to every log line's format.
    """
    if DEBUGLOG:
        logging.basicConfig(
            level=logging.DEBUG,
            format=name + ' %(asctime)-15s %(levelname)-8s %(message)s',
        )
    disabled_loggers = []
    if not PIKALOG:
        # Quiet pika unless explicitly enabled.
        disabled_loggers.append('pika')
    for logger in disabled_loggers:
        logging.getLogger(logger).setLevel(logging.WARNING)
    new_defaults = list(Server.__init__.__defaults__)
    # NOTE: Patch `action_logger` argument default value.
    # HACK(review): assumes `action_logger` is the 7th defaulted parameter
    # of Server.__init__ — fragile across library versions; verify.
    new_defaults[6] = AccessLogGenerator(sys.stdout)
    Server.__init__.__defaults__ = tuple(new_defaults)
    # Bridge Twisted log events into the "twisted" stdlib logger.
    observer = PythonLoggingObserver(loggerName='twisted')
    observer.start()
def main(client_class):
    """Run an experiment client: set up logging (Twisted stdout logging
    plus the stdlib bridge, and an optional per-experiment logger.conf),
    connect to the sync server given by the environment, and exit with
    the reactor's exit code.

    :param client_class: protocol class handed to ExperimentClientFactory.
    """
    startLogging(stdout)
    # Also forward Twisted log events into stdlib logging.
    observer = PythonLoggingObserver()
    observer.start()
    config_file = path.join(environ['EXPERIMENT_DIR'], "logger.conf")
    # TODO(emilon): Document this on the user manual
    if path.exists(config_file):
        msg("This experiment has a logger.conf, using it.")
        logging.config.fileConfig(config_file)
    else:
        msg("No logger.conf found for this experiment.")
    factory = ExperimentClientFactory({}, client_class)
    msg("Connecting to: %s:%s" % (environ['SYNC_HOST'], int(environ['SYNC_PORT'])))
    reactor.connectTCP(environ['SYNC_HOST'], int(environ['SYNC_PORT']), factory)
    # Protocol code may overwrite exitCode to signal failure.
    reactor.exitCode = 0
    reactor.run()
    exit(reactor.exitCode)
def run_backend(bypass_checks=False, flags_dict=None, frontend_pid=None): """ Run the backend for the application. This is called from the main app.py entrypoint, and is run in a child subprocess. :param bypass_checks: whether we should bypass the checks or not :type bypass_checks: bool :param flags_dict: a dict containing the flag values set on app start. :type flags_dict: dict """ # In the backend, we want all the components to log into logbook # that is: logging handlers and twisted logs from logbook.compat import redirect_logging from twisted.python.log import PythonLoggingObserver redirect_logging() observer = PythonLoggingObserver() observer.start() if flags_dict is not None: dict_to_flags(flags_dict) common_flags.STANDALONE = flags.STANDALONE # NOTE: this needs to be used here, within the call since this function is # executed in a different process and it seems that the process/thread # identification isn't working 100% logger = get_logger() # noqa # The backend is the one who always creates the certificates. Either if it # is run separately or in a process in the same app as the frontend. if flags.ZMQ_HAS_CURVE: generate_zmq_certificates() # ignore SIGINT since app.py takes care of signaling SIGTERM to us. signal.signal(signal.SIGINT, signal.SIG_IGN) signal.signal(signal.SIGTERM, signal_handler) reactor.callWhenRunning(start_events_and_updater, logger) backend = LeapBackend(bypass_checks=bypass_checks, frontend_pid=frontend_pid) backend.run()
def postOptions(self):
    """Finish command-line processing: normalise the config path, set up
    logging with the Twisted bridge, initialize the database, then show
    help if no sub-command was given."""
    if self.opts['config'] == "~/.ilog":
        # Expand/normalise the default config path.
        self.opt_config(self.opts['config'])
    # Setup logging (only once; the custom Logging class marks it done)
    from ilog.utils.logger import Logging
    if logging.getLoggerClass() is not Logging:
        # NOTE(review): relies on module-level `app` and `usefull_path`
        # being in scope — confirm the imports at the top of this file.
        logging.config.fileConfig(
            usefull_path(str(app.config.logging_config_file))
        )
        logging.setLoggerClass(Logging)
        # Forward Twisted log events into the "twisted" stdlib logger.
        twisted_logging = PythonLoggingObserver('twisted')
        twisted_logging.start()
    self._setup_database()
    if not self.subCommand:
        self.opt_help()
def main():
    """Start the VNS simulator: configure logging, register shutdown
    cleanup, and run the Twisted reactor."""
    base_dir = dirname(__file__) or '.'
    logging.config.fileConfig(base_dir + '/logging.conf')
    logging.info('VNS Simulator starting up')
    # log twisted messages too
    PythonLoggingObserver().start()
    tlog.startLogging(NoOpTwistedLogger(), setStdout=False)
    simulator = VNSSimulator()
    reactor.addSystemEventTrigger("before", "shutdown",
                                  simulator.cleanup_and_exit)
    reactor.run()
def main():
    """Command-line entry point: parse logging options, configure the root
    logger and the Twisted observer, then run the daemon."""
    parser = OptionParser(usage="%prog [options] [actions]",
                          version="%prog: " + common.get_version())
    parser.add_option("-l", "--logfile", dest="logfile",
                      help="Set the logfile location",
                      action="store", type="str")
    parser.add_option("-L", "--loglevel", dest="loglevel",
                      help="Set the log level: none, info, warning, error, critical, debug",
                      action="store", type="str")
    (options, args) = parser.parse_args()

    if options.logfile:
        handler = logging.handlers.RotatingFileHandler(
            options.logfile, 'a',
            maxBytes=50 * 1024 * 1024,  # 50 Mb
            backupCount=5,
            encoding='utf-8',
            delay=0)
    else:
        handler = logging.StreamHandler()

    # BUG FIX: the original mapping declared "none" twice (NOTSET, then
    # CRITICAL); the second entry silently overrode the first and left
    # "critical" — advertised in the --loglevel help text — unhandled.
    # "none" now maps to NOTSET and "critical" to CRITICAL.
    level = {
        "none": logging.NOTSET,
        "info": logging.INFO,
        "warn": logging.WARNING,
        "warning": logging.WARNING,
        "error": logging.ERROR,
        "critical": logging.CRITICAL,
        "debug": logging.DEBUG,
    }[options.loglevel if options.loglevel else 'warning']
    handler.setLevel(level)

    rootLogger = logging.getLogger()
    formatter = logging.Formatter(LOGGING_FORMAT, datefmt="%H:%M:%S")
    handler.setFormatter(formatter)
    rootLogger.addHandler(handler)
    rootLogger.setLevel(level)

    # Route Twisted log events into the "twisted" stdlib logger.
    twisted_logging = PythonLoggingObserver('twisted')
    twisted_logging.start()
    logging.getLogger("twisted").setLevel(level)

    daemon = Daemon()
    daemon.run()
def setup_logging(self):
    """Configure stdlib logging — ``basicConfig`` at INFO/DEBUG (by
    verbosity) into ``self.log_file``, or from ``self.log_config`` — then
    bridge Twisted log events into stdlib logging."""
    if self.log_config is not None:
        logging.config.fileConfig(self.log_config)
    else:
        # FIXME: we need a logging.WARN for a -q quiet option
        log_level = logging.DEBUG if self.verbosity else logging.INFO
        logging.basicConfig(
            level=log_level,
            filename=self.log_file,
            format=('%(asctime)s - %(name)s - %(lineno)d - %(levelname)s'
                    ' - %(message)s'),
        )
    PythonLoggingObserver().start()
def setup_logging():
    """One-time logging setup: basicConfig with a version-dependent format,
    install the custom Logging logger class, quiet sqlalchemy/migrate,
    bridge Twisted logs, and register the TRACE/GARBAGE level names."""
    if logging.getLoggerClass() is not Logging:
        import nam.common
        if 'dev' in nam.common.get_version():
            # Dev builds include source line numbers in log output.
            format='%(asctime)s.%(msecs)03.0f [%(name)-30s:%(lineno)-4s] %(levelname)-7.7s: %(message)s'
        else:
            format='%(asctime)s.%(msecs)03.0f [%(name)-30s] %(levelname)-7.7s: %(message)s'
        logging.basicConfig(
            level=logging.DEBUG,
            datefmt='%H:%M:%S',
            format=format
        )
        logging.setLoggerClass(Logging)
        logging.getLogger('sqlalchemy').setLevel(logging.ERROR)
        logging.getLogger('migrate').setLevel(logging.INFO)
        # Forward Twisted log events into the "twisted" stdlib logger.
        twisted_logging = PythonLoggingObserver('twisted')
        twisted_logging.start()
        logging.addLevelName(5, "TRACE")
        logging.addLevelName(1, "GARBAGE")
def setup_logging(self):
    """Configure logging for this process.

    With no log config file: sets the root logger (and a separate level
    for ``synapse.storage``) from verbosity, logging either to a rotating
    file (100 MB, 3 backups) or to stderr, tagging records with the
    request context.  With a config file: applies it via ``dictConfig``.
    Finally bridges Twisted logging into stdlib logging.
    """
    log_format = (
        "%(asctime)s - %(name)s - %(lineno)d - %(levelname)s - %(request)s"
        " - %(message)s"
    )
    if self.log_config is None:
        level = logging.INFO
        level_for_storage = logging.INFO
        if self.verbosity:
            level = logging.DEBUG
            if self.verbosity > 1:
                level_for_storage = logging.DEBUG

        # FIXME: we need a logging.WARN for a -q quiet option
        logger = logging.getLogger('')
        logger.setLevel(level)
        logging.getLogger('synapse.storage').setLevel(level_for_storage)

        formatter = logging.Formatter(log_format)
        if self.log_file:
            # TODO: Customisable file size / backup count
            handler = logging.handlers.RotatingFileHandler(
                self.log_file, maxBytes=(1000 * 1000 * 100), backupCount=3
            )
        else:
            handler = logging.StreamHandler()
        handler.setFormatter(formatter)
        handler.addFilter(LoggingContextFilter(request=""))
        logger.addHandler(handler)
        # BUG FIX: removed leftover debug statement ``logger.info("Test")``
        # which emitted a spurious "Test" line on every startup.
    else:
        with open(self.log_config, 'r') as f:
            # NOTE(review): yaml.load without an explicit Loader is unsafe
            # for untrusted input; consider yaml.safe_load if the config
            # file needs no custom tags.
            logging.config.dictConfig(yaml.load(f))

    # Forward Twisted log events into stdlib logging.
    observer = PythonLoggingObserver()
    observer.start()
def run_main_view_wx(server, rpc_port, camera_port): """ Runs main UI view based on wx framework. """ # imports import wx import socket # might be some cross platform (windows) issues reported with wxReactor from twisted.internet import wxreactor # add twisted / wx interaction support wxreactor.install() # add logging observer from twisted.python.log import PythonLoggingObserver observer = PythonLoggingObserver() observer.start() # add some extra logging (temp - merge later) #from sys import stdout #from twisted.python.log import startLogging, err #startLogging(stdout) # then can do normal reactor imports from twisted.internet import reactor from ui.main_view_wx import MainWindow # ip address *much* faster than by device name ipaddr = socket.gethostbyname(server) # create rpc client from web.webclient import RPCClient, RPCClientFactory rpc_client = RPCClient() # create wxApp and main window wxApp = wx.App(False) frame = MainWindow(None, "fishpi - Proof Of Concept Vehicle control", ipaddr, rpc_port, camera_port) frame.Show() # run reactor rather than usual 'wxApp.MainLoop()' reactor.registerWxApp(wxApp) logging.debug("RPC:\tconnecting to %s (%s) on port %s" % (server, ipaddr, rpc_port)) reactor.connectTCP(ipaddr, rpc_port, RPCClientFactory(frame)) #reactor.callLater(5, update_callback) reactor.run()
def setup_logging(self):
    """Configure stdlib logging and forward Twisted log events to it.

    Uses ``logging.config.fileConfig`` when ``self.log_config`` is given;
    otherwise applies an ad-hoc ``basicConfig`` whose level follows
    ``self.verbosity`` and which writes to ``self.log_file`` (or stderr
    when that is None).
    """
    log_format = (
        '%(asctime)s - %(name)s - %(lineno)d - %(levelname)s - %(message)s'
    )
    if self.log_config is not None:
        # A logging config file takes precedence over ad-hoc settings.
        logging.config.fileConfig(self.log_config)
    else:
        # FIXME: we need a logging.WARN for a -q quiet option
        chosen_level = logging.DEBUG if self.verbosity else logging.INFO
        logging.basicConfig(
            format=log_format,
            filename=self.log_file,
            level=chosen_level,
        )
    # Route Twisted's log messages through the stdlib logging module.
    observer = PythonLoggingObserver()
    observer.start()
def setup_logging(self):
    """Configure stdlib logging and bridge Twisted log events into it.

    Without ``self.log_config`` an ad-hoc setup is applied (root level from
    ``self.verbosity``; ``synapse.storage`` needs verbosity > 1 for DEBUG;
    rotating-file or stderr output).  With it, the YAML file is passed to
    ``logging.config.dictConfig``.
    """
    log_format = (
        "%(asctime)s - %(name)s - %(lineno)d - %(levelname)s - %(request)s"
        " - %(message)s")
    if self.log_config is None:
        level = logging.INFO
        level_for_storage = logging.INFO
        if self.verbosity:
            level = logging.DEBUG
            if self.verbosity > 1:
                level_for_storage = logging.DEBUG

        # FIXME: we need a logging.WARN for a -q quiet option
        logger = logging.getLogger('')
        logger.setLevel(level)

        # Storage logging stays at INFO unless -vv was given.
        logging.getLogger('synapse.storage').setLevel(level_for_storage)

        formatter = logging.Formatter(log_format)
        if self.log_file:
            # TODO: Customisable file size / backup count
            handler = logging.handlers.RotatingFileHandler(
                self.log_file, maxBytes=(1000 * 1000 * 100), backupCount=3)
        else:
            handler = logging.StreamHandler()
        handler.setFormatter(formatter)

        # Records must carry a 'request' attribute for log_format above.
        handler.addFilter(LoggingContextFilter(request=""))

        logger.addHandler(handler)
        logger.info("Test")
    else:
        with open(self.log_config, 'r') as f:
            # Bug fix: safe_load instead of load — yaml.load without an
            # explicit Loader can construct arbitrary Python objects and is
            # deprecated; a logging config never needs that.
            logging.config.dictConfig(yaml.safe_load(f))

    # Forward Twisted's log messages to the stdlib logging module.
    observer = PythonLoggingObserver()
    observer.start()
def after_setup_logger(logger, loglevel, **kwargs):
    """Celery :doc:`signal handler <celery:userguide/signals>` to set up
    capturing of all log messages from Comet and Twisted.

    * Celery uses the Python standard library's :mod:`logging` module.
      Twisted has its own separate logging facility.  Use Twisted's
      :class:`~twisted.python.log.PythonLoggingObserver` to forward all
      Twisted log messages to the Python :mod:`logging` module.

    * Comet uses the Twisted logging facility, but has its own separate
      management of log severity level (e.g., *info*, *debug*).  Set Comet's
      log level to match Celery's.
    """
    # NOTE(review): assumes Comet's severity scale is exactly 10x the stdlib
    # logging scale (e.g. logging.DEBUG == 10 vs. a Comet DEBUG of 100) —
    # confirm against comet.log before relying on this mapping.
    comet.log.LEVEL = 10 * loglevel
    # Forward Twisted log events to the named Celery logger.
    PythonLoggingObserver(logger.name).start()
def setup_logging(self):
    """Set up stdlib logging (ad-hoc or via a config file) and forward
    Twisted log events into it.

    Ad-hoc mode derives the root level from ``self.verbosity`` (storage
    logging needs verbosity > 1 for DEBUG) and writes to ``self.log_file``
    or, failing that, stderr.
    """
    log_format = (
        "%(asctime)s - %(name)s - %(lineno)d - %(levelname)s - %(request)s"
        " - %(message)s"
    )
    if self.log_config is not None:
        # A config file takes precedence over the ad-hoc setup below.
        logging.config.fileConfig(self.log_config)
    else:
        # -v enables DEBUG everywhere except storage, which needs -vv.
        root_level = logging.DEBUG if self.verbosity else logging.INFO
        storage_level = (
            logging.DEBUG if self.verbosity and self.verbosity > 1
            else logging.INFO
        )
        # FIXME: we need a logging.WARN for a -q quiet option
        root = logging.getLogger('')
        root.setLevel(root_level)
        logging.getLogger('synapse.storage').setLevel(storage_level)

        if self.log_file:
            sink = logging.FileHandler(self.log_file)
        else:
            sink = logging.StreamHandler()
        sink.setFormatter(logging.Formatter(log_format))
        # Records must carry a 'request' attribute for log_format above.
        sink.addFilter(LoggingContextFilter(request=""))
        root.addHandler(sink)
        root.info("Test")
    # Bridge Twisted's logging into the stdlib logging module.
    observer = PythonLoggingObserver()
    observer.start()
def main(args=None):
    """ Run the Engine API server.

    :param args: pre-parsed CLI arguments; when None they are parsed from
        ``sys.argv[1:]`` via ``parse_args``.
    :raises SystemExit: with status 1 after printing the configuration when
        ``--show-config`` was requested.
    """
    conf = Config()
    # parse args only when the caller did not supply them
    if args is None:
        args = parse_args(sys.argv[1:])
    # handle show_config: print and bail out without starting anything
    if args.show_config:
        show_config(conf)
        raise SystemExit(1)
    # logging level: CLI flag or config value, whichever asks for more detail
    if args.verbose > 1 or conf.get('verbose') > 1:
        set_log_debug()
    elif args.verbose == 1 or conf.get('verbose') == 1:
        set_log_info()
    dbconn = connect_mongodb(conf.get('mongo_host'), conf.get('mongo_port'))
    logger.debug("instantiating apiserver")
    apiserver = APIServer(dbconn)
    apisite = Site(apiserver.app.resource())
    logger.debug("reactor.listenTCP")
    # Bind the listening port before the reactor starts; serving begins
    # only once reactor.run() is entered below.
    reactor.listenTCP(conf.get('api_port'), apisite)
    logger.debug("reactor.run() - listening on port %d", conf.get('api_port'))
    # setup Python logging: forward Twisted log events to stdlib logging
    observer = PythonLoggingObserver()
    observer.start()
    # run the reactor (blocks until the reactor is stopped)
    reactor.run()
    logger.debug("reactor.run() returned")
def setup_logging(options):
    """Configure stdlib logging and route Twisted log messages through it.

    :param options: parsed CLI options; reads ``options.logfile`` (optional
        path for a rotating log file) and ``options.verbose`` (0 keeps the
        default WARNING level; 1 selects INFO; >1 selects DEBUG).
    """
    # route Twisted log messages via stdlib logging
    if options.logfile:
        handler = logging.handlers.RotatingFileHandler(
            options.logfile, maxBytes=5 * 1024 * 1024, backupCount=5)
        # Bug fix: the handler previously had no formatter, so file records
        # were written as bare messages with no level or logger name; apply
        # the same default layout basicConfig gives its console handler.
        handler.setFormatter(logging.Formatter(logging.BASIC_FORMAT))
        logging.getLogger().addHandler(handler)
    # Report uncaught exceptions through the logging machinery too.
    sys.excepthook = log_exceptions
    # No-op when a logfile handler was attached above (root already has a
    # handler); otherwise adds a default stderr handler to the root logger.
    logging.basicConfig()
    if options.verbose > 1:
        logging.getLogger().setLevel(logging.DEBUG)
    elif options.verbose:
        logging.getLogger().setLevel(logging.INFO)
    PythonLoggingObserver().start()
def init_logging(self):
    """Bridge Twisted's log events into the stdlib logging module."""
    # Imported lazily so twisted is only required once logging is set up.
    from twisted.python.log import PythonLoggingObserver
    PythonLoggingObserver().start()
def postOptions(self):
    # Post-process parsed command-line options (twisted.python.usage hook):
    # load the default config, validate flag combinations, wire up logging
    # for the app / SQLAlchemy / Twisted, and refuse to run sub-commands
    # before a root CA exists.
    # NOTE(review): this is Python 2 code (print statement below).
    if self.opts['config'] == "~/.afm":
        # Default config path was left untouched; load it explicitly.
        self.opt_config(self.opts['config'])

    if self.opts['quiet'] and self.opts['debug']:
        print "ERROR: Only pass one of '--debug' or '--quiet', not both."
        # NOTE(review): presumably opt_help() exits the process — confirm;
        # otherwise execution falls through with contradictory flags.
        self.opt_help()

    if 'logging_level' not in self.opts:
        self.opts['logging_level'] = logging.INFO

    # Setup logging — only once: skipped when the custom Logging logger
    # class has already been installed by a previous call.
    if logging.getLoggerClass() is not Logging:
        afm_log = logging.getLogger('afm')
        afm_log.setLevel(self.opts['logging_level'])
        if self.opts['logfile']:
            from logging.handlers import RotatingFileHandler
            handler = RotatingFileHandler(
                self.opts['logfile'],
                maxBytes=1*1024*1024,  # 1 MB
                backupCount=5,
                encoding='utf-8'
            )
        else:
            handler = logging.StreamHandler()
        handler.setLevel(self.opts['logging_level'])
        formatter = logging.Formatter(
            "%(asctime)s [%(levelname)-8s] [%(name)-15s] %(message)s",
            "%H:%M:%S"
        )
        handler.setFormatter(formatter)
        afm_log.addHandler(handler)

        # SQLAlchemy is quiet (ERROR) unless --debug was given.
        sqla_log = logging.getLogger('sqlalchemy')
        sqla_log.setLevel(logging.ERROR)
        if self.opts['debug']:
            sqla_log.setLevel(self.opts['logging_level'])
            # SQLA Engine Logging
            sqlae = logging.getLogger('sqlalchemy.engine')
            sqlae.setLevel(self.opts['logging_level'])
            sqlae.addHandler(handler)
            # SQLA Unit-Of-Work Logging
            sqlauof = logging.getLogger('sqlalchemy.orm.unitofwork')
            sqlauof.setLevel(self.opts['logging_level'])
            sqlauof.addHandler(handler)
        sqla_log.addHandler(handler)

        tw_log = logging.getLogger('twisted')
        tw_log.setLevel(self.opts['logging_level'])
        tw_log.addHandler(handler)

        # Install the custom logger class (also the sentinel checked above)
        # and forward Twisted log events to the 'twisted' stdlib logger.
        logging.setLoggerClass(Logging)
        twisted_logging = PythonLoggingObserver('twisted')
        twisted_logging.start()

    # Every sub-command except 'newca' requires an existing root CA.
    if self.subOptions and not self.subOptions.subCommand == 'newca':
        if not db.session().query(Certificate).\
                filter_by(root_ca=True).count():
            # NOTE(review): SysExit is called, not raised — presumably a
            # helper that terminates the process; confirm its definition.
            SysExit("You haven't generate your Root Certificate Authority "
                    "yet!\nPlease run the \"newca\" command")
#!/usr/bin/python import sys, os sys.path.insert(0, os.getcwd()) #Hack to make twistd work when run as root os.chdir(os.path.split(os.getcwd())[0]) #print os.path.dirname() import utils log = utils.get_logger("Twistd") from twisted.python.log import PythonLoggingObserver twistdlog = PythonLoggingObserver("Twistd") twistdlog.start() from twisted.application import service from twisted.internet import reactor graphtoolService = service.MultiService() application = service.Application("Graphtool") graphtoolService.setServiceParent(application) def addServices(): import web webServices = web.getService() for service in webServices: graphtoolService.addService(service) import opsview #import rest_api - imported in opsview
# twistd .tac-style setup for dbwfserver: one TCP listener per configured
# port, each serving a QueryParserResource plus static assets.
from __main__ import config

import dbwfserver.resource as resource

from twisted.application import internet, service
from twisted.web import server, static

from twisted.python.log import ILogObserver, PythonLoggingObserver

for port, db in config.run_server.items():
    root = resource.QueryParserResource(config, db)

    # Serve static files and the favicon alongside the query resource.
    root.putChild('static', static.File(config.static_dir))
    root.putChild('favicon.ico', resource.FaviconResource(config))

    site = server.Site(root)
    site.displayTracebacks = config.display_tracebacks

    # NOTE(review): 'application' is rebound on every loop iteration, so
    # with multiple ports only the last Application object survives at
    # module level — confirm whether multi-port mode is actually used.
    application = service.Application('dbwfserver')

    # Route this application's Twisted log events to a per-port stdlib logger.
    observer = PythonLoggingObserver('dbwfserver.twisted.port' + str(port))
    application.setComponent(ILogObserver, observer.emit)

    sc = service.IServiceCollection(application)
    sc.addService(internet.TCPServer(int(port), site))
def __init__(self):
    # Bind this observer to the stdlib 'twisted' logger: per the
    # PythonLoggingObserver API, observed Twisted events are forwarded to
    # logging.getLogger('twisted').  Old-style explicit base-class call.
    PythonLoggingObserver.__init__(self, loggerName='twisted')
def setupTwistedLogging(application):
    """Install a Python-logging observer as the application's log observer.

    @param application: A C{twisted.application.service.Application}
        instance; its C{ILogObserver} component is set so Twisted log
        events flow into the stdlib logging root logger.
    """
    observer = PythonLoggingObserver(None)
    application.setComponent(ILogObserver, observer.emit)
            # (tail of an error callback whose `def` is outside this view)
            # Log the failure, return the camera to idle, and propagate err.
            LOGGER.error("Error response to full image capture process: %s" % err)
            self.state = TreatCam.IDLE
            return err

    def trimExcessCaptureFiles(self):
        # Delete the oldest capture files so that at most
        # self.config.capturesToRetain remain.  Sorting the glob results
        # orders by filename; presumably names embed a sortable timestamp,
        # so the earliest entries are the oldest — confirm naming scheme.
        captures = sorted(glob(path.join(self.config.captureDir, TreatCam.CAPTURE_PREFIX + "*")))
        excessCaptures = len(captures) - self.config.capturesToRetain
        if (excessCaptures > 0):
            for i in range(excessCaptures):
                LOGGER.info("Trimming: %s" % captures[i])
                remove(captures[i])

if __name__=="__main__":
    # Manual test harness: console DEBUG logging, Twisted log events routed
    # into stdlib logging, then start a motion capture and run the reactor.
    from logging import Formatter, StreamHandler, INFO, DEBUG, getLogger
    from os import getcwd
    from twisted.python.log import PythonLoggingObserver
    logFormatter = Formatter(fmt='%(asctime)s - %(name)s - %(levelname)s - %(message)s', datefmt="%Y-%m-%d %H:%M:%S")
    rootLogger = getLogger()
    consoleHandler = StreamHandler()
    consoleHandler.setFormatter(logFormatter)
    rootLogger.addHandler(consoleHandler)
    rootLogger.setLevel(DEBUG)
    observer = PythonLoggingObserver()
    observer.start()
    cam = TreatCam(reactor, TreatCamConfig())
    cam.startMotionCapture()
    reactor.run()
def emit(self, event_dict):
    """Forward a Twisted log event to the stdlib-logging bridge.

    Bug fix: the original signature was ``def emit():`` while the body
    referenced ``self`` and ``event_dict``, so the first event Twisted
    delivered would have raised (wrong arity / NameError).  An observer's
    emit callback receives the event dictionary from Twisted's log system.

    :param event_dict: the Twisted log event mapping to forward.
    """
    PythonLoggingObserver.emit(self, event_dict)
def connect(server, password=None):
    """ Connect to a VNCServer and return a Client instance that is usable
    in the main thread of non-Twisted Python Applications, EXPERIMENTAL.

    >>> from vncdotool import api
    >>> client = api.connect('host')
    >>> client.keyPress('c')
    >>> api.shutdown()

    You may then call any regular VNCDoToolClient method on client from your
    application code.

    If you are using a GUI toolkit or other major async library please read
    http://twistedmatrix.com/documents/13.0.0/core/howto/choosing-reactor.html
    for a better method of integrating vncdotool.

    :param server: 'host' or 'host:display' string, parsed by
        command.parse_host below.
    :param password: optional VNC password set on the factory.
    """
    if not reactor.running:
        # Run the reactor in a background daemon thread so the caller's
        # main thread stays free; signal handlers are disabled because the
        # reactor is not on the main thread.
        # NOTE(review): reactor.running may not be True immediately after
        # start(), so two quick connect() calls could both try to start the
        # reactor — confirm whether this race matters for callers.
        global _THREAD
        _THREAD = threading.Thread(target=reactor.run, name='Twisted',
                                   kwargs={'installSignalHandlers': False})
        _THREAD.daemon = True
        _THREAD.start()

    # Forward Twisted log events to stdlib logging.
    # NOTE(review): a new observer is registered on EVERY call; repeated
    # connects will duplicate forwarded log records.
    observer = PythonLoggingObserver()
    observer.start()

    factory = VNCDoToolFactory()
    if password is not None:
        factory.password = password

    client = ThreadedVNCClientProxy(factory)
    host, port = command.parse_host(server)
    client.connect(host, port)

    return client