def postOptions(self):
    """Finalize parsed options: bootstrap the application config, install
    logging exactly once, and fall back to help when no subcommand is given.
    """
    from baca.application import app
    # The default config path was left untouched by the user; run the normal
    # config-option handler on it so it is expanded/registered the same way
    # an explicit --config would be.
    if self.opts['config'] == "~/.ilog":
        self.opt_config(self.opts['config'])
    if not isfile(join(app.config.dir, app.config.file)):
        # First run: create and persist an initial configuration file.
        app.config_initial_populate()
        app.config_save()
    app.config_load()
    # Setup logging
    from baca.utils.logger import Logging
    if logging.getLoggerClass() is not Logging:
        # Guarded so logging is configured only once per process: load the
        # fileConfig ini, install the custom logger class, and forward
        # twisted's log events into the stdlib 'twisted' logger.
        logging.config.fileConfig(
            usefull_path(str(app.config.logging_config_file))
        )
        logging.setLoggerClass(Logging)
        twisted_logging = PythonLoggingObserver('twisted')
        twisted_logging.start()
    # self._setup_database()
    app.setup_log()
    if not self.subCommand:
        self.opt_help()
def initializeLogging(configFile):
    """Configure stdlib logging from an ini file and forward twisted logs.

    :param configFile: path to a ``logging.config.fileConfig``-style file.
    """
    from logging.config import fileConfig
    from twisted.python.log import PythonLoggingObserver

    fileConfig(configFile)
    # Route twisted's log events into the stdlib logging tree.
    PythonLoggingObserver().start()
def connect(server, password=None):
    """
    Connect to a VNCServer and return a Client instance that is usable
    in the main thread of non-Twisted Python Applications, EXPERIMENTAL.

    >>> from vncdotool import threaded
    >>> client = threaded.connect('host')
    >>> client.keyPress('c')
    >>> client.join()

    You may then call any regular VNCDoToolClient method on client from your
    application code.

    If you are using a GUI toolkit or other major async library please read
    http://twistedmatrix.com/documents/13.0.0/core/howto/choosing-reactor.html
    for a better method of integrating vncdotool.

    :param server: server address accepted by ``command.parse_host``.
    :param password: optional VNC password, stored on the factory.
    :return: a started ``ThreadedVNCClientProxy``.
    """
    # Forward twisted's log events into the stdlib logging module.
    observer = PythonLoggingObserver()
    observer.start()

    factory = VNCDoToolFactory()
    if password is not None:
        factory.password = password

    # The proxy runs the twisted client in a background thread, making it
    # safe to drive from the application's main thread.
    client = ThreadedVNCClientProxy(factory)
    host, port = command.parse_host(server)
    client.connect(host, port)
    client.start()

    return client
def setup_logs(application=None): """ Configure logging for the bot. :param application: an application object, if using twistd :type application: service.Application """ # todo arbitrary logging file obs = PythonLoggingObserver() if not application: obs.start() else: application.setComponent(ILogObserver, obs.emit) root_logger = logging.getLogger() root_logger.setLevel(logging.DEBUG) logging.getLogger('twistedpusher.client').setLevel(logging.INFO) file_lf = logging.Formatter("%(asctime)s %(levelname)-8s [%(name)s] %(message)s") file_lf.converter = time.gmtime file_h = logging.FileHandler('bot.log') file_h.setFormatter(file_lf) root_logger.addHandler(file_h) console_lf = logging.Formatter("%(levelname)-8s [%(name)s] %(message)s") console_h = logging.StreamHandler() console_h.setFormatter(console_lf) root_logger.addHandler(console_h)
def main(application=None):
    """
    SMTP daemon for receiving DSN (Delivery Status Notification)

    :param application: optional Application instance (if used inside twistd)
    :type application: twisted.application.service.Application
    """
    parser = argparse.ArgumentParser(
        description='Start the SMTPD process for CloudMailing.')
    parser.add_argument('-p', '--port', type=int, default=25,
                        help='port number for SMTP (default: 25)')
    parser.add_argument('-u', '--uid', help='Change the UID of this process')
    parser.add_argument('-g', '--gid', help='Change the GID of this process')

    args = parser.parse_args()

    # Need to open TCP port early, before to switch user and configure log
    portal = Portal(SimpleRealm())
    portal.registerChecker(AllowAnonymousAccess())
    factory = ReturnPathSMTPFactory(portal)
    if application:
        # Under twistd: attach as a child service rather than binding now.
        smtpd = internet.TCPServer(args.port, factory)
        smtpd.setServiceParent(application)
    else:
        smtpd = reactor.listenTCP(args.port, factory)

    if args.uid or args.gid:
        # Drop privileges only after the (possibly privileged) port is bound.
        uid = args.uid and pwd.getpwnam(args.uid).pw_uid or None
        gid = args.gid and grp.getgrnam(args.gid).gr_gid or None
        # for fname in os.listdir(settings.LOG_PATH):
        #     fullname = os.path.join(settings.LOG_PATH, fname)
        #     print fullname
        #     if args.uid:
        #         os.chown(fullname, args.uid, args.gid)
        switchUID(uid, gid)

    configure_logging("smtpd", settings.CONFIG_PATH, settings.LOG_PATH,
                      settings.DEFAULT_LOG_FORMAT, False)

    ##Twisted logs
    # Forward twisted log events into stdlib logging.
    observer = PythonLoggingObserver()
    observer.start()

    log = logging.getLogger("smtpd")
    log.info(
        "****************************************************************")
    log.info("Starting CloudMailing SMTPD version %s" % VERSION)
    log.info("Serial: %s" % settings.SERIAL)
    log.info("Twisted version %s", twisted.version.short())
    log.info(
        "****************************************************************")

    Db.getInstance(settings.MASTER_DATABASE)

    log.info("CM SMTPD started on port %d", args.port)
def init_logging(self):
    """ Log to stdout and the file """
    root = logging.getLogger()
    root.setLevel(logging.DEBUG)
    formatter = logging.Formatter(self._log_format)

    #: Log to stdout
    stream = logging.StreamHandler(sys.stdout)
    stream.setLevel(logging.DEBUG)
    stream.setFormatter(formatter)

    #: Log to rotating handler
    disk = RotatingFileHandler(
        self._log_filename,
        maxBytes=1024 * 1024 * 10,  # 10 MB
        backupCount=10,
    )
    disk.setLevel(logging.DEBUG)
    disk.setFormatter(formatter)

    root.addHandler(disk)
    root.addHandler(stream)

    #: Start twisted logger
    # Route twisted's log events into the root logger configured above.
    from twisted.python.log import PythonLoggingObserver
    observer = PythonLoggingObserver()
    observer.start()
def connect(server, password=None):
    """Return a thread-friendly VNC client connected to *server*, EXPERIMENTAL.

    Usable from the main thread of non-Twisted Python applications:

    >>> from vncdotool import threaded
    >>> client = threaded.connect('host')
    >>> client.keyPress('c')
    >>> client.join()

    Any regular VNCDoToolClient method may then be called on the returned
    client.  If you use a GUI toolkit or other major async library, see
    http://twistedmatrix.com/documents/13.0.0/core/howto/choosing-reactor.html
    for a better integration method.

    :param server: server address accepted by ``command.parse_host``.
    :param password: optional VNC password, stored on the factory.
    """
    # Mirror twisted log events into stdlib logging.
    PythonLoggingObserver().start()

    factory = VNCDoToolFactory()
    if password is not None:
        factory.password = password

    host, port = command.parse_host(server)
    proxy = ThreadedVNCClientProxy(factory)
    proxy.connect(host, port)
    proxy.start()
    return proxy
def twisted_logging(logger, level=None):
    """Route twisted's log events into a ``twisted`` child of *logger*.

    :param logger: parent ``logging.Logger``.
    :param level: optional level applied to the child logger before the
        observer is installed.
    """
    from twisted.python.log import PythonLoggingObserver
    try:
        if level is not None:
            logger.getChild('twisted').setLevel(level)
        observer = PythonLoggingObserver(logger.name + '.twisted')
        observer.start()
    # BUG FIX: was a bare ``except:``, which also swallows SystemExit and
    # KeyboardInterrupt; narrow it to Exception.
    except Exception:
        logger.critical("Could not add twisted observer!", exc_info=True)
def main(application=None):
    """
    Startup sequence for CM Master

    :param application: optional Application instance (if used inside twistd)
    :type application: twisted.application.service.Application
    """
    parser = argparse.ArgumentParser(description='Start the Master process for CloudMailing.')
    parser.add_argument('-p', '--port', type=int, default=33620,
                        help='port number for Master MailingManager (default: 33620)')
    parser.add_argument('--api-interface', default='',
                        help='network interface (IP address) on which API should listen (default: <empty> = all)')
    parser.add_argument('--api-port', type=int, default=33610,
                        help='port number for API (default: 33610)')
    parser.add_argument('--api-dont-use-ssl', action='store_true', default=False,
                        help='ask API to not use secure port (SSL)')

    args = parser.parse_args()

    configure_logging("master", settings.CONFIG_PATH, settings.LOG_PATH,
                      settings.DEFAULT_LOG_FORMAT, False)

    ##Twisted logs
    # Forward twisted log events into stdlib logging.
    observer = PythonLoggingObserver()
    observer.start()

    log = logging.getLogger("cm")
    log.info("****************************************************************")
    log.info("Starting CloudMailing version %s" % VERSION)
    log.info("Serial: %s" % settings.SERIAL)
    log.info("Twisted version %s", twisted.version.short())
    log.info("****************************************************************")

    ssl_context_factory = make_SSL_context()

    # Retry until the database is reachable; the daemon is useless without it.
    db_conn = None
    while not db_conn:
        try:
            db_conn = connect(settings.MASTER_DATABASE)
            init_master_db(db_conn[settings.MASTER_DATABASE])
            log.info("Connected to database '%s'", settings.MASTER_DATABASE)
        except (pymongo.errors.ConnectionFailure,
                pymongo.errors.ServerSelectionTimeoutError):
            log.error("Failed to connect to database server!")
            # special case for MailFountain hardware only
            if os.path.exists("/data/mongodb/mongod.lock"):
                os.remove("/data/mongodb/mongod.lock")
                os.system('su -m mongodb -c "mongod --config /usr/local/etc/mongodb.conf --dbpath /data/mongodb/ --repair"')
                os.system("service mongod start")
            else:
                # BUG FIX: this message was a string literal broken across a
                # physical line ("... Trying \n again in 5 seconds..."),
                # which is a SyntaxError; rejoined into one literal.
                log.info(" Trying again in 5 seconds...")
                time.sleep(5)

    Db.getInstance(settings.MASTER_DATABASE, pool_size=10, watchdog_timeout=60)

    # attach the service to its parent application
    apiService = get_api_service(application, port=args.api_port, interface=args.api_interface,
                                 ssl_context_factory=not args.api_dont_use_ssl and ssl_context_factory or None)
    start_master_service(application, master_port=args.port,
                         ssl_context_factory=ssl_context_factory)
def makeService(config):
    """Build and return the twisted service described by *config*.

    Logging comes from a fileConfig ini when ``config['logging']`` is set,
    otherwise from ``basicConfig`` at ``config.loglevel``; twisted log
    events are then forwarded into stdlib logging.
    """
    if config['logging']:
        logging.config.fileConfig(config['logging'])
    else:
        logging.basicConfig(level=config.loglevel)
    obs = PythonLoggingObserver()
    obs.start()
    # Note: `config` is rebound from the options object to the loaded
    # runner configuration from here on.
    config = loader.load_config(config['config'])
    loader.setup_environment(config)
    return runner.getService(config, reactor)
def makeService(config):
    """Build and return the twisted service described by *config*.

    Configures stdlib logging (fileConfig ini when provided, basicConfig
    otherwise), forwards twisted log events into it, then loads the runner
    configuration and hands back its service.
    """
    log_ini = config['logging']
    if log_ini:
        logging.config.fileConfig(log_ini)
    else:
        logging.basicConfig(level=config.loglevel)

    PythonLoggingObserver().start()

    runner_config = loader.load_config(config['config'])
    loader.setup_environment(runner_config)
    return runner.getService(runner_config, reactor)
def setup_logging(self):
    """Configure stdlib logging and forward twisted log events into it.

    With no ``self.log_config`` a handler is built by hand: a rotating
    file handler (plus a SIGHUP-triggered rollover) when ``self.log_file``
    is set, else a stream handler.  Otherwise the YAML dictConfig file is
    loaded as-is.
    """
    log_format = (
        "%(asctime)s - %(name)s - %(lineno)d - %(levelname)s - %(request)s"
        " - %(message)s"
    )
    if self.log_config is None:
        level = logging.INFO
        level_for_storage = logging.INFO
        if self.verbosity:
            level = logging.DEBUG
            if self.verbosity > 1:
                level_for_storage = logging.DEBUG

        # FIXME: we need a logging.WARN for a -q quiet option
        logger = logging.getLogger('')
        logger.setLevel(level)
        logging.getLogger('synapse.storage').setLevel(level_for_storage)

        formatter = logging.Formatter(log_format)
        if self.log_file:
            # TODO: Customisable file size / backup count
            handler = logging.handlers.RotatingFileHandler(
                self.log_file, maxBytes=(1000 * 1000 * 100), backupCount=3
            )

            def sighup(signum, stack):
                logger.info("Closing log file due to SIGHUP")
                handler.doRollover()
                logger.info("Opened new log file due to SIGHUP")

            # TODO(paul): obviously this is a terrible mechanism for
            # stealing SIGHUP, because it means no other part of synapse
            # can use it instead. If we want to catch SIGHUP anywhere
            # else as well, I'd suggest we find a nicer way to broadcast
            # it around.
            #
            # BUG FIX: getattr() without a default raises AttributeError
            # on platforms without SIGHUP (e.g. Windows); supply None so
            # the feature is skipped gracefully there.
            if getattr(signal, "SIGHUP", None):
                signal.signal(signal.SIGHUP, sighup)
        else:
            handler = logging.StreamHandler()
        handler.setFormatter(formatter)

        handler.addFilter(LoggingContextFilter(request=""))

        logger.addHandler(handler)
    else:
        with open(self.log_config, 'r') as f:
            # NOTE(review): yaml.load without an explicit Loader can run
            # arbitrary Python tags; the config is operator-supplied here,
            # but yaml.safe_load would be safer -- confirm before changing.
            logging.config.dictConfig(yaml.load(f))

    observer = PythonLoggingObserver()
    observer.start()
def setupLogger(level="error", filename=None, filemode="w"):
    """
    Sets up the basic logger and if `:param:filename` is set, then it will
    log to that file instead of stdout.

    :param level: str, the level to log
    :param filename: str, the file to log to
    :param filemode: 'a' for a rotating append log, 'w' to overwrite
    """
    import logging
    # Install the custom logger class (and its extra TRACE/GARBAGE level
    # names) exactly once per process.
    if logging.getLoggerClass() is not Logging:
        logging.setLoggerClass(Logging)
        logging.addLevelName(5, 'TRACE')
        logging.addLevelName(1, 'GARBAGE')

    # Unknown level names silently fall back to ERROR.
    level = levels.get(level, logging.ERROR)

    rootLogger = logging.getLogger()

    if filename and filemode == 'a':
        import logging.handlers
        # Append mode: size-capped rotating log file.
        handler = logging.handlers.RotatingFileHandler(
            filename, filemode,
            maxBytes=50*1024*1024,   # 50 Mb
            backupCount=3,
            encoding='utf-8',
            delay=0
        )
    elif filename and filemode == 'w':
        import logging.handlers
        # Overwrite mode: prefer WatchedFileHandler (plays well with
        # external log rotation) when the platform provides it.
        handler = getattr(
            logging.handlers, 'WatchedFileHandler', logging.FileHandler)(
                filename, filemode, 'utf-8', delay=0
            )
    else:
        handler = logging.StreamHandler()

    handler.setLevel(level)
    formatter = logging.Formatter(
        DEFAULT_LOGGING_FORMAT % MAX_LOGGER_NAME_LENGTH,
        datefmt="%H:%M:%S"
    )
    handler.setFormatter(formatter)

    rootLogger.addHandler(handler)
    rootLogger.setLevel(level)

    # Forward twisted log events into the stdlib 'twisted' logger.
    twisted_logging = PythonLoggingObserver('twisted')
    twisted_logging.start()
    logging.getLogger("twisted").setLevel(level)
def run(self):
    """Serve the local key resource over SSL and run the reactor (blocks)."""
    root = Resource()
    root.putChild("key", LocalKey(self))
    site = server.Site(root)
    reactor.listenSSL(self.bind_port, site, self.ssl_context_factory,
                      interface=self.bind_host)
    # Mirror twisted log events into stdlib logging at DEBUG.
    logging.basicConfig(level=logging.DEBUG)
    observer = PythonLoggingObserver()
    observer.start()
    reactor.run()
def run(self):
    """Expose the local key over HTTPS and start the reactor (blocking)."""
    root = Resource()
    root.putChild("key", LocalKey(self))
    reactor.listenSSL(
        self.bind_port,
        server.Site(root),
        self.ssl_context_factory,
        interface=self.bind_host,
    )
    # Mirror twisted log events into stdlib logging at DEBUG.
    logging.basicConfig(level=logging.DEBUG)
    PythonLoggingObserver().start()
    reactor.run()
def run_main_view_wx(config): """ Runs main UI view based on wx framework. """ # imports import wx import socket # might be some cross platform (windows) issues reported with wxReactor from twisted.internet import wxreactor # add twisted / wx interaction support wxreactor.install() # add logging observer from twisted.python.log import PythonLoggingObserver observer = PythonLoggingObserver() observer.start() # then can do normal reactor imports from twisted.internet import reactor # and wx specific implementations from ui.view_model_wx import MainViewController from ui.view_model_wx import MainViewModel from ui.main_view_wx import MainWindow # ip address *much* faster than by device name ipaddr = socket.gethostbyname(config.server_name) logging.debug("RPC:\tServer name %s resolved to IP address %s" % (config.server_name, ipaddr)) # create rpc client from web.webclient import RPCClient, RPCClientFactory rpc_client = RPCClient() # create view model view_model = MainViewModel() # create view controller controller = MainViewController(rpc_client, view_model, config) # create wxApp and main window wxApp = wx.App(False) frame = MainWindow(None, "fishpi - Proof Of Concept Vehicle control", controller, ipaddr, config.rpc_port, config.camera_port) frame.Show() # run reactor rather than usual 'wxApp.MainLoop()' reactor.registerWxApp(wxApp) logging.debug("RPC:\tconnecting to %s (%s) on port %s" % (config.server_name, ipaddr, config.rpc_port)) reactor.connectTCP(ipaddr, config.rpc_port, RPCClientFactory(controller)) #reactor.callLater(5, update_callback) reactor.run()
def run_backend(bypass_checks=False, flags_dict=None, frontend_pid=None): """ Run the backend for the application. :param bypass_checks: whether we should bypass the checks or not :type bypass_checks: bool :param flags_dict: a dict containing the flag values set on app start. :type flags_dict: dict """ # In the backend, we want all the components to log into logbook # that is: logging handlers and twisted logs from logbook.compat import redirect_logging from twisted.python.log import PythonLoggingObserver redirect_logging() observer = PythonLoggingObserver() observer.start() # NOTE: this needs to be used here, within the call since this function is # executed in a different process and it seems that the process/thread # identification isn't working 100% logger = get_logger() # noqa # The backend is the one who always creates the certificates. Either if it # is run separately or in a process in the same app as the frontend. if flags.ZMQ_HAS_CURVE: generate_zmq_certificates() # ignore SIGINT since app.py takes care of signaling SIGTERM to us. signal.signal(signal.SIGINT, signal.SIG_IGN) signal.signal(signal.SIGTERM, signal_handler) if flags_dict is not None: dict_to_flags(flags_dict) # HACK we should be able to run the ensure_server anyway but right now it # breaks if we run it twice. if not flags.STANDALONE: # start the events server # This is not needed for the standalone bundle since the launcher takes # care of it. event_server.ensure_server() backend = LeapBackend(bypass_checks=bypass_checks, frontend_pid=frontend_pid) backend.run()
def app_main(self, config, options, args):
    """Wire up logging, database, OAuth credentials and AMQP from *config*,
    then process twitter users and run the reactor (blocks).

    :param config: nested dict-like configuration (db / oauth / feed / amqp).
    :param options: parsed command-line options (unused here).
    :param args: positional command-line arguments (unused here).
    """
    # TODO: deal with new user registrations by listening to amqp and schedule the rest
    # Forward twisted log events into stdlib logging.
    observer = PythonLoggingObserver()
    observer.start()

    # initialize some important maps
    db.configure_session(db.create_url_from_config(config['db']))
    service.initialize()
    post_type.initialize()

    # Grab twitter consumer keys
    self.consumer_key = config['oauth']['twitter']['key']
    self.consumer_secret = config['oauth']['twitter']['secret']
    self.default_token_key = config['oauth']['twitter']['default_access_token']
    self.default_token_secret = config['oauth']['twitter']['default_access_token_secret']

    # Grab feed configuration
    self.wait_on_collector_query_delay = float(config['feed']['wait_on_collector_query_delay'])

    # Configure amqp
    amqp_host = config['amqp']['broker']['host']
    amqp_port = int(config['amqp']['broker']['port'])
    amqp_spec = message_queue.create_spec_path(config['amqp']['broker']['spec'])
    self.amqp_exchange = config['amqp']['exchange']['name']
    self.amqp = AmqpFactory(host=amqp_host, port=amqp_port, spec_file=amqp_spec)

    # Async MySQL connection pool for the reactor loop.
    db_host = config['db']['host']
    db_user = config['db']['user']
    db_passwd = config['db']['password']
    db_database = config['db']['database']
    db_unicode = to_bool(config['db']['unicode'])
    self.db_pool = adbapi.ConnectionPool(
        'MySQLdb',
        host=db_host,
        user=db_user,
        passwd=db_passwd,
        db=db_database,
        use_unicode=db_unicode,
        cp_noisy=True)

    self._process_twitter_users()
    reactor.run()
def setup_logger(name):
    """Configure process logging and patch the Server's default access logger.

    :param name: prefix included in every log line when DEBUGLOG is on.
    """
    if DEBUGLOG:
        logging.basicConfig(
            level=logging.DEBUG,
            format=name + ' %(asctime)-15s %(levelname)-8s %(message)s',
        )
    # Silence noisy third-party loggers unless explicitly enabled.
    disabled_loggers = []
    if not PIKALOG:
        disabled_loggers.append('pika')
    for logger in disabled_loggers:
        logging.getLogger(logger).setLevel(logging.WARNING)

    new_defaults = list(Server.__init__.__defaults__)
    # NOTE: Patch `action_logger` argument default value.
    # NOTE(review): index 6 is position-dependent on Server.__init__'s
    # signature and will silently patch the wrong default if that signature
    # changes -- verify against the installed Server version.
    new_defaults[6] = AccessLogGenerator(sys.stdout)
    Server.__init__.__defaults__ = tuple(new_defaults)

    # Forward twisted log events into the stdlib 'twisted' logger.
    observer = PythonLoggingObserver(loggerName='twisted')
    observer.start()
def run_backend(bypass_checks=False, flags_dict=None, frontend_pid=None): """ Run the backend for the application. This is called from the main app.py entrypoint, and is run in a child subprocess. :param bypass_checks: whether we should bypass the checks or not :type bypass_checks: bool :param flags_dict: a dict containing the flag values set on app start. :type flags_dict: dict """ # In the backend, we want all the components to log into logbook # that is: logging handlers and twisted logs from logbook.compat import redirect_logging from twisted.python.log import PythonLoggingObserver redirect_logging() observer = PythonLoggingObserver() observer.start() if flags_dict is not None: dict_to_flags(flags_dict) common_flags.STANDALONE = flags.STANDALONE # NOTE: this needs to be used here, within the call since this function is # executed in a different process and it seems that the process/thread # identification isn't working 100% logger = get_logger() # noqa # The backend is the one who always creates the certificates. Either if it # is run separately or in a process in the same app as the frontend. if flags.ZMQ_HAS_CURVE: generate_zmq_certificates() # ignore SIGINT since app.py takes care of signaling SIGTERM to us. signal.signal(signal.SIGINT, signal.SIG_IGN) signal.signal(signal.SIGTERM, signal_handler) reactor.callWhenRunning(start_events_and_updater, logger) backend = LeapBackend(bypass_checks=bypass_checks, frontend_pid=frontend_pid) backend.run()
def main(client_class):
    """Run an experiment client against the sync server.

    Host/port and the experiment directory come from the SYNC_HOST,
    SYNC_PORT and EXPERIMENT_DIR environment variables.

    :param client_class: protocol class handed to ExperimentClientFactory.
    """
    startLogging(stdout)
    # Forward twisted log events into stdlib logging.
    observer = PythonLoggingObserver()
    observer.start()
    config_file = path.join(environ['EXPERIMENT_DIR'], "logger.conf")
    # TODO(emilon): Document this on the user manual
    if path.exists(config_file):
        msg("This experiment has a logger.conf, using it.")
        logging.config.fileConfig(config_file)
    else:
        msg("No logger.conf found for this experiment.")

    factory = ExperimentClientFactory({}, client_class)
    msg("Connecting to: %s:%s" % (environ['SYNC_HOST'], int(environ['SYNC_PORT'])))
    reactor.connectTCP(environ['SYNC_HOST'], int(environ['SYNC_PORT']), factory)

    # Whatever reactor.exitCode holds after the run becomes the process
    # exit status; default to success.
    reactor.exitCode = 0
    reactor.run()
    exit(reactor.exitCode)
def postOptions(self):
    """Expand the default config path, install logging once, set up the
    database, and show help when no subcommand was given."""
    if self.opts['config'] == "~/.ilog":
        self.opt_config(self.opts['config'])
    # Setup logging
    from ilog.utils.logger import Logging
    if logging.getLoggerClass() is not Logging:
        # Guarded so logging is configured only once per process.
        # NOTE(review): `app` is not defined in this function -- presumably
        # a module-level import; confirm it is in scope here.
        logging.config.fileConfig(
            usefull_path(str(app.config.logging_config_file))
        )
        logging.setLoggerClass(Logging)
        twisted_logging = PythonLoggingObserver('twisted')
        twisted_logging.start()
    self._setup_database()
    if not self.subCommand:
        self.opt_help()
def main():
    """Command-line entry point: configure logging from the -l/-L options,
    forward twisted log events into stdlib logging, then run the daemon."""
    parser = OptionParser(usage="%prog [options] [actions]",
                          version="%prog: " + common.get_version())
    parser.add_option("-l", "--logfile", dest="logfile",
                      help="Set the logfile location",
                      action="store", type="str")
    parser.add_option("-L", "--loglevel", dest="loglevel",
                      help="Set the log level: none, info, warning, error, critical, debug",
                      action="store", type="str")
    (options, args) = parser.parse_args()

    if options.logfile:
        handler = logging.handlers.RotatingFileHandler(
            options.logfile, 'a',
            maxBytes=50*1024*1024,
            backupCount=5, encoding='utf-8', delay=0)
    else:
        handler = logging.StreamHandler()

    # BUG FIX: the mapping previously listed "none" twice ("none": NOTSET
    # then "none": CRITICAL) -- the second entry silently won, so "none"
    # meant CRITICAL and "critical" (advertised in the --loglevel help
    # text) raised KeyError.  The second key is now "critical".
    level = {
        "none": logging.NOTSET,
        "info": logging.INFO,
        "warn": logging.WARNING,
        "warning": logging.WARNING,
        "error": logging.ERROR,
        "critical": logging.CRITICAL,
        "debug": logging.DEBUG,
    }[options.loglevel if options.loglevel else 'warning']

    handler.setLevel(level)
    rootLogger = logging.getLogger()
    formatter = logging.Formatter(LOGGING_FORMAT, datefmt="%H:%M:%S")
    handler.setFormatter(formatter)
    rootLogger.addHandler(handler)
    rootLogger.setLevel(level)

    # Route twisted log events into the stdlib 'twisted' logger.
    twisted_logging = PythonLoggingObserver('twisted')
    twisted_logging.start()
    logging.getLogger("twisted").setLevel(level)

    daemon = Daemon()
    daemon.run()
def setup_logging(self):
    """Configure stdlib logging (basicConfig or a fileConfig ini) and
    forward twisted log events into it."""
    log_format = (
        '%(asctime)s - %(name)s - %(lineno)d - %(levelname)s - %(message)s'
    )
    if self.log_config is None:
        level = logging.INFO
        if self.verbosity:
            level = logging.DEBUG

        # FIXME: we need a logging.WARN for a -q quiet option
        logging.basicConfig(level=level, filename=self.log_file,
                            format=log_format)
    else:
        logging.config.fileConfig(self.log_config)

    observer = PythonLoggingObserver()
    observer.start()
def setup_logging():
    """Install the custom Logging logger class plus sane defaults, once."""
    if logging.getLoggerClass() is not Logging:
        import nam.common
        # Dev builds include the source line number in every record.
        if 'dev' in nam.common.get_version():
            format='%(asctime)s.%(msecs)03.0f [%(name)-30s:%(lineno)-4s] %(levelname)-7.7s: %(message)s'
        else:
            format='%(asctime)s.%(msecs)03.0f [%(name)-30s] %(levelname)-7.7s: %(message)s'
        logging.basicConfig(
            level=logging.DEBUG,
            datefmt='%H:%M:%S',
            format=format
        )
        logging.setLoggerClass(Logging)
        # Quieten chatty libraries.
        logging.getLogger('sqlalchemy').setLevel(logging.ERROR)
        logging.getLogger('migrate').setLevel(logging.INFO)
        # Route twisted log events into the stdlib 'twisted' logger.
        twisted_logging = PythonLoggingObserver('twisted')
        twisted_logging.start()
        logging.addLevelName(5, "TRACE")
        logging.addLevelName(1, "GARBAGE")
def run_main_view_wx(server, rpc_port, camera_port): """ Runs main UI view based on wx framework. """ # imports import wx import socket # might be some cross platform (windows) issues reported with wxReactor from twisted.internet import wxreactor # add twisted / wx interaction support wxreactor.install() # add logging observer from twisted.python.log import PythonLoggingObserver observer = PythonLoggingObserver() observer.start() # add some extra logging (temp - merge later) #from sys import stdout #from twisted.python.log import startLogging, err #startLogging(stdout) # then can do normal reactor imports from twisted.internet import reactor from ui.main_view_wx import MainWindow # ip address *much* faster than by device name ipaddr = socket.gethostbyname(server) # create rpc client from web.webclient import RPCClient, RPCClientFactory rpc_client = RPCClient() # create wxApp and main window wxApp = wx.App(False) frame = MainWindow(None, "fishpi - Proof Of Concept Vehicle control", ipaddr, rpc_port, camera_port) frame.Show() # run reactor rather than usual 'wxApp.MainLoop()' reactor.registerWxApp(wxApp) logging.debug("RPC:\tconnecting to %s (%s) on port %s" % (server, ipaddr, rpc_port)) reactor.connectTCP(ipaddr, rpc_port, RPCClientFactory(frame)) #reactor.callLater(5, update_callback) reactor.run()
def setup_logging(self):
    """Configure stdlib logging and forward twisted log events into it.

    With no ``self.log_config`` a handler is built by hand (rotating file
    handler when ``self.log_file`` is set, else a stream handler);
    otherwise the YAML dictConfig file is loaded as-is.
    """
    log_format = (
        "%(asctime)s - %(name)s - %(lineno)d - %(levelname)s - %(request)s"
        " - %(message)s"
    )
    if self.log_config is None:
        level = logging.INFO
        level_for_storage = logging.INFO
        if self.verbosity:
            level = logging.DEBUG
            if self.verbosity > 1:
                level_for_storage = logging.DEBUG

        # FIXME: we need a logging.WARN for a -q quiet option
        logger = logging.getLogger('')
        logger.setLevel(level)
        logging.getLogger('synapse.storage').setLevel(level_for_storage)

        formatter = logging.Formatter(log_format)
        if self.log_file:
            # TODO: Customisable file size / backup count
            handler = logging.handlers.RotatingFileHandler(
                self.log_file, maxBytes=(1000 * 1000 * 100), backupCount=3
            )
        else:
            handler = logging.StreamHandler()
        handler.setFormatter(formatter)

        handler.addFilter(LoggingContextFilter(request=""))

        logger.addHandler(handler)
        # BUG FIX: removed a stray leftover debug line that logged the
        # literal message "Test" on every startup.
    else:
        with open(self.log_config, 'r') as f:
            # NOTE(review): yaml.load without an explicit Loader can run
            # arbitrary Python tags; yaml.safe_load would be safer --
            # confirm the config is always trusted before changing.
            logging.config.dictConfig(yaml.load(f))

    observer = PythonLoggingObserver()
    observer.start()
def setup_logging(self):
    """Configure stdlib logging (basicConfig or a fileConfig ini) and
    forward twisted log events into it."""
    fmt = (
        '%(asctime)s - %(name)s - %(lineno)d - %(levelname)s - %(message)s'
    )
    if self.log_config is not None:
        logging.config.fileConfig(self.log_config)
    else:
        # FIXME: we need a logging.WARN for a -q quiet option
        chosen = logging.DEBUG if self.verbosity else logging.INFO
        logging.basicConfig(level=chosen, filename=self.log_file, format=fmt)

    PythonLoggingObserver().start()
def setup_logging(self):
    """Configure stdlib logging and forward twisted log events into it.

    With no ``self.log_config`` a handler is built by hand (rotating file
    handler when ``self.log_file`` is set, else a stream handler);
    otherwise the YAML dictConfig file is loaded as-is.
    """
    log_format = (
        "%(asctime)s - %(name)s - %(lineno)d - %(levelname)s - %(request)s"
        " - %(message)s")
    if self.log_config is None:
        level = logging.INFO
        level_for_storage = logging.INFO
        if self.verbosity:
            level = logging.DEBUG
            if self.verbosity > 1:
                level_for_storage = logging.DEBUG

        # FIXME: we need a logging.WARN for a -q quiet option
        logger = logging.getLogger('')
        logger.setLevel(level)
        logging.getLogger('synapse.storage').setLevel(level_for_storage)

        formatter = logging.Formatter(log_format)
        if self.log_file:
            # TODO: Customisable file size / backup count
            handler = logging.handlers.RotatingFileHandler(
                self.log_file, maxBytes=(1000 * 1000 * 100), backupCount=3)
        else:
            handler = logging.StreamHandler()
        handler.setFormatter(formatter)

        handler.addFilter(LoggingContextFilter(request=""))

        logger.addHandler(handler)
        # BUG FIX: removed a stray leftover debug line that logged the
        # literal message "Test" on every startup.
    else:
        with open(self.log_config, 'r') as f:
            # NOTE(review): yaml.load without an explicit Loader can run
            # arbitrary Python tags; yaml.safe_load would be safer --
            # confirm the config is always trusted before changing.
            logging.config.dictConfig(yaml.load(f))

    observer = PythonLoggingObserver()
    observer.start()
def main(args=None):
    """ Run the Engine API server

    :param args: pre-parsed argument namespace; when None, sys.argv is parsed.
    """
    conf = Config()
    # parse args
    if args is None:
        args = parse_args(sys.argv[1:])
    # handle show_config
    if args.show_config:
        show_config(conf)
        raise SystemExit(1)
    # logging level -- command line wins over config, debug wins over info
    if args.verbose > 1 or conf.get('verbose') > 1:
        set_log_debug()
    elif args.verbose == 1 or conf.get('verbose') == 1:
        set_log_info()
    dbconn = connect_mongodb(conf.get('mongo_host'), conf.get('mongo_port'))
    logger.debug("instantiating apiserver")
    apiserver = APIServer(dbconn)
    apisite = Site(apiserver.app.resource())
    logger.debug("reactor.listenTCP")
    reactor.listenTCP(conf.get('api_port'), apisite)
    logger.debug("reactor.run() - listening on port %d", conf.get('api_port'))
    # setup Python logging
    # Forward twisted log events into stdlib logging.
    observer = PythonLoggingObserver()
    observer.start()
    # run the reactor (blocks until the reactor is stopped)
    reactor.run()
    logger.debug("reactor.run() returned")
def setup_logging(self):
    """Configure stdlib logging and forward twisted log events into it.

    With no ``self.log_config`` a handler is built by hand (plain file
    handler when ``self.log_file`` is set, else a stream handler);
    otherwise the fileConfig ini is loaded as-is.
    """
    log_format = (
        "%(asctime)s - %(name)s - %(lineno)d - %(levelname)s - %(request)s"
        " - %(message)s"
    )
    if self.log_config is None:
        level = logging.INFO
        level_for_storage = logging.INFO
        if self.verbosity:
            level = logging.DEBUG
            if self.verbosity > 1:
                level_for_storage = logging.DEBUG

        # FIXME: we need a logging.WARN for a -q quiet option
        logger = logging.getLogger('')
        logger.setLevel(level)
        logging.getLogger('synapse.storage').setLevel(level_for_storage)

        formatter = logging.Formatter(log_format)
        if self.log_file:
            handler = logging.FileHandler(self.log_file)
        else:
            handler = logging.StreamHandler()
        handler.setFormatter(formatter)

        handler.addFilter(LoggingContextFilter(request=""))

        logger.addHandler(handler)
        # BUG FIX: removed a stray leftover debug line that logged the
        # literal message "Test" on every startup.
    else:
        logging.config.fileConfig(self.log_config)

    observer = PythonLoggingObserver()
    observer.start()
#!/usr/bin/python import sys, os sys.path.insert(0, os.getcwd()) #Hack to make twistd work when run as root os.chdir(os.path.split(os.getcwd())[0]) #print os.path.dirname() import utils log = utils.get_logger("Twistd") from twisted.python.log import PythonLoggingObserver twistdlog = PythonLoggingObserver("Twistd") twistdlog.start() from twisted.application import service from twisted.internet import reactor graphtoolService = service.MultiService() application = service.Application("Graphtool") graphtoolService.setServiceParent(application) def addServices(): import web webServices = web.getService() for service in webServices: graphtoolService.addService(service) import opsview #import rest_api - imported in opsview
import argparse import importlib import logging from twisted.internet.endpoints import StandardIOEndpoint, serverFromString from twisted.python.log import PythonLoggingObserver from twisted.spread import jelly, pb from . import BrowserManager logging.basicConfig(level=logging.DEBUG) logger = logging.getLogger(__name__) log_observer = PythonLoggingObserver() log_observer.start() parser = argparse.ArgumentParser() parser.add_argument('--browser-engine', default='.qt') parser.add_argument('address') args = parser.parse_args() browser_engine = importlib.import_module(args.browser_engine, package=__package__) setup_pre_reactor = getattr(browser_engine, '_setup_pre_reactor', None) if setup_pre_reactor: setup_pre_reactor() from twisted.internet import reactor if args.address == 'stdio': server_endpoint = StandardIOEndpoint(reactor)
import json import hashlib import struct import warnings import logging from twisted.internet import protocol from twisted.python.log import PythonLoggingObserver _observer = PythonLoggingObserver() _observer.start() DEFAULT_PORT = 25005 # random network utlity classes and functions class DigestDict(dict): """This is a docstring. This whole class is unused.""" def digest(self): """This is also a docstring. """ # Given a dictionary, inserts a json digest, assuming that # the element 'digest' contains N zeros, and it's printed # with indent 4, sorted # where N is the hash length warnings.warn("DigestDict is deprecated",DeprecationWarning,stacklevel=2) dcopy = self.copy() hasher = hashlib.new('md5')
LOGGER.error("Error response to full image capture process: %s" % err) self.state = TreatCam.IDLE return err def trimExcessCaptureFiles(self): captures = sorted(glob(path.join(self.config.captureDir, TreatCam.CAPTURE_PREFIX + "*"))) excessCaptures = len(captures) - self.config.capturesToRetain if (excessCaptures > 0): for i in range(excessCaptures): LOGGER.info("Trimming: %s" % captures[i]) remove(captures[i]) if __name__=="__main__": from logging import Formatter, StreamHandler, INFO, DEBUG, getLogger from os import getcwd from twisted.python.log import PythonLoggingObserver logFormatter = Formatter(fmt='%(asctime)s - %(name)s - %(levelname)s - %(message)s', datefmt="%Y-%m-%d %H:%M:%S") rootLogger = getLogger() consoleHandler = StreamHandler() consoleHandler.setFormatter(logFormatter) rootLogger.addHandler(consoleHandler) rootLogger.setLevel(DEBUG) observer = PythonLoggingObserver() observer.start() cam = TreatCam(reactor, TreatCamConfig()) cam.startMotionCapture() reactor.run()
import json import hashlib import struct import warnings import logging from twisted.internet import protocol from twisted.python.log import PythonLoggingObserver _observer = PythonLoggingObserver() _observer.start() DEFAULT_PORT = 25005 # random network utlity classes and functions class DigestDict(dict): """This is a docstring. This whole class is unused.""" def digest(self): """This is also a docstring. """ # Given a dictionary, inserts a json digest, assuming that # the element 'digest' contains N zeros, and it's printed # with indent 4, sorted # where N is the hash length warnings.warn("DigestDict is deprecated", DeprecationWarning,
import sys import os sys.path.insert(0, os.getcwd()) # Hack to make twistd work when run as root os.chdir(os.path.split(os.getcwd())[0]) import utils log = utils.get_logger("Twistd") mode = utils.config.get('general','mode') from twisted.python.log import PythonLoggingObserver twistdlog = PythonLoggingObserver("Twistd") twistdlog.start() from twisted.application import service from twisted.internet import reactor if mode == 'test': testMode = True else: testMode = False dbManagerSvc = service.MultiService() application = service.Application("proc2statsd") dbManagerSvc.setServiceParent(application) def addServices(): """ """
def init_logging(self):
    """ Initialize twisted logging """
    #: Start twisted logger
    # Forward twisted log events into the stdlib logging module.
    from twisted.python.log import PythonLoggingObserver
    PythonLoggingObserver().start()
def postOptions(self):
    """Validate parsed options and wire up logging (Python 2 codebase).

    Rejects --debug together with --quiet, installs a root 'afm' logger
    plus sqlalchemy/twisted loggers exactly once, then checks that a Root
    CA exists unless the 'newca' subcommand is being run.
    """
    if self.opts['config'] == "~/.afm":
        self.opt_config(self.opts['config'])
    if self.opts['quiet'] and self.opts['debug']:
        print "ERROR: Only pass one of '--debug' or '--quiet', not both."
        self.opt_help()
    if 'logging_level' not in self.opts:
        self.opts['logging_level'] = logging.INFO
    # Setup logging -- guarded so this happens only once per process.
    if logging.getLoggerClass() is not Logging:
        afm_log = logging.getLogger('afm')
        afm_log.setLevel(self.opts['logging_level'])
        if self.opts['logfile']:
            from logging.handlers import RotatingFileHandler
            handler = RotatingFileHandler(
                self.opts['logfile'],
                maxBytes=1*1024*1024,   # 1 MB
                backupCount=5,
                encoding='utf-8'
            )
        else:
            handler = logging.StreamHandler()
        handler.setLevel(self.opts['logging_level'])
        formatter = logging.Formatter(
            "%(asctime)s [%(levelname)-8s] [%(name)-15s] %(message)s",
            "%H:%M:%S"
        )
        handler.setFormatter(formatter)
        afm_log.addHandler(handler)
        # SQLAlchemy: errors only, unless --debug raises the whole family.
        sqla_log = logging.getLogger('sqlalchemy')
        sqla_log.setLevel(logging.ERROR)
        if self.opts['debug']:
            sqla_log.setLevel(self.opts['logging_level'])
            # SQLA Engine Logging
            sqlae = logging.getLogger('sqlalchemy.engine')
            sqlae.setLevel(self.opts['logging_level'])
            sqlae.addHandler(handler)
            # SQLA Unit-Of-Work Logging
            sqlauof = logging.getLogger('sqlalchemy.orm.unitofwork')
            sqlauof.setLevel(self.opts['logging_level'])
            sqlauof.addHandler(handler)
        sqla_log.addHandler(handler)
        # Twisted log events flow into the stdlib 'twisted' logger.
        tw_log = logging.getLogger('twisted')
        tw_log.setLevel(self.opts['logging_level'])
        tw_log.addHandler(handler)
        logging.setLoggerClass(Logging)
        twisted_logging = PythonLoggingObserver('twisted')
        twisted_logging.start()
    if self.subOptions and not self.subOptions.subCommand == 'newca':
        if not db.session().query(Certificate).\
                filter_by(root_ca=True).count():
            SysExit("You haven't generate your Root Certificate Authority "
                    "yet!\nPlease run the \"newca\" command")
def connect(server, password=None):
    """
    Connect to a VNCServer and return a Client instance that is usable
    in the main thread of non-Twisted Python Applications, EXPERIMENTAL.

    >>> from vncdotool import api
    >>> client = api.connect('host')
    >>> client.keyPress('c')
    >>> api.shutdown()

    You may then call any regular VNCDoToolClient method on client from your
    application code.

    If you are using a GUI toolkit or other major async library please read
    http://twistedmatrix.com/documents/13.0.0/core/howto/choosing-reactor.html
    for a better method of integrating vncdotool.

    :param server: server address accepted by ``command.parse_host``.
    :param password: optional VNC password, stored on the factory.
    :return: a ``ThreadedVNCClientProxy`` connected to *server*.
    """
    if not reactor.running:
        # Run the reactor once, in a daemon background thread, so this
        # module works from ordinary (non-Twisted) applications.  Signal
        # handlers must stay in the main thread, hence the kwarg.
        global _THREAD
        _THREAD = threading.Thread(target=reactor.run, name='Twisted',
                                   kwargs={'installSignalHandlers': False})
        _THREAD.daemon = True
        _THREAD.start()

        # Forward twisted log events into stdlib logging.
        observer = PythonLoggingObserver()
        observer.start()

    factory = VNCDoToolFactory()
    if password is not None:
        factory.password = password

    client = ThreadedVNCClientProxy(factory)
    host, port = command.parse_host(server)
    client.connect(host, port)

    return client