def logToStdout():
    """Route all log events to standard output, flushing after each one."""
    def _emit(event):
        # Render the event (including its type) and flush immediately so
        # output is visible even when stdout is block-buffered.
        line = formatEvent(event, includeType=True)
        stdout.write(line + '\n')
        stdout.flush()
    startLoggingWithObserver(_emit)
def logTwisted():
    """
    Integrate twisted's logger with our logger.

    This is done in a separate method because calling this imports and sets
    up a reactor.  Since we want basic logging working before choosing a
    reactor, we need to separate these.
    """
    global _initializedTwisted
    # Idempotent: hook twisted's logging at most once per process.
    if _initializedTwisted:
        return
    debug('log', 'Integrating twisted logger')
    # integrate twisted's logging with us
    from twisted.python import log as tlog
    # this call imports the reactor
    # that is why we do this in a separate method
    from twisted.spread import pb
    # we don't want logs for pb.Error types since they
    # are specifically raised to be handled on the other side
    observer = _getTheTwistedLogObserver()
    observer.ignoreErrors([pb.Error, ])
    # False: do not redirect stdout into the log.
    tlog.startLoggingWithObserver(observer.emit, False)
    _initializedTwisted = True
def __init__(self): cfg = config() # protocol^Wwhatever instances are kept here for the interact feature self.sessions = {} # for use by the uptime command self.starttime = time.time() # load db loggers self.dbloggers = [] for x in cfg.sections(): if not x.startswith('database_'): continue engine = x.split('_')[1] dbengine = 'database_' + engine lcfg = ConfigParser.ConfigParser() lcfg.add_section(dbengine) for i in cfg.options(x): lcfg.set(dbengine, i, cfg.get(x, i)) lcfg.add_section('honeypot') for i in cfg.options('honeypot'): lcfg.set('honeypot', i, cfg.get('honeypot', i)) print 'Loading dblog engine: %s' % (engine,) dblogger = __import__( 'kippo.dblog.%s' % (engine,), globals(), locals(), ['dblog']).DBLogger(lcfg) log.startLoggingWithObserver(dblogger.emit, setStdout=False) self.dbloggers.append(dblogger)
def main():
    """
    Main function to be run if __name__ == "__main__".
    """
    config = WorkerOptions()
    config.parseOptions()

    # Imported here so option parsing works without the worker machinery.
    from twisted.trial._dist.worker import WorkerProtocol
    workerProtocol = WorkerProtocol(config['force-gc'])

    # AMP traffic flows over dedicated file descriptors, not stdio.
    protocolIn = os.fdopen(_WORKER_AMP_STDIN)
    protocolOut = os.fdopen(_WORKER_AMP_STDOUT, 'w')
    workerProtocol.makeConnection(FileWrapper(protocolOut))

    observer = WorkerLogObserver(workerProtocol)
    # False: do not redirect stdout into the log.
    startLoggingWithObserver(observer.emit, False)

    # Pump bytes one at a time into the protocol until EOF.
    while True:
        try:
            r = protocolIn.read(1)
        except IOError, e:
            # Retry reads interrupted by a signal (EINTR); re-raise anything else.
            if e.args[0] == errno.EINTR:
                sys.exc_clear()
                continue
            else:
                raise
        if r == '':
            # EOF: the parent closed the pipe.
            break
        else:
            workerProtocol.dataReceived(r)
            protocolOut.flush()
            sys.stdout.flush()
            sys.stderr.flush()
def set_log_file(logFile, setStdout=True, start=True):
    """Set up twisted log files in a standard way.

    This borrows functionality from the twisted.application.app
    AppLogger.start() method, which in turn uses the
    twisted.scripts._twistd_unix.UnixAppLogger._getLogObserver method.

    That method returns a FileLog that is hard-coded to set a log
    rotation of 1 MB.  While we're here anyway, overriding that as well
    to remove the rotation size.  We rotate logs with an external
    logrotate command that sends SIGUSR1 to tell the broker to reopen
    the log file.

    :param logFile: full path of the log file to open.
    :param setStdout: forwarded to startLoggingWithObserver (capture stdout).
    :param start: when False, only build and return the observer without
        starting twisted logging.
    :returns: the FileLogObserver emit callable.
    """
    # rotateLength=0 disables twisted's built-in size-based rotation.
    log_file = logfile.LogFile.fromFullPath(logFile, rotateLength=0)
    observer = log.FileLogObserver(log_file).emit
    try:
        import signal
    except ImportError:
        pass
    else:
        # Override if signal is set to None or SIG_DFL (0)
        if not signal.getsignal(signal.SIGUSR1):
            # Fix: parameters renamed from (signal, frame) — the first
            # argument used to shadow the signal module inside the handler.
            def restartLog(signum, frame):
                # Reopen the file so external logrotate can move it aside.
                log_file.flush()
                try:
                    log_file.close()
                except Exception:
                    # Best effort only; narrowed from a bare except so
                    # SystemExit/KeyboardInterrupt still propagate.
                    pass
                log_file._openFile()
            signal.signal(signal.SIGUSR1, restartLog)
    if start:
        log.startLoggingWithObserver(observer, setStdout=setStdout)
    return observer
def main(argv):
    """Bridge twisted logging into stdlib logging, then serve the SOAP app."""
    twisted_bridge = log.PythonLoggingObserver('twisted')
    log.startLoggingWithObserver(twisted_bridge.emit, setStdout=False)
    app = WsgiApplication(soap_application)
    return run_twisted([(app, url)], port)
def startLogging(self, filePath=None, stealStdio=False, printToConsole=True): ''' Begin logging. The output class is ready to go out of the box, but in order to prevent mere imports from stealing stdio or console logging to vanish these must be manually turned on. :param filePath: if provided, begin logging to the given directory. If not provided, do not write out logs. :type filePath: str :param stealStdio: choose to intercept stdio (including vanilla print statements) or allow it to passthrough :type stealStdio: bool. :param printToConsole: output the results of all logging to the console. This is primarily a performance consideration when running in production :type printToConsole: bool. ''' # Initialize printer thread self.__dict__['logpath'] = None if filePath is not None: self.__dict__['queue'] = queue.Queue() self.__dict__['printer'] = PrintLogThread(filePath, self.queue, LOG_NAME) self.__dict__['logpath'] = filePath self.printer.start() # by default, stdio gets captures. This can be toggled off self.stealStdio(stealStdio) self.logToConsole(printToConsole) # Override twisted logging (allows us to cleanly catch all exceptions) # This must come after the setattr calls so we get the wrapped object log.startLoggingWithObserver(self.twisted, setStdout=False) log.startLoggingWithObserver(self.twistedErr, setStdout=False)
def connectionMade(self):
    # Set up control-key handlers, then mirror twisted error events onto
    # this terminal.
    ReceiveLineProtocol.connectionMade(self)
    self.keyHandlers['\x03'] = self.handle_INT   # Control-C
    self.keyHandlers['\x04'] = self.handle_EOF   # Control-D
    self.keyHandlers['\x1c'] = self.handle_QUIT  # Control-\
    self.keyHandlers['\x0c'] = self.handle_FF    # Control-L
    #self.keyHandlers['\t' ] = self.handle_TAB   # Tab
    if self.emulate == "emacs":
        # EMACS key bindings
        self.keyHandlers['\x10'] = self.handle_UP     # Control-P
        self.keyHandlers['\x0e'] = self.handle_DOWN   # Control-N
        self.keyHandlers['\x02'] = self.handle_LEFT   # Control-B
        self.keyHandlers['\x06'] = self.handle_RIGHT  # Control-F
        self.keyHandlers['\x01'] = self.handle_HOME   # Control-A
        self.keyHandlers['\x05'] = self.handle_END    # Control-E

    def observer(event):
        # Only surface errors; ordinary log traffic stays off the terminal.
        if not event["isError"]:
            return
        text = log.textFromEventDict(event)
        if text is None:
            return
        # Log events may fire on other threads; marshal the write onto the
        # reactor thread.
        self.service.reactor.callFromThread(self.terminal.write, text)
    log.startLoggingWithObserver(observer)
def main(argv):
    """Serve soap11_application plus the current directory's static files.

    :returns: the result of ``reactor.run()``.
    """
    # Fix: `from twisted.python import log` appeared twice; duplicate removed.
    from twisted.python import log
    from twisted.web.server import Site
    from twisted.web.static import File
    from twisted.internet import reactor

    # Forward twisted's log events into the stdlib logging framework.
    observer = log.PythonLoggingObserver('twisted')
    log.startLoggingWithObserver(observer.emit, setStdout=False)

    static_dir = os.path.abspath('.')
    logging.info("registering static folder %r on /" % static_dir)
    root = File(static_dir)

    wr = TwistedWebResource(soap11_application)
    logging.info("registering %r on /%s" % (wr, url))
    root.putChild(url, wr)

    site = Site(root)
    if port[0] == 0:
        port[0] = get_open_port()
    reactor.listenTCP(port[0], site)
    # Fix: port is a one-element container (see port[0] above); formatting
    # it directly with %d raised TypeError — log the actual port number.
    logging.info("listening on: %s:%d" % (host, port[0]))

    return reactor.run()
def __init__(self):
    """
    Constructor __init__(ServerTwisted)

    :since: v1.0.0
    """

    AbstractServer.__init__(self)

    self.log_observer = None
    """ @TODO """
    self.reactor = None
    """ Twisted reactor instance """
    self.thread_pool = None
    """ @TODO """

    # Only bridge twisted logging if the project's LogHandler singleton
    # is available; False suppresses instantiation-on-missing.
    log_handler = NamedLoader.get_singleton("dNG.data.logging.LogHandler", False)

    if (log_handler is not None):
        log_handler.add_logger("twisted")
        self.log_observer = log.PythonLoggingObserver("twisted")
        self.log_observer.start()
        log.startLoggingWithObserver(self.log_observer.emit, setStdout = False)
def hook_twisted(levels=None, redirect_stdout=0):
    """Attach twisted's log stream to the stdlib logging framework.

    :param levels: optional level overrides applied via set_levels.
    :param redirect_stdout: forwarded as setStdout (capture print output).
    """
    _ensure_main_logger()
    if levels:
        set_levels(levels)
    from twisted.python import log
    bridge = log.PythonLoggingObserver(TWISTED_CATEGORY)
    log.startLoggingWithObserver(bridge.emit, setStdout=redirect_stdout)
def startLogging(self, observer):
    """
    Initialize the logging system.

    DEPRECATED.

    @param observer: The observer to add to the logging system.
    """
    log.startLoggingWithObserver(observer)
    # Emit the standard startup log lines once logging is live.
    self.logger._initialLog()
def start(logfile=None, loglevel='INFO', logstdout=True, encoding='utf-8'):
    """Start crawlmi logging via a twisted file observer and return it."""
    threshold = _get_log_level(loglevel)
    # Append-binary to the named file, or fall back to stderr.
    stream = open(logfile, 'ab') if logfile else sys.stderr
    observer = CrawlmiFileLogObserver(stream, threshold, encoding)
    saved_showwarning = warnings.showwarning
    log.startLoggingWithObserver(observer.emit, setStdout=logstdout)
    # restore warnings, wrongly silenced by Twisted
    warnings.showwarning = saved_showwarning
    return observer
def initialize(services, tns='spyne.examples.twisted.resource'):
    """Configure debug logging and build the HttpRpc spyne Application."""
    logging.basicConfig(level=logging.DEBUG)
    logging.getLogger('spyne.protocol.xml').setLevel(logging.DEBUG)

    # Route twisted's own log traffic into stdlib logging.
    bridge = log.PythonLoggingObserver('twisted')
    log.startLoggingWithObserver(bridge.emit, setStdout=False)

    return Application(services, 'spyne.examples.twisted.hello',
                       in_protocol=HttpRpc(), out_protocol=HttpRpc())
def startLogging(self, observer):
    """
    Initialize the logging system.

    @param observer: The observer to add to the logging system.
    """
    log.startLoggingWithObserver(observer)
    # Push anything already buffered before the observer took over.
    sys.stdout.flush()
    initialLog()
def _setUpLogFile(self):
    """(Re)open the configured log file and install a FileLogObserver.

    ``-`` selects stdout; any other value is opened for append.
    """
    self._tearDownLogFile()
    if self.logfile == '-':
        logFile = sys.stdout
    else:
        # Fix: use open() instead of the Python-2-only file() builtin;
        # behavior is identical and it stays portable.
        logFile = open(self.logfile, 'a')
    self._logFileObject = logFile
    self._logFileObserver = log.FileLogObserver(logFile)
    # 0: do not capture stdout into the log.
    log.startLoggingWithObserver(self._logFileObserver.emit, 0)
def start(logfile=None, loglevel='INFO', logstdout=True, logencoding='utf-8'):
    """Start scrapy file logging unless twisted logging already runs."""
    if log.defaultObserver:  # check twisted log not already started
        threshold = _get_log_level(loglevel)
        stream = open(logfile, 'a') if logfile else sys.stderr
        observer = ScrapyFileLogObserver(stream, threshold, logencoding)
        saved = warnings.showwarning
        log.startLoggingWithObserver(observer.emit, setStdout=logstdout)
        # restore warnings, wrongly silenced by Twisted
        warnings.showwarning = saved
def set_up_logging_for_script(options, name, logfile):
    """Create a `Logger` object and configure twisted to use it.

    This also configures oops reporting to use the section named 'name'."""
    logger_object = logger(options, name)
    # The oops setup returns the twisted observer we should install.
    observer = set_up_oops_reporting(name, name, logfile)
    log.startLoggingWithObserver(observer)
    return logger_object
def start(logfile=None, loglevel='INFO', logstdout=True, logencoding='utf-8',
          crawler=None):
    """Attach a ScrapyFileLogObserver to twisted logging and return it."""
    output = open(logfile, 'a') if logfile else sys.stderr
    observer = ScrapyFileLogObserver(
        output, _get_log_level(loglevel), logencoding, crawler)
    previous = warnings.showwarning
    log.startLoggingWithObserver(observer.emit, setStdout=logstdout)
    # restore warnings, wrongly silenced by Twisted
    warnings.showwarning = previous
    return observer
def setup_log(self, name): try: self.log_file = open(name, 'a') self.log_observer = log.FileLogObserver(self.log_file) log.startLoggingWithObserver(self.log_observer.emit) except: msg = "Error in setup_log:\n%s" % traceback.format_exc() print msg mail.error(msg)
def startLogging(*args, **kw):
    """Deprecated entry point: build an observer and start twisted logging.

    Emits a PendingDeprecationWarning pointing callers at ApplicationRunner.
    """
    # Fix: the warning text contained a stray trailing double-quote
    # ('...startLogging."'); removed so the message reads correctly.
    warnings.warn(
        """
        Use ApplicationRunner instead of startLogging.
        """,
        category=PendingDeprecationWarning, stacklevel=2)
    observer = _getLogObserver(*args, **kw)
    log.startLoggingWithObserver(observer)
    sys.stdout.flush()
def startLogging(prefix="Twisted", options=DEFAULT_OPTIONS,
                 facility=DEFAULT_FACILITY, setStdout=1):
    """
    Send all Twisted logging output to syslog from now on.

    The prefix, options and facility arguments are passed to
    C{syslog.openlog()}, see the Python syslog documentation for details.
    For other parameters, see L{twisted.python.log.startLoggingWithObserver}.
    """
    syslog_observer = SyslogObserver(prefix, options, facility)
    log.startLoggingWithObserver(syslog_observer.emit, setStdout=setStdout)
def main(): logging.basicConfig(filename="client.log", level=logging.CRITICAL) #observer = log.PythonLoggingObserver() #observer.start() loginname = argv[1] server = argv[2] port = int(argv[3]) botname = None if len(argv) >= 5: botname = argv[4] if ENABLE_AUTH: password = getpass() logging.info("Logging in") params = urllib.urlencode({'user': loginname, 'password': password, 'version': 9001}) handler = urllib.urlopen("http://login.minecraft.net/", params) ret = handler.read() logging.debug(ret) if ret == "Bad login1": logging.error(ret) return -1 version, downloadTicket, username, sessionId= ret.split(":") # version = 22 # downloadTicket = "" # username = "******" # sessionId = 1111 logging.info("Got %r %r %r %r" % (version, downloadTicket, username, sessionId)) if not botname: botname = username else: sessionId = 0 if not botname: botname = loginname username = botname interfaceNamespace = {} f = BotFactory(username, sessionId, botname, interfaceNamespace) logging.info("tcptwisted") reactor.connectTCP(server, port, f) logging.info("tcptwisted") if ENABLE_CONSOLE: #start with a null oberserver to remove DefaultObserver #because we can't stderr in a terminal log.startLoggingWithObserver(lambda a: '') runReactorWithTerminal(CommandLineBotInterface, interfaceNamespace) else: reactor.run()
def startLogging(path, filename, debug, capture_stdout=True):
    """Initialise app logging, hook twisted into it, and return the log module."""
    log.init(path, filename, debug, not capture_stdout)
    # Pick the observer matching the requested verbosity.
    observer = twisted_debug_observer if debug else twisted_log_observer
    twisted_log.startLoggingWithObserver(observer, setStdout=capture_stdout)
    log.msg('LogStart;%s', filename)
    twisted_log.msg('TwistedLogStart')
    return log
def startStdio(self, reactor, interface):
    # Drive the interface over stdin/stdout and print only twisted
    # failures to the same stream.
    sio = stdio.StandardIO(interface, reactor=reactor)
    # NOTE(review): flush is stubbed to a no-op — presumably StandardIO
    # lacks one and a caller expects it; confirm.
    sio.flush = lambda: None

    def formatLogEvent(e):
        # Only error events (those carrying a failure) are shown.
        if 'failure' not in e:
            return
        text = log.textFromEventDict(e)
        sio.write('### ERROR ###\n%s\n' % (text,))
    log.startLoggingWithObserver(formatLogEvent, setStdout=False)
def setup_logging():
    """Start twisted logging to LOG_FILE inside LOG_DIR (or stderr).

    Ensures LOG_DIR exists first; logs rotate daily via DailyLogFile.
    """
    # Fix: create the directory EAFP-style instead of exists()+makedirs(),
    # which raced if another process created the directory in between.
    try:
        os.makedirs(settings.LOG_DIR)
    except OSError:
        # Already exists (or lost the creation race) — only re-raise when
        # the path is genuinely unusable as a directory.
        if not os.path.isdir(settings.LOG_DIR):
            raise
    if settings.LOG_FILE:
        logfile = DailyLogFile.fromFullPath(
            os.path.join(settings.LOG_DIR, settings.LOG_FILE)
        )
    else:
        logfile = sys.stderr
    observer = ScrapyrtFileLogObserver(logfile)
    startLoggingWithObserver(observer.emit, setStdout=False)
def startLogging(prefix='Twisted', options=DEFAULT_OPTIONS,
                 facility=DEFAULT_FACILITY, setStdout=1):
    """
    Send all Twisted logging output to syslog from now on.

    The prefix, options and facility arguments are passed to
    C{syslog.openlog()}, see the Python syslog documentation for details.
    For other parameters, see L{twisted.python.log.startLoggingWithObserver}.
    """
    log.startLoggingWithObserver(
        SyslogObserver(prefix, options, facility).emit,
        setStdout=setStdout)
def __init__(self):
    # Install the renamer observer before anything else can emit log events.
    self._obs = logging.RenamerObserver()
    log.startLoggingWithObserver(self._obs.emit, setStdout=False)
    self.options = self.parseOptions()
    # Axiom store lives under the user's home directory.
    self.store = Store(os.path.expanduser('~/.renamer/renamer.axiom'))
    # XXX: One day there might be more than one History item.
    self.history = self.store.findOrCreate(History)
    self.args = getattr(self.options, 'args', [])
    self.command = self.getCommand(self.options)
def __init__(self, reactor, priority, appname, transports):
    self.reactor = reactor
    self.priority = priority
    self.appname = appname
    self.transports = transports
    self.observer = Observer(self.reactor, self.priority, self.appname, self.transports)
    # Monkey-patch the module-level twisted log helpers so all callers of
    # _log.msg/_log.err go through this instance.
    _log.msg = self.msg
    _log.err = self.err
    # setStdout=1: stdout is captured into the log as well.
    _log.startLoggingWithObserver(self.observer.emit, setStdout = 1)
def start(logfile=LOG_FILE, loglevel=LOG_LEVEL, logstdout=LOG_STDOUT,
          logencoding=LOG_ENCODING, crawler=None):
    """Begin routing twisted log output through a ScrapyFileLogObserver."""
    numeric_level = _get_log_level(loglevel)
    out = open(logfile, 'a') if logfile else sys.stderr
    observer = ScrapyFileLogObserver(out, numeric_level, logencoding, crawler)
    previous_showwarning = warnings.showwarning
    log.startLoggingWithObserver(observer.emit, setStdout=logstdout)
    # restore warnings, wrongly silenced by Twisted
    warnings.showwarning = previous_showwarning
    return observer
def start(self, application):
    # Bridge twisted logging into stdlib logging at a level driven by
    # config.debug, and also append everything to the daily log file.
    logging.basicConfig()
    python_logging = txlog.PythonLoggingObserver()
    if config.debug:
        python_logging.logger.setLevel(logging.DEBUG)
    else:
        python_logging.logger.setLevel(logging.INFO)
    txlog.startLoggingWithObserver(python_logging.emit)
    # NOTE(review): daily_logfile is not defined in this method —
    # presumably a module-level DailyLogFile; confirm.
    txlog.addObserver(txlog.FileLogObserver(daily_logfile).emit)
def getReady():
    """
    must be called after setup() and after enabling handlers with
    enableHandler()
    """
    # Filter twisted's own traffic through the Angel twisted filter.
    ourTwistedLogger = getLogger("twisted")
    ourTwistedLogger.addFilter(AngelLogTwistedFilter())
    # Apply the configured per-logger filters (f[0] is the logger name).
    filters = getLoggingFilters()
    for f in filters:
        logger = _getLogger(f[0])
        logger.addFilter(AngelLogFilter(f))
    # logTwisted acts as the twisted observer; stdout stays untouched.
    twistedlog.startLoggingWithObserver(logTwisted, setStdout=0)
def start(logfile=None, loglevel='INFO', logstdout=True, logencoding='utf-8',
          crawler=None):
    """Install a ScrapyFileLogObserver as the twisted log observer."""
    if logfile:
        destination = open(logfile, 'a')
    else:
        destination = sys.stderr
    log_observer = ScrapyFileLogObserver(
        destination, _get_log_level(loglevel), logencoding, crawler)
    original_showwarning = warnings.showwarning
    log.startLoggingWithObserver(log_observer.emit, setStdout=logstdout)
    # restore warnings, wrongly silenced by Twisted
    warnings.showwarning = original_showwarning
    return log_observer
def init(log_file, log_level):
    """Start up logging system"""
    global _log_file, _log_level, _loud_init
    # NOTE(review): assert is stripped under python -O; consider raising
    # ValueError for invalid levels instead.
    assert log_level in LEVELS
    _log_file = log_file
    _log_level = LEVELS.index(log_level)
    if _log_file != sys.stdout:
        # Announce initialization loudly when logging goes to a real file.
        _loud_init = True
    # setStdout=0: keep stdout out of the log stream.
    log.startLoggingWithObserver(_logger, setStdout=0)
def initialize(services, tns='spyne.examples.twisted.resource'):
    """Set up debug logging and build the Soap11 spyne Application."""
    logging.basicConfig(level=logging.DEBUG)
    logging.getLogger('spyne.protocol.xml').setLevel(logging.DEBUG)

    twisted_bridge = log.PythonLoggingObserver('twisted')
    log.startLoggingWithObserver(twisted_bridge.emit, setStdout=False)

    return Application(services, 'order',
                       in_protocol=Soap11(validator='lxml'),
                       out_protocol=Soap11())
def initialize(services=(SomeService, )):
    """Wire twisted logging into stdlib logging and create the Application."""
    logging.basicConfig(level=logging.DEBUG)
    logging.getLogger('spyne.protocol.xml').setLevel(logging.DEBUG)

    bridge = log.PythonLoggingObserver('twisted')
    log.startLoggingWithObserver(bridge.emit, setStdout=False)

    app = Application(services, 'spyne.examples.hello.twisted',
                      in_protocol=HttpRpc(), out_protocol=XmlDocument())
    return app
def initialize_logging(): """Initialize logging to send messages to Hubstorage job logs it initializes: - Python logging - Twisted logging - Scrapy logging - Redirects standard output and stderr to job log at INFO level This duplicates some code with Scrapy log.start(), but it's required in order to avoid scrapy from starting the log twice. """ # General python logging root = logging.getLogger() root.setLevel(logging.NOTSET) hdlr = HubstorageLogHandler() hdlr.setLevel(logging.INFO) hdlr.setFormatter(logging.Formatter('[%(name)s] %(message)s')) root.addHandler(hdlr) # Silence commonly used noisy libraries try: import boto # boto overrides its logger at import time except ImportError: pass nh = logging.NullHandler() for ln in ('boto', 'requests', 'hubstorage'): lg = logging.getLogger(ln) lg.propagate = 0 lg.addHandler(nh) # Redirect standard output and error to HS log sys.stdout = StdoutLogger(0, 'utf-8') sys.stderr = StdoutLogger(1, 'utf-8') # Twisted specifics (includes Scrapy) obs = HubstorageLogObserver(hdlr) _oldshowwarning = warnings.showwarning txlog.startLoggingWithObserver(obs.emit, setStdout=False) warnings.showwarning = _oldshowwarning # Scrapy specifics if 'SCRAPY_JOB' in os.environ: log.msg("Scrapy %s started" % __version__) log.msg("Optional features available: %s" % ", ".join(optional_features), level=log.DEBUG) log.start = _dummy # ugly but needed to prevent scrapy re-opening the log return hdlr
def run(argv):
    # Start logging first so reactor/application setup is captured.
    # NOTE(review): the observer instance itself is passed (not .emit) —
    # presumably RefloggingObserver is callable; confirm.
    twisted_log.startLoggingWithObserver(RefloggingObserver())
    if platform.system() != "Windows":
        # Prefer the epoll reactor, but only if no reactor was picked yet —
        # installing after import would raise.
        if 'twisted.internet.reactor' not in sys.modules:
            log.debug("installing epoll reactor")
            from twisted.internet import epollreactor
            epollreactor.install()
        else:
            log.debug("reactor already installed")
    from twisted.internet import reactor
    application = makeApplication(argv)
    app.startApplication(application, None)
    reactor.run()
def main(argv):
    # logging
    logging.basicConfig(
        level=logging.INFO,
        format='%(asctime)s - %(name)s - %(levelname)s: %(message)s')
    logging.getLogger('server').setLevel(logging.INFO)

    # command-line arguments
    hostname = '127.0.0.1'
    port = 7788
    try:
        opts, args = getopt.getopt(argv, "hH:P:", ["host=", "port="])
    except getopt.GetoptError:
        print './server.py -H <hostname> -P <port>'
        sys.exit(2)
    for opt, arg in opts:
        if opt == '-h':
            print './server.py -H <hostname> -P <port>'
            sys.exit()
        elif opt in ("-H", "--host"):
            hostname = arg
        elif opt in ("-P", "--port"):
            # Accept the port as either a string or an int.
            if isinstance(arg, str):
                port = int(arg)
            elif isinstance(arg, int):
                port = arg
            else:
                port = arg

    # Initialize the application
    observer = log.PythonLoggingObserver('twisted')
    log.startLoggingWithObserver(observer.emit, setStdout=False)

    application = Application([Replica], 'spyne.datastore.replica',
                              in_protocol=Soap11(), out_protocol=Soap11())
    #wsgi_app = WsgiApplication(application)

    # Register the WSGI application as handler to wsgi server & run http server
    #resource = WSGIResource(reactor, reactor, wsgi_app)
    resource = TwistedWebResource(application)
    site = Site(resource)

    reactor.listenTCP(port, site, interface=hostname)

    logging.info('listening on: %s:%d' % (hostname, port))
    logging.info('wsdl is at: http://%s:%d/?wsdl' % (hostname, port))

    sys.exit(reactor.run())
def main():
    """Entry point: load the config dir, start logging, run all plugins."""
    if len(sys.argv) < 2:
        print("Usage: %s <config dir>" % sys.argv[0])
        sys.exit(1)

    transportobj = transport.Transport()
    boss = pluginbase.PluginBoss(sys.argv[1], transportobj)

    # Timestamped logging straight to stdout.
    stdout_observer = log.FileLogObserver(sys.stdout)
    stdout_observer.timeFormat = "%Y-%m-%d %H:%M:%S"
    log.startLoggingWithObserver(stdout_observer.emit)

    log.msg("Abbott starting up!")
    boss.load_all_plugins()
    reactor.run()
def start(error_log_level=logging.ERROR, info_log_level=logging.INFO):
    """Writes twisted output to a logger using 'twisted' as the logger name
    (i.e., 'twisted' is passed as name arg to logging.getLogger(name)).
    """
    observer = LogLogObserver(error_log_level, info_log_level)
    # Twisted's setStdout capture is deliberately disabled: it cannot
    # reliably distinguish twisted-generated entries from redirected
    # stdout/stderr output, so stdio redirection is handled separately
    # from twisted logging. --Dave
    log.startLoggingWithObserver(observer.emit, setStdout=0)
def main(argv):
    """Serve httprpc_soap_application over HTTP and run the reactor."""
    from twisted.web.server import Site
    from twisted.internet import reactor
    from twisted.python import log

    bridge = log.PythonLoggingObserver('twisted')
    log.startLoggingWithObserver(bridge.emit, setStdout=False)

    resource = TwistedWebResource(httprpc_soap_application)
    reactor.listenTCP(port, Site(resource))
    logging.info("listening on: %s:%d" % (host, port))
    return reactor.run()
def __init__(self): cfg = config() # protocol^Wwhatever instances are kept here for the interact feature self.sessions = {} # for use by the uptime command self.starttime = time.time() # load db loggers self.dbloggers = [] for x in cfg.sections(): if not x.startswith('database_'): continue engine = x.split('_')[1] dbengine = 'database_' + engine lcfg = ConfigParser.ConfigParser() lcfg.add_section(dbengine) for i in cfg.options(x): lcfg.set(dbengine, i, cfg.get(x, i)) lcfg.add_section('honeypot') for i in cfg.options('honeypot'): lcfg.set('honeypot', i, cfg.get('honeypot', i)) log.msg('Loading dblog engine: %s' % (engine, )) dblogger = __import__('cowrie.dblog.%s' % (engine, ), globals(), locals(), ['dblog']).DBLogger(lcfg) log.startLoggingWithObserver(dblogger.emit, setStdout=False) self.dbloggers.append(dblogger) # load new output modules self.output_plugins = [] for x in cfg.sections(): if not x.startswith('output_'): continue engine = x.split('_')[1] output = 'output_' + engine lcfg = ConfigParser.ConfigParser() lcfg.add_section(output) for i in cfg.options(x): lcfg.set(output, i, cfg.get(x, i)) lcfg.add_section('honeypot') for i in cfg.options('honeypot'): lcfg.set('honeypot', i, cfg.get('honeypot', i)) log.msg('Loading output engine: %s' % (engine, )) output = __import__('cowrie.output.%s' % (engine, ), globals(), locals(), ['output']).Output(lcfg) log.startLoggingWithObserver(output.emit, setStdout=False) self.output_plugins.append(output)
def start(logfile=None, application_name="ooniprobe"):
    """Start logging to stdout (unprefixed) and to a daily-rotated file.

    :param logfile: log file path; falls back to config.basic.logfile.
    :param application_name: name used in the startup banner.
    """
    # Fix: removed the dead `daily_logfile = None` pre-assignment — the
    # variable is unconditionally assigned below before any use.
    if not logfile:
        logfile = config.basic.logfile
    log_folder = os.path.dirname(logfile)
    log_filename = os.path.basename(logfile)
    daily_logfile = DailyLogFile(log_filename, log_folder)
    txlog.msg("Starting %s on %s (%s UTC)" % (application_name,
              otime.prettyDateNow(), otime.utcPrettyDateNow()))
    # Primary observer writes to stdout without the twisted prefix; the
    # file observer is added alongside it.
    txlog.startLoggingWithObserver(LogWithNoPrefix(sys.stdout).emit)
    txlog.addObserver(txlog.FileLogObserver(daily_logfile).emit)
def start(gdata):
    # Async startup (generator-style, yields deferreds): sync timezone,
    # wire syslog into twisted logging, then build db/redis/cache state
    # on the shared gdata object.
    yield utils.update_tz(gdata.config.system.tz)
    syslog = logger.Logger(gdata.config, "toughcrt")
    dispatch.register(syslog)
    log.startLoggingWithObserver(syslog.emit, setStdout=0)
    gdata.db_engine = get_engine(gdata.config, pool_size=10)
    # gdata.db = scoped_session(sessionmaker(bind=gdata.db_engine, autocommit=False, autoflush=False))
    gdata.redisconf = redis_conf(gdata.config)
    # Cache name is per-process so workers don't collide.
    gdata.cache = CacheManager(gdata.redisconf, cache_name='Cache-%s' % os.getpid())
    gdata.statcache = StatCounter(gdata)
    # cache event init
    dispatch.register(gdata.cache)
    dispatch.register(RadClientWorker(gdata))
def logToDir(logDir):
    """Write log events into *logDir*: one daily file per event type, plus
    a catch-all console.log for events without a type."""
    consoleLogFile = DailyLogFile('console.log', logDir)
    customLogs = {}

    def observer(event):
        message = formatEvent(event)
        logType = event.get('type')
        # Lazily open one daily file per distinct event type.
        if logType is not None and logType not in customLogs:
            customLogs[logType] = DailyLogFile(logType + '.log', logDir)
        target = customLogs.get(logType, consoleLogFile)
        target.write(message + '\n')
        target.flush()

    startLoggingWithObserver(observer)
def main():
    # Daemon entry point: set up HTTP/NoSQL clients, optionally daemonize,
    # route twisted errors into the exception log, and run the periodic task.
    global http_client, nosql_client, bg_task, exception_log
    http_client = AsyncHTTP.AsyncHTTPRequester(
        -1, -1, int(0.8 * interval), -1,
        # Before daemonizing, errors go to stderr; afterwards to the log.
        lambda x: exception_log.event(int(time.time()), x) if daemonize
        else sys.stderr.write(x + '\n'),
        max_tries=1)
    nosql_client = SpinNoSQL.NoSQLClient(SpinConfig.get_mongodb_config(
        SpinConfig.config['game_id']),
        max_retries=-1)  # never give up
    #log.startLogging(sys.stdout)
    signal.signal(signal.SIGHUP, handle_SIGHUP)
    bg_task = task.LoopingCall(bgtask_func)

    if daemonize:
        Daemonize.daemonize()
        # update PID file with new PID
        open(pidfile, 'w').write('%d\n' % os.getpid())

    exception_log = SpinLog.DailyRawLog(
        SpinConfig.config.get('log_dir', 'logs') + '/', '-exceptions.txt')

    # turn on Twisted logging
    def log_exceptions(eventDict):
        # Forward only error events, with traceback when available.
        if eventDict['isError']:
            if 'failure' in eventDict:
                text = ((eventDict.get('why') or 'Unhandled Error') + '\n' +
                        eventDict['failure'].getTraceback().strip())
            else:
                text = ' '.join([str(m) for m in eventDict['message']])
            exception_log.event(int(time.time()), text)

    def log_raw(eventDict):
        # Deliberate no-op primary observer: silences twisted's default
        # observer; real error reporting happens via log_exceptions below.
        return
    log.startLoggingWithObserver(log_raw)
    log.addObserver(log_exceptions)

    bg_task.start(interval)
    reactor.run()
def start(self, logfile=None, application_name="ooniprobe"):
    # Resolve the log path (falling back to the running path when the
    # configured location is not writable), pick levels and rotation
    # policy, then install file + stdout observers.
    from ooni.settings import config

    if not logfile:
        logfile = os.path.expanduser(config.basic.logfile)

    log_folder = os.path.dirname(logfile)
    if (not os.access(log_folder, os.W_OK) or
            (os.path.exists(logfile) and not os.access(logfile, os.W_OK))):
        # If we don't have permissions to write to the log_folder or
        # logfile.
        log_folder = config.running_path
        logfile = os.path.join(log_folder, "ooniprobe.log")

    self.log_filepath = logfile
    mkdir_p(log_folder)
    log_filename = os.path.basename(logfile)
    file_log_level = levels.get(config.basic.loglevel, levels['INFO'])
    stdout_log_level = levels['INFO']
    if config.advanced.debug:
        stdout_log_level = levels['DEBUG']

    # Rotation policy: daily, by size, or plain append.
    if config.basic.rotate == 'daily':
        logfile = MyDailyLogFile(log_filename, log_folder)
    elif config.basic.rotate == 'length':
        logfile = LogFile(log_filename, log_folder,
                          rotateLength=int(human_size_to_bytes(
                              config.basic.rotate_length)),
                          maxRotatedFiles=config.basic.max_rotated_files)
    else:
        logfile = open(os.path.join(log_folder, log_filename), 'a')

    self.fileObserver = MsecLogObserver(logfile, log_level=file_log_level)
    self.stdoutObserver = StdoutStderrObserver(sys.stdout,
                                               log_level=stdout_log_level)

    tw_log.startLoggingWithObserver(self.fileObserver.emit)
    tw_log.addObserver(self.stdoutObserver.emit)

    tw_log.msg("Starting %s on %s (%s UTC)" % (application_name,
               otime.prettyDateNow(), otime.prettyDateNowUTC()))
def main(_fdopen=os.fdopen):
    """
    Main function to be run if __name__ == "__main__".

    @param _fdopen: If specified, the function to use in place of
        C{os.fdopen}.
    @type _fdopen: C{callable}
    """
    config = WorkerOptions()
    config.parseOptions()

    from twisted.trial._dist.worker import WorkerProtocol
    workerProtocol = WorkerProtocol(config['force-gc'])

    # AMP traffic flows over dedicated file descriptors, not stdio.
    protocolIn = _fdopen(_WORKER_AMP_STDIN)
    protocolOut = _fdopen(_WORKER_AMP_STDOUT, 'w')
    workerProtocol.makeConnection(FileWrapper(protocolOut))

    observer = WorkerLogObserver(workerProtocol)
    # False: do not redirect stdout into the log.
    startLoggingWithObserver(observer.emit, False)

    # Pump bytes one at a time into the protocol until EOF.
    while True:
        try:
            r = protocolIn.read(1)
            # Normalize text reads to bytes (Python 2 compatibility path).
            if isinstance(r, unicode):
                r = r.encode("utf-8")
        except IOError as e:
            # Retry reads interrupted by a signal (EINTR).
            if e.args[0] == errno.EINTR:
                if sys.version_info < (3, 0):
                    sys.exc_clear()
                continue
            else:
                raise
        if r == b'':
            # EOF: parent closed the pipe.
            break
        else:
            workerProtocol.dataReceived(r)
            protocolOut.flush()
            sys.stdout.flush()
            sys.stderr.flush()

    # Dump coverage data when tracing was requested.
    if config.tracer:
        sys.settrace(None)
        results = config.tracer.results()
        results.write_results(show_missing=True, summary=False,
                              coverdir=config.coverdir().path)
def run_command_start(options): """ Subcommand "crossbar start". """ ## start Twisted logging ## if not options.logdir: logfd = sys.stderr else: from twisted.python.logfile import DailyLogFile logfd = DailyLogFile.fromFullPath( os.path.join(options.logdir, 'node.log')) from crossbar.twisted.processutil import DefaultSystemFileLogObserver flo = DefaultSystemFileLogObserver(logfd, system="{:<10} {:>6}".format( "Controller", os.getpid())) log.startLoggingWithObserver(flo.emit) log.msg("=" * 30 + " Crossbar.io " + "=" * 30 + "\n") import crossbar log.msg("Crossbar.io {} starting".format(crossbar.__version__)) ## we use an Autobahn utility to import the "best" available Twisted reactor ## reactor = install_reactor(options.reactor, options.debug) from twisted.python.reflect import qual log.msg("Running on {} using {} reactor".format( platform.python_implementation(), qual(reactor.__class__).split('.')[-1])) log.msg("Starting from node directory {}".format(options.cbdir)) ## create and start Crossbar.io node ## from crossbar.controller.node import Node node = Node(reactor, options) node.start() try: log.msg("Entering reactor event loop ...") reactor.run() except Exception as e: log.msg("Could not start reactor: {0}".format(e))
def __init__(self): cfg = config() # protocol^Wwhatever instances are kept here for the interact feature self.sessions = {} # for use by the uptime command self.starttime = time.time() # convert old pass.db root passwords passdb_file = '%s/pass.db' % (cfg.get('honeypot', 'data_path'),) if os.path.exists(passdb_file): userdb = UserDB() print 'pass.db deprecated - copying passwords over to userdb.txt' if os.path.exists('%s.bak' % (passdb_file,)): print 'ERROR: %s.bak already exists, skipping conversion!' % \ (passdb_file,) else: passdb = anydbm.open(passdb_file, 'c') for p in passdb: userdb.adduser('root', 0, p) passdb.close() os.rename(passdb_file, '%s.bak' % (passdb_file,)) print 'pass.db backed up to %s.bak' % (passdb_file,) # load db loggers self.dbloggers = [] for x in cfg.sections(): if not x.startswith('database_'): continue engine = x.split('_')[1] dbengine = 'database_' + engine lcfg = ConfigParser.ConfigParser() lcfg.add_section(dbengine) for i in cfg.options(x): lcfg.set(dbengine, i, cfg.get(x, i)) lcfg.add_section('honeypot') for i in cfg.options('honeypot'): lcfg.set('honeypot', i, cfg.get('honeypot', i)) print 'Loading dblog engine: %s' % (engine,) dblogger = __import__( 'kippo.dblog.%s' % (engine,), globals(), locals(), ['dblog']).DBLogger(lcfg) log.startLoggingWithObserver(dblogger.emit, setStdout=False) self.dbloggers.append(dblogger)
def start(self, application):
    """
    Initialize the logging system.

    If an L{ILogObserver} component has been set on C{application}, then
    it will be used as the log observer.  Otherwise a log observer will be
    created based on the command-line options.

    @param application: The application on which to check for an
        L{ILogObserver}.
    """
    # Application-provided observer takes precedence over CLI options.
    observer = application.getComponent(ILogObserver, None)
    if observer is None:
        observer = self._getLogObserver()
    self._observer = observer
    log.startLoggingWithObserver(self._observer)
    self._initialLog()
def setup(self, daemon, log_syslog, log_file, debug=False):
    """
    Configure logging output: syslog, a file, or stdout ('-').

    @param daemon: running as a daemon; stdout logging is then refused.
    @param log_syslog: prefer syslog over file logging when true.
    @param log_file: log file path, '-' for stdout, or falsy for none.
    @param debug: stored on the instance as C{debug_logging}.
    @raises SiptrackError: if no method is selected, or a daemon asks
        for stdout.
    """
    if log_syslog:
        emitter = syslog.SyslogObserver('siptrackd').emit
    elif log_file:
        if log_file == '-':
            # A detached daemon has no usable stdout.
            if daemon:
                raise SiptrackError('Daemons can\'t log to stdout')
            destination = sys.stdout
        else:
            destination = logfile.LogFile.fromFullPath(log_file)
        emitter = log.FileLogObserver(destination).emit
    else:
        raise SiptrackError('No logging method selected')
    log.startLoggingWithObserver(emitter)
    sys.stdout.flush()
    self.debug_logging = debug
    self.setup_complete = True
    return True
def initLog(log_file, log_path, loglevel=0):
    """
    Initialize twisted file logging with a per-day log file and a
    tasklet-id "system" tag on every line.

    @param log_file: base name of the log file.
    @param log_path: directory for log files, or the literal string
        'stdout' to log to standard output (used by the unit tests).
    @param loglevel: value stored in the module-global C{log_level}.
    """
    from twisted.python import log
    from twisted.python.logfile import DailyLogFile
    global log_level, _tracemsg
    log_level = loglevel
    if log_path == 'stdout':
        # support the UnitTest
        fout = sys.stdout
    else:
        class TyDailyLogFile(DailyLogFile):
            def __init__(self, log_file, log_path):
                DailyLogFile.__init__(self, log_file, log_path)

            def _openFile(self):
                # Suffix the file name with today's date before opening.
                self.path = os.path.join(self.directory, self.name) + "." + datetime.now().strftime('%Y_%m_%d')
                DailyLogFile._openFile(self)

            def rotate(self):
                """Every open already carries the current date, so no rename is needed."""
                self._file.close()
                self._openFile()
        fout = TyDailyLogFile(log_file, log_path)
    # Flush any messages traced before logging was configured.
    if _tracemsg:
        for msg in _tracemsg:
            fout.write(msg)
            fout.write('\n')
        _tracemsg = None

    class _(log.FileLogObserver):
        # BUG FIX: the original wrote
        #   log.FileLogObserver.timeFormat = '%m-%d %H:%M:%S.%f'
        # inside this class body, mutating the BASE class and thus every
        # FileLogObserver in the process. A plain class attribute scopes
        # the timestamp format to this observer only.
        timeFormat = '%m-%d %H:%M:%S.%f'

        def emit(self, eventDict):
            taskinfo = "%r" % stackless.getcurrent()
            # "<tasklet[, 1]>" --> ", 1": tag lines with the tasklet id.
            eventDict['system'] = taskinfo[11:-2]
            log.FileLogObserver.emit(self, eventDict)

    fl = _(fout)
    log.startLoggingWithObserver(fl.emit)
def runFromSource(cls, job_source, dbuser, logger, _log_twisted=False):
    """Run all ready jobs provided by the specified source.

    The dbuser parameter is not ignored.
    :param _log_twisted: For debugging: If True, emit verbose Twisted
        messages to stderr.
    """
    logger.info("Running through Twisted.")
    if _log_twisted:
        # Route twisted's log through a dedicated python logger at the
        # most verbose level, echoed to stderr.
        logging.getLogger().setLevel(0)
        twisted_logger = logging.getLogger('twistedjobrunner')
        twisted_logger.addHandler(logging.StreamHandler(sys.stderr))
        bridge = log.PythonLoggingObserver(loggerName='twistedjobrunner')
        log.startLoggingWithObserver(bridge.emit)
    runner = cls(job_source, dbuser, logger)
    reactor.callWhenRunning(runner.runAll)
    run_reactor()
    return runner
def start(logfile=None, loglevel=None, logstdout=None):
    """
    Start Scrapy's twisted-based logging, once per process and only when
    LOG_ENABLED is set and twisted logging is not already running.
    """
    global started
    if started or not settings.getbool('LOG_ENABLED'):
        return
    started = True

    if not log.defaultObserver:  # twisted log already started elsewhere
        return
    loglevel = _get_log_level(loglevel)
    logfile = logfile or settings['LOG_FILE']
    # Append to the configured file, or fall back to stderr.
    stream = open(logfile, 'a') if logfile else sys.stderr
    if logstdout is None:
        logstdout = settings.getbool('LOG_STDOUT')
    observer = ScrapyFileLogObserver(stream, loglevel, settings['LOG_ENCODING'])
    saved_showwarning = warnings.showwarning
    log.startLoggingWithObserver(observer.emit, setStdout=logstdout)
    # restore warnings, wrongly silenced by Twisted
    warnings.showwarning = saved_showwarning
    msg("Scrapy %s started (bot: %s)" % (scrapy.__version__, \
        settings['BOT_NAME']))
def initialize_logging():
    """Initialize logging to send messages to Hubstorage job logs

    it initializes:
    - Python logging
    - Twisted logging
    - Scrapy logging
    - Redirects standard output and stderr to job log at INFO level

    This duplicates some code with Scrapy log.start(), but it's required in
    order to avoid scrapy from starting the log twice.
    """
    # General python logging: everything flows to the Hubstorage handler.
    handler = HubstorageLogHandler()
    handler.setLevel(logging.INFO)
    handler.setFormatter(logging.Formatter('[%(name)s] %(message)s'))
    root_logger = logging.getLogger()
    root_logger.setLevel(logging.NOTSET)
    root_logger.addHandler(handler)

    # Silence commonly used noisy libraries
    try:
        import boto  # boto overrides its logger at import time
    except ImportError:
        pass
    null_handler = logging.NullHandler()
    for noisy in ('boto', 'requests', 'hubstorage'):
        noisy_logger = logging.getLogger(noisy)
        noisy_logger.propagate = 0
        noisy_logger.addHandler(null_handler)

    # Redirect standard output and error to HS log
    sys.stdout = StdoutLogger(0, 'utf-8')
    sys.stderr = StdoutLogger(1, 'utf-8')

    # Twisted specifics (includes Scrapy)
    observer = HubstorageLogObserver(handler)
    saved_showwarning = warnings.showwarning
    txlog.startLoggingWithObserver(observer.emit, setStdout=False)
    # Twisted replaces the warnings hook on start; put ours back.
    warnings.showwarning = saved_showwarning
    return handler
def setup_logging():
    """
    Wire twisted logging to a (possibly daily-rotated) galaxy log file,
    then bridge twisted into python logging and apply DEFAULT_LOGGING.
    """
    if not os.path.exists(galaxy_settings.LOG_DIR):
        os.makedirs(galaxy_settings.LOG_DIR)
    # Log to a daily file when one is configured, otherwise to stderr.
    if galaxy_settings.LOG_FILE:
        target = DailyLogFile.fromFullPath(
            os.path.join(galaxy_settings.LOG_DIR, galaxy_settings.LOG_FILE))
    else:
        target = sys.stderr
    file_observer = GalaxyFileLogObserver(target, galaxy_settings.LOG_ENCODING)
    startLoggingWithObserver(file_observer.emit, setStdout=False)

    # setup general logging for Scrapy
    if not sys.warnoptions:
        # Route warnings through python logging
        logging.captureWarnings(True)
    bridge = log.PythonLoggingObserver('twisted')
    bridge.start()
    logging.root.setLevel(logging.NOTSET)
    dictConfig(DEFAULT_LOGGING)