def run(options):
    """
    Long-running magic-folders entry point: begin logging to
    ``options.stdout``, then build and run the synchronization service.
    """
    from twisted.internet import reactor

    def _render_event(event):
        # "t.i.protocol.Factory" emits noisy 'starting'/'stopping'
        # events that carry no useful information; drop them.
        source = event.get("log_source", None)
        if isinstance(source, Factory):
            return None
        # eventAsText()'s docstring suggests it appends a newline,
        # but it doesn't — add one here.
        return u"{}\n".format(eventAsText(event, includeSystem=False))

    globalLogBeginner.beginLoggingTo(
        [FileLogObserver(options.stdout, _render_event)]
    )

    # Build the daemon service from the parent command's config and run it.
    cfg = options.parent.config
    return MagicFolderService.from_config(reactor, cfg).run()
def _run_script(self, script, args, log, session):
    """
    Runs a pysmurf control script. Can only run from the reactor.

    Arguments
    ----------
    script: string
        path to the script you wish to run
    args: list, optional
        List of command line arguments to pass to the script.
        Defaults to [].
    log: string/bool, optional
        Determines if and how the process's stdout should be logged.
        You can pass the path to a logfile, True to use the agent's
        log, or False to not log at all.
    """
    with self.protocol_lock.acquire_timeout(0, job=script) as acquired:
        if not acquired:
            return False, "The requested script cannot be run because " \
                          "script {} is already running".format(self.protocol_lock.job)

        self.current_session = session
        log_file = None
        try:
            # IO is not really safe from the reactor thread, so we possibly
            # need to find another way to do this if people use it and it
            # causes problems...
            logger = None
            if isinstance(log, str):
                self.log.info("Logging output to file {}".format(log))
                log_file = yield threads.deferToThread(open, log, 'a')
                logger = Logger(
                    observer=FileLogObserver(log_file, log_formatter))
            elif log:
                # If log==True, use agent's logger
                logger = self.log

            self.prot = PysmurfScriptProtocol(script, log=logger)
            self.prot.deferred = Deferred()
            python_exec = sys.executable

            cmd = [python_exec, '-u', script] + list(map(str, args))

            self.log.info("{exec}, {cmd}", exec=python_exec, cmd=cmd)

            reactor.spawnProcess(self.prot, python_exec, cmd, env=os.environ)
            rc = yield self.prot.deferred
            return (rc == 0), "Script has finished with exit code {}".format(rc)
        finally:
            # Sleep to allow any remaining messages to be put into the
            # session var
            yield dsleep(1.0)
            self.current_session = None
            # BUGFIX: the logfile handle opened above was never closed,
            # leaking a file descriptor on every logged run. Close it off
            # the reactor thread, matching how it was opened.
            if log_file is not None:
                yield threads.deferToThread(log_file.close)
def make_logfile_observer(path, show_source=False):
    """
    Make an observer that writes out to C{path}.
    """
    from twisted.logger import FileLogObserver

    f = open(path, "w")

    def _render(event):
        # Label the line with the originating log system, defaulting to
        # this controller process when the event carries none.
        system = event.get("log_system", u"-")
        if system == u"-":
            system = u"{:<10} {:>6}".format("Controller", os.getpid())
        if show_source and event.get("log_namespace") is not None:
            system += " " + event.get("cb_namespace",
                                      event.get("log_namespace", ''))

        # Render the event's message, if it has a format.
        if event.get("log_format", None) is not None:
            text = formatEvent(event)
        else:
            text = ""

        # Append the traceback when the event wraps a failure.
        if "log_failure" in event:
            text = text + event["log_failure"].getTraceback()

        return NOCOLOUR_FORMAT.format(formatTime(event["log_time"]),
                                      system, text) + os.linesep

    return FileLogObserver(f, _render)
def init(debug=False):
    """
    Configure stdlib logging and begin twisted logging to stdout.

    :param debug: force DEBUG level; the ``DEBUG`` environment variable
        also enables it.
    """
    is_debug = debug or os.environ.get('DEBUG', False)
    logging.basicConfig(
        level=logging.DEBUG if is_debug else logging.INFO,
        format='%(asctime)s [%(name)s] %(levelname)s %(message)s',
        datefmt='%Y-%m-%d %H:%M:%S',
        filemode='a')
    # gnupg is chatty and may log key material; quiet it and filter keys.
    gnupg_log = logging.getLogger('gnupg')
    gnupg_log.setLevel(logging.WARN)
    gnupg_log.addFilter(PrivateKeyFilter())

    def formatter(event):
        try:
            event['log_time'] = time.strftime(
                '%Y-%m-%d %H:%M:%S', time.localtime(event['log_time']))
            event['log_level'] = event['log_level'].name.upper()
            if event.get('log_format'):
                event['log_format'] = str(event['log_format']) + '\n'
            else:
                event['log_format'] = ''
            template = u'{log_time} [{log_namespace}] {log_level} ' + event['log_format']
            return template.format(**event)
        except Exception as e:
            # Never let a formatting error kill the observer.
            return "Error while formatting log event: {!r}\nOriginal event: {!r}\n".format(
                e, event)

    globalLogBeginner.beginLoggingTo([FileLogObserver(sys.stdout, formatter)])
def logObserver(outFile, timeFormat=timeFormatRFC3339):
    """
    Build a FileLogObserver writing to ``outFile``, rendering timestamps
    with ``timeFormat``.
    """
    def _format(event):
        return formatLogEvent(
            event,
            formatTime=lambda e: formatTime(e, timeFormat),
        )

    return FileLogObserver(outFile, _format)
def runtwisted(config=None):
    """
    Run the Twisted server.

    Serves the built client files at the root, the dev client at /dev,
    and the WSGI API app at /api, listening on the port given as the
    first command-line argument (default 8091). Blocks in reactor.run().
    """
    # Log every event to stdout as plain text.
    globalLogBeginner.beginLoggingTo(
        [FileLogObserver(sys.stdout, lambda _: formatEvent(_) + "\n")])

    threadpool = ThreadPool(maxthreads=30)
    app = api.makeapp(config=config)
    wsgi_app = WSGIResource(reactor, threadpool, app)

    class OptimaResource(Resource):
        # Leaf resource wrapping the WSGI app; disables caching of API
        # responses.
        isLeaf = True

        def __init__(self, wsgi):
            self._wsgi = wsgi

        def render(self, request):
            # Rewrite the path so the WSGI app sees /api/... requests.
            request.prepath = []
            request.postpath = ['api'] + request.postpath[:]

            r = self._wsgi.render(request)

            request.responseHeaders.setRawHeaders(
                b'Cache-Control', [b'no-cache', b'no-store', b'must-revalidate'])
            request.responseHeaders.setRawHeaders(b'expires', [b'0'])
            return r

    # If we have a full path for the client directory, use that directory.
    if os.path.isabs(config.CLIENT_DIR):
        clientDirTarget = config.CLIENT_DIR
    # Otherwise (we have a relative path), use it (correcting so it is with
    # respect to the sciris repo directory).
    else:
        clientDirTarget = '%s%s%s' % (os.pardir, os.sep, config.CLIENT_DIR)

    base_resource = File('%s%sdist%s' % (clientDirTarget, os.sep, os.sep))
    base_resource.putChild(
        'dev', File('%s%ssrc%s' % (clientDirTarget, os.sep, os.sep)))
    base_resource.putChild('api', OptimaResource(wsgi_app))

    site = Site(base_resource)

    # Port comes from argv[1]; fall back to 8091 when not given.
    try:
        port = str(sys.argv[1])
    except IndexError:
        port = "8091"

    # Start the threadpool now, shut it down when we're closing
    threadpool.start()
    reactor.addSystemEventTrigger('before', 'shutdown', threadpool.stop)

    endpoint = serverFromString(reactor, "tcp:port=" + port)
    endpoint.listen(site)
    reactor.run()
def get_text_file_observer(name=DEFAULT_LOG_FILENAME, path=USER_LOG_DIR):
    """
    Create a FileLogObserver that writes classic-format log text to a
    size-rotated logfile under ``path``, creating the directory if needed.
    """
    _ensure_dir_exists(path)

    rotating_file = LogFile(name=name,
                            directory=path,
                            rotateLength=MAXIMUM_LOG_SIZE,
                            maxRotatedFiles=MAX_LOG_FILES)
    return FileLogObserver(formatEvent=formatEventAsClassicLogText,
                           outFile=rotating_file)
def run_script(self, session, params=None):
    """
    Runs a pysmurf control script.

    Args:
        script (string): path to the script you wish to run
        args (list, optional): List of command line arguments to pass
            to the script. Defaults to [].
        log (string/bool, optional): Determines if and how the process's
            stdout should be logged. You can pass the path to a logfile,
            True to use the agent's log, or False to not log at all.

    Returns:
        (bool, str): success flag and a human-readable status message.
    """
    if params is None:
        params = {}

    # Refuse to start a second script while one is already running.
    if self.prot is not None:
        return False, "Process {} is already running".format(
            self.prot.fname)

    script_file = params['script']
    args = params.get('args', [])
    log_file = params.get('log', True)

    params = {'fname': script_file}
    if type(log_file) is str:
        fout = open(log_file, 'a')
        params['log'] = Logger(
            observer=FileLogObserver(fout, log_formatter))
    elif log_file:
        params['log'] = self.log
    else:
        params['log'] = None

    self.prot = PysmurfScriptProtocol(**params)
    pyth = sys.executable
    cmd = [pyth, '-u', script_file] + args

    self.log.info("{exec}, {cmd}", exec=pyth, cmd=cmd)

    # Spawning must happen on the reactor thread.
    reactor.callFromThread(reactor.spawnProcess, self.prot, pyth, cmd,
                           env=os.environ)

    # Poll until the child process ends; this method runs outside the
    # reactor thread, so blocking here is acceptable.
    while self.prot.end_status is None:
        time.sleep(1)

    end_status = self.prot.end_status
    self.prot = None
    if isinstance(end_status.value, ProcessDone):
        return True, "Script has finished naturally"
    elif isinstance(end_status.value, ProcessTerminated):
        return False, "Script has been killed"
    # BUGFIX: previously fell through and implicitly returned None for any
    # other termination reason, breaking callers that unpack (ok, msg).
    return False, "Script has ended for an unknown reason"
def setup_logging(log_level, log_name, log_directory=""):
    """
    Configure the logger to use the specified log file and log level
    """
    # Only pass events at or above `log_level` within the scanner namespace.
    level_filter = LogLevelFilterPredicate()
    level_filter.setLogLevelForNamespace(
        "orscanner", LogLevel.levelWithName(log_level.lower()))

    # Write filtered events both to a daily-rotated file and to stdout,
    # in that registration order.
    daily_file = DailyLogFile(log_name, log_directory)
    observers = [
        FileLogObserver(daily_file, log_event_format),
        FileLogObserver(sys.stdout, log_event_format),
    ]
    for obs in observers:
        globalLogPublisher.addObserver(
            FilteringLogObserver(obs, (level_filter, )))
def _get_stdlib_logger(self, name):
    """
    Return a stdlib logger plus its capture buffer; every record emitted
    through the logger is forwarded into the twisted logging pipeline and
    rendered into the buffer.
    """
    buffer = StringIO()
    bridge = TwistedLoggerLogHandler()
    bridge.publisher = LogPublisher(
        FileLogObserver(buffer, formatForSystemd))

    logger = logging.getLogger(name)
    logger.setLevel(logging.DEBUG)
    # Keep records out of any globally configured handlers.
    logger.propagate = False
    logger.addHandler(bridge)
    return logger, buffer
def textFileLogObserver(outFile, timeFormat=timeFormatRFC3339):
    """
    Build a FileLogObserver that drops events below the configured log
    level and renders the rest in the classic text format.
    """
    config_level = config.getLogLevel()

    def _format(event):
        level = event.get('log_level')
        # Discard events whose level is below the configured threshold.
        if level is not None and level < config_level:
            return None
        return formatEventAsClassicLogText(
            event, formatTime=lambda e: formatTime(e, timeFormat))

    return FileLogObserver(outFile, _format)
def main():
    """
    Entry point: attach log observers according to the logging config,
    then start the server on the configured port.
    """
    # Verbose mode mirrors every log event to stdout as plain text.
    if Config.get('logging')['verbose'] is True:
        globalLogPublisher.addObserver(
            FileLogObserver(sys.stdout, lambda e: eventAsText(e) + "\n"))
    # Optionally also record structured JSON events to logs/log.json,
    # resolved relative to this module's directory.
    if Config.get('logging')['log_to_file'] is True:
        logfile = os.path.join(os.path.abspath(os.path.dirname(__file__)),
                               "logs/log.json")
        globalLogPublisher.addObserver(
            jsonFileLogObserver(io.open(logfile, 'w+'), ''))

    server = Server(Config.get('server')['port'])
    server.run()
def test_logger_namespace(self):
    """
    A `twisted.logger.Logger` with a namespace gets that namespace as a
    prefix.
    """
    fout = StringIO()
    log = Logger(namespace="ns",
                 observer=FileLogObserver(fout, formatForSystemd))
    log.info("info\n{more}", more="info")
    log.error("err")
    # Info renders with syslog priority <6> and error with <3>;
    # continuation lines keep the priority but drop the namespace tag.
    self.assertEqual(("<6>[ns] info\n"
                      "<6> info\n"
                      "<3>[ns] err\n"), fout.getvalue())
def TerseJSONToConsoleLogObserver(outFile: IO[str], metadata: dict) -> FileLogObserver:
    """
    A log observer that formats events to a flattened JSON representation.

    Args:
        outFile: The file object to write to.
        metadata: Metadata to be added to each log object.
    """

    def formatEvent(_event: dict) -> str:
        # Merge shared metadata into the event, then emit one JSON object
        # per line.
        merged = flatten_event(_event, metadata)
        return _encoder.encode(merged) + "\n"

    return FileLogObserver(outFile, formatEvent)
def TerseJSONToConsoleLogObserver(outFile: IO[str], metadata: dict) -> FileLogObserver:
    """
    A log observer that formats events to a flattened JSON representation.

    Args:
        outFile: The file object to write to.
        metadata: Metadata to be added to each log object.
    """

    def formatEvent(_event: dict) -> str:
        merged = flatten_event(_event, metadata)
        # Compact separators keep each emitted line as terse as possible.
        return dumps(merged, ensure_ascii=False,
                     separators=(",", ":")) + "\n"

    return FileLogObserver(outFile, formatEvent)
def makeFilteredFileLogObserver(cls, stream, withTime=True):
    """
    For a child process that has its stdout captured by the master process
    to be logged by the master, we strip out the time from the log entry
    since the master process will always add one. Setting C{withTime} to
    L{False} will ensure no time is generated.
    """
    assert (cls.filterPublisher is None and
            cls.filterObserver is None), "Only call this once"

    # Suppress the timestamp entirely when the master adds its own.
    if withTime:
        timeFormat = formatTime
    else:
        timeFormat = lambda _: u""

    def _render(event):
        return formatEventAsClassicLogText(event, formatTime=timeFormat)

    cls.filterObserver = FileLogObserver(stream, _render)
    cls.filterPublisher = LogPublisher(cls.filterObserver)
    return cls.filterPublisher
def test_log_legacy(self):
    """
    Messages published through the legacy log API are rendered with
    syslog priority prefixes and the legacy ``system`` tag.
    """
    fout = StringIO()
    p = LegacyLogPublisher(publishPublisher=LogPublisher(
        FileLogObserver(fout, formatForSystemd)))
    p.msg("msg")
    p.msg("msg", system="system")
    p.msg("m\ns\ng", logLevel=logging.DEBUG)
    # Default level renders as <6>, DEBUG as <7>; multi-line messages
    # repeat the priority prefix on each continuation line.
    self.assertEqual(
        ("<6>[-] msg\n"
         "<6>[system] msg\n"
         "<7>[-] m\n"
         "<7> s\n"
         "<7> g\n"),
        fout.getvalue(),
    )
def SynapseFileLogObserver(outFile: typing.IO[str]) -> FileLogObserver:
    """
    A log observer that formats events like the traditional log formatter
    and sends them to `outFile`.

    Args:
        outFile (file object): The file object to write to.
    """

    def formatEvent(_event: dict) -> str:
        # Work on a copy so the shared event dict is not mutated for
        # other observers.
        event = dict(_event)
        event["log_level"] = event["log_level"].name.upper()
        fmt = event.get("log_format", "{log_text}") or "{log_text}"
        event["log_format"] = (
            "- {log_namespace} - {log_level} - {request} - " + fmt)
        return eventAsText(event, includeSystem=False) + "\n"

    return FileLogObserver(outFile, formatEvent)
def run():
    """
    Run the server.

    Serves static files from the current directory at the root and the
    WSGI API app at /api, listening on the port given as the first
    command-line argument (default 8080). Blocks in reactor.run().
    """
    # Log every event to stdout as plain text.
    globalLogBeginner.beginLoggingTo(
        [FileLogObserver(sys.stdout, lambda _: formatEvent(_) + "\n")])
    threadpool = ThreadPool(maxthreads=30)
    wsgi_app = WSGIResource(reactor, threadpool, api.app)

    class ScirisResource(Resource):
        # Leaf resource wrapping the WSGI app; disables caching of API
        # responses.
        isLeaf = True

        def __init__(self, wsgi):
            self._wsgi = wsgi

        def render(self, request):
            # Rewrite the path so the WSGI app sees /api/... requests.
            request.prepath = []
            request.postpath = ['api'] + request.postpath[:]

            r = self._wsgi.render(request)

            request.responseHeaders.setRawHeaders(
                b'Cache-Control', [b'no-cache', b'no-store', b'must-revalidate'])
            request.responseHeaders.setRawHeaders(b'expires', [b'0'])
            return r

    base_resource = File('.')
    base_resource.putChild('api', ScirisResource(wsgi_app))
    site = Site(base_resource)

    # Port comes from argv[1]; fall back to 8080 when not given.
    try:
        port = str(sys.argv[1])
    except IndexError:
        port = "8080"

    # Start the threadpool now, shut it down when we're closing
    threadpool.start()
    reactor.addSystemEventTrigger('before', 'shutdown', threadpool.stop)

    endpoint = serverFromString(reactor, "tcp:port=" + port)
    endpoint.listen(site)
    reactor.run()
def run_app_in_twisted():
    """
    Serve the web client, downloadable files, and the WSGI API app through
    Twisted on the configured port. Blocks in reactor.run().
    """
    # Log every event to stdout as plain text.
    globalLogBeginner.beginLoggingTo(
        [FileLogObserver(sys.stdout, lambda _: formatEvent(_) + "\n")])
    threadpool = ThreadPool(maxthreads=30)
    wsgi_app = WSGIResource(reactor, threadpool, app)

    class ServerResource(Resource):
        isLeaf = True

        def __init__(self, wsgi):
            Resource.__init__(self)
            self._wsgi = wsgi

        def render(self, request):
            """
            Adds headers to disable caching of api calls
            """
            # Rewrite the path so the WSGI app sees /api/... requests.
            request.prepath = []
            request.postpath = ['api'] + request.postpath[:]

            r = self._wsgi.render(request)

            request.responseHeaders.setRawHeaders(
                b'Cache-Control', [b'no-cache', b'no-store', b'must-revalidate'])
            request.responseHeaders.setRawHeaders(b'expires', [b'0'])
            return r

    # web-client files served from here
    base_resource = File('../client/dist')
    # api requests must go through /api
    base_resource.putChild('api', ServerResource(wsgi_app))
    # downloadable files go here
    base_resource.putChild('file', File(config.SAVE_FOLDER))
    site = Site(base_resource)

    # Start the threadpool now, shut it down when we're closing
    threadpool.start()
    reactor.addSystemEventTrigger('before', 'shutdown', threadpool.stop)
    endpoint = serverFromString(reactor, "tcp:port=" + str(config.PORT))
    endpoint.listen(site)
    reactor.run()
def _doSecondaryActions(action, tzpath, xmlfile, url):
    """
    Perform an action against the secondary timezone database.

    :param action: the requested action; only "cache" is supported,
        anything else reports a usage error.
    :param tzpath: path to the timezone data directory.
    :param xmlfile: timezone XML index file.
    :param url: secondary server URL to cache from.
    """
    tzdb = SecondaryTimezoneDatabase(tzpath, xmlfile, url)
    try:
        tzdb.readDatabase()
    except Exception:
        # BUGFIX: was a bare `except:`, which also swallowed SystemExit
        # and KeyboardInterrupt. This best-effort read of a possibly
        # missing database should only ignore ordinary errors.
        pass

    if action == "cache":
        print("Caching from secondary server: {}".format(url, ))
        observer = FileLogObserver(
            sys.stdout, lambda event: formatEventAsClassicLogText(event))
        Logger.beginLoggingTo([observer], redirectStandardIO=False)
        reactor.callLater(0, _runInReactor, tzdb)
        reactor.run()
    else:
        usage("Invalid action: {}".format(action, ))
def run_twisted(port=8080, flask_app=None, client_dir=None, do_log=False,
                reactor_args=None):
    """
    Serve an optional Flask app and/or a static client directory via Twisted.

    :param port: TCP port to listen on (default 8080).
    :param flask_app: WSGI app mounted at /api (or at the root when no
        client directory is given).
    :param client_dir: directory of static client files served at the root.
    :param do_log: when True, begin logging events to stdout.
    :param reactor_args: extra keyword arguments passed to reactor.run().
    :returns: None. Blocks in reactor.run() until shutdown.
    """
    # Give an error if we pass in no Flask server or client path.
    if reactor_args is None:
        reactor_args = {}
    if (flask_app is None) and (client_dir is None):
        print('ERROR: Neither client or server are defined.')
        return None

    if do_log:
        # Set up logging.
        globalLogBeginner.beginLoggingTo([
            FileLogObserver(sys.stdout, lambda _: formatEvent(_) + "\n")
        ])

    if client_dir is not None:
        # If there is a client path, set up the base resource.
        base_resource = File(client_dir)

    # If we have a flask app...
    if flask_app is not None:
        thread_pool = ThreadPool(
            maxthreads=30)  # Create a thread pool to use with the app.
        wsgi_app = WSGIResource(
            reactor, thread_pool, flask_app
        )  # Create the WSGIResource object for the flask server.
        if client_dir is None:
            # If we have no client path, set the WSGI app to be the base resource.
            base_resource = ScirisResource(wsgi_app)
        else:
            # Otherwise, make the Flask app a child resource.
            base_resource.putChild(b'api', ScirisResource(wsgi_app))
        thread_pool.start(
        )  # Start the threadpool now, shut it down when we're closing
        reactor.addSystemEventTrigger('before', 'shutdown', thread_pool.stop)

    # Create the site.
    site = Site(base_resource)
    endpoint = serverFromString(
        reactor, "tcp:port=" + str(port)
    )  # Create the endpoint we want to listen on, and point it to the site.
    endpoint.listen(site)
    reactor.run(**reactor_args)  # Start the reactor.
    return None
def test_logger_namespace_failure(self):
    """
    An unexpected failure, logged as critical, is displayed across
    multiple lines.
    """
    fout = StringIO()
    log = Logger(namespace="ns",
                 observer=FileLogObserver(fout, formatForSystemd))
    log.failure("Something went wrong", Failure(Exception("1\n2\n3")))
    # Critical renders with syslog priority <2>; the traceback (including
    # the multi-line exception message) repeats the prefix on each line.
    self.assertEqual(
        ("<2>[ns] Something went wrong\n"
         "<2> Traceback (most recent call last):\n"
         "<2> Failure: builtins.Exception: 1\n"
         "<2> 2\n"
         "<2> 3\n"),
        fout.getvalue(),
    )
def run():
    """
    Configure logging, start the HTTP server, and run the feed-polling
    loop until reactor shutdown.
    """
    from twisted.internet import reactor

    root = logging.getLogger()
    logging.getLogger("django").setLevel(logging.INFO)
    logging.raiseExceptions = settings.DEBUG
    logging._srcfile = None  # Disable expensive collection of location information.
    root.setLevel(logging.DEBUG if settings.DEBUG else logging.INFO)
    # Bridge stdlib logging into twisted logging, filtered to drop the
    # noisy unhandled-HTTP/2-shutdown events.
    root.addHandler(TwistedLoggerLogHandler())
    observer = FilteringLogObserver(
        FileLogObserver(sys.stdout, formatForSystemd),
        [dropUnhandledHTTP2Shutdown],
    )
    globalLogBeginner.beginLoggingTo([observer], redirectStandardIO=False)

    log.info("Yarrharr {version} starting", version=__version__)

    factory = Site(Root(reactor, reactor.getThreadPool()), logPath=None)
    endpoint = serverFromString(reactor, settings.SERVER_ENDPOINT)
    reactor.addSystemEventTrigger("before", "startup", endpoint.listen, factory)

    # Kick off the feed-update loop; log (rather than crash on) any
    # failure that terminates it.
    updateLoop = AdaptiveLoopingCall(reactor, lambda: updateFeeds(reactor))
    loopEndD = updateLoop.start()
    loopEndD.addErrback(lambda f: log.failure("Polling loop broke", f))

    @receiver(schedule_changed)
    def threadPollNow(sender, **kwargs):
        """
        When the `schedule_changed` signal is sent poke the polling loop.
        If it is sleeping this will cause it to poll immediately.
        Otherwise this will cause it to run the poll function immediately
        once it returns (running it again protects against races).
        """
        log.debug("Immediate poll triggered by {sender}", sender=sender)
        reactor.callFromThread(updateLoop.poke)

    def stopUpdateLoop():
        # Stop the loop and wait for its deferred so shutdown blocks
        # until the current poll (if any) completes.
        updateLoop.stop()
        return loopEndD

    reactor.addSystemEventTrigger("before", "shutdown", stopUpdateLoop)

    reactor.run()
def run(options):
    """
    This is the long-running magic-folders function which performs
    synchronization between local and remote folders.
    """
    from twisted.internet import reactor

    # eventAsText() does not actually append a trailing newline despite
    # what its docstring suggests, so the formatter adds one.
    def _format(event):
        return u"{}\n".format(eventAsText(event, includeSystem=False))

    globalLogBeginner.beginLoggingTo(
        [FileLogObserver(options.stdout, _format)])

    # start the daemon services
    service = MagicFolderService.from_config(reactor, options.parent.config)
    return service.run()
def make_logfile_observer(path, show_source=False):
    """
    Make an observer that writes out to C{path}.
    """
    from twisted.logger import FileLogObserver
    from twisted.python.logfile import DailyLogFile

    f = DailyLogFile.fromFullPath(path)

    def _render(event):
        # Label the line with the originating log system, defaulting to
        # this controller process when the event carries none.
        system = event.get("log_system", u"-")
        if system == u"-":
            system = u"{:<10} {:>6}".format("Controller", os.getpid())
        if show_source and event.get("log_namespace") is not None:
            system += " " + event.get("cb_namespace",
                                      event.get("log_namespace", ''))

        # Render the event's message, if it has a format.
        if event.get("log_format", None) is not None:
            text = formatEvent(event)
        else:
            text = u""

        # This is a traceback. Print it.
        if "log_failure" in event:
            text = text + event["log_failure"].getTraceback()

        rendered = STANDARD_FORMAT.format(startcolor=u'',
                                          time=formatTime(event["log_time"]),
                                          system=system,
                                          endcolor=u'',
                                          text=text)
        # Strip any ANSI colour codes before writing to the file.
        return strip_ansi(rendered) + os.linesep

    return FileLogObserver(f, _render)
def main(reactor):
    """
    Load the dark-server certificate and root CA from files next to this
    module, then start an SSL listener on port 8123. Returns an unfired
    Deferred so the reactor keeps running.
    """
    log = Logger()

    def fe(inp):
        # Classic text formatting plus an explicit newline.
        return formatEventAsClassicLogText(inp) + "\n"

    globalLogPublisher.addObserver(FileLogObserver(sys.stdout, fe))

    # Load the server cert/key pair and the root CA that peers must
    # chain to.
    ds_certData = getModule(__name__).filePath.sibling('darkserver').child(
        'darkserver.pem').getContent()
    rt_certData = getModule(__name__).filePath.sibling('rootCA.d').child(
        'rootCA.crt').getContent()
    ds_cert = ssl.PrivateCertificate.loadPEM(ds_certData)
    rt_cert = ssl.Certificate.loadPEM(rt_certData)
    # l.info("ds_cert: {ds_cert.inspect()}", ds_cert=ds_cert)
    # l.info("ds_cert key: ds_cert.privateKey.inspect()", privateKey=ds_cert.privateKey)
    # l.info("rt_cert: rt_cert.inspect()", rt_cert=rt_cert)
    options = ssl.CertificateOptions(certificate=ds_cert.original,
                                     privateKey=ds_cert.privateKey.original,
                                     trustRoot=rt_cert,
                                     verifyDepth=2,
                                     raiseMinimumTo=ssl.TLSVersion.TLSv1_1)
    # l.info('Starting service')
    # f = MainService()
    # f.options = options
    log.info('Initiating listening')
    # tlsFactory = TLSMemoryBIOFactory(options, False, f.getFingerFactory())
    # reactor.listenTCP(8123, tlsFactory)
    reactor.listenSSL(
        8123, MainFactory(), options
    )  # FingerFactory({b'alice' : b'nice girl'})) #, ds_cert.options(rt_cert))
    # reactor.listenTCP(8123, f.getFingerFactory())
    return defer.Deferred()
    'allow_management': 'False'
},
    'auto_rx': {
        'address': 'http://localhost:5000',
        'path': '/home/pi/radiosonde_auto_rx',
        'telemetry_port': '55673'
    }
}

# Candidate configuration file locations.
# NOTE(review): precedence assumed to follow list order — confirm against
# the config loader.
CONFIG_FILE_LIST = [
    '/etc/rs_agent.conf', '~/.rs_agent.conf', '~/.config/rs_agent/config.conf',
    'config.conf'
]

log = Logger()
# Mirror every log event to stdout as plain text.
log.observer.addObserver(
    FileLogObserver(sys.stdout, lambda e: eventAsText(e) + "\n"))


class HorusRepeater(protocol.DatagramProtocol):
    # Forwards each received UDP datagram to the server interface's
    # upload_telemetry remote call; the sender address is ignored.

    def __init__(self, server_iface):
        self.iface = server_iface

    def datagramReceived(self, datagram, address):
        self.iface.callRemote('upload_telemetry', datagram)


def any_active_sdr(jdict):
    '''Are any of the sdrs not scanning or idle'''
    # Returns True only when every SDR's state is outside `allowed`
    # (i.e. no SDR is merely scanning or untasked).
    allowed = ['Scanning', 'Not Tasked']
    return all(state not in allowed for _, state in jdict.items())
def passThroughFileLogObserver(outFile):
    """
    Return a FileLogObserver that writes each event, rendered by
    ``formatEventAsString``, straight to ``outFile``.
    """
    return FileLogObserver(outFile, lambda event: formatEventAsString(event))
def __init__(self, outFile):
    """
    Remember ``outFile`` on the class so tests can inspect it, then
    delegate to FileLogObserver with ``str`` as the formatter.
    """
    # NOTE(review): storing outFile as a class attribute means the most
    # recently constructed instance wins — presumably intentional for
    # this mock; confirm with its tests.
    MockFileLogObserver.outFile = outFile
    FileLogObserver.__init__(self, outFile, str)
def __init__(self, outFile: TextIO) -> None:
    """
    Remember ``outFile`` on the class so tests can inspect it, then
    delegate to FileLogObserver with ``str`` as the formatter.
    """
    # NOTE(review): storing outFile as a class attribute means the most
    # recently constructed instance wins — presumably intentional for
    # this mock; confirm with its tests.
    MockFileLogObserver.outFile = outFile
    FileLogObserver.__init__(self, outFile, str)