Example #1
    def __init__(self, config):

        # Dictionary to hold active clients.
        self.clients = {}

        # Set up logging.
        # TODO: Use config parameters here.
        self.log = Logger()

        # Logging target.
        log_observer = textFileLogObserver(sys.stdout)
        # Filter out events below the specified severity.
        logging_level_predicate = [LogLevelFilterPredicate(LogLevel.debug)]
        # Set up an Observer to actually perform the filtering.
        log_filter = FilteringLogObserver(textFileLogObserver(sys.stdout),
                                          predicates=logging_level_predicate)
        # And register global logging for the filtering observer.
        globalLogBeginner.beginLoggingTo([log_filter])

        # Passed-in game configuration.
        self.configuration = config

        # Game data.
        self.game = game.GameData()

        # Init main game loop.
        self.game_loop = task.LoopingCall(self.GameLoop)
        self.game_loop.start(30)

        # Holds a cancelable callback (call self.shutdown.cancel()) for shutting down the server as needed.
        self.shutdown = None
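
For comparison, a minimal self-contained sketch of the same level-filtering pattern, assuming only twisted.logger is installed; the "demo" namespace is illustrative:

# Minimal sketch: a single filtering observer writing to stdout.
import sys

from twisted.logger import (
    FilteringLogObserver,
    Logger,
    LogLevel,
    LogLevelFilterPredicate,
    globalLogBeginner,
    textFileLogObserver,
)

# Drop every event below "info".
predicate = LogLevelFilterPredicate(defaultLogLevel=LogLevel.info)
observer = FilteringLogObserver(textFileLogObserver(sys.stdout),
                                predicates=[predicate])

# Register the filtering observer as the global log target.
globalLogBeginner.beginLoggingTo([observer])

log = Logger(namespace="demo")
log.info("kept: at or above the default level")
log.debug("dropped by the predicate")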
Example #2
    def run(self):
        self.factory = HTTPFactory(
            self.channel_layer,
            self.action_logger,
            timeout=self.http_timeout,
            websocket_timeout=self.websocket_timeout,
            ping_interval=self.ping_interval,
            ws_protocols=self.ws_protocols,
            root_path=self.root_path,
        )
        # Redirect the Twisted log to nowhere
        globalLogBeginner.beginLoggingTo([lambda _: None], redirectStandardIO=False, discardBuffer=True)
        # Listen on a socket
        if self.unix_socket:
            reactor.listenUNIX(self.unix_socket, self.factory)
        elif self.file_descriptor:
            # socket returns the same socket if supplied with a fileno
            sock = socket.socket(fileno=self.file_descriptor)
            reactor.adoptStreamPort(self.file_descriptor, sock.family, self.factory)
        else:
            reactor.listenTCP(self.port, self.factory, interface=self.host)

        if "twisted" in self.channel_layer.extensions:
            logging.info("Using native Twisted mode on channel layer")
            reactor.callLater(0, self.backend_reader_twisted)
        else:
            logging.info("Using busy-loop synchronous mode on channel layer")
            reactor.callLater(0, self.backend_reader_sync)
        reactor.callLater(2, self.timeout_checker)
        reactor.run(installSignalHandlers=self.signal_handlers)
Example #3
    def run(self):
        self.factory = HTTPFactory(
            self.channel_layer,
            self.action_logger,
            timeout=self.http_timeout,
            websocket_timeout=self.websocket_timeout,
            ping_interval=self.ping_interval,
            ping_timeout=self.ping_timeout,
            ws_protocols=self.ws_protocols,
            root_path=self.root_path,
            proxy_forwarded_address_header=self.proxy_forwarded_address_header,
            proxy_forwarded_port_header=self.proxy_forwarded_port_header
        )
        if self.verbosity <= 1:
            # Redirect the Twisted log to nowhere
            globalLogBeginner.beginLoggingTo([lambda _: None], redirectStandardIO=False, discardBuffer=True)
        else:
            globalLogBeginner.beginLoggingTo([STDLibLogObserver(__name__)])

        # Disabled deliberately for the moment as it's worse performing
        if "twisted" in self.channel_layer.extensions and False:
            logger.info("Using native Twisted mode on channel layer")
            reactor.callLater(0, self.backend_reader_twisted)
        else:
            logger.info("Using busy-loop synchronous mode on channel layer")
            reactor.callLater(0, self.backend_reader_sync)
        reactor.callLater(2, self.timeout_checker)

        for socket_description in self.endpoints:
            logger.info("Listening on endpoint %s" % socket_description)
            # Twisted requires str on python2 (not unicode) and str on python3 (not bytes)
            ep = serverFromString(reactor, str(socket_description))
            ep.listen(self.factory)

        reactor.run(installSignalHandlers=self.signal_handlers)
Example #4
def run(options):
    """
    This is the long-running magic-folders function which performs
    synchronization between local and remote folders.
    """
    from twisted.internet import reactor

    # begin logging to stdout
    def event_to_string(event):
        # "t.i.protocol.Factory" produces a bunch of 'starting' and
        # 'stopping' messages that are quite noisy in the logs (and
        # don't provide useful information); skip them.
        if isinstance(event.get("log_source", None), Factory):
            return
        # docstring seems to indicate eventAsText() includes a
        # newline, but it .. doesn't
        return u"{}\n".format(eventAsText(event, includeSystem=False))

    globalLogBeginner.beginLoggingTo([
        FileLogObserver(options.stdout, event_to_string),
    ])

    # start the daemon services
    config = options.parent.config
    service = MagicFolderService.from_config(reactor, config)
    return service.run()
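
For context, a small sketch of the custom-formatter pattern used above; the "noisy" namespace check is purely illustrative, and a falsy return value makes FileLogObserver write nothing for that event:

import sys

from twisted.logger import FileLogObserver, Logger, eventAsText, globalLogBeginner

def format_event(event):
    # Skip events from an (illustrative) noisy namespace.
    if event.get("log_namespace", "").startswith("noisy"):
        return None
    # eventAsText() does not append a newline, so add one here.
    return eventAsText(event, includeSystem=False) + "\n"

globalLogBeginner.beginLoggingTo([FileLogObserver(sys.stdout, format_event)])
Logger(namespace="demo").info("formatted by format_event()")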
Example #5
def setup_logging(log_config=None, log_file=None, verbosity=None):
    log_format = (
        "%(asctime)s - %(name)s - %(lineno)d - %(levelname)s - %(request)s"
        " - %(message)s")
    if log_config is None:

        level = logging.INFO
        level_for_storage = logging.INFO
        if verbosity:
            level = logging.DEBUG
            if verbosity > 1:
                level_for_storage = logging.DEBUG

        # FIXME: we need a logging.WARN for a -q quiet option
        logger = logging.getLogger('')
        logger.setLevel(level)

        logging.getLogger('synapse.storage').setLevel(level_for_storage)

        formatter = logging.Formatter(log_format)
        if log_file:
            # TODO: Customisable file size / backup count
            handler = logging.handlers.RotatingFileHandler(
                log_file, maxBytes=(1000 * 1000 * 100), backupCount=3)

            def sighup(signum, stack):
                logger.info("Closing log file due to SIGHUP")
                handler.doRollover()
                logger.info("Opened new log file due to SIGHUP")

            # TODO(paul): obviously this is a terrible mechanism for
            #   stealing SIGHUP, because it means no other part of synapse
            #   can use it instead. If we want to catch SIGHUP anywhere
            #   else as well, I'd suggest we find a nicer way to broadcast
            #   it around.
            if getattr(signal, "SIGHUP", None):
                signal.signal(signal.SIGHUP, sighup)
        else:
            handler = logging.StreamHandler()
        handler.setFormatter(formatter)

        handler.addFilter(LoggingContextFilter(request=""))

        logger.addHandler(handler)
    else:
        with open(log_config, 'r') as f:
            logging.config.dictConfig(yaml.load(f))

    # It's critical to point twisted's internal logging somewhere, otherwise it
    # stacks up and leaks up to 64K objects;
    # see: https://twistedmatrix.com/trac/ticket/8164
    #
    # Routing to the python logging framework could be a performance problem if
    # the handlers blocked for a long time as python.logging is a blocking API
    # see https://twistedmatrix.com/documents/current/core/howto/logger.html
    # filed as https://github.com/matrix-org/synapse/issues/1727
    #
    # However this may not be too much of a problem if we are just writing to a file.
    observer = STDLibLogObserver()
    globalLogBeginner.beginLoggingTo([observer])
Example #6
def start_logging(out=None, level='info'):
    """
    Start logging to the file-like object in ``out``. By default, this
    is stdout.
    """
    global _loggers, _observer, _log_level

    if level not in log_levels:
        raise RuntimeError(
            "Invalid log level '{0}'; valid are: {1}".format(
                level, ', '.join(log_levels)
            )
        )

    if _loggers is None:
        return

    if out is None:
        out = _stdout

    if _loggers is not None:
        for ref in _loggers:
            instance = ref()
            if instance:
                instance._set_log_level(level)
    _loggers = None
    _log_level = level

    _observer = _LogObserver(out)
    if _NEW_LOGGER:
        globalLogBeginner.beginLoggingTo([_observer])
    else:
        from twisted.python import log
        log.startLogging(out)
Example #7
def init(debug=False):
    debug_enabled = debug or os.environ.get('DEBUG', False)
    logging_level = logging.DEBUG if debug_enabled else logging.INFO

    logging.basicConfig(
        level=logging_level,
        format='%(asctime)s [%(name)s] %(levelname)s %(message)s',
        datefmt='%Y-%m-%d %H:%M:%S',
        filemode='a')

    logging.getLogger('gnupg').setLevel(logging.WARN)
    logging.getLogger('gnupg').addFilter(PrivateKeyFilter())

    def formatter(event):
        try:
            event['log_time'] = time.strftime(
                '%Y-%m-%d %H:%M:%S', time.localtime(event['log_time']))
            event['log_level'] = event['log_level'].name.upper()
            event['log_format'] = str(
                event['log_format']) + '\n' if event.get('log_format') else ''
            logstring = u'{log_time} [{log_namespace}] {log_level} ' + event[
                'log_format']
            return logstring.format(**event)
        except Exception as e:
            return "Error while formatting log event: {!r}\nOriginal event: {!r}\n".format(
                e, event)

    observers = [FileLogObserver(sys.stdout, formatter)]
    globalLogBeginner.beginLoggingTo(observers)
Example #8
def main(reactor, *descriptions):
    log = Logger()
    globalLogBeginner.beginLoggingTo([textFileLogObserver(sys.stdout)])
    endpointObjects = [
        endpoints.clientFromString(reactor, description)
        for description in descriptions
    ]
    hostPorts = [(endpoint._host, endpoint._port)
                 for endpoint in endpointObjects]

    pool = threadpool.ThreadPool(minthreads=1, maxthreads=1, name="persiter")
    persister = Persists(reactor, pool)
    reactor.addSystemEventTrigger("before", "shutdown", persister.stop)
    persister.start("log.sqlite", hostPorts)

    analyzer = AnalyzesText(persister)

    factory = EncodingCollectionFactory(reactor, random.SystemRandom(),
                                        analyzer)

    for (host, port), endpoint in zip(hostPorts, endpointObjects):
        try:
            protocol = yield endpoint.connect(factory)
        except Exception:
            log.failure("Could not connect to {host}:{port}",
                        host=host,
                        port=port)
            raise
        protocol.addr = (host, port)

    defer.returnValue(defer.Deferred())
Example #9
    def setup(self, level='warn', namespace_levels=None, text_file=sys.stderr,
              time_format='%H:%M:%S.%f', handle_stdlib=True, stdlib_level='notset',
              stdlib_prefix='stdlib.'):

        """
        Initiates the twisted.logger system:
        - level: default log level as a string (ie: 'warn', 'info', ....).
        - namespace_levels: a dict of namespaces/log level names.
        - text_file: where to write the log to.
        - time_format: as supported by datetime.strftime.
        - handle_stdlib: True/False.
        - stdlib_level: level name, above which stdlib logging is handled.
        - stdlib_prefix: added to stdlib logger name, used as namespace.
        """

        file_observer = textFileLogObserver(text_file, timeFormat=time_format)
        self._predicate = LogLevelFilterPredicate(
            defaultLogLevel=LogLevel.levelWithName(level),
        )
        if namespace_levels:
            for namespace, level_name in namespace_levels.items():
                level = LogLevel.levelWithName(level_name)
                self._predicate.setLogLevelForNamespace(namespace, level)
        globalLogBeginner.beginLoggingTo([self._filtered_observer(file_observer)])

        if handle_stdlib:
            self._handle_stdlib(stdlib_level, stdlib_prefix)
Example #10
def start_logging(out=_stdout, level='info'):
    """
    Start logging to the file-like object in ``out``. By default, this
    is stdout.
    """
    global _loggers, _observer, _log_level, _started_logging

    if level not in log_levels:
        raise RuntimeError(
            "Invalid log level '{0}'; valid are: {1}".format(
                level, ', '.join(log_levels)
            )
        )

    if _started_logging:
        return

    _started_logging = True

    _log_level = level
    set_global_log_level(_log_level)

    if out:
        _observer = _LogObserver(out)

    if _NEW_LOGGER:
        _observers = []
        if _observer:
            _observers.append(_observer)
        globalLogBeginner.beginLoggingTo(_observers)
    else:
        assert out, "out needs to be given a value if using Twisted before 15.2"
        from twisted.python import log
        log.startLogging(out)
Example #11
def init_logging(context):
    session_files = context["session_files"]
    
    # Setup logging
    log_filename = session_files.session_dir / "singt.log"
    logfile = open(log_filename, 'w')
    logtargets = []

    # Set up the log observer for stdout.
    logtargets.append(
        FilteringLogObserver(
            textFileLogObserver(sys.stdout),
            predicates=[LogLevelFilterPredicate(LogLevel.debug)] # was: warn
        )
    )

    # Set up the log observer for our log file. "debug" is the most verbose level.
    logtargets.append(
        FilteringLogObserver(
            textFileLogObserver(logfile),
            predicates=[LogLevelFilterPredicate(LogLevel.debug)]
        )
    )

    # Direct the Twisted Logger to log to both of our observers.
    globalLogBeginner.beginLoggingTo(logtargets)

    # ASCII-art title
    title = art.text2art("Singt Client")
    log.info("\n"+title)
Example #12
def start_logging(out=None, level='info'):
    """
    Start logging to the file-like object in ``out``. By default, this
    is stdout.
    """
    global _loggers, _observer, _log_level

    if level not in log_levels:
        raise RuntimeError("Invalid log level '{0}'; valid are: {1}".format(
            level, ', '.join(log_levels)))

    if _loggers is None:
        return

    if out is None:
        out = _stdout

    if _loggers is not None:
        for ref in _loggers:
            instance = ref()
            if instance:
                instance._set_log_level(level)
    _loggers = None
    _log_level = level

    _observer = _LogObserver(out)
    if _NEW_LOGGER:
        globalLogBeginner.beginLoggingTo([_observer])
    else:
        from twisted.python import log
        log.startLogging(out)
Example #13
def cli():
    parser = argparse.ArgumentParser(prog=__version__.package)
    parser.add_argument('--version',
                        action='version',
                        version=__version__.public())
    parser.add_argument('--openhab',
                        default=default_openhab,
                        type=URL.from_text,
                        help='OpenHAB URL, default is {}'.format(
                            default_openhab.to_text()))
    parser.add_argument(
        '--endpoint',
        default=default_endpoint,
        help=
        'Twisted endpoint descriptor for internal web server to listen on, default is {}'
        .format(default_endpoint))
    options = parser.parse_args()

    log = Logger()
    output = textFileLogObserver(sys.stderr, timeFormat='')
    globalLogBeginner.beginLoggingTo([output])

    log.debug('Listening on {endpoint:}', endpoint=options.endpoint)
    log.debug('Connecting to {openhab:}', openhab=options.openhab.to_text())

    metrics = MetricsPage(reactor, options.openhab, creds)
    metricsThings = MetricsThingPage(reactor, options.openhab, creds)
    root = RootPage()
    root.putChild(b'metrics', metrics)
    root.putChild(b'metric-things', metricsThings)
    site = Site(root)
    server = serverFromString(reactor, options.endpoint)
    server.listen(site)

    reactor.run()
Example #14
def start_logging(out=_stdout, level='info'):
    """
    Start logging to the file-like object in ``out``. By default, this
    is stdout.
    """
    global _loggers, _observer, _log_level, _started_logging

    if level not in log_levels:
        raise RuntimeError("Invalid log level '{0}'; valid are: {1}".format(
            level, ', '.join(log_levels)))

    if _started_logging:
        return

    _started_logging = True

    _log_level = level
    set_global_log_level(_log_level)

    if out:
        _observer = _LogObserver(out)

    _observers = []
    if _observer:
        _observers.append(_observer)
    globalLogBeginner.beginLoggingTo(_observers)
Example #15
def main(reactor, *argv):
    import argparse

    a = argparse.ArgumentParser()

    a.add_argument('number', type=int)
    a.add_argument('subprocess', nargs='+')

    args = a.parse_args(argv)

    globalLogBeginner.beginLoggingTo([jsonFileLogObserver(sys.stdout)])

    executablePath = distutils.spawn.find_executable(args.subprocess[0])
    args.subprocess[0] = executablePath

    collection = ProcessCollection()
    reactor.addSystemEventTrigger("before", "shutdown", collection.stop)

    processes = [RespawningProcess(reactor,
                                   executablePath, args.subprocess,
                                   usePTY=True)
                 for _ in xrange(args.number)]
    collection.addProcesses(processes)
    collection.start()

    terminationDeferred = defer.Deferred()
    stdio.StandardIO(FireOnInput(terminationDeferred))

    return terminationDeferred
Example #16
def main():
    if not HAS_LDAP3:
        raise RuntimeError(
            "Missing required 'ldap' module (pip install ldap3).")

    parser = argparse.ArgumentParser(
        prog='openldap_exporter', description='Prometheus OpenLDAP exporter')
    parser.add_argument('--config',
                        type=argparse.FileType('r'),
                        help='configuration file',
                        required=True)
    arguments = parser.parse_args()

    configs = yaml.load(arguments.config)
    arguments.config.close()

    output = textFileLogObserver(sys.stderr, timeFormat='')
    globalLogBeginner.beginLoggingTo([output])

    # Setup web client
    metrics = MetricsPage(configs['clients'])
    root = RootPage()
    root.putChild(b'metrics', metrics)
    site = QuietSite(root)
    endpoint = serverFromString(reactor,
                                "tcp:port=" + str(configs['server_port']))
    endpoint.listen(site)

    reactor.run()
Example #17
def startLogging(settings, stream=None, level=LogLevel.debug):
    global predicate

    fileObserver = logObserver(stream)
    predicate = LogLevelFilterPredicate(defaultLogLevel=level)

    if settings.options.debug_mqtt:
        predicate.setLogLevelForNamespace('kotori.daq.services.mig',
                                          LogLevel.debug)
        predicate.setLogLevelForNamespace('kotori.daq.application.mqttkit',
                                          LogLevel.debug)

    if settings.options.debug_mqtt_driver:
        predicate.setLogLevelForNamespace('kotori.daq.intercom.mqtt',
                                          LogLevel.debug)
        predicate.setLogLevelForNamespace('mqtt', LogLevel.debug)
        predicate.setLogLevelForNamespace('paho.mqtt', LogLevel.debug)
    else:
        predicate.setLogLevelForNamespace('kotori.daq.intercom.mqtt',
                                          LogLevel.info)
        predicate.setLogLevelForNamespace('mqtt', LogLevel.info)
        predicate.setLogLevelForNamespace('paho.mqtt', LogLevel.info)

    if settings.options.debug_influx:
        predicate.setLogLevelForNamespace('kotori.daq.storage.influx',
                                          LogLevel.debug)

    if settings.options.debug_io:
        predicate.setLogLevelForNamespace('kotori.io', LogLevel.debug)

    if globalLogBeginner._temporaryObserver is not None:
        observers = [
            FilteringLogObserver(observer=fileObserver, predicates=[predicate])
        ]
        globalLogBeginner.beginLoggingTo(observers)
Example #18
def startLogging(settings, stream=None, level=LogLevel.debug):
    global predicate

    fileObserver = logObserver(stream)
    predicate    = LogLevelFilterPredicate(defaultLogLevel=level)

    if settings.options.debug_mqtt:
        predicate.setLogLevelForNamespace('kotori.daq.services.mig', LogLevel.debug)
        predicate.setLogLevelForNamespace('kotori.daq.application.mqttkit', LogLevel.debug)

    if settings.options.debug_mqtt_driver:
        predicate.setLogLevelForNamespace('kotori.daq.intercom.mqtt', LogLevel.debug)
        predicate.setLogLevelForNamespace('mqtt', LogLevel.debug)
        predicate.setLogLevelForNamespace('paho.mqtt', LogLevel.debug)
    else:
        predicate.setLogLevelForNamespace('kotori.daq.intercom.mqtt', LogLevel.info)
        predicate.setLogLevelForNamespace('mqtt', LogLevel.info)
        predicate.setLogLevelForNamespace('paho.mqtt', LogLevel.info)

    if settings.options.debug_influx:
        predicate.setLogLevelForNamespace('kotori.daq.storage.influx', LogLevel.debug)

    if settings.options.debug_io:
        predicate.setLogLevelForNamespace('kotori.io', LogLevel.debug)

    observers    = [ FilteringLogObserver(observer=fileObserver, predicates=[predicate]) ]
    globalLogBeginner.beginLoggingTo(observers)
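
A compact sketch of the per-namespace filtering that both kotori variants above rely on; the namespace names ("noisy.driver", "app") are made up for illustration:

import sys

from twisted.logger import (
    FilteringLogObserver,
    Logger,
    LogLevel,
    LogLevelFilterPredicate,
    globalLogBeginner,
    textFileLogObserver,
)

predicate = LogLevelFilterPredicate(defaultLogLevel=LogLevel.info)
# Raise the bar for one chatty namespace only.
predicate.setLogLevelForNamespace("noisy.driver", LogLevel.warn)

globalLogBeginner.beginLoggingTo(
    [FilteringLogObserver(observer=textFileLogObserver(sys.stdout),
                          predicates=[predicate])]
)

Logger(namespace="noisy.driver").info("suppressed: below warn for this namespace")
Logger(namespace="app").info("shown: default level is info")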
Example #19
def start_logging(out=_stdout, level='info'):
    """
    Start logging to the file-like object in ``out``. By default, this
    is stdout.
    """
    global _loggers, _observer, _log_level, _started_logging

    if level not in log_levels:
        raise RuntimeError("Invalid log level '{0}'; valid are: {1}".format(
            level, ', '.join(log_levels)))

    if _started_logging:
        return

    _started_logging = True

    _log_level = level
    set_global_log_level(_log_level)

    if out:
        _observer = _LogObserver(out)

    if _NEW_LOGGER:
        _observers = []
        if _observer:
            _observers.append(_observer)
        globalLogBeginner.beginLoggingTo(_observers)
    else:
        assert out, "out needs to be given a value if using Twisted before 15.2"
        from twisted.python import log
        log.startLogging(out)
Example #20
def main():
    # parse the command-line arguments
    global_config, virtuals = parse_args()

    # redirect the Twisted log to nowhere to prevent a memory 'leak'
    # see: https://twistedmatrix.com/trac/ticket/8164
    globalLogBeginner.beginLoggingTo([lambda _: None],
                                     redirectStandardIO=False,
                                     discardBuffer=True)

    # setup the actual Logger
    global_config.log = logging.getLogger(__name__)
    global_config.log.setLevel(global_config.log_level)
    if global_config.supervised:
        handler = logging.StreamHandler()
    else:
        handler = logging.FileHandler(global_config.logfile, 'a', "UTF-8")
    handler.setFormatter(
        logging.Formatter('%(asctime)s [%(levelname)s] %(message)s'))
    global_config.log.addHandler(handler)

    # check whether to daemonize or not
    if global_config.supervised:
        start_reactor(virtuals, global_config)
    else:
        daemon_handling(virtuals, global_config)
Example #21
    def run(self):
        self.factory = HTTPFactory(
            self.channel_layer,
            self.action_logger,
            timeout=self.http_timeout,
            websocket_timeout=self.websocket_timeout,
            ping_interval=self.ping_interval,
            ping_timeout=self.ping_timeout,
            ws_protocols=self.ws_protocols,
            root_path=self.root_path,
        )
        # Redirect the Twisted log to nowhere
        globalLogBeginner.beginLoggingTo([lambda _: None],
                                         redirectStandardIO=False,
                                         discardBuffer=True)
        # Listen on a socket
        if self.unix_socket:
            reactor.listenUNIX(self.unix_socket, self.factory)
        elif self.file_descriptor:
            # socket returns the same socket if supplied with a fileno
            sock = socket.socket(fileno=self.file_descriptor)
            reactor.adoptStreamPort(self.file_descriptor, sock.family,
                                    self.factory)
        else:
            reactor.listenTCP(self.port, self.factory, interface=self.host)

        if "twisted" in self.channel_layer.extensions:
            logging.info("Using native Twisted mode on channel layer")
            reactor.callLater(0, self.backend_reader_twisted)
        else:
            logging.info("Using busy-loop synchronous mode on channel layer")
            reactor.callLater(0, self.backend_reader_sync)
        reactor.callLater(2, self.timeout_checker)
        reactor.run(installSignalHandlers=self.signal_handlers)
Example #22
    def start(self, console, logfile):
        """Configure and start logging based on user preferences
        
        Args:
            console (bool): Console logging enabled
            logfile (str): Logfile path
        """

        # Log to console option.
        if console:
            globalLogBeginner.beginLoggingTo(
                [textFileLogObserver(sys.stdout)], )
            return

        # Check the file is valid and can be opened in append mode
        if os.path.exists(logfile) and not os.path.isfile(logfile):
            print "Logfile %s is not a valid file: exiting." % logfile
            exit(1)
        try:
            f = open(logfile, 'a')
        except IOError:
            print "Can't open logfile %s: exiting." % logfile
            exit(1)

        # Begin logging to the file.
        globalLogBeginner.beginLoggingTo([
            textFileLogObserver(f),
        ],
                                         redirectStandardIO=False)
Example #23
def start_logging(level=LogLevel.info):
    observers = []

    predicate = LogLevelFilterPredicate(defaultLogLevel=level)
    observers.append(FilteringLogObserver(observer=textFileLogObserver(sys.stdout), predicates=[predicate]))

    globalLogBeginner.beginLoggingTo(observers)
Example #24
    def run(self):
        self.factory = HTTPFactory(
            self.channel_layer,
            self.action_logger,
            timeout=self.http_timeout,
            websocket_timeout=self.websocket_timeout,
            ping_interval=self.ping_interval,
            ping_timeout=self.ping_timeout,
            ws_protocols=self.ws_protocols,
            root_path=self.root_path,
            proxy_forwarded_address_header=self.proxy_forwarded_address_header,
            proxy_forwarded_port_header=self.proxy_forwarded_port_header
        )
        if self.verbosity <= 1:
            # Redirect the Twisted log to nowhere
            globalLogBeginner.beginLoggingTo([lambda _: None], redirectStandardIO=False, discardBuffer=True)
        else:
            globalLogBeginner.beginLoggingTo([STDLibLogObserver(__name__)])

        # Disabled deliberately for the moment as it's worse performing
        if "twisted" in self.channel_layer.extensions and False:
            logger.info("Using native Twisted mode on channel layer")
            reactor.callLater(0, self.backend_reader_twisted)
        else:
            logger.info("Using busy-loop synchronous mode on channel layer")
            reactor.callLater(0, self.backend_reader_sync)
        reactor.callLater(2, self.timeout_checker)

        for socket_description in self.endpoints:
            logger.info("Listening on endpoint %s" % socket_description)
            # Twisted requires str on python2 (not unicode) and str on python3 (not bytes)
            ep = serverFromString(reactor, str(socket_description))
            ep.listen(self.factory)

        reactor.run(installSignalHandlers=self.signal_handlers)
Example #25
File: app.py  Project: hjalves/sjw
    def run(self):
        config = self.config
        observers = [STDLibLogObserver()]
        globalLogBeginner.beginLoggingTo(observers, redirectStandardIO=False)
        logging.config.dictConfig(config['logging'])
        logging.captureWarnings(True)
        logger.info('Logging configured!')
        return react(self.main_loop)
Example #26
    def run(self):
        # A dict of protocol: {"application_instance":, "connected":, "disconnected":} dicts
        self.connections = {}
        # Make the factory
        self.http_factory = HTTPFactory(self)
        self.ws_factory = WebSocketFactory(self, server="Daphne")
        self.ws_factory.setProtocolOptions(
            autoPingTimeout=self.ping_timeout,
            allowNullOrigin=True,
            openHandshakeTimeout=self.websocket_handshake_timeout)
        if self.verbosity <= 1:
            # Redirect the Twisted log to nowhere
            globalLogBeginner.beginLoggingTo([lambda _: None],
                                             redirectStandardIO=False,
                                             discardBuffer=True)
        else:
            globalLogBeginner.beginLoggingTo([STDLibLogObserver(__name__)])

        # Detect what Twisted features are enabled
        if http.H2_ENABLED:
            logger.info("HTTP/2 support enabled")
        else:
            logger.info(
                "HTTP/2 support not enabled (install the http2 and tls Twisted extras)"
            )

        # Kick off the timeout loop
        reactor.callLater(1, self.application_checker)
        reactor.callLater(2, self.timeout_checker)
        reactor.callLater(10, self.monitoring)

        from pympler import tracker
        self.tr = tracker.SummaryTracker()

        for socket_description in self.endpoints:
            logger.info("Configuring endpoint %s", socket_description)
            ep = serverFromString(reactor, str(socket_description))
            listener = ep.listen(self.http_factory)
            listener.addCallback(self.listen_success)
            listener.addErrback(self.listen_error)
            self.listeners.append(listener)

        # Set the asyncio reactor's event loop as global
        # TODO: Should we instead pass the global one into the reactor?
        asyncio.set_event_loop(reactor._asyncioEventloop)

        # Verbosity 3 turns on asyncio debug to find those blocking yields
        if self.verbosity >= 3:
            asyncio.get_event_loop().set_debug(True)

        reactor.addSystemEventTrigger("before", "shutdown",
                                      self.kill_all_applications)
        if not self.abort_start:
            # Trigger the ready flag if we had one
            if self.ready_callable:
                self.ready_callable()
            # Run the reactor
            reactor.run(installSignalHandlers=self.signal_handlers)
Example #27
def runtwisted(config=None):
    """
    Run the Twisted server.
    """
    globalLogBeginner.beginLoggingTo(
        [FileLogObserver(sys.stdout, lambda _: formatEvent(_) + "\n")])

    threadpool = ThreadPool(maxthreads=30)
    app = api.makeapp(config=config)
    wsgi_app = WSGIResource(reactor, threadpool, app)

    class OptimaResource(Resource):
        isLeaf = True

        def __init__(self, wsgi):
            self._wsgi = wsgi

        def render(self, request):
            request.prepath = []
            request.postpath = ['api'] + request.postpath[:]

            r = self._wsgi.render(request)

            request.responseHeaders.setRawHeaders(
                b'Cache-Control',
                [b'no-cache', b'no-store', b'must-revalidate'])
            request.responseHeaders.setRawHeaders(b'expires', [b'0'])
            return r

    # If we have a full path for the client directory, use that directory.
    if os.path.isabs(config.CLIENT_DIR):
        clientDirTarget = config.CLIENT_DIR

    # Otherwise (we have a relative path), use it (correcting so it is with
    # respect to the sciris repo directory).
    else:
        clientDirTarget = '%s%s%s' % (os.pardir, os.sep, config.CLIENT_DIR)

    base_resource = File('%s%sdist%s' % (clientDirTarget, os.sep, os.sep))
    base_resource.putChild(
        'dev', File('%s%ssrc%s' % (clientDirTarget, os.sep, os.sep)))
    base_resource.putChild('api', OptimaResource(wsgi_app))

    site = Site(base_resource)

    try:
        port = str(sys.argv[1])
    except IndexError:
        port = "8091"

    # Start the threadpool now, shut it down when we're closing
    threadpool.start()
    reactor.addSystemEventTrigger('before', 'shutdown', threadpool.stop)

    endpoint = serverFromString(reactor, "tcp:port=" + port)
    endpoint.listen(site)

    reactor.run()
Example #28
def app_main(withgui):

    try:

        infomsg('loading TWISTED subsystem')
        from twisted.internet import reactor
        from twisted.logger import STDLibLogObserver, globalLogBeginner

        # redirect twisted logging to python logging
        globalLogBeginner.beginLoggingTo([STDLibLogObserver()])

        infomsg('starting application.')

        # DHCP, DNS: find missing addresses
        setExternalPhoneAddress()
        setExternalGateway()
        setExternalProxyAddress()

        try:
            #force exception if not found
            config.get(consts.SECTION, consts.EXTPHONEADDR)
            config.get(consts.SECTION, consts.LOCPROXYADDR)
            config.get(consts.SECTION, consts.EXTGATEWAY)
            config.get(consts.SECTION, consts.EXTPROXYADDR)
        except:
            raise ZsiposCfgException("wrong or missing parameter")

        import rtp
        rtp.init()  # @UndefinedVariable

        if withgui:

            def thread_init_cb():
                rtp.register_gui_thread()  # @UndefinedVariable

            import gui
            gui.init(thread_init_cb)  # @UndefinedVariable

        log.info("loading SIP subsystem")
        import GMITM
        gmitm = GMITM.GMITM()
        if withgui:
            gmitm.setEventSink(
                gui.GUI_GMITMEventListener())  # @UndefinedVariable
        log.info("GMITM created.")

        reactor.callLater(1, rtp.postinit)  # @UndefinedVariable
        reactor.run(installSignalHandlers=True)  # @UndefinedVariable

    finally:
        try:
            gui.close()  # @UndefinedVariable
        except:
            pass
        try:
            rtp.close()  # @UndefinedVariable
        except:
            pass
Example #29
def main(accessibility=False):
    pygame.mixer.pre_init(frequency=44100, buffer=1024)
    pygame.init()
    pygame.font.init()
    globalLogBeginner.beginLoggingTo([textFileLogObserver(sys.stdout)])

    display = Display(accessibility=accessibility)
    display.setView('LoginView')
    display.init()
Example #30
def main(reactor, *argv):
    argument_parser = argparse.ArgumentParser()
    argument_parser.add_argument('solr_url')
    argument_parser.add_argument('json_docs')

    args = argument_parser.parse_args(argv)

    globalLogBeginner.beginLoggingTo([jsonFileLogObserver(sys.stdout)])

    return replay(args.json_docs, args.solr_url)
Example #31
def begin_or_register(observer, redirectStandardIO=False, **kwargs):
    global began_logging

    if not began_logging:
        globalLogBeginner.beginLoggingTo([observer],
                                         redirectStandardIO=redirectStandardIO,
                                         **kwargs)
        began_logging = True
    else:
        globalLogPublisher.addObserver(observer=observer)  # pragma nocover
Example #32
def divert_logger():
    # noinspection PyPackageRequirements
    from twisted.logger import FilteringLogObserver, LogLevel, LogLevelFilterPredicate, STDLibLogObserver, globalLogBeginner
    showwarning = warnings.showwarning
    globalLogBeginner.beginLoggingTo([
        FilteringLogObserver(
            STDLibLogObserver(),
            [LogLevelFilterPredicate(defaultLogLevel=LogLevel.critical)])
    ],
                                     redirectStandardIO=False)
    warnings.showwarning = showwarning  # twisted's beginLoggingTo() will divert python warnings to its own logging system. here we undo that.
Example #33
def init_logging(configuration, program_name):
    """Given a basic configuration, set up logging."""
    logging.init_app_logging(configuration.log_dir,
                             configuration.log_level,
                             progname=program_name,
                             quiet=configuration.quiet)
    # Initialize twisted logging, even if we don't explicitly use it,
    # because of leaky logs https://twistedmatrix.com/trac/ticket/8164
    globalLogBeginner.beginLoggingTo([lambda _: None],
                                     redirectStandardIO=False,
                                     discardBuffer=True)
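
A minimal standalone version of the "log to nowhere" call used here and in several of the daphne examples; it needs only twisted.logger and keeps the beginner from buffering events indefinitely (see the ticket referenced above):

from twisted.logger import globalLogBeginner

globalLogBeginner.beginLoggingTo(
    [lambda event: None],       # no-op observer: discard every event
    redirectStandardIO=False,   # leave sys.stdout / sys.stderr untouched
    discardBuffer=True,         # drop events buffered before this call
)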
Example #34
    def run(self):
        # Create process-local channel prefixes
        # TODO: Can we guarantee non-collision better?
        process_id = "".join(
            random.choice(string.ascii_letters) for i in range(10))
        self.send_channel = "daphne.response.%s!" % process_id
        # Make the factory
        self.factory = HTTPFactory(
            self.channel_layer,
            action_logger=self.action_logger,
            send_channel=self.send_channel,
            timeout=self.http_timeout,
            websocket_timeout=self.websocket_timeout,
            websocket_connect_timeout=self.websocket_connect_timeout,
            ping_interval=self.ping_interval,
            ping_timeout=self.ping_timeout,
            ws_protocols=self.ws_protocols,
            root_path=self.root_path,
            proxy_forwarded_address_header=self.proxy_forwarded_address_header,
            proxy_forwarded_port_header=self.proxy_forwarded_port_header,
            websocket_handshake_timeout=self.websocket_handshake_timeout)
        if self.verbosity <= 1:
            # Redirect the Twisted log to nowhere
            globalLogBeginner.beginLoggingTo([lambda _: None],
                                             redirectStandardIO=False,
                                             discardBuffer=True)
        else:
            globalLogBeginner.beginLoggingTo([STDLibLogObserver(__name__)])

        # Detect what Twisted features are enabled
        if http.H2_ENABLED:
            logger.info("HTTP/2 support enabled")
        else:
            logger.info(
                "HTTP/2 support not enabled (install the http2 and tls Twisted extras)"
            )

        if "twisted" in self.channel_layer.extensions and not self.force_sync:
            logger.info("Using native Twisted mode on channel layer")
            reactor.callLater(0, self.backend_reader_twisted)
        else:
            logger.info("Using busy-loop synchronous mode on channel layer")
            reactor.callLater(0, self.backend_reader_sync)
        reactor.callLater(2, self.timeout_checker)

        for socket_description in self.endpoints:
            logger.info("Listening on endpoint %s" % socket_description)
            # Twisted requires str on python2 (not unicode) and str on python3 (not bytes)
            ep = serverFromString(reactor, str(socket_description))
            listener = ep.listen(self.factory)
            listener.addErrback(self.on_listener_error)
            self.listeners.append(listener)

        reactor.run(installSignalHandlers=self.signal_handlers)
Example #35
def main():
  parser = ArgumentParser()
  parser.execute()

  log = Logger()
  globalLogBeginner.beginLoggingTo([textFileLogObserver(sys.stdout)])

  log.info("Starting cards-against-humanity server version {major}.{minor}.{revision}", major=version.MAJOR, minor=version.MINOR, revision=version.REVISION)

  endpoint = TCP4ServerEndpoint(reactor, parser.port)
  endpoint.listen(ServerFactory(parser.black_cards, parser.database))
  reactor.run()
Example #36
def begin_or_register(observer):
    # type: (Any) -> None
    """Register observer with the global LogPublisher

    Registers via the global LogBeginner the first time called.
    """
    global began_logging
    if not began_logging:
        globalLogBeginner.beginLoggingTo([observer], redirectStandardIO=False)
        began_logging = True
    else:
        globalLogPublisher.addObserver(observer)
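
A hypothetical usage of the begin_or_register() helper above (it assumes the module-level began_logging flag starts as False): the first call goes through globalLogBeginner, and later calls only attach extra observers via globalLogPublisher.

import sys

from twisted.logger import textFileLogObserver

begin_or_register(textFileLogObserver(sys.stdout))   # first call: begins global logging
begin_or_register(textFileLogObserver(sys.stderr))   # later calls: just add an observer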
Example #37
def run_client(address, username):
    title = art.text2art("Singt")
    print(title)

    # Setup logging
    logfile = open(f"client-{username}.log", 'w')
    logtargets = []

    # Set up the log observer for stdout.
    logtargets.append(
        FilteringLogObserver(
            textFileLogObserver(sys.stdout),
            predicates=[LogLevelFilterPredicate(LogLevel.debug)]  # was: warn
        ))

    # Set up the log observer for our log file. "debug" is the most verbose level.
    logtargets.append(
        FilteringLogObserver(
            textFileLogObserver(logfile),
            predicates=[LogLevelFilterPredicate(LogLevel.debug)]))

    # Direct the Twisted Logger to log to both of our observers.
    globalLogBeginner.beginLoggingTo(logtargets)

    # Start a logger with a namespace for a particular subsystem of our application.
    log = Logger("client")

    # TCP
    # ===
    point = TCP4ClientEndpoint(reactor, address, 1234)
    client = TCPClient(username)
    d = connectProtocol(point, client)

    def err(failure):
        print("An error occurred:", failure)

    d.addErrback(err)

    # UDP
    # ===

    # 0 means any port, we don't care in this case
    udp_client = UDPClient(address, 12345)
    reactor.listenUDP(0, udp_client)

    # Reactor
    # =======

    print("Running reactor")
    reactor.run()

    print("Finished.")
Example #38
def create_server(
        host="localhost", port=5000, debug=False,
        observer_name="twisted", flask_app=None
):
    """
    Create and setup twisted server
    (only need to do a reactor.run() after)

    :param host: Host address to bind to (default: localhost)
    :type host: str
    :param port: Port to bind to (default: 5000)
    :type port: int
    :param debug: Should use debug mode (default: False)
    :type debug: bool
    :param observer_name: Name of twisted observer to log to stdlib
        (default: twisted)
        if None -> do not create observer
    :type observer_name: None | str
    :param flask_app: Flask object to be served (default: None)
        if None -> use imported app
    :type flask_app: flask.Flask
    :rtype: None
    """
    if observer_name is not None:
        observer = STDLibLogObserver(name=observer_name)
        globalLogBeginner.beginLoggingTo([observer])
    if flask_app is None:
        flask_app = create_app(debug=debug)

    # Create a Twisted Web resource for our WebSocket server
    ws_factory = WebSocketServerFactory(
        u"ws://{}:{}".format(host, port)
    )

    ws_factory.protocol = PluginServerProtocol
    # Needed if Hixie76 is to be supported
    # ws_factory.setProtocolOptions(allowHixie76=True)
    ws_resource = WebSocketResource(ws_factory)

    # Create a Twisted Web WSGI resource for our Flask server
    wsgi_resource = WSGIResource(reactor, reactor.getThreadPool(), flask_app)

    # Create a root resource serving everything via WSGI/Flask, but
    # The path "/ws" served by our websocket
    root_resource = WSGIRootResource(wsgi_resource, {'ws': ws_resource})

    # Create a Twisted Web Site and run everything
    site = Site(root_resource)
    # Needed if Hixie76 is to be supported
    # site.protocol = HTTPChannelHixie76Aware

    reactor.listenTCP(port, site)
Example #39
def startLogging(file: TextIO = sys.stdout) -> None:
    """
    Start Twisted logging system.
    """
    fileObserver = textFileLogObserver(file)
    filteringObserver = FilteringLogObserver(
        fileObserver, (globalLogLevelPredicate,)
    )

    globalLogBeginner.beginLoggingTo(
        [filteringObserver],
        redirectStandardIO=False,
    )
Example #40
def startLogging(console=True, filepath=None):
    '''
    Starts the global Twisted logger subsystem, optionally logging to stdout
    and/or to a file specified in the config file
    '''
    global logLevelFilterPredicate

    observers = []
    if console:
        observers.append( FilteringLogObserver(observer=textFileLogObserver(sys.stdout),
            predicates=[logLevelFilterPredicate] ))

    if filepath is not None and filepath != "":
        observers.append( FilteringLogObserver(observer=textFileLogObserver(open(filepath,'a')),
            predicates=[logLevelFilterPredicate] ))
    globalLogBeginner.beginLoggingTo(observers)
Example #41
    def go(self, reactor):
        data = Data("Hello world\n", "text/plain")
        data.putChild("", data)
        factory = Site(data)

        # TODO: adoptStreamConnection should really support AF_UNIX
        protocol = ConnectionFromManager(reactor, factory)
        skt = fromfd(MAGIC_FILE_DESCRIPTOR, AF_UNIX, SOCK_STREAM)
        os.close(MAGIC_FILE_DESCRIPTOR)
        serverTransport = UNIXServer(skt, protocol, None, None, 1234, reactor)
        protocol.makeConnection(serverTransport)
        serverTransport.startReading()

        globalLogBeginner.beginLoggingTo([protocol.sendLog])
        factory.doStart()

        return Deferred()
Example #42
    def _reactor_start(self):
        """
        Start the reactor if it is not already running.
        If someone else started it -> someone else should shut it down.
        """
        try:
            if not reactor.running:
                observer = STDLibLogObserver(name='twisted')
                globalLogBeginner.beginLoggingTo([observer])
                reactor.run(False)
            else:
                self.info("Reactor already running")
                self._reactor_shutdown = False
        except ReactorAlreadyRunning:
            self.info("Reactor already running")
            self._reactor_shutdown = False
        except:
            self.exception("Failed to start reactor")
Example #43
def run():
    from twisted.internet import reactor

    root = logging.getLogger()
    logging.getLogger('django').setLevel(logging.INFO)
    logging.raiseExceptions = settings.DEBUG
    logging._srcfile = None  # Disable expensive collection of location information.
    root.setLevel(logging.DEBUG if settings.DEBUG else logging.INFO)
    root.addHandler(TwistedLoggerLogHandler())
    observer = FilteringLogObserver(
        FileLogObserver(sys.stdout, formatForSystemd),
        [dropUnhandledHTTP2Shutdown],
    )
    globalLogBeginner.beginLoggingTo([observer], redirectStandardIO=False)

    log.info("Yarrharr {version} starting", version=__version__)

    factory = Site(Root(reactor, reactor.getThreadPool()), logPath=None)
    endpoint = serverFromString(reactor, settings.SERVER_ENDPOINT)
    reactor.addSystemEventTrigger('before', 'startup', endpoint.listen, factory)

    updateLoop = AdaptiveLoopingCall(reactor, lambda: updateFeeds(reactor))
    loopEndD = updateLoop.start()
    loopEndD.addErrback(lambda f: log.failure("Polling loop broke", f))

    @receiver(schedule_changed)
    def threadPollNow(sender, **kwargs):
        """
        When the `schedule_changed` signal is sent poke the polling loop. If it
        is sleeping this will cause it to poll immediately. Otherwise this will
        cause it to run the poll function immediately once it returns (running
        it again protects against races).
        """
        log.debug("Immediate poll triggered by {sender}", sender=sender)
        reactor.callFromThread(updateLoop.poke)

    def stopUpdateLoop():
        updateLoop.stop()
        return loopEndD

    reactor.addSystemEventTrigger('before', 'shutdown', stopUpdateLoop)

    reactor.run()
Example #44
    def startLogging(self):
        """
        Start the L{twisted.logger} logging system.
        """
        logFile = self.logFile

        fileLogObserverFactory = self.fileLogObserverFactory

        fileLogObserver = fileLogObserverFactory(logFile)

        logLevelPredicate = LogLevelFilterPredicate(
            defaultLogLevel=self.defaultLogLevel
        )

        filteringObserver = FilteringLogObserver(
            fileLogObserver, [logLevelPredicate]
        )

        globalLogBeginner.beginLoggingTo([filteringObserver])
Example #45
def init(debug=False):
    debug_enabled = debug or os.environ.get('DEBUG', False)
    logging_level = logging.DEBUG if debug_enabled else logging.INFO

    logging.basicConfig(level=logging_level,
                        format='%(asctime)s [%(name)s] %(levelname)s %(message)s',
                        datefmt='%Y-%m-%d %H:%M:%S',
                        filemode='a')

    logging.getLogger('gnupg').setLevel(logging.WARN)
    logging.getLogger('gnupg').addFilter(PrivateKeyFilter())

    def formatter(event):
        event['log_time'] = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(event['log_time']))
        event['log_level'] = event['log_level'].name.upper()
        logstring = u'{log_time} [{log_namespace}] {log_level} ' + event['log_format'] + '\n'
        return logstring.format(**event)

    observers = [FileLogObserver(sys.stdout, formatter)]

    globalLogBeginner.beginLoggingTo(observers)
Example #46
def start(syslog=False, logfile=None, syslog_prefix='lumina', redirect_stdio=False,
          loglevel=None):
    ''' Start the custom logger '''

    # System defaults from twisted.logger._global.py:
    #   globalLogPublisher = LogPublisher()
    #   globalLogBeginner = LogBeginner(globalLogPublisher, sys.stderr, sys, warnings)

    if logfile is None:
        logfile = sys.stdout

    if loglevel is None:
        loglevel = LogLevel.info

    # Lumina log observers
    if syslog and SYSLOG_IMPORTED:
        out_observer = SyslogObserver(prefix=syslog_prefix)
    else:
        out_observer = FileLogObserver(sys.stdout, formatLuminaLogText)

    #level_filter = LogLevelFilterPredicate(defaultLogLevel=loglevel)
    #level_filter.setLogLevelForNamespace('server', LogLevel.warn)

    observers = (
        LuminaLogFormatter(),
        FilteringLogObserver(
            out_observer,
            [ #level_filter,
                LuminaFilterPredicate(minimumLoglevel=loglevel),
            ]
        ),
    )

    # This logger will take over the system (the default LogPublisher). It will
    # iterate over any messages that have already been logged prior to
    # this registration. However, any errors in the observers will be silently
    # ignored because the observers are no longer run through the
    # LogPublisher()
    globalLogBeginner.beginLoggingTo(observers,
                                     redirectStandardIO=redirect_stdio)
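
A small sketch of the replay behaviour described in the comment block above: events emitted before beginLoggingTo() are buffered by the beginner and handed to the new observers once logging starts (unless discardBuffer=True is passed):

import sys

from twisted.logger import Logger, globalLogBeginner, textFileLogObserver

log = Logger(namespace="early")
log.info("emitted before any observer exists")   # held in the beginner's buffer

globalLogBeginner.beginLoggingTo([textFileLogObserver(sys.stdout)])
# The buffered "early" event is written now, followed by later events.
log.info("emitted after beginLoggingTo")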
Example #47
    def startLogging(self):
        """
        Start the L{twisted.logger} logging system.
        """
        logFile = self.options.get(RunnerOptions.logFile, stderr)

        fileLogObserverFactory = self.options.get(
            RunnerOptions.fileLogObserverFactory, textFileLogObserver
        )

        fileLogObserver = fileLogObserverFactory(logFile)

        logLevelPredicate = LogLevelFilterPredicate(
            defaultLogLevel=self.options.get(
                RunnerOptions.defaultLogLevel, LogLevel.info
            )
        )

        filteringObserver = FilteringLogObserver(
            fileLogObserver, [logLevelPredicate]
        )

        globalLogBeginner.beginLoggingTo([filteringObserver])
Example #48
def startLogging(fileobj, level=LogLevel.debug):
    fileObserver = textFileLogObserver(fileobj)
    predicate    = LogLevelFilterPredicate(defaultLogLevel=level)
    observers    = [ FilteringLogObserver(observer=fileObserver, predicates=[predicate]) ]
    globalLogBeginner.beginLoggingTo(observers)
Example #49
def configure_logging():
    logging.basicConfig(level=logging.INFO)
    globalLogBeginner.beginLoggingTo([STDLibLogObserver(name='shinysdr')])
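
For context, a hedged sketch of what the stdlib bridge above does end to end: twisted.logger events are handed to STDLibLogObserver, which re-emits them through the standard logging module (the "demo" namespace and the log format are illustrative):

import logging
import sys

from twisted.logger import Logger, STDLibLogObserver, globalLogBeginner

logging.basicConfig(stream=sys.stdout, level=logging.INFO,
                    format="%(levelname)s %(name)s: %(message)s")

globalLogBeginner.beginLoggingTo([STDLibLogObserver(name='shinysdr')])

# This event travels twisted.logger -> STDLibLogObserver -> logging handlers.
Logger(namespace="demo").info("bridged into the stdlib logging tree")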
Example #50
    # Since we're not using twisted's test runner, it's tricky to get
    # logging set up well.  Most of the time it's easiest to just
    # leave it turned off, but while working on these tests you may want
    # to uncomment one of the other lines instead.
    log.defaultObserver.stop()
    # import sys; log.startLogging(sys.stderr, setStdout=0)
    # log.startLoggingWithObserver(log.PythonLoggingObserver().emit, setStdout=0)
    # import logging; logging.getLogger('twisted').setLevel(logging.WARNING)

    # Twisted recently introduced a new logger; disable that one too.
    try:
        from twisted.logger import globalLogBeginner
    except ImportError:
        pass
    else:
        globalLogBeginner.beginLoggingTo([], redirectStandardIO=False)

if have_twisted:

    class LayeredTwistedIOLoop(TwistedIOLoop):
        """Layers a TwistedIOLoop on top of a TornadoReactor on a SelectIOLoop.

        This is of course silly, but is useful for testing purposes to make
        sure we're implementing both sides of the various interfaces
        correctly.  In some tests another TornadoReactor is layered on top
        of the whole stack.
        """

        def initialize(self, **kwargs):
            # When configured to use LayeredTwistedIOLoop we can't easily
            # get the next-best IOLoop implementation, so use the lowest common
Example #51
def run():
    """
    Entry point into (native) worker processes. This wires up stuff such that
    a worker instance is talking WAMP-over-stdio to the node controller.
    """
    import os
    import sys
    import platform
    import signal

    # Ignore SIGINT so we get consistent behavior on control-C versus
    # sending SIGINT to the controller process. When the controller is
    # shutting down, it sends TERM to all its children but ctrl-C
    # handling will send a SIGINT to all the processes in the group
    # (so then the controller sends a TERM but the child already or
    # will very shortly get a SIGINT as well). Twisted installs signal
    # handlers, but not for SIGINT if there's already a custom one
    # present.

    def ignore(sig, frame):
        log.debug("Ignoring SIGINT in worker.")
    signal.signal(signal.SIGINT, ignore)

    # create the top-level parser
    #
    import argparse
    parser = argparse.ArgumentParser()

    parser.add_argument('-d',
                        '--debug',
                        action='store_true',
                        help='Debug on (optional).')

    parser.add_argument('--reactor',
                        default=None,
                        choices=['select', 'poll', 'epoll', 'kqueue', 'iocp'],
                        help='Explicit Twisted reactor selection (optional).')

    parser.add_argument('-c',
                        '--cbdir',
                        type=str,
                        help="Crossbar.io node directory (required).")

    parser.add_argument('-n',
                        '--node',
                        type=str,
                        help='Crossbar.io node ID (required).')

    parser.add_argument('-w',
                        '--worker',
                        type=str,
                        help='Crossbar.io worker ID (required).')

    parser.add_argument('-r',
                        '--realm',
                        type=str,
                        help='Crossbar.io node (management) realm (required).')

    parser.add_argument('-t',
                        '--type',
                        choices=['router', 'container'],
                        help='Worker type (required).')

    parser.add_argument('--title',
                        type=str,
                        default=None,
                        help='Worker process title to set (optional).')

    options = parser.parse_args()

    # make sure logging to something else than stdio is setup _first_
    #
    from twisted.logger import globalLogBeginner
    from crossbar._logging import Logger, make_JSON_observer

    log = Logger()
    _stderr = sys.stderr
    flo = make_JSON_observer(_stderr)
    globalLogBeginner.beginLoggingTo([flo])

    try:
        import setproctitle
    except ImportError:
        log.info("Warning: could not set worker process title (setproctitle not installed)")
    else:
        # set process title if requested to
        #
        if options.title:
            setproctitle.setproctitle(options.title)
        else:
            WORKER_TYPE_TO_TITLE = {
                'router': 'crossbar-worker [router]',
                'container': 'crossbar-worker [container]'
            }
            setproctitle.setproctitle(WORKER_TYPE_TO_TITLE[options.type].strip())

    # we use an Autobahn utility to import the "best" available Twisted reactor
    #
    from autobahn.twisted.choosereactor import install_reactor
    reactor = install_reactor(options.reactor)

    from twisted.python.reflect import qual
    log.info("Running under {python} using {reactor} reactor",
             python=platform.python_implementation(),
             reactor=qual(reactor.__class__).split('.')[-1])

    options.cbdir = os.path.abspath(options.cbdir)
    os.chdir(options.cbdir)
    # log.msg("Starting from node directory {}".format(options.cbdir))

    from crossbar.worker.router import RouterWorkerSession
    from crossbar.worker.container import ContainerWorkerSession

    WORKER_TYPE_TO_CLASS = {
        'router': RouterWorkerSession,
        'container': ContainerWorkerSession
    }

    from autobahn.twisted.websocket import WampWebSocketServerProtocol

    class WorkerServerProtocol(WampWebSocketServerProtocol):

        def connectionLost(self, reason):
            try:
                # this log message is unlikely to reach the controller (unless
                # only stdin/stdout pipes were lost, but not stderr)
                log.warn("Connection to node controller lost.")
                WampWebSocketServerProtocol.connectionLost(self, reason)
            except:
                pass
            finally:
                # losing the connection to the node controller is fatal:
                # stop the reactor and exit with error
                if reactor.running:
                    reactor.addSystemEventTrigger('after', 'shutdown', os._exit, 1)
                    reactor.stop()
                # if the reactor *isn't* running, we're already shutting down

    try:
        # create a WAMP application session factory
        #
        from autobahn.twisted.wamp import ApplicationSessionFactory
        from autobahn.wamp.types import ComponentConfig

        session_config = ComponentConfig(realm=options.realm, extra=options)
        session_factory = ApplicationSessionFactory(session_config)
        session_factory.session = WORKER_TYPE_TO_CLASS[options.type]

        # create a WAMP-over-WebSocket transport server factory
        #
        from autobahn.twisted.websocket import WampWebSocketServerFactory
        transport_factory = WampWebSocketServerFactory(session_factory, "ws://localhost", debug=False, debug_wamp=False)
        transport_factory.protocol = WorkerServerProtocol
        transport_factory.setProtocolOptions(failByDrop=False)

        # create a protocol instance and wire up to stdio
        #
        from twisted.internet import stdio
        proto = transport_factory.buildProtocol(None)
        stdio.StandardIO(proto)

        # now start reactor loop
        #
        log.info("Entering event loop...")
        reactor.run()

    except Exception as e:
        log.info("Unhandled exception: {}".format(e))
        if reactor.running:
            reactor.addSystemEventTrigger('after', 'shutdown', os._exit, 1)
            reactor.stop()
        else:
            sys.exit(1)
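
Crossbar builds the JSON observer with its own make_JSON_observer helper before calling beginLoggingTo(). A minimal, self-contained sketch of the same idea follows; it is not crossbar's implementation, and the make_json_observer name and the JSON field names are assumptions chosen for illustration.

import json
import sys

from twisted.logger import Logger, formatEvent, globalLogBeginner


def make_json_observer(out=sys.stderr):
    """Return a log observer that writes one JSON object per event (sketch)."""
    def observer(event):
        out.write(json.dumps({
            "level": getattr(event.get("log_level"), "name", ""),
            "namespace": event.get("log_namespace", ""),
            "text": formatEvent(event),
        }) + "\n")
        out.flush()
    return observer


globalLogBeginner.beginLoggingTo([make_json_observer()])
Logger(namespace="worker").info("logging wired up as JSON on stderr")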
Example #52
0
def setup_logging(config, use_worker_options=False):
    """ Set up python logging

    Args:
        config (LoggingConfig | synapse.config.workers.WorkerConfig):
            configuration data

        use_worker_options (bool): True to use 'worker_log_config' and
            'worker_log_file' options instead of 'log_config' and 'log_file'.
    """
    log_config = (config.worker_log_config if use_worker_options
                  else config.log_config)
    log_file = (config.worker_log_file if use_worker_options
                else config.log_file)

    log_format = (
        "%(asctime)s - %(name)s - %(lineno)d - %(levelname)s - %(request)s"
        " - %(message)s"
    )

    if log_config is None:
        # We don't have a logfile, so fall back to the 'verbosity' param from
        # the config or cmdline. (Note that we generate a log config for new
        # installs, so this will be an unusual case)
        level = logging.INFO
        level_for_storage = logging.INFO
        if config.verbosity:
            level = logging.DEBUG
            if config.verbosity > 1:
                level_for_storage = logging.DEBUG

        logger = logging.getLogger('')
        logger.setLevel(level)

        logging.getLogger('synapse.storage.SQL').setLevel(level_for_storage)

        formatter = logging.Formatter(log_format)
        if log_file:
            # TODO: Customisable file size / backup count
            handler = logging.handlers.RotatingFileHandler(
                log_file, maxBytes=(1000 * 1000 * 100), backupCount=3,
                encoding='utf8'
            )

            def sighup(signum, stack):
                logger.info("Closing log file due to SIGHUP")
                handler.doRollover()
                logger.info("Opened new log file due to SIGHUP")
        else:
            handler = logging.StreamHandler()

            def sighup(*args):
                pass

        handler.setFormatter(formatter)

        handler.addFilter(LoggingContextFilter(request=""))

        logger.addHandler(handler)
    else:
        def load_log_config():
            with open(log_config, 'r') as f:
                logging.config.dictConfig(yaml.safe_load(f))

        def sighup(*args):
            # it might be better to use a file watcher or something for this.
            load_log_config()
            logging.info("Reloaded log config from %s due to SIGHUP", log_config)

        load_log_config()

    appbase.register_sighup(sighup)

    # make sure that the first thing we log is a thing we can grep backwards
    # for
    logging.warn("***** STARTING SERVER *****")
    logging.warn(
        "Server %s version %s",
        sys.argv[0], get_version_string(synapse),
    )
    logging.info("Server hostname: %s", config.server_name)

    # It's critical to point twisted's internal logging somewhere, otherwise it
    # stacks up and leaks up to 64K objects;
    # see: https://twistedmatrix.com/trac/ticket/8164
    #
    # Routing to the python logging framework could be a performance problem if
    # the handlers blocked for a long time as python.logging is a blocking API
    # see https://twistedmatrix.com/documents/current/core/howto/logger.html
    # filed as https://github.com/matrix-org/synapse/issues/1727
    #
    # However this may not be too much of a problem if we are just writing to a file.
    observer = STDLibLogObserver()

    def _log(event):

        if "log_text" in event:
            if event["log_text"].startswith("DNSDatagramProtocol starting on "):
                return

            if event["log_text"].startswith("(UDP Port "):
                return

            if event["log_text"].startswith("Timing out client"):
                return

        return observer(event)

    globalLogBeginner.beginLoggingTo(
        [_log],
        redirectStandardIO=not config.no_redirect_stdio,
    )
    if not config.no_redirect_stdio:
        print("Redirected stdout/stderr to logs")
Example #53
0
def start_logging():
    """
    Start logging to the publisher.
    """
    globalLogBeginner.beginLoggingTo([])
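
Passing an empty observer list still activates the global log publisher (and, by default, redirects stdio into it); observers can be attached or removed later. A small usage sketch, assuming nothing beyond the public twisted.logger API:

import sys

from twisted.logger import globalLogBeginner, globalLogPublisher, textFileLogObserver

globalLogBeginner.beginLoggingTo([])  # start the publisher with no observers
# An observer can still be attached (or removed) at any later point:
globalLogPublisher.addObserver(textFileLogObserver(sys.stdout))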
Example #54
0
            #  print 'Response headers:'
            #  print pformat(list(response.headers.getAllRawHeaders()))
            finished = Deferred()
            response.deliverBody(RestHandle(finished, self.event_handler))
            return finished
        d.addCallbacks(cbRequest, cbFail)
        return d


if __name__ == '__main__':

    import sys
    from twisted.logger import globalLogBeginner, textFileLogObserver, Logger

    observers = [textFileLogObserver(sys.stdout)]
    globalLogBeginner.beginLoggingTo(observers)

    cl = True
#     log.startLogging(sys.stdout)
    log = Logger()

    def set_temp(obj):
        temp = obj.devices(
            path='thermostats/o4WARbb6TBa0Z81uC9faoLuE3_EunExt',
            target_temperature_c=23)
        temp.addCallback(result, 'temp set_request')

    def result(data, prefix=''):
        log.info('{prefix} request result: {data}', prefix=prefix, data=data)
#         reactor.stop()  # @UndefinedVariable
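
The RestHandle class passed to response.deliverBody() above is not part of this snippet. As a hedged illustration of the pattern it appears to follow, the sketch below defines a hypothetical BodyCollector protocol that buffers the response body and fires the supplied Deferred once the response completes.

from twisted.internet.protocol import Protocol
from twisted.web.client import ResponseDone


class BodyCollector(Protocol):
    """Collect a response body delivered via response.deliverBody() (sketch)."""

    def __init__(self, finished, event_handler=None):
        self.finished = finished          # Deferred fired when the body is complete
        self.event_handler = event_handler
        self._chunks = []

    def dataReceived(self, data):
        self._chunks.append(data)
        if self.event_handler is not None:
            self.event_handler(data)      # stream chunks to the caller as they arrive

    def connectionLost(self, reason):
        if reason.check(ResponseDone):
            self.finished.callback(b"".join(self._chunks))
        else:
            self.finished.errback(reason)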
Example #55
0
def setup_logging(config, use_worker_options=False):
    """ Set up python logging

    Args:
        config (LoggingConfig | synapse.config.workers.WorkerConfig):
            configuration data

        use_worker_options (bool): True to use 'worker_log_config' and
            'worker_log_file' options instead of 'log_config' and 'log_file'.
    """
    log_config = (config.worker_log_config if use_worker_options
                  else config.log_config)
    log_file = (config.worker_log_file if use_worker_options
                else config.log_file)

    log_format = (
        "%(asctime)s - %(name)s - %(lineno)d - %(levelname)s - %(request)s"
        " - %(message)s"
    )

    if log_config is None:
        # We don't have a logfile, so fall back to the 'verbosity' param from
        # the config or cmdline. (Note that we generate a log config for new
        # installs, so this will be an unusual case)
        level = logging.INFO
        level_for_storage = logging.INFO
        if config.verbosity:
            level = logging.DEBUG
            if config.verbosity > 1:
                level_for_storage = logging.DEBUG

        logger = logging.getLogger('')
        logger.setLevel(level)

        logging.getLogger('synapse.storage.SQL').setLevel(level_for_storage)

        formatter = logging.Formatter(log_format)
        if log_file:
            # TODO: Customisable file size / backup count
            handler = logging.handlers.RotatingFileHandler(
                log_file, maxBytes=(1000 * 1000 * 100), backupCount=3
            )

            def sighup(signum, stack):
                logger.info("Closing log file due to SIGHUP")
                handler.doRollover()
                logger.info("Opened new log file due to SIGHUP")
        else:
            handler = logging.StreamHandler()

            def sighup(signum, stack):
                pass

        handler.setFormatter(formatter)

        handler.addFilter(LoggingContextFilter(request=""))

        logger.addHandler(handler)
    else:
        def load_log_config():
            with open(log_config, 'r') as f:
                logging.config.dictConfig(yaml.safe_load(f))

        def sighup(signum, stack):
            # it might be better to use a file watcher or something for this.
            logging.info("Reloading log config from %s due to SIGHUP",
                         log_config)
            load_log_config()

        load_log_config()

    # TODO(paul): obviously this is a terrible mechanism for
    #   stealing SIGHUP, because it means no other part of synapse
    #   can use it instead. If we want to catch SIGHUP anywhere
    #   else as well, I'd suggest we find a nicer way to broadcast
    #   it around.
    if hasattr(signal, "SIGHUP"):
        signal.signal(signal.SIGHUP, sighup)

    # It's critical to point twisted's internal logging somewhere, otherwise it
    # stacks up and leaks up to 64K objects;
    # see: https://twistedmatrix.com/trac/ticket/8164
    #
    # Routing to the python logging framework could be a performance problem if
    # the handlers blocked for a long time as python.logging is a blocking API
    # see https://twistedmatrix.com/documents/current/core/howto/logger.html
    # filed as https://github.com/matrix-org/synapse/issues/1727
    #
    # However this may not be too much of a problem if we are just writing to a file.
    observer = STDLibLogObserver()
    globalLogBeginner.beginLoggingTo(
        [observer],
        redirectStandardIO=not config.no_redirect_stdio,
    )
Example #56
0
def main(reactor):
    globalLogBeginner.beginLoggingTo([textFileLogObserver(sys.stderr)])
    return zk.connect().addCallback(zkconnected, reactor)
Example #57
0
def start_logging(output=sys.stdout):
    wrapp = wrapp_observer(output)
    globalLogBeginner.beginLoggingTo([wrapp])
Example #58
0
    def handle(self, *args, **options):
        globalLogBeginner.beginLoggingTo(
            [textFileLogObserver(sys.stderr)], redirectStandardIO=False)
        react(updateFeeds, (options['max_fetch'],))
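
The updateFeeds callable and the command's options are not shown in this snippet. The sketch below, using a placeholder updateFeeds, illustrates the calling convention task.react() relies on here: the reactor is passed first, followed by the extra argument tuple, and the reactor is stopped once the returned Deferred fires.

from twisted.internet import defer
from twisted.internet.task import react


def updateFeeds(reactor, max_fetch):
    # Placeholder body: a real command would fetch up to max_fetch feeds
    # and return a Deferred; react() stops the reactor once it fires.
    return defer.succeed(None)


if __name__ == "__main__":
    react(updateFeeds, (10,))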
Example #59
0
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Secant is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Secant.  If not, see <http://www.gnu.org/licenses/>.

import sys

from twisted.internet import reactor
from twisted.internet import endpoints
from twisted.logger import globalLogBeginner
from twisted.logger import textFileLogObserver
from twisted.logger import Logger

from secant import config
from secant import TacacsProtocolFactory

output = textFileLogObserver(sys.stdout)
globalLogBeginner.beginLoggingTo([output])

factory = TacacsProtocolFactory()
endpoint = endpoints.serverFromString(reactor, 'tcp:port=49')
endpoint.listen(factory)

reactor.run()
Example #60
0
    # Since we're not using twisted's test runner, it's tricky to get
    # logging set up well.  Most of the time it's easiest to just
    # leave it turned off, but while working on these tests you may want
    # to uncomment one of the other lines instead.
    log.defaultObserver.stop()
    # import sys; log.startLogging(sys.stderr, setStdout=0)
    # log.startLoggingWithObserver(log.PythonLoggingObserver().emit, setStdout=0)
    # import logging; logging.getLogger('twisted').setLevel(logging.WARNING)

    # Twisted recently introduced a new logger; disable that one too.
    try:
        from twisted.logger import globalLogBeginner
    except ImportError:
        pass
    else:
        globalLogBeginner.beginLoggingTo([])

if have_twisted:
    class LayeredTwistedIOLoop(TwistedIOLoop):
        """Layers a TwistedIOLoop on top of a TornadoReactor on a SelectIOLoop.

        This is of course silly, but is useful for testing purposes to make
        sure we're implementing both sides of the various interfaces
        correctly.  In some tests another TornadoReactor is layered on top
        of the whole stack.
        """
        def initialize(self, **kwargs):
            # When configured to use LayeredTwistedIOLoop we can't easily
            # get the next-best IOLoop implementation, so use the lowest common
            # denominator.
            self.real_io_loop = SelectIOLoop(make_current=False)