Example #1
def init(outFile):
    level = levels[config.LOG_LEVEL]
    predicate = LogLevelFilterPredicate(defaultLogLevel=level)
    observer = FilteringLogObserver(textFileLogObserver(outFile=outFile), [predicate])
    observer._encoding = "utf-8"
    globalLogPublisher.addObserver(observer)
    log.info("Start logging with {l}", l=level)
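Example #1 leans on module-level names defined elsewhere in its project: levels, config.LOG_LEVEL and a log Logger. A minimal self-contained sketch of the same filtering-observer pattern, with assumed defaults standing in for those names, might look like this:

import sys

from twisted.logger import (Logger, LogLevel, LogLevelFilterPredicate,
                            FilteringLogObserver, textFileLogObserver,
                            globalLogPublisher)

log = Logger()


def init(out_file=sys.stdout, level=LogLevel.info):
    # Drop events below the requested level before they reach the text observer.
    predicate = LogLevelFilterPredicate(defaultLogLevel=level)
    observer = FilteringLogObserver(textFileLogObserver(out_file), [predicate])
    globalLogPublisher.addObserver(observer)
    log.info("Start logging with {level}", level=level)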
Example #2
    def test_verbose_logging(self):
        """
        If verbose logging is turned on, the full request and response is
        logged.
        """
        self.patch(helpers, 'get_site', partial(get_site, logging=True))
        logged_events = []
        addObserver(logged_events.append)
        self.addCleanup(removeObserver, logged_events.append)

        response, url = self.make_request_to_site()

        self.assertEqual(2, len(logged_events))
        self.assertTrue(all([not event['isError'] for event in logged_events]))

        messages = [get_log_message(event) for event in logged_events]
        request_match = re.compile(
            r"^Received request: GET (?P<url>.+)\n"
            r"Headers: (?P<headers>\{.+\})\n\s*$"
        ).match(messages[0])
        self.assertNotEqual(None, request_match)
        self.assertEqual(url, request_match.group('url'))
        headers = json.loads(request_match.group('headers'))
        self.assertEqual(['two'], headers.get('One'))

        response_match = re.compile(
            r"^Responding with 200 for: GET (?P<url>.+)\n"
            r"Headers: (?P<headers>\{.+\})\n"
            r"\nresponse!\n\s*$"
        ).match(messages[1])
        self.assertNotEqual(None, response_match)
        self.assertEqual(url, response_match.group('url'))
        headers = json.loads(response_match.group('headers'))
        self.assertEqual(['application/json'], headers.get('Content-Type'))
Example #3
def start_upload_server():
	import argparse

	from twisted.internet import reactor
	from twisted.logger import Logger, globalLogPublisher, STDLibLogObserver
	from twisted.web.server import Site
	from twisted.web.resource import Resource

	from cheesepi.server.upload import UploadHandler

	# Argument parsing
	parser = argparse.ArgumentParser()
	parser.add_argument('--port', type=int, default=18090,
	                    help='Port to listen on')
	args = parser.parse_args()

	init_logging()

	# Make twisted logging write to pythons logging module
	globalLogPublisher.addObserver(STDLibLogObserver(name="cheesepi.server.upload"))

	# Use twisted logger when in twisted
	log = Logger()

	root = Resource()
	root.putChild(b"upload", UploadHandler())  # putChild expects a bytes path segment on Python 3
	upload_server = Site(root)

	reactor.listenTCP(args.port, upload_server)
	log.info("Starting upload server on port {port}...", port=args.port)
	reactor.run()
Example #4
def start_control_server():
	import argparse

	from twisted.internet import reactor
	from twisted.logger import Logger, globalLogPublisher, STDLibLogObserver

	from cheesepi.server.control import (CheeseRPCServerFactory,
	                                     CheeseRPCServer)
	from cheesepi.server.storage.mongo import MongoDAO

	# Argument parsing
	parser = argparse.ArgumentParser()
	parser.add_argument('--port', type=int, default=18080,
	                    help='Port to listen on')
	args = parser.parse_args()

	init_logging()

	# Make twisted logging write to pythons logging module
	globalLogPublisher.addObserver(STDLibLogObserver(name="cheesepi.server.control"))

	# Use twisted logger when in twisted
	log = Logger()

	# Logging
	#log = Logger()
	#globalLogPublisher.addObserver(PrintingObserver())

	#dao = MongoDAO()
	dao = MongoDAO('localhost', 27017)
	control_server = CheeseRPCServer(dao).getStreamFactory(CheeseRPCServerFactory)

	reactor.listenTCP(args.port, control_server)
	log.info("Starting control server on port {port}...", port=args.port)
	reactor.run()
Example #5
    def startService(self):
        self.stdlib_cleanup = stdlib_logging_to_eliot_configuration(getLogger())
        self.twisted_observer = TwistedLoggerToEliotObserver()
        globalLogPublisher.addObserver(self.twisted_observer)

        for dest in self.destinations:
            add_destination(dest)
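Example #5 only wires the observers up; the matching stopService would presumably undo that wiring in reverse order. A sketch, assuming stdlib_logging_to_eliot_configuration returned a restore callable and that eliot's remove_destination mirrors add_destination:

    def stopService(self):
        # Tear down in reverse order of startService.
        for dest in self.destinations:
            remove_destination(dest)
        globalLogPublisher.removeObserver(self.twisted_observer)
        self.stdlib_cleanup()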
Example #6
def noiseControl(options):
    # terminal noise/info logic
    # allows the specification of the log file location
    if not options["loud"]:
        log_path = options["log"]
        globalLogPublisher.addObserver(hendrixObserver(log_path))
    return None
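Example #6 only reads two keys from its options mapping; a minimal call might look like this sketch (the keys and values are inferred from the snippet):

options = {"loud": False, "log": "/var/log/hendrix.log"}  # assumed shape
noiseControl(options)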
Example #7
    def __init__(self, reactor, config_filename):
        self._network = None
        self._proc = None
        self._reactor = reactor
        self._config_filename = config_filename
        self.connections = dict()
        with open(config_filename) as f:
            self.config = json.load(f)
        log_file = open(self.config["log_file"], "a")
        globalLogPublisher.addObserver(textFileLogObserver(log_file))
        self.api = ApiProxy(self._reactor)
        self.server_factory = pb.PBServerFactory(self.api)
Example #8
def init_logging(log_level):
    """
    Initialise the logging by adding an observer to the global log publisher.

    :param str log_level: The minimum log level to log messages for.
    """
    log_level_filter = LogLevelFilterPredicate(
        LogLevel.levelWithName(log_level))
    log_level_filter.setLogLevelForNamespace(
        'twisted.web.client._HTTP11ClientFactory', LogLevel.warn)
    log_observer = FilteringLogObserver(
        textFileLogObserver(sys.stdout), [log_level_filter])
    globalLogPublisher.addObserver(log_observer)
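For reference, Example #8 depends on these twisted.logger imports plus sys; with the function in scope, a call site is a one-liner:

import sys

from twisted.logger import (Logger, LogLevel, LogLevelFilterPredicate,
                            FilteringLogObserver, textFileLogObserver,
                            globalLogPublisher)

init_logging('info')
Logger(namespace='myapp').info('logging is up')  # 'myapp' is a placeholder namespace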
Example #9
def getLogger(level):

    loglevel = getattr(LogLevel, level)
    filter_ = LogLevelFilterPredicate(defaultLogLevel=loglevel)
    if loglevel > LogLevel.debug:
        filter_.setLogLevelForNamespace('stdout', LogLevel.warn)
    observer = FilteringLogObserver(stdoutFileLogObserver(), [filter_])
#     observer = FilteringLogObserver(globalLogPublisher, [filter])
#     log = Logger()

#     globalLogBeginner.beginLoggingTo([observer])
    globalLogPublisher.addObserver(observer)
    return lambda event: None
Example #10
    def startService(self):
        super(SpreadFlowService, self).startService()

        if self.options['confpath']:
            confpath = self.options['confpath']
        else:
            confpath = os.path.join(os.getcwd(), 'spreadflow.conf')

        stream = config_eval(confpath)

        pipeline = list()
        pipeline.append(AliasResolverPass())
        pipeline.append(PortsValidatorPass())

        if self.options['multiprocess']:
            pipeline.append(PartitionExpanderPass())
            pipeline.append(PartitionBoundsPass())
            if self.options['partition']:
                pipeline.append(PartitionWorkerPass())
                partition = self.options['partition']
                stream.append(AddTokenOp(PartitionSelectToken(partition)))
            else:
                pipeline.append(PartitionControllersPass())

        pipeline.append(ComponentsPurgePass())
        pipeline.append(EventHandlersPass())

        for compiler_step in pipeline:
            stream = compiler_step(stream)

        self._eventdispatcher = EventDispatcher()

        if self.options['oneshot']:
            self._eventdispatcher.add_listener(JobEvent, 0, self._oneshot_job_event_handler)

        connection_parser = ConnectionParser()
        stream = connection_parser.extract(stream)
        self._scheduler = Scheduler(connection_parser.get_portmap(), self._eventdispatcher)

        event_handler_parser = EventHandlerParser()
        stream = event_handler_parser.extract(stream)
        for event_type, priority, callback in event_handler_parser.get_handlers():
            self._eventdispatcher.add_listener(event_type, priority, callback)

        if self.options['queuestatus']:
            statuslog = SpreadFlowQueuestatusLogger(self.options['queuestatus'])
            statuslog.watch(1, self._scheduler)
            globalLogPublisher.addObserver(statuslog.logstatus)

        self._scheduler.run().addBoth(self._stop)
Example #11
    def test_doStartLoggingStatement(self):
        """
        L{Factory.doStart} logs that it is starting a factory, followed by
        the L{repr} of the L{Factory} instance that is being started.
        """
        events = []
        globalLogPublisher.addObserver(events.append)
        self.addCleanup(
            lambda: globalLogPublisher.removeObserver(events.append))

        f = Factory()
        f.doStart()

        self.assertIs(events[0]['factory'], f)
        self.assertEqual(events[0]['log_level'], LogLevel.info)
        self.assertEqual(events[0]['log_format'],
                         'Starting factory {factory!r}')
Example #12
    def redirect_to_twisted(self):
        """
        Redirect Eliot logs to Twisted.

        @return: L{list} of L{dict} - the log messages written to Twisted will
             eventually be appended to this list.
        """
        written = []

        def got_event(event):
            if event.get("log_namespace") == "eliot":
                written.append((event["log_level"].name, event["eliot"]))

        globalLogPublisher.addObserver(got_event)
        self.addCleanup(globalLogPublisher.removeObserver, got_event)
        destination = TwistedDestination()
        addDestination(destination)
        self.addCleanup(removeDestination, destination)
        return written
Example #13
def main():
    parser = argparse.ArgumentParser()

    # Network options
    group_network_container = parser.add_argument_group(title="Network options")
    group_network = group_network_container.add_mutually_exclusive_group(required=True)
    group_network.add_argument("--mainnet", action="store_true", default=False, help="Use MainNet")
    group_network.add_argument("--testnet", action="store_true", default=False, help="Use TestNet")
    group_network.add_argument("--privnet", action="store_true", default=False, help="Use PrivNet")
    group_network.add_argument("--coznet", action="store_true", default=False, help="Use CozNet")
    group_network.add_argument("--config", action="store", help="Use a specific config file")

    # Ports for RPC and REST api
    group_modes = parser.add_argument_group(title="Mode(s)")
    group_modes.add_argument("--port-rpc", type=int, help="port to use for the json-rpc api (eg. 10332)")
    group_modes.add_argument("--port-rest", type=int, help="port to use for the rest api (eg. 80)")

    # Advanced logging setup
    group_logging = parser.add_argument_group(title="Logging options")
    group_logging.add_argument("--logfile", action="store", type=str, help="Logfile")
    group_logging.add_argument("--syslog", action="store_true", help="Log to syslog instead of to log file ('user' is the default facility)")
    group_logging.add_argument("--syslog-local", action="store", type=int, choices=range(0, 8), metavar="[0-7]", help="Log to a local syslog facility instead of 'user'. Value must be between 0 and 7 (e.g. 0 for 'local0').")
    group_logging.add_argument("--disable-stderr", action="store_true", help="Disable stderr logger")

    # Where to store stuff
    parser.add_argument("--datadir", action="store",
                        help="Absolute path to use for database directories")
    # peers
    parser.add_argument("--maxpeers", action="store", default=5,
                        help="Max peers to use for P2P Joining")

    # If a wallet should be opened
    parser.add_argument("--wallet",
                        action="store",
                        help="Open wallet. Will allow you to use methods that require an open wallet")

    # host
    parser.add_argument("--host", action="store", type=str, help="Hostname (for example 127.0.0.1)", default="0.0.0.0")

    # Now parse
    args = parser.parse_args()
    # print(args)

    if not args.port_rpc and not args.port_rest:
        print("Error: specify at least one of --port-rpc / --port-rest")
        parser.print_help()
        return

    if args.port_rpc == args.port_rest:
        print("Error: --port-rpc and --port-rest cannot be the same")
        parser.print_help()
        return

    if args.logfile and (args.syslog or args.syslog_local):
        print("Error: Cannot use both logfile and syslog at once")
        parser.print_help()
        return

    # Setting the datadir must come before setting the network, else the wrong path is checked at net setup.
    if args.datadir:
        settings.set_data_dir(args.datadir)

    # Network configuration depending on command line arguments. By default, the testnet settings are already loaded.
    if args.config:
        settings.setup(args.config)
    elif args.mainnet:
        settings.setup_mainnet()
    elif args.testnet:
        settings.setup_testnet()
    elif args.privnet:
        settings.setup_privnet()
    elif args.coznet:
        settings.setup_coznet()

    if args.maxpeers:
        settings.set_max_peers(args.maxpeers)

    if args.syslog or args.syslog_local is not None:
        # Setup the syslog facility
        if args.syslog_local is not None:
            print("Logging to syslog local%s facility" % args.syslog_local)
            syslog_facility = SysLogHandler.LOG_LOCAL0 + args.syslog_local
        else:
            print("Logging to syslog user facility")
            syslog_facility = SysLogHandler.LOG_USER

        # Setup logzero to only use the syslog handler
        logzero.syslog(facility=syslog_facility)
    else:
        # Setup file logging
        if args.logfile:
            logfile = os.path.abspath(args.logfile)
            if args.disable_stderr:
                print("Logging to logfile: %s" % logfile)
            else:
                print("Logging to stderr and logfile: %s" % logfile)
            logzero.logfile(logfile, maxBytes=LOGFILE_MAX_BYTES, backupCount=LOGFILE_BACKUP_COUNT, disableStderrLogger=args.disable_stderr)

        else:
            print("Logging to stdout and stderr")

    if args.wallet:
        if not os.path.exists(args.wallet):
            print("Wallet file not found")
            return

        passwd = os.environ.get('NEO_PYTHON_JSONRPC_WALLET_PASSWORD', None)
        if not passwd:
            passwd = prompt("[password]> ", is_password=True)

        password_key = to_aes_key(passwd)
        try:
            wallet = UserWallet.Open(args.wallet, password_key)

        except Exception as e:
            print(f"Could not open wallet: {e}")
            return
    else:
        wallet = None

    # Disable logging smart contract events
    settings.set_log_smart_contract_events(False)

    # Write a PID file to easily quit the service
    write_pid_file()

    # Setup Twisted and Klein logging to use the logzero setup
    observer = STDLibLogObserver(name=logzero.LOGZERO_DEFAULT_LOGGER)
    globalLogPublisher.addObserver(observer)

    # Instantiate the blockchain and subscribe to notifications
    blockchain = LevelDBBlockchain(settings.chain_leveldb_path)
    Blockchain.RegisterBlockchain(blockchain)
    dbloop = task.LoopingCall(Blockchain.Default().PersistBlocks)
    dbloop.start(.1)

    # If a wallet is open, make sure it processes blocks
    if wallet:
        walletdb_loop = task.LoopingCall(wallet.ProcessBlocks)
        walletdb_loop.start(1)

    # Setup twisted reactor, NodeLeader and start the NotificationDB
    reactor.suggestThreadPoolSize(15)
    NodeLeader.Instance().Start()
    NotificationDB.instance().start()

    # Start a thread with custom code
    d = threading.Thread(target=custom_background_code)
    d.daemon = True  # a daemon thread is killed when the main thread exits
    d.start()

    if args.port_rpc:
        logger.info("Starting json-rpc api server on http://%s:%s" % (args.host, args.port_rpc))
        api_server_rpc = JsonRpcApi(args.port_rpc, wallet=wallet)
        endpoint_rpc = "tcp:port={0}:interface={1}".format(args.port_rpc, args.host)
        endpoints.serverFromString(reactor, endpoint_rpc).listen(Site(api_server_rpc.app.resource()))
#        reactor.listenTCP(int(args.port_rpc), server.Site(api_server_rpc))
#        api_server_rpc.app.run(args.host, args.port_rpc)

    if args.port_rest:
        logger.info("Starting REST api server on http://%s:%s" % (args.host, args.port_rest))
        api_server_rest = RestApi()
        endpoint_rest = "tcp:port={0}:interface={1}".format(args.port_rest, args.host)
        endpoints.serverFromString(reactor, endpoint_rest).listen(Site(api_server_rest.app.resource()))
#        api_server_rest.app.run(args.host, args.port_rest)

    reactor.run()

    # After the reactor is stopped, gracefully shutdown the database.
    logger.info("Closing databases...")
    NotificationDB.close()
    Blockchain.Default().Dispose()
    NodeLeader.Instance().Shutdown()
    if wallet:
        wallet.Close()
Example #14
    def __enter__(self):
        set_global_log_level(self.desired_level)
        globalLogPublisher.addObserver(self._got_log)
        return self
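The matching __exit__ for Example #14 would presumably remove the observer and restore whatever level was saved beforehand; a sketch, assuming the instance recorded it as self.previous_level:

    def __exit__(self, exc_type, exc_value, traceback):
        # Stop capturing and put the saved global log level back.
        globalLogPublisher.removeObserver(self._got_log)
        set_global_log_level(self.previous_level)
        return False  # do not swallow exceptions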
Example #15
def run():
    """
    Entry point into (native) worker processes. This wires up stuff such that
    a worker instance is talking WAMP-over-stdio to the node controller.
    """
    import os
    import sys
    import platform
    import signal

    # Ignore SIGINT so we get consistent behavior on control-C versus
    # sending SIGINT to the controller process. When the controller is
    # shutting down, it sends TERM to all its children but ctrl-C
    # handling will send a SIGINT to all the processes in the group
    # (so then the controller sends a TERM but the child already or
    # will very shortly get a SIGINT as well). Twisted installs signal
    # handlers, but not for SIGINT if there's already a custom one
    # present.

    def ignore(sig, frame):
        log.debug("Ignoring SIGINT in worker.")

    signal.signal(signal.SIGINT, ignore)

    # create the top-level parser
    #
    import argparse
    parser = argparse.ArgumentParser()

    parser.add_argument('--reactor',
                        default=None,
                        choices=['select', 'poll', 'epoll', 'kqueue', 'iocp'],
                        help='Explicit Twisted reactor selection (optional).')

    parser.add_argument(
        '--loglevel',
        default="info",
        choices=['none', 'error', 'warn', 'info', 'debug', 'trace'],
        help='Initial log level.')

    parser.add_argument('-c',
                        '--cbdir',
                        type=six.text_type,
                        help="Crossbar.io node directory (required).")

    parser.add_argument('-n',
                        '--node',
                        type=six.text_type,
                        help='Crossbar.io node ID (required).')

    parser.add_argument('-w',
                        '--worker',
                        type=six.text_type,
                        help='Crossbar.io worker ID (required).')

    parser.add_argument('-r',
                        '--realm',
                        type=six.text_type,
                        help='Crossbar.io node (management) realm (required).')

    parser.add_argument('-t',
                        '--type',
                        choices=['router', 'container', 'websocket-testee'],
                        help='Worker type (required).')

    parser.add_argument('--title',
                        type=six.text_type,
                        default=None,
                        help='Worker process title to set (optional).')

    options = parser.parse_args()

    # make sure logging to something other than stdio is set up _first_
    #
    from crossbar._logging import make_JSON_observer, cb_logging_aware, _stderr
    from crossbar._logging import make_logger, start_logging, set_global_log_level
    from twisted.logger import globalLogPublisher

    # Set the global log level
    set_global_log_level(options.loglevel)

    log = make_logger()

    # Print a magic phrase that tells the capturing logger that it supports
    # Crossbar's rich logging
    print(cb_logging_aware, file=_stderr)
    _stderr.flush()

    flo = make_JSON_observer(_stderr)
    globalLogPublisher.addObserver(flo)
    start_logging()

    # we use an Autobahn utility to import the "best" available Twisted reactor
    #
    from autobahn.twisted.choosereactor import install_reactor
    reactor = install_reactor(options.reactor)

    from twisted.python.reflect import qual
    log.info("Worker process starting ({python}-{reactor}) ..",
             python=platform.python_implementation(),
             reactor=qual(reactor.__class__).split('.')[-1])

    # set process title if requested to
    #
    try:
        import setproctitle
    except ImportError:
        log.debug(
            "Could not set worker process title (setproctitle not installed)")
    else:
        if options.title:
            setproctitle.setproctitle(options.title)
        else:
            WORKER_TYPE_TO_TITLE = {
                'router': 'crossbar-worker [router]',
                'container': 'crossbar-worker [container]',
                'websocket-testee': 'crossbar-worker [websocket-testee]'
            }
            setproctitle.setproctitle(
                WORKER_TYPE_TO_TITLE[options.type].strip())

    # node directory
    #
    options.cbdir = os.path.abspath(options.cbdir)
    os.chdir(options.cbdir)
    # log.msg("Starting from node directory {}".format(options.cbdir))

    from crossbar.worker.router import RouterWorkerSession
    from crossbar.worker.container import ContainerWorkerSession
    from crossbar.worker.testee import WebSocketTesteeWorkerSession

    WORKER_TYPE_TO_CLASS = {
        'router': RouterWorkerSession,
        'container': ContainerWorkerSession,
        'websocket-testee': WebSocketTesteeWorkerSession
    }

    from twisted.internet.error import ConnectionDone, ReactorNotRunning
    from autobahn.twisted.websocket import WampWebSocketServerProtocol

    class WorkerServerProtocol(WampWebSocketServerProtocol):
        def connectionLost(self, reason):
            # the behavior here differs slightly depending on whether we're
            # shutting down cleanly or because of "issues"
            if isinstance(reason.value, ConnectionDone):
                was_clean = True
            else:
                was_clean = False

            try:
                # this log message is unlikely to reach the controller (unless
                # only stdin/stdout pipes were lost, but not stderr)
                if was_clean:
                    log.info("Connection to node controller closed cleanly")
                else:
                    log.warn("Connection to node controller lost: {reason}",
                             reason=reason)

                # give the WAMP transport a chance to do its thing
                WampWebSocketServerProtocol.connectionLost(self, reason)
            except:
                # we're in the process of shutting down .. so ignore ..
                pass
            finally:
                # after the connection to the node controller is gone,
                # the worker is orphaned and should exit

                # determine process exit code
                if was_clean:
                    exit_code = 0
                else:
                    exit_code = 1

                # exit the whole worker process when the reactor has stopped
                reactor.addSystemEventTrigger('after', 'shutdown', os._exit,
                                              exit_code)

                # stop the reactor
                try:
                    reactor.stop()
                except ReactorNotRunning:
                    pass

    try:
        # create a WAMP application session factory
        #
        from autobahn.twisted.wamp import ApplicationSessionFactory
        from autobahn.wamp.types import ComponentConfig

        session_config = ComponentConfig(realm=options.realm, extra=options)
        session_factory = ApplicationSessionFactory(session_config)
        session_factory.session = WORKER_TYPE_TO_CLASS[options.type]

        # create a WAMP-over-WebSocket transport server factory
        #
        from autobahn.twisted.websocket import WampWebSocketServerFactory
        transport_factory = WampWebSocketServerFactory(session_factory,
                                                       "ws://localhost",
                                                       debug=False,
                                                       debug_wamp=False)
        transport_factory.protocol = WorkerServerProtocol
        transport_factory.setProtocolOptions(failByDrop=False)

        # create a protocol instance and wire up to stdio
        #
        from twisted.python.runtime import platform as _platform
        from twisted.internet import stdio
        proto = transport_factory.buildProtocol(None)
        if _platform.isWindows():
            stdio.StandardIO(proto)
        else:
            stdio.StandardIO(proto, stdout=3)

        # now start reactor loop
        #
        if False:
            log.info("vmprof enabled.")

            import os
            import vmprof

            PROFILE_FILE = 'vmprof_{}.dat'.format(os.getpid())

            outfd = os.open(PROFILE_FILE, os.O_RDWR | os.O_CREAT | os.O_TRUNC)
            vmprof.enable(outfd, period=0.01)

            log.info("Entering event loop...")
            reactor.run()

            vmprof.disable()
        else:
            log.debug("Entering event loop...")
            reactor.run()

    except Exception as e:
        log.info("Unhandled exception: {}".format(e))
        if reactor.running:
            reactor.addSystemEventTrigger('after', 'shutdown', os._exit, 1)
            reactor.stop()
        else:
            sys.exit(1)
Example #16
    @classmethod
    def start_sentry_logging(cls, dsn: str):
        _SentryInitGuard.init(dsn)
        globalLogPublisher.addObserver(sentry_observer)
Example #17
def run(options, reactor=None):
    """
    Entry point into (native) worker processes. This wires up stuff such that
    a worker instance is talking WAMP-over-stdio to the node controller.
    """
    import os
    import sys
    import platform
    import signal

    # make sure logging to something other than stdio is set up _first_
    #
    from crossbar._logging import make_JSON_observer, cb_logging_aware
    from txaio import make_logger, start_logging
    from twisted.logger import globalLogPublisher
    from twisted.python.reflect import qual

    log = make_logger()

    # Print a magic phrase that tells the capturing logger that it supports
    # Crossbar's rich logging
    print(cb_logging_aware, file=sys.__stderr__)
    sys.__stderr__.flush()

    flo = make_JSON_observer(sys.__stderr__)
    globalLogPublisher.addObserver(flo)

    # Ignore SIGINT so we get consistent behavior on control-C versus
    # sending SIGINT to the controller process. When the controller is
    # shutting down, it sends TERM to all its children but ctrl-C
    # handling will send a SIGINT to all the processes in the group
    # (so then the controller sends a TERM but the child already or
    # will very shortly get a SIGINT as well). Twisted installs signal
    # handlers, but not for SIGINT if there's already a custom one
    # present.

    def ignore(sig, frame):
        log.debug("Ignoring SIGINT in worker.")

    signal.signal(signal.SIGINT, ignore)

    # actually begin logging
    start_logging(None, options.loglevel)

    # we use an Autobahn utility to import the "best" available Twisted reactor
    #
    from autobahn.twisted.choosereactor import install_reactor
    reactor = install_reactor(options.reactor)

    # eg: crossbar.worker.container.ContainerWorkerSession
    l = options.klass.split('.')
    worker_module, worker_klass = '.'.join(l[:-1]), l[-1]

    # now load the worker module and class
    _mod = importlib.import_module(worker_module)
    klass = getattr(_mod, worker_klass)

    log.info(
        'Started {worker_title} worker "{worker_id}" on node "{node_id}" [{klass} / {python}-{reactor}]',
        worker_title=klass.WORKER_TITLE,
        klass=options.klass,
        node_id=options.node,
        worker_id=options.worker,
        pid=os.getpid(),
        python=platform.python_implementation(),
        reactor=qual(reactor.__class__).split('.')[-1],
    )

    # set process title if requested to
    #
    try:
        import setproctitle
    except ImportError:
        log.debug(
            "Could not set worker process title (setproctitle not installed)")
    else:
        if options.title:
            setproctitle.setproctitle(options.title)
        else:
            setproctitle.setproctitle('crossbar-worker [{}]'.format(
                options.klass))

    # node directory
    #
    options.cbdir = os.path.abspath(options.cbdir)
    os.chdir(options.cbdir)
    # log.msg("Starting from node directory {}".format(options.cbdir))

    from twisted.internet.error import ConnectionDone, ReactorNotRunning
    from autobahn.twisted.websocket import WampWebSocketServerProtocol

    class WorkerServerProtocol(WampWebSocketServerProtocol):
        def connectionLost(self, reason):
            # the behavior here differs slightly depending on whether we're
            # shutting down cleanly or because of "issues"
            if isinstance(reason.value, ConnectionDone):
                was_clean = True
            else:
                was_clean = False

            try:
                # this log message is unlikely to reach the controller (unless
                # only stdin/stdout pipes were lost, but not stderr)
                if was_clean:
                    log.info("Connection to node controller closed cleanly")
                else:
                    log.warn("Connection to node controller lost: {reason}",
                             reason=reason)

                # give the WAMP transport a chance to do its thing
                WampWebSocketServerProtocol.connectionLost(self, reason)
            except:
                # we're in the process of shutting down .. so ignore ..
                pass
            finally:
                # after the connection to the node controller is gone,
                # the worker is orphaned and should exit

                # determine process exit code
                if was_clean:
                    exit_code = 0
                else:
                    exit_code = 1

                # exit the whole worker process when the reactor has stopped
                reactor.addSystemEventTrigger('after', 'shutdown', os._exit,
                                              exit_code)

                # stop the reactor
                try:
                    reactor.stop()
                except ReactorNotRunning:
                    pass

    try:
        # create a WAMP application session factory
        #
        from autobahn.twisted.wamp import ApplicationSessionFactory
        from autobahn.wamp.types import ComponentConfig

        session_config = ComponentConfig(realm=options.realm, extra=options)
        session_factory = ApplicationSessionFactory(session_config)
        session_factory.session = klass

        # create a WAMP-over-WebSocket transport server factory
        #
        from autobahn.twisted.websocket import WampWebSocketServerFactory
        transport_factory = WampWebSocketServerFactory(session_factory,
                                                       u'ws://localhost')
        transport_factory.protocol = WorkerServerProtocol
        transport_factory.setProtocolOptions(failByDrop=False)

        # create a protocol instance and wire up to stdio
        #
        from twisted.python.runtime import platform as _platform
        from twisted.internet import stdio
        proto = transport_factory.buildProtocol(None)
        if _platform.isWindows():
            stdio.StandardIO(proto)
        else:
            stdio.StandardIO(proto, stdout=3)

        # now start reactor loop
        #
        if False:
            log.info("vmprof enabled.")

            import os
            import vmprof

            PROFILE_FILE = 'vmprof_{}.dat'.format(os.getpid())

            outfd = os.open(PROFILE_FILE, os.O_RDWR | os.O_CREAT | os.O_TRUNC)
            vmprof.enable(outfd, period=0.01)

            log.info("Entering event loop...")
            reactor.run()

            vmprof.disable()
        else:
            log.debug("Entering event loop...")
            reactor.run()

    except Exception as e:
        log.info("Unhandled exception: {e}", e=e)
        if reactor.running:
            reactor.addSystemEventTrigger('after', 'shutdown', os._exit, 1)
            reactor.stop()
        else:
            sys.exit(1)
Example #18
def _startlog(options, reactor):
    """
    Start the logging in a way that all the subcommands can use it.
    """
    from twisted.logger import globalLogPublisher
    from txaio import start_logging, set_global_log_level

    loglevel = getattr(options, "loglevel", "info")
    logformat = getattr(options, "logformat", "none")
    colour = getattr(options, "colour", "auto")

    set_global_log_level(loglevel)

    # The log observers (things that print to stderr, file, etc)
    observers = []

    if getattr(options, "logtofile", False):
        # We want to log to a file
        from crossbar._logging import make_logfile_observer

        if not options.logdir:
            logdir = options.cbdir
        else:
            logdir = options.logdir

        logfile = os.path.join(logdir, "node.log")

        if loglevel in ["error", "warn", "info"]:
            show_source = False
        else:
            show_source = True

        observers.append(make_logfile_observer(logfile, show_source))
    else:
        # We want to log to stdout/stderr.
        from crossbar._logging import make_stdout_observer
        from crossbar._logging import make_stderr_observer
        from crossbar._logging import LogLevel

        if colour == "auto":
            if sys.__stdout__.isatty():
                colour = True
            else:
                colour = False
        elif colour == "true":
            colour = True
        else:
            colour = False

        if loglevel == "none":
            # Do no logging!
            pass
        elif loglevel in ["error", "warn", "info"]:
            # Print info to stdout, warn+ to stderr
            observers.append(make_stdout_observer(show_source=False,
                                                  format=logformat,
                                                  colour=colour))
            observers.append(make_stderr_observer(show_source=False,
                                                  format=logformat,
                                                  colour=colour))
        elif loglevel == "debug":
            # Print debug+info to stdout, warn+ to stderr, with the class
            # source
            observers.append(make_stdout_observer(show_source=True,
                                                  levels=(LogLevel.info,
                                                          LogLevel.debug),
                                                  format=logformat,
                                                  colour=colour))
            observers.append(make_stderr_observer(show_source=True,
                                                  format=logformat,
                                                  colour=colour))
        elif loglevel == "trace":
            # Print trace+, with the class source
            observers.append(make_stdout_observer(show_source=True,
                                                  levels=(LogLevel.info,
                                                          LogLevel.debug),
                                                  format=logformat,
                                                  trace=True,
                                                  colour=colour))
            observers.append(make_stderr_observer(show_source=True,
                                                  format=logformat,
                                                  colour=colour))
        else:
            assert False, "Shouldn't ever get here."

    for observer in observers:
        globalLogPublisher.addObserver(observer)

        # Make sure that it goes away
        reactor.addSystemEventTrigger('after', 'shutdown',
                                      globalLogPublisher.removeObserver, observer)

    # Actually start the logger.
    start_logging(None, loglevel)
Example #19
import sys

from twisted.logger import (Logger, LogLevel, LogLevelFilterPredicate,
                            FilteringLogObserver, textFileLogObserver,
                            globalLogPublisher)

__all__ = ["logger"]

predicate = LogLevelFilterPredicate(defaultLogLevel=LogLevel.debug)
observer = FilteringLogObserver(textFileLogObserver(sys.stdout), [predicate])
observer._encoding = "utf-8"
globalLogPublisher.addObserver(observer)

logger = Logger()
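Since Example #19 installs its observer at import time, callers just import the logger and use it; for instance:

from myproject.log import logger  # hypothetical module path for Example #19

logger.info("ready")
logger.debug("details: {x}", x=42)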
Example #20
def test_invalid_workers_tolerance(testerchain,
                                   test_registry,
                                   blockchain_ursulas,
                                   agency,
                                   idle_staker,
                                   token_economics,
                                   ursula_decentralized_test_config
                                   ):
    #
    # Setup
    #
    lonely_blockchain_learner, blockchain_teacher, unsigned, *the_others = list(blockchain_ursulas)
    _, staking_agent, _ = agency

    warnings = []

    def warning_trapper(event):
        if event['log_level'] == LogLevel.warn:
            warnings.append(event)

    # We start with an "idle_staker" (i.e., no tokens in StakingEscrow)
    assert 0 == staking_agent.owned_tokens(idle_staker.checksum_address)

    # Now let's create an active worker for this staker.
    # First, stake something (e.g. the bare minimum)
    amount = token_economics.minimum_allowed_locked
    periods = token_economics.minimum_locked_periods

    # Mock Powerup consumption (Staker)
    testerchain.transacting_power = TransactingPower(account=idle_staker.checksum_address)

    idle_staker.initialize_stake(amount=amount, lock_periods=periods)

    # Stake starts next period (or else signature validation will fail)
    testerchain.time_travel(periods=1)
    idle_staker.stake_tracker.refresh()

    # We create an active worker node for this staker
    worker = make_ursula_for_staker(staker=idle_staker,
                                    worker_address=testerchain.unassigned_accounts[-1],
                                    ursula_config=ursula_decentralized_test_config,
                                    blockchain=testerchain,
                                    commit_now=True,
                                    ursulas_to_learn_about=None)

    # Since we made a commitment, we need to advance one period
    testerchain.time_travel(periods=1)

    # The worker is valid and can be verified (even with the force option)
    worker.verify_node(force=True, network_middleware=MockRestMiddleware(), certificate_filepath="quietorl")
    # In particular, we know that it's bonded to a staker who is really staking.
    assert worker._worker_is_bonded_to_staker(registry=test_registry)
    assert worker._staker_is_really_staking(registry=test_registry)

    # OK. Now we learn about this worker.
    lonely_blockchain_learner.remember_node(worker)

    # The worker already committed one period before. Let's commit to the remaining 29.
    for i in range(29):
        worker.commit_to_next_period()
        testerchain.time_travel(periods=1)

    # The stake period has ended, and the staker wants her tokens back ("when lambo?").
    # She withdraws up to the last penny (well, last nunit, actually).

    # Mock Powerup consumption (Staker)
    testerchain.transacting_power = TransactingPower(account=idle_staker.checksum_address)
    idle_staker.mint()
    testerchain.time_travel(periods=1)
    i_want_it_all = staking_agent.owned_tokens(idle_staker.checksum_address)
    idle_staker.withdraw(i_want_it_all)

    # OK...so...the staker is not staking anymore ...
    assert 0 == staking_agent.owned_tokens(idle_staker.checksum_address)

    # ... but the worker node still is "verified" (since we're not forcing on-chain verification)
    worker.verify_node(network_middleware=MockRestMiddleware(), certificate_filepath="quietorl")

    # If we force, on-chain verification, the worker is of course not verified
    with pytest.raises(worker.NotStaking):
        worker.verify_node(force=True, network_middleware=MockRestMiddleware(), certificate_filepath="quietorl")

    # Let's learn from this invalid node
    lonely_blockchain_learner._current_teacher_node = worker
    globalLogPublisher.addObserver(warning_trapper)
    lonely_blockchain_learner.learn_from_teacher_node()
    # lonely_blockchain_learner.remember_node(worker)  # The same problem occurs if we directly try to remember this node
    globalLogPublisher.removeObserver(warning_trapper)

    # TODO: What should we really check here? (#1075)
    assert len(warnings) == 1
    warning = warnings[-1]['log_format']
    assert str(worker) in warning
    assert "no active stakes" in warning  # TODO: Cleanup logging templates
    assert worker not in lonely_blockchain_learner.known_nodes
Example #21
def create_application(args=None, twistd_user=None, log_group=None):
    home_dir = util.get_home_dir()
    os.chdir(home_dir)
    is_logging_insecure = False

    # parse command-line args, if appropriate
    primary_only_time = None
    if args:
        option_parser = argparse.ArgumentParser()

        option_parser.add_argument(
            "--primary-only",
            type=int,
            nargs="?",
            help=
            "This option disables secondary authentication for the specified number of minutes (default 60)",
            default=None,
            const=60,
        )
        option_parser.add_argument(
            "--logging-insecure",
            action="store_true",
            help=
            "This option enables debug, and prints logs containing passwords and possibly other secrets.",
            default=False,
        )
        options = option_parser.parse_args()
        is_logging_insecure = options.logging_insecure
        primary_only_time = options.primary_only

    config_filename = os.path.join("conf", "authproxy.cfg")
    configuration = config_provider.get_config(config_filename)

    if primary_only_time is not None:
        if primary_only_time > 240:
            print(
                "Primary only mode can only be enabled for a maximum of 4 hours (240 minutes)"
            )
            sys.exit(2)
        else:
            PrimaryOnlyManager.enable_primary_only(primary_only_time)

    main_config = configuration.get_main_section_config()
    if main_config:
        log.msg("Main Configuration:")
        log.config(main_config)

    fips_mode = main_config.get_bool("fips_mode", False)
    if fips_mode:
        fips_manager.enable()

    # Set up our observers
    if is_logging_insecure:
        observers = [textFileLogObserver(sys.stdout)]
    else:
        observers = log_observation.get_observers(main_config, twistd_user,
                                                  log_group)

    for observer in observers:
        globalLogPublisher.addObserver(observer)

    # Global debug mode
    if is_logging_insecure:
        debug_mode = True
    else:
        debug_mode = main_config.get_bool("debug", False)

    http.set_debug(debug_mode)
    http.set_is_logging_insecure(is_logging_insecure)

    # Create main application.
    application = Application("duoauthproxy")
    LogReadyService().setServiceParent(application)

    fips_mode = fips_manager.status()
    if fips_mode:
        log.msg("FIPS mode {0} is enabled with {1}".format(
            fips_mode, fips_manager.get_openssl_version()))
    else:
        log.msg("FIPS mode is not enabled")

    # get ca certs file
    http_ca_certs_file = main_config.get_str("http_ca_certs_file", "")
    if http_ca_certs_file:
        http_ca_certs_file = util.resolve_file_path(http_ca_certs_file)
    else:
        http_ca_certs_file = os.path.join("conf",
                                          const.DEFAULT_HTTP_CERTS_FILE)

    # read ca certs
    if not os.path.isfile(http_ca_certs_file):
        http_ca_certs_file = os.path.join("conf", http_ca_certs_file)
    with open(http_ca_certs_file, "r") as bundle_fp:
        http.set_ca_certs(ssl_verify.load_ca_bundle(bundle_fp))

    # get proxy settings
    http_proxy_host = main_config.get_str("http_proxy_host", "")
    http_proxy_port = main_config.get_int("http_proxy_port", 80)
    if http_proxy_host:
        http.set_proxy(http_proxy_host, http_proxy_port)

    sections = section.parse_sections(configuration, is_logging_insecure)
    module_factory = section.ModuleFactory(sections, application)
    modules_by_type = module_factory.make_modules()

    if not any(modules_by_type.values()):
        raise config_error.ConfigError("No integrations in config file.")

    # Setup forwarding/server pairs by port
    for port, interface in modules_by_type.get("server", []):
        server_networks = {}
        server_names = {}
        for section_name, server_module, server_config in modules_by_type[
                "server"][(port, interface)]:
            client_name = configuration.get_section_client(section_name)

            if not client_name:
                if server_module.Module.no_client:
                    modules_by_type["client"]["no_client"] = None
                    client_name = "no_client"
                else:
                    raise config_error.ConfigError(
                        'Neither module %s nor main has a "client" value' %
                        section_name)

            if section_name.startswith(
                    "ldap_server_auto"
            ) and not client_name.startswith("ad_client"):
                raise config_error.ConfigError(
                    "ad_client is required by ldap_server_auto. No ad_client found in config file. "
                )

            if client_name != "radius_client" and server_config.get_str(
                    "pass_through_attr_names", ""):
                raise config_error.ConfigError(
                    "Can only pass through radius attributes if using a radius client"
                )
            server_instance = server_module.Module(
                server_config, modules_by_type["client"][client_name],
                section_name)
            server_instance.setServiceParent(application)

            if section_name.startswith("radius_server_"):
                server_networks[server_instance] = parse_radius_secrets(
                    server_config).keys()
                server_names[server_instance] = section_name

        if server_names:
            forward_module = forward_serv
            forward_instance = forward_module.Module(
                port=port,
                servers=server_networks,
                server_names=server_names,
                interface=interface,
                debug=debug_mode,
            )
            forward_instance.setServiceParent(application)

    # set user-agent
    sections = ",".join(sorted(set(configuration.list_sections())))
    user_agent = "duoauthproxy/{0} ({1}; Python{2}; {3})".format(
        get_version(), platform.platform(), platform.python_version(),
        sections)
    http.set_user_agent(user_agent)

    # Authproxy uses globalLogPublisher to emit events. Defining a no-op emitter
    # squelches the creation of the unwanted twistd default logging mechanisms.
    def no_op_emitter(eventDict):
        pass

    application.setComponent(ILogObserver, no_op_emitter)

    return application
Example #22
from .defaults import DEFAULT_LOG_FILE
from twisted.logger import (
    ILogObserver, jsonFileLogObserver, FilteringLogObserver,
    LogLevelFilterPredicate, LogLevel, globalLogPublisher
)
from zope.interface import provider

import io


@provider(ILogObserver)
def hendrixObserver(path=DEFAULT_LOG_FILE, log_level=LogLevel.warn):
    json_observer = jsonFileLogObserver(
        io.open(path, 'a')
    )
    return FilteringLogObserver(
        json_observer,
        [LogLevelFilterPredicate(log_level), ]
    )


globalLogPublisher.addObserver(hendrixObserver(log_level=LogLevel.debug))
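Because Example #22 serialises events as JSON, the resulting file can be read back with twisted.logger's own parser; a small sketch, reusing the DEFAULT_LOG_FILE path from the example:

import io

from twisted.logger import eventsFromJSONLogFile

with io.open(DEFAULT_LOG_FILE) as log_file:
    for event in eventsFromJSONLogFile(log_file):
        print(event.get("log_level"), event.get("log_format"))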
Example #23
    def __attrs_post_init__(self) -> None:
        globalLogPublisher.addObserver(self.storeObserver)
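Example #23 shows only the attrs post-init hook; the surrounding class presumably stores the observed events somewhere. A hypothetical minimal host class where storeObserver appends events to a list:

import attr

from twisted.logger import globalLogPublisher


@attr.s
class LogStore(object):  # hypothetical name for the owning class
    events = attr.ib(factory=list)

    def storeObserver(self, event):
        self.events.append(event)

    def __attrs_post_init__(self) -> None:
        globalLogPublisher.addObserver(self.storeObserver)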
Example #24
def _run_command_exec_worker(options, reactor=None, personality=None):
    """
    Entry point into (native) worker processes. This wires up stuff such that
    a worker instance is talking WAMP-over-stdio to the node controller.
    """
    import os
    import sys
    import platform
    import signal

    # https://coverage.readthedocs.io/en/coverage-4.4.2/subprocess.html#measuring-sub-processes
    MEASURING_COVERAGE = False
    if 'COVERAGE_PROCESS_START' in os.environ:
        try:
            import coverage
        except ImportError:
            pass
        else:
            # The following will read the environment variable COVERAGE_PROCESS_START,
            # and that should be set to the .coveragerc file:
            #
            #   export COVERAGE_PROCESS_START=${PWD}/.coveragerc
            #
            coverage.process_startup()
            MEASURING_COVERAGE = True

    # we use an Autobahn utility to import the "best" available Twisted reactor
    from autobahn.twisted.choosereactor import install_reactor
    reactor = install_reactor(options.reactor)

    # make sure logging to something other than stdio is set up _first_
    from crossbar._logging import make_JSON_observer, cb_logging_aware
    from txaio import make_logger, start_logging
    from twisted.logger import globalLogPublisher
    from twisted.python.reflect import qual

    log = make_logger()

    # Print a magic phrase that tells the capturing logger that it supports
    # Crossbar's rich logging
    print(cb_logging_aware, file=sys.__stderr__)
    sys.__stderr__.flush()

    flo = make_JSON_observer(sys.__stderr__)
    globalLogPublisher.addObserver(flo)

    # Ignore SIGINT so we get consistent behavior on control-C versus
    # sending SIGINT to the controller process. When the controller is
    # shutting down, it sends TERM to all its children but ctrl-C
    # handling will send a SIGINT to all the processes in the group
    # (so then the controller sends a TERM but the child already or
    # will very shortly get a SIGINT as well). Twisted installs signal
    # handlers, but not for SIGINT if there's already a custom one
    # present.
    def ignore(sig, frame):
        log.debug("Ignoring SIGINT in worker.")
    signal.signal(signal.SIGINT, ignore)

    # actually begin logging
    start_logging(None, options.loglevel)

    # get personality klass, eg "crossbar.personality.Personality"
    l = options.personality.split('.')
    personality_module, personality_klass = '.'.join(l[:-1]), l[-1]

    # now load the personality module and class
    _mod = importlib.import_module(personality_module)
    Personality = getattr(_mod, personality_klass)

    # get worker klass, eg "crossbar.worker.container.ContainerController"
    l = options.klass.split('.')
    worker_module, worker_klass = '.'.join(l[:-1]), l[-1]

    # now load the worker module and class
    _mod = importlib.import_module(worker_module)
    klass = getattr(_mod, worker_klass)

    log.info(
        'Starting worker "{worker_id}" for node "{node_id}" with personality "{personality}" {worker_class}',
        worker_id=options.worker,
        node_id=options.node,
        personality=Personality.NAME,
        worker_class=hltype(klass),
    )
    log.info(
        'Running as PID {pid} on {python}-{reactor}',
        pid=os.getpid(),
        python=platform.python_implementation(),
        reactor=qual(reactor.__class__).split('.')[-1],
    )
    if MEASURING_COVERAGE:
        log.info(hl('Code coverage measurements enabled (coverage={coverage_version}).', color='green', bold=True),
                 coverage_version=coverage.__version__)

    # set process title if requested to
    #
    try:
        import setproctitle
    except ImportError:
        log.debug("Could not set worker process title (setproctitle not installed)")
    else:
        if options.title:
            setproctitle.setproctitle(options.title)
        else:
            setproctitle.setproctitle('crossbar-worker [{}]'.format(options.klass))

    # node directory
    #
    options.cbdir = os.path.abspath(options.cbdir)
    os.chdir(options.cbdir)
    # log.msg("Starting from node directory {}".format(options.cbdir))

    from twisted.internet.error import ConnectionDone, ReactorNotRunning
    from autobahn.twisted.websocket import WampWebSocketServerProtocol

    class WorkerServerProtocol(WampWebSocketServerProtocol):

        def connectionLost(self, reason):
            # the behavior here differs slightly depending on whether we're
            # shutting down cleanly or because of "issues"
            if isinstance(reason.value, ConnectionDone):
                was_clean = True
            else:
                was_clean = False

            try:
                # this log message is unlikely to reach the controller (unless
                # only stdin/stdout pipes were lost, but not stderr)
                if was_clean:
                    log.info("Connection to node controller closed cleanly")
                else:
                    log.warn("Connection to node controller lost: {reason}", reason=reason)

                # give the WAMP transport a chance to do its thing
                WampWebSocketServerProtocol.connectionLost(self, reason)
            except:
                # we're in the process of shutting down .. so ignore ..
                pass
            finally:
                # after the connection to the node controller is gone,
                # the worker is orphaned and should exit

                # determine process exit code
                if was_clean:
                    exit_code = 0
                else:
                    exit_code = 1

                # exit the whole worker process when the reactor has stopped
                reactor.addSystemEventTrigger('after', 'shutdown', os._exit, exit_code)

                # stop the reactor
                try:
                    reactor.stop()
                except ReactorNotRunning:
                    pass

    try:
        # define a WAMP application session factory
        #
        from autobahn.wamp.types import ComponentConfig

        def make_session():
            session_config = ComponentConfig(realm=options.realm, extra=options)
            session = klass(config=session_config, reactor=reactor, personality=Personality)
            return session

        # create a WAMP-over-WebSocket transport server factory
        #
        from autobahn.twisted.websocket import WampWebSocketServerFactory
        transport_factory = WampWebSocketServerFactory(make_session, u'ws://localhost')
        transport_factory.protocol = WorkerServerProtocol
        transport_factory.setProtocolOptions(failByDrop=False)

        # create a protocol instance and wire up to stdio
        #
        from twisted.python.runtime import platform as _platform
        from twisted.internet import stdio
        proto = transport_factory.buildProtocol(None)
        if _platform.isWindows():
            stdio.StandardIO(proto)
        else:
            stdio.StandardIO(proto, stdout=3)

        # now start reactor loop
        #
        if False:
            log.info("vmprof enabled.")

            import os
            import vmprof

            PROFILE_FILE = 'vmprof_{}.dat'.format(os.getpid())

            outfd = os.open(PROFILE_FILE, os.O_RDWR | os.O_CREAT | os.O_TRUNC)
            vmprof.enable(outfd, period=0.01)

            log.info(hl('Entering event reactor ...', color='cyan', bold=True))
            reactor.run()

            vmprof.disable()
        else:
            log.info(hl('Entering event reactor ...', color='cyan', bold=True))
            reactor.run()

    except Exception as e:
        log.info("Unhandled exception: {e}", e=e)
        if reactor.running:
            reactor.addSystemEventTrigger('after', 'shutdown', os._exit, 1)
            reactor.stop()
        else:
            sys.exit(1)
Exemple #25
0
def test_emit_warning_upon_new_version(ursula_federated_test_config, caplog):
    nodes = make_federated_ursulas(ursula_config=ursula_federated_test_config,
                                   quantity=3,
                                   know_each_other=False)
    teacher, learner, new_node = nodes

    learner.remember_node(teacher)
    teacher.remember_node(learner)
    teacher.remember_node(new_node)

    new_node.TEACHER_VERSION = learner.LEARNER_VERSION + 1

    warnings = []

    def warning_trapper(event):
        if event['log_level'] == LogLevel.warn:
            warnings.append(event)

    globalLogPublisher.addObserver(warning_trapper)
    learner.learn_from_teacher_node()

    assert len(warnings) == 1
    assert warnings[0]['log_format'] == learner.unknown_version_message.format(
        new_node, new_node.TEACHER_VERSION, learner.LEARNER_VERSION)

    # Now let's go a little further: make the version totally unrecognizable.

    # First, there's enough garbage to at least scrape a potential checksum address
    fleet_snapshot = os.urandom(32 + 4)
    random_bytes = os.urandom(50)  # lots of garbage in here
    future_version = learner.LEARNER_VERSION + 42
    version_bytes = future_version.to_bytes(2, byteorder="big")
    crazy_bytes = fleet_snapshot + VariableLengthBytestring(version_bytes +
                                                            random_bytes)
    signed_crazy_bytes = bytes(teacher.stamp(crazy_bytes))

    Response = namedtuple("MockResponse", ("content", "status_code"))
    response = Response(content=signed_crazy_bytes + crazy_bytes,
                        status_code=200)

    learner._current_teacher_node = teacher
    learner.network_middleware.get_nodes_via_rest = lambda *args, **kwargs: response
    learner.learn_from_teacher_node()

    # If you really try, you can read a node representation from the garbage
    accidental_checksum = to_checksum_address(random_bytes[:20])
    accidental_nickname = nickname_from_seed(accidental_checksum)[0]
    accidental_node_repr = Character._display_name_template.format(
        "Ursula", accidental_nickname, accidental_checksum)

    assert len(warnings) == 2
    assert warnings[1]['log_format'] == learner.unknown_version_message.format(
        accidental_node_repr, future_version, learner.LEARNER_VERSION)

    # This time, however, there's not enough garbage to assume there's a checksum address...
    random_bytes = os.urandom(2)
    crazy_bytes = fleet_snapshot + VariableLengthBytestring(version_bytes +
                                                            random_bytes)
    signed_crazy_bytes = bytes(teacher.stamp(crazy_bytes))

    response = Response(content=signed_crazy_bytes + crazy_bytes,
                        status_code=200)

    learner._current_teacher_node = teacher
    learner.learn_from_teacher_node()

    assert len(warnings) == 3
    # ...so this time we get a "really unknown version message"
    assert warnings[2][
        'log_format'] == learner.really_unknown_version_message.format(
            future_version, learner.LEARNER_VERSION)

    globalLogPublisher.removeObserver(warning_trapper)
Exemple #26
0
 def activate(cls, host):
     cls.producer = KafkaProducer(bootstrap_servers=host)
     globalLogPublisher.addObserver(kafka_observer)
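The kafka_observer referenced above is not shown in this excerpt. A minimal sketch of what it might look like (the broker address and topic name are assumptions; any callable accepting the event dict is a valid observer):

from kafka import KafkaProducer
from twisted.logger import formatEvent, globalLogPublisher

producer = KafkaProducer(bootstrap_servers="localhost:9092")

def kafka_observer(event):
    # render the structured event to text and ship it to a Kafka topic
    producer.send("twisted-logs", formatEvent(event).encode("utf-8"))

globalLogPublisher.addObserver(kafka_observer)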
Exemple #27
0
def _run_command_exec_worker(options, reactor=None, personality=None):
    """
    Entry point into (native) worker processes. This wires up stuff such that
    a worker instance is talking WAMP-over-stdio to the node controller.
    """
    import os
    import sys
    import platform
    import signal
    import importlib

    # vmprof is used only when --vmprof profiling is requested below; guard
    # the import so the worker still starts without it installed
    try:
        import vmprof
    except ImportError:
        vmprof = None

    # https://coverage.readthedocs.io/en/coverage-4.4.2/subprocess.html#measuring-sub-processes
    MEASURING_COVERAGE = False
    if 'COVERAGE_PROCESS_START' in os.environ:
        try:
            import coverage
        except ImportError:
            pass
        else:
            # The following will read the environment variable COVERAGE_PROCESS_START,
            # and that should be set to the .coveragerc file:
            #
            #   export COVERAGE_PROCESS_START=${PWD}/.coveragerc
            #
            coverage.process_startup()
            MEASURING_COVERAGE = True

    # we use an Autobahn utility to import the "best" available Twisted reactor
    from autobahn.twisted.choosereactor import install_reactor
    reactor = install_reactor(options.reactor)

    # make sure logging to something other than stdio is set up _first_
    from crossbar._logging import make_JSON_observer, cb_logging_aware
    # hl / hltype / term_print helpers used below (module path assumed; it
    # may differ across crossbar versions)
    from crossbar._util import hl, hltype, term_print
    from txaio import make_logger, start_logging
    from twisted.logger import globalLogPublisher
    from twisted.python.reflect import qual

    log = make_logger()

    # Print a magic phrase that tells the capturing logger that it supports
    # Crossbar's rich logging
    print(cb_logging_aware, file=sys.__stderr__)
    sys.__stderr__.flush()

    flo = make_JSON_observer(sys.__stderr__)
    globalLogPublisher.addObserver(flo)

    # Ignore SIGINT so we get consistent behavior on control-C versus
    # sending SIGINT to the controller process. When the controller is
    # shutting down, it sends TERM to all its children, but ctrl-C
    # handling will send a SIGINT to all the processes in the group
    # (so the controller sends a TERM, but the child has already received,
    # or will very shortly receive, a SIGINT as well). Twisted installs
    # signal handlers, but not for SIGINT if a custom one is already
    # present.
    def ignore(sig, frame):
        log.debug("Ignoring SIGINT in worker.")

    signal.signal(signal.SIGINT, ignore)

    # actually begin logging
    start_logging(None, options.loglevel)

    # get personality klass, eg "crossbar.personality.Personality"
    l = options.personality.split('.')
    personality_module, personality_klass = '.'.join(l[:-1]), l[-1]

    # now load the personality module and class
    _mod = importlib.import_module(personality_module)
    Personality = getattr(_mod, personality_klass)

    # get worker klass, eg "crossbar.worker.container.ContainerController"
    l = options.klass.split('.')
    worker_module, worker_klass = '.'.join(l[:-1]), l[-1]

    # now load the worker module and class
    _mod = importlib.import_module(worker_module)
    klass = getattr(_mod, worker_klass)

    log.info(
        'Starting worker "{worker_id}" for node "{node_id}" on realm "{realm}" with personality "{personality}" {worker_class}',
        worker_id=options.worker,
        node_id=options.node,
        realm=options.realm,
        personality=Personality.NAME,
        worker_class=hltype(klass),
    )
    log.info(
        'Running as PID {pid} on {python}-{reactor}',
        pid=os.getpid(),
        python=platform.python_implementation(),
        reactor=qual(reactor.__class__).split('.')[-1],
    )
    if MEASURING_COVERAGE:
        log.info(hl(
            'Code coverage measurements enabled (coverage={coverage_version}).',
            color='green',
            bold=True),
                 coverage_version=coverage.__version__)

    # set process title if requested to
    #
    try:
        import setproctitle
    except ImportError:
        log.debug(
            "Could not set worker process title (setproctitle not installed)")
    else:
        if options.title:
            setproctitle.setproctitle(options.title)
        else:
            setproctitle.setproctitle('crossbar-worker [{}]'.format(
                options.klass))

    # node directory
    #
    options.cbdir = os.path.abspath(options.cbdir)
    os.chdir(options.cbdir)
    # log.msg("Starting from node directory {}".format(options.cbdir))

    from twisted.internet.error import ConnectionDone, ReactorNotRunning
    from autobahn.twisted.websocket import WampWebSocketServerProtocol

    class WorkerServerProtocol(WampWebSocketServerProtocol):
        def connectionLost(self, reason):
            # the behavior here differs slightly depending on whether we're
            # shutting down in an orderly fashion or because of "issues"
            was_clean = isinstance(reason.value, ConnectionDone)

            try:
                # this log message is unlikely to reach the controller (unless
                # only stdin/stdout pipes were lost, but not stderr)
                if was_clean:
                    log.info("Connection to node controller closed cleanly")
                else:
                    log.warn("Connection to node controller lost: {reason}",
                             reason=reason)

                # give the WAMP transport a chance to do its thing
                WampWebSocketServerProtocol.connectionLost(self, reason)
            except Exception:
                # we're in the process of shutting down, so ignore errors here
                pass
            finally:
                # after the connection to the node controller is gone,
                # the worker is "orphaned" and should exit

                # determine process exit code
                if was_clean:
                    exit_code = 0
                else:
                    exit_code = 1

                # exit the whole worker process when the reactor has stopped
                reactor.addSystemEventTrigger('after', 'shutdown', os._exit,
                                              exit_code)

                # stop the reactor
                try:
                    reactor.stop()
                except ReactorNotRunning:
                    pass

    # if vmprof profiling is enabled via command line option, this dict will
    # carry the file descriptor that vmprof writes its profile data to
    _vm_prof = {
        # we stash the FD in a dict because FDs are plain ints and the nested
        # closures below couldn't otherwise rebind an outer variable
        # (this code also ran on Python 2, which lacks 'nonlocal')
        'outfd': None
    }

    # https://twistedmatrix.com/documents/current/api/twisted.internet.interfaces.IReactorCore.html
    # Each "system event" in Twisted, such as 'startup', 'shutdown', and 'persist', has 3 phases:
    # 'before', 'during', and 'after' (in that order, of course). These events will be fired
    # internally by the Reactor.

    def before_reactor_started():
        term_print('CROSSBAR[{}]:REACTOR_STARTING'.format(options.worker))

    def after_reactor_started():
        term_print('CROSSBAR[{}]:REACTOR_STARTED'.format(options.worker))

        if options.vmprof:
            outfn = os.path.join(
                options.cbdir,
                '.vmprof-worker-{}-{}.dat'.format(options.worker, os.getpid()))
            _vm_prof['outfd'] = os.open(outfn,
                                        os.O_RDWR | os.O_CREAT | os.O_TRUNC)
            vmprof.enable(_vm_prof['outfd'], period=0.01)
            term_print('CROSSBAR[{}]:VMPROF_ENABLED:{}'.format(
                options.worker, outfn))

    def before_reactor_stopped():
        term_print('CROSSBAR[{}]:REACTOR_STOPPING'.format(options.worker))

        if options.vmprof and _vm_prof['outfd']:
            vmprof.disable()
            term_print('CROSSBAR[{}]:VMPROF_DISABLED'.format(options.worker))

    def after_reactor_stopped():
        # FIXME: we are indeed reaching this line; however, the log output
        # does not work (it also doesn't work using plain old print), and
        # we don't know why.

        # one theory: by the time this line is reached, Twisted has already
        # closed the stdout/stderr pipes. Hence the trick below: write
        # directly to the process' controlling terminal
        # https://unix.stackexchange.com/a/91716/52500
        term_print('CROSSBAR[{}]:REACTOR_STOPPED'.format(options.worker))

    reactor.addSystemEventTrigger('before', 'startup', before_reactor_started)
    reactor.addSystemEventTrigger('after', 'startup', after_reactor_started)
    reactor.addSystemEventTrigger('before', 'shutdown', before_reactor_stopped)
    reactor.addSystemEventTrigger('after', 'shutdown', after_reactor_stopped)

    try:
        # define a WAMP application session factory
        #
        from autobahn.wamp.types import ComponentConfig

        def make_session():
            session_config = ComponentConfig(realm=options.realm,
                                             extra=options)
            session = klass(config=session_config,
                            reactor=reactor,
                            personality=Personality)
            return session

        # create a WAMP-over-WebSocket transport server factory
        #
        from autobahn.twisted.websocket import WampWebSocketServerFactory
        transport_factory = WampWebSocketServerFactory(make_session,
                                                       u'ws://localhost')
        transport_factory.protocol = WorkerServerProtocol
        transport_factory.setProtocolOptions(failByDrop=False)

        # create a protocol instance and wire up to stdio
        #
        from twisted.python.runtime import platform as _platform
        from twisted.internet import stdio
        proto = transport_factory.buildProtocol(None)
        if _platform.isWindows():
            stdio.StandardIO(proto)
        else:
            stdio.StandardIO(proto, stdout=3)

        # now start reactor loop
        #
        log.info(hl('Entering event reactor ...', color='green', bold=True))
        reactor.run()

    except Exception as e:
        log.info("Unhandled exception: {e}", e=e)
        if reactor.running:
            reactor.addSystemEventTrigger('after', 'shutdown', os._exit, 1)
            reactor.stop()
        else:
            sys.exit(1)
Exemple #28
0
def _start_logging(options, reactor):
    """
    Start the logging in a way that all the subcommands can use it.
    """
    loglevel = getattr(options, "loglevel", "info")
    logformat = getattr(options, "logformat", "none")
    color = getattr(options, "color", "auto")

    set_global_log_level(loglevel)

    # The log observers (things that print to stderr, file, etc)
    observers = []

    if getattr(options, "logtofile", False):
        # We want to log to a file
        if not options.logdir:
            logdir = options.cbdir
        else:
            logdir = options.logdir

        logfile = os.path.join(logdir, "node.log")

        if loglevel in ["error", "warn", "info"]:
            show_source = False
        else:
            show_source = True

        observers.append(make_logfile_observer(logfile, show_source))
    else:
        # We want to log to stdout/stderr.

        if color == "auto":
            if sys.__stdout__.isatty():
                color = True
            else:
                color = False
        elif color == "true":
            color = True
        else:
            color = False

        if loglevel == "none":
            # Do no logging!
            pass
        elif loglevel in ["error", "warn", "info"]:
            # Print info to stdout, warn+ to stderr
            observers.append(
                make_stdout_observer(show_source=False,
                                     format=logformat,
                                     color=color))
            observers.append(
                make_stderr_observer(show_source=False,
                                     format=logformat,
                                     color=color))
        elif loglevel == "debug":
            # Print debug+info to stdout, warn+ to stderr, with the class
            # source
            observers.append(
                make_stdout_observer(show_source=True,
                                     levels=(LogLevel.info, LogLevel.debug),
                                     format=logformat,
                                     color=color))
            observers.append(
                make_stderr_observer(show_source=True,
                                     format=logformat,
                                     color=color))
        elif loglevel == "trace":
            # Print trace+, with the class source
            observers.append(
                make_stdout_observer(show_source=True,
                                     levels=(LogLevel.info, LogLevel.debug),
                                     format=logformat,
                                     trace=True,
                                     color=color))
            observers.append(
                make_stderr_observer(show_source=True,
                                     format=logformat,
                                     color=color))
        else:
            assert False, "Shouldn't ever get here."

    for observer in observers:
        globalLogPublisher.addObserver(observer)

        # Make sure that it goes away
        reactor.addSystemEventTrigger('after', 'shutdown',
                                      globalLogPublisher.removeObserver,
                                      observer)

    # Actually start the logger.
    start_logging(None, loglevel)
Exemple #29
0
def enable_twisted_log_observer():
    globalLogPublisher.addObserver(twisted_log_observer)
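twisted_log_observer is defined elsewhere in that project. Because a Twisted log observer is just a callable that accepts the event dict, a minimal stand-in (purely illustrative) could be:

from twisted.logger import formatEvent

def twisted_log_observer(event):
    # print a human-readable rendering of the structured event
    print(formatEvent(event))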
Exemple #30
0
 def startService(self):
     self.stdlib_cleanup = _stdlib_logging_to_eliot_configuration(getLogger())
     self.twisted_observer = _TwistedLoggerToEliotObserver()
     globalLogPublisher.addObserver(self.twisted_observer)
     add_destinations(*self.destinations)
     return Service.startService(self)
Exemple #31
0
    def requestAvatar(self, avatarId, mind, *interfaces):
        if IFTPShell in interfaces:
            return (IFTPShell, FTPShell(filepath.FilePath(VIDEOROOT)),
                    lambda: None)
        if IMessageDelivery in interfaces:
            return (IMessageDelivery, ConsoleMessageDelivery(), lambda: None)

        raise NotImplementedError(
            "Unable to provide avatar for interfaces provided ({})".format(
                interfaces))


log = Logger("amcrest")
predicate = LogLevelFilterPredicate(LogLevel.warn)
predicate.setLogLevelForNamespace("amcrest", LogLevel.debug)
globalLogPublisher.addObserver(
    FilteringLogObserver(textFileLogObserver(sys.stderr), (predicate, )))

portal = Portal(AmcrestRealm(), [
    FilePasswordDB(
        os.path.join(os.path.dirname(os.path.abspath(__file__)),
                     "passwords.txt"))
])

ftpfactory = FTPFactory(portal)
ftpfactory.allowAnonymous = False
ftpfactory.protocol = MonitoredFTP

hafactory = HAFactory()

reactor.listenTCP(2121, ftpfactory)
reactor.listenTCP(2525, ConsoleSMTPFactory(portal))
Exemple #32
0
    def apply_logging(self):
        # We're using twisted logging only for IO.
        from twisted.logger import FileLogObserver
        from twisted.logger import Logger, LogLevel, globalLogPublisher

        LOGLEVEL_TWISTED_MAP = {
            logging.DEBUG: LogLevel.debug,
            logging.INFO: LogLevel.info,
            logging.WARN: LogLevel.warn,
            logging.ERROR: LogLevel.error,
            logging.CRITICAL: LogLevel.critical,
        }

        TWISTED_LOGLEVEL_MAP = {v: k for k, v in LOGLEVEL_TWISTED_MAP.items()}

        loggers = {}

        config = self

        class TwistedHandler(logging.Handler):
            if config.log_rss:
                if _meminfo is None:
                    def _modify_record(self, record):
                        record.msg = '[psutil?] %s' % record.msg
            else:
                def _modify_record(self, record):
                    meminfo = _meminfo()
                    record.msg = '[%.2f] %s' % (
                        meminfo.rss / 1024.0 ** 2, record.msg)
            else:
                def _modify_record(self, record):
                    pass

            def emit(self, record):
                assert isinstance(record, logging.LogRecord)

                record.l = LOGLEVEL_MAP_ABB.get(record.levelno, "?")

                self._modify_record(record)

                _logger = loggers.get(record.name, None)
                if _logger is None:
                    _logger = loggers[record.name] = Logger(record.name)

                t = self.format(record)

                if six.PY2 and isinstance(t, str):
                    # Libraries outside of our control can throw exceptions with
                    # a "str with known encoding", which can cause issues down
                    # the logging pipeline. Here we re-decode it defensively.
                    t = t.decode('utf8', errors='replace')

                _logger.emit(LOGLEVEL_TWISTED_MAP[record.levelno], log_text=t)

        if self.logger_dest is not None:
            from twisted.python.logfile import DailyLogFile

            class DailyLogWithLeadingZero(DailyLogFile):
                def suffix(self, tupledate):
                    # this closely imitates the same function from the parent class
                    try:
                        return '-'.join("%02d" % i for i in tupledate)
                    except Exception:
                        # fall back to treating the argument as a float unixtime
                        return '-'.join(
                            "%02d" % i for i in self.toDate(tupledate))

            self.logger_dest = abspath(self.logger_dest)
            if access(dirname(self.logger_dest), os.R_OK | os.W_OK):
                log_dest = DailyLogWithLeadingZero.fromFullPath(
                    self.logger_dest)
            else:
                Logger().warn("%r is not accessible. We need read/write "
                              "access to it to rotate logs."
                              % dirname(self.logger_dest))
                log_dest = open(self.logger_dest, 'w+')

            formatter = logging.Formatter(self.LOGGING_PROD_FORMAT)

        else:
            formatter = logging.Formatter(self.LOGGING_DEVEL_FORMAT)
            log_dest = sys.stdout

            try:
                import colorama
                colorama.init()
                logger.debug("colorama loaded.")

            except Exception as e:
                logger.debug("colorama not loaded: %r" % e)

        def record_as_string(record):
            if 'log_failure' in record:
                failure = record['log_failure']
                try:
                    s = pformat(vars(failure.value))
                except TypeError:
                    # vars() argument must have __dict__ attribute
                    s = repr(failure.value)
                return "%s: %s" % (failure.type, s)

            if 'log_text' in record:
                return record['log_text'] + "\n"

            if 'log_format' in record:
                level = record.get('log_level', LogLevel.debug)
                level = LOGLEVEL_MAP_ABB[TWISTED_LOGLEVEL_MAP[level]]

                text = record['log_format'].format(**record) + "\n"
                ns = record.get('log_namespace', "???")
                lineno = 0
                record = logging.LogRecord('?', level, ns, lineno, text,
                                           None, None)
                record.l = level
                record.module = ns.split('.')[-2]

                return formatter.format(record)

            if 'log_io' in record:
                return record['log_io'] + "\n"

            if 'message' in record:
                return record['message'] + "\n"

            return pformat(record)

        observer = FileLogObserver(log_dest, record_as_string)
        globalLogPublisher.addObserver(observer)
        self._clear_other_observers(globalLogPublisher, observer)

        handler = TwistedHandler()
        handler.setFormatter(formatter)
        logging.getLogger().addHandler(handler)

        self.pre_logging_apply()

        for l in self._loggers or []:
            l.apply()

        return self
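A condensed, self-contained sketch of the same stdlib-to-Twisted bridge (illustrative only; the full apply_logging above adds RSS tracking, file rotation, and custom formatting on top of this):

import logging
from twisted.logger import Logger, LogLevel

_LEVELS = {
    logging.DEBUG: LogLevel.debug,
    logging.INFO: LogLevel.info,
    logging.WARNING: LogLevel.warn,
    logging.ERROR: LogLevel.error,
    logging.CRITICAL: LogLevel.critical,
}

class MinimalTwistedHandler(logging.Handler):
    _log = Logger("stdlib")

    def emit(self, record):
        # map the stdlib level to a Twisted one and re-emit the formatted text
        level = _LEVELS.get(record.levelno, LogLevel.info)
        self._log.emit(level, log_text=self.format(record))

logging.getLogger().addHandler(MinimalTwistedHandler())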
Exemple #33
0
def doctor_decrypt(hash_key):
    globalLogPublisher.addObserver(SimpleObserver())
    SEEDNODE_URL = 'localhost:11501'

    TEMP_DOCTOR_DIR = "{}/doctor-files".format(
        os.path.dirname(os.path.abspath(__file__)))
    shutil.rmtree(TEMP_DOCTOR_DIR, ignore_errors=True)

    ursula = Ursula.from_seed_and_stake_info(seed_uri=SEEDNODE_URL,
                                             federated_only=True,
                                             minimum_stake=0)

    from doctor_keys import get_doctor_privkeys

    doctor_keys = get_doctor_privkeys()

    bob_enc_keypair = DecryptingKeypair(private_key=doctor_keys["enc"])
    bob_sig_keypair = SigningKeypair(private_key=doctor_keys["sig"])
    enc_power = DecryptingPower(keypair=bob_enc_keypair)
    sig_power = SigningPower(keypair=bob_sig_keypair)
    power_ups = [enc_power, sig_power]

    print("Creating the Doctor ...")

    doctor = Bob(
        is_me=True,
        federated_only=True,
        crypto_power_ups=power_ups,
        start_learning_now=True,
        abort_on_learning_error=True,
        known_nodes=[ursula],
        save_metadata=False,
        network_middleware=RestMiddleware(),
    )

    print("Doctor = ", doctor)

    with open("policy-metadata.json", 'r') as f:
        policy_data = json.load(f)

    policy_pubkey = UmbralPublicKey.from_bytes(
        bytes.fromhex(policy_data["policy_pubkey"]))
    alices_sig_pubkey = UmbralPublicKey.from_bytes(
        bytes.fromhex(policy_data["alice_sig_pubkey"]))
    label = policy_data["label"].encode()

    print("The Doctor joins policy for label '{}'".format(
        label.decode("utf-8")))
    doctor.join_policy(label, alices_sig_pubkey)

    ipfs_api = ipfsapi.connect()
    file = ipfs_api.get(hash_key)
    print(file)
    os.rename(hash_key, 'patient_details.msgpack')
    data = msgpack.load(open("patient_details.msgpack", "rb"), raw=False)
    message_kits = (UmbralMessageKit.from_bytes(k) for k in data['kits'])

    data_source = DataSource.from_public_keys(
        policy_public_key=policy_pubkey,
        datasource_public_key=data['data_source'],
        label=label)
    complete_message = []
    for message_kit in message_kits:
        print(message_kit)
        try:
            start = timer()
            retrieved_plaintexts = doctor.retrieve(
                message_kit=message_kit,
                data_source=data_source,
                alice_verifying_key=alices_sig_pubkey)
            end = timer()
            plaintext = msgpack.loads(retrieved_plaintexts[0], raw=False)
            complete_message.append(plaintext)
            print(plaintext)
            # with open("details.json", "w") as write_file:
            #     json.dump(plaintext, write_file)
        except Exception as e:
            traceback.print_exc()
    with open("details.json", "w") as write_file:
        json.dump(complete_message, write_file)
    return complete_message
Exemple #34
0
 def start_json_file_logging(cls):
     globalLogPublisher.addObserver(get_json_file_observer())
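get_json_file_observer is not shown in this excerpt; a plausible factory (the path is an assumption) built on twisted.logger.jsonFileLogObserver:

import io
from twisted.logger import jsonFileLogObserver

def get_json_file_observer(path="events.log.json"):
    # append structured events as JSON records, one per line
    return jsonFileLogObserver(io.open(path, "a"))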
Exemple #35
0
        output.send(vals)


def inputActions(inputs):
    for input in inputs:
        input.fetch(outputActions)


def shutdown():
    print("Shutdown")
    global SolaxModbusInverters
    for inverter in SolaxModbusInverters:
        inverter.shutdown()


globalLogPublisher.addObserver(analyze)

with open("config.toml") as conffile:
    global config
    config = toml.loads(conffile.read())

#     pp.pprint(config)

global outputs
outputs = []

global SolaxModbusInverters
SolaxModbusInverters = []

if 'emoncms' in config:
    print("Setting up EmonCMS")
Exemple #36
0
 def start_console_logging(cls):
     globalLogPublisher.addObserver(console_observer)
Exemple #37
0
def run_doc():

    globalLogPublisher.addObserver(SimpleObserver())

    ######################
    # Boring setup stuff #
    ######################

    SEEDNODE_URL = 'localhost:11501'

    # TODO: path joins?
    TEMP_DOCTOR_DIR = "{}/doctor-files".format(
        os.path.dirname(os.path.abspath(__file__)))

    # Remove previous demo files and create new ones
    shutil.rmtree(TEMP_DOCTOR_DIR, ignore_errors=True)

    ursula = Ursula.from_seed_and_stake_info(seed_uri=SEEDNODE_URL,
                                             federated_only=True,
                                             minimum_stake=0)

    # To create a Bob, we need the doctor's private keys previously generated.

    doctor_keys = get_doctor_privkeys()

    bob_enc_keypair = DecryptingKeypair(private_key=doctor_keys["enc"])
    bob_sig_keypair = SigningKeypair(private_key=doctor_keys["sig"])
    enc_power = DecryptingPower(keypair=bob_enc_keypair)
    sig_power = SigningPower(keypair=bob_sig_keypair)
    power_ups = [enc_power, sig_power]

    print("Creating the Doctor ...")

    doctor = Bob(
        is_me=True,
        federated_only=True,
        crypto_power_ups=power_ups,
        start_learning_now=True,
        abort_on_learning_error=True,
        known_nodes=[ursula],
        save_metadata=False,
        network_middleware=RestMiddleware(),
    )

    print("Doctor = ", doctor)

    # Let's join the policy generated by Alicia. We just need some info about it.
    with open("policy-metadata.json", 'r') as f:
        policy_data = json.load(f)

    policy_pubkey = UmbralPublicKey.from_bytes(
        bytes.fromhex(policy_data["policy_pubkey"]))
    alices_sig_pubkey = UmbralPublicKey.from_bytes(
        bytes.fromhex(policy_data["alice_sig_pubkey"]))
    label = policy_data["label"].encode()

    print("The Doctor joins policy for label '{}'".format(
        label.decode("utf-8")))
    doctor.join_policy(label, alices_sig_pubkey)

    # Now that the Doctor joined the policy in the NuCypher network,
    # he can retrieve encrypted data which he can decrypt with his private key.
    # But first we need some encrypted data!
    # Let's read the file produced by the heart monitor and unpack the MessageKits,
    # which are the individual ciphertexts.
    data = msgpack.load(open("heart_data.msgpack", "rb"), raw=False)
    message_kits = (UmbralMessageKit.from_bytes(k) for k in data['kits'])

    # The doctor also needs to create a view of the Data Source from its public keys
    data_source = Enrico.from_public_keys({SigningPower: data['data_source']},
                                          policy_encrypting_key=policy_pubkey)

    # Now he can ask the NuCypher network to get a re-encrypted version of each MessageKit.
    for message_kit in message_kits:
        try:
            start = timer()
            retrieved_plaintexts = doctor.retrieve(
                label=label,
                message_kit=message_kit,
                data_source=data_source,
                alice_verifying_key=alices_sig_pubkey)
            end = timer()

            plaintext = msgpack.loads(retrieved_plaintexts[0], raw=False)

            # Now we can get the heart rate and the associated timestamp,
            # generated by the heart rate monitor.
            heart_rate = plaintext['heart_rate']
            timestamp = maya.MayaDT(plaintext['timestamp'])

            # This code block simply pretty prints the heart rate info
            terminal_size = shutil.get_terminal_size().columns
            max_width = min(terminal_size, 120)
            columns = max_width - 12 - 27
            scale = columns / 40
            scaled_heart_rate = int(scale * (heart_rate - 60))
            retrieval_time = "Retrieval time: {:8.2f} ms".format(1000 *
                                                                 (end - start))
            line = ("-" * scaled_heart_rate) + "❤︎ ({} BPM)".format(heart_rate)
            line = line.ljust(max_width - 27, " ") + retrieval_time
            print(line)
        except Exception as e:
            # We just want to know what went wrong and continue the demo
            traceback.print_exc()
Exemple #38
0
    threadpool=thpool)
is_logging = True
is_log2file = False
if is_logging:
    if is_log2file:
        DEFAULT_LOG_FILE = os.path.join(settings.BASE_DIR,
                                        'default-hendrix.log')

        @provider(ILogObserver)
        def FileObserver(path=DEFAULT_LOG_FILE, log_level=LogLevel.warn):
            file_observer = textFileLogObserver(io.open(path, 'at'))
            return FilteringLogObserver(file_observer, [
                LogLevelFilterPredicate(log_level),
            ])

        log_path = os.path.join(settings.BASE_DIR, 'hendrix-debug.log')
        globalLogPublisher.addObserver(
            FileObserver(path=log_path, log_level=LogLevel.debug))
    else:

        @provider(ILogObserver)
        def ConsoleObserver(log_level=LogLevel.warn):
            console_observer = textFileLogObserver(sys.stdout)
            return FilteringLogObserver(console_observer, [
                LogLevelFilterPredicate(log_level),
            ])

        globalLogPublisher.addObserver(
            ConsoleObserver(log_level=LogLevel.debug))
deployer.run()
Exemple #39
0
import sys

from twisted.internet.endpoints import serverFromString
from twisted.internet.defer import gatherResults, inlineCallbacks, Deferred
from twisted.internet.task import react
from twisted.web.server import Site
from twisted.logger import globalLogPublisher, textFileLogObserver

from psycopg2.extras import NamedTupleCursor
from txpostgres import txpostgres

from txchatexample.util import ConnectionPool
from txchatexample.web import makeEntryPoint


@inlineCallbacks
def main(reactor, server='tcp:8000'):
    pool = ConnectionPool(dbname='asdf')
    conn = txpostgres.Connection()
    yield gatherResults([
        pool.start(),
        conn.connect(dbname='asdf', cursor_factory=NamedTupleCursor)
    ])

    res = yield makeEntryPoint(pool, conn)
    site = Site(res)

    ep = serverFromString(reactor, server)
    ep.listen(site)
    yield Deferred()  # wait forever


if __name__ == '__main__':
    globalLogPublisher.addObserver(textFileLogObserver(sys.stdout))
    react(main)
Exemple #40
0
from __future__ import print_function, absolute_import

from twisted.internet import reactor
from twisted.logger import Logger, globalLogPublisher

from cheeselib.logger import PrintingObserver
from cheeselib.server.rpc import CheeseRPCServerFactory, CheeseRPCServer
from cheeselib.server.storage.mongo import MongoDAO

SERVER_PORT = 18080

log = Logger()

globalLogPublisher.addObserver(PrintingObserver())

dao = MongoDAO()

rpc_server = CheeseRPCServer(dao).getStreamFactory(CheeseRPCServerFactory)

reactor.listenTCP(SERVER_PORT, rpc_server)
log.info("Starting server on port %d..." % SERVER_PORT)
reactor.run()
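PrintingObserver comes from cheeselib and its implementation is not shown here; a minimal equivalent (an assumption, not the real class) can be built on twisted.logger.formatEventAsClassicLogText:

from twisted.logger import formatEventAsClassicLogText

class PrintingObserver:
    def __call__(self, event):
        # formatEventAsClassicLogText returns None for events with no renderable text
        text = formatEventAsClassicLogText(event)
        if text is not None:
            print(text, end="")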
Exemple #41
0
 def startService(self):
     globalLogPublisher.addObserver(_count_errors)
     return Service.startService(self)
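_count_errors is not shown in this excerpt. A plausible sketch (the counter and names are assumptions) just inspects log_level on each published event:

from twisted.logger import LogLevel

ERROR_COUNT = 0

def _count_errors(event):
    global ERROR_COUNT
    # count anything at error severity or above
    if event.get("log_level") in (LogLevel.error, LogLevel.critical):
        ERROR_COUNT += 1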
Exemple #42
0
    """
    # The '-c' parameter is currently ignored
    optParameters = []

    optFlags = [['help', 'h', 'Display this help and exit.']]


@provider(ILogObserver)
def importFailureObserver(event):
    if 'failure' in event and event['failure'].type is ImportError:
        log.err("ERROR: %s. Please run `pip install -U -r requirements.txt` "
                "from Cowrie's install directory and virtualenv to install "
                "the new dependency" % event['failure'].value.message)


globalLogPublisher.addObserver(importFailureObserver)


@implementer(IServiceMaker, IPlugin)
class CowrieServiceMaker(object):
    tapname = "cowrie"
    description = "She sells sea shells by the sea shore."
    options = Options
    dbloggers = None
    output_plugins = None

    def makeService(self, options):
        """
        Construct a TCPServer from a factory defined in Cowrie.
        """

Exemple #43
0
def main(config_path=None):
    """
    main.

    @type config_path: String
    @param config_path: optional alternative path to a config file
    """
    log.debug("Welcome to the jungle!")

    try:
        cfg = load_config(config_path) if config_path else load_config()
    except ConfigObjError as e:
        log.failure(str(e))

    site_plugins = load_site_plugins(cfg['sites'])
    db_plugin = load_database_plugin(cfg['database'])
    plugins = {'sites': site_plugins, 'database': db_plugin}
    import_data(plugins)


if __name__ == '__main__':
    log_filter = LogLevelFilterPredicate(LogLevel.info)
    all_observer = textFileLogObserver(open("spider.log", 'w'))
    filtered_observer = FilteringLogObserver(textFileLogObserver(sys.stdout),
                                             [log_filter])
    globalLogPublisher.addObserver(all_observer)
    globalLogPublisher.addObserver(filtered_observer)
    sys.exit(main())
Exemple #44
0
 def start(cls):
     globalLogPublisher.addObserver(getTextFileObserver())
     cls.started = True
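getTextFileObserver is defined elsewhere; a plausible version (the log path is an assumption) simply wraps twisted.logger.textFileLogObserver:

import io
from twisted.logger import textFileLogObserver

def getTextFileObserver(path="app.log"):
    return textFileLogObserver(io.open(path, "a"))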
Exemple #45
0
def main():
    parser = argparse.ArgumentParser()

    # Network options
    group_network_container = parser.add_argument_group(title="Network options")
    group_network = group_network_container.add_mutually_exclusive_group(required=True)
    group_network.add_argument("--mainnet", action="store_true", default=False, help="Use MainNet")
    group_network.add_argument("--testnet", action="store_true", default=False, help="Use TestNet")
    group_network.add_argument("--privnet", action="store_true", default=False, help="Use PrivNet")
    group_network.add_argument("--coznet", action="store_true", default=False, help="Use CozNet")
    group_network.add_argument("--config", action="store", help="Use a specific config file")

    # Ports for RPC and REST api
    group_modes = parser.add_argument_group(title="Mode(s)")
    group_modes.add_argument("--port-rpc", type=int, help="port to use for the json-rpc api (eg. 10332)")
    group_modes.add_argument("--port-rest", type=int, help="port to use for the rest api (eg. 80)")

    # Advanced logging setup
    group_logging = parser.add_argument_group(title="Logging options")
    group_logging.add_argument("--logfile", action="store", type=str, help="Logfile")
    group_logging.add_argument("--syslog", action="store_true", help="Log to syslog instead of to log file ('user' is the default facility)")
    group_logging.add_argument("--syslog-local", action="store", type=int, choices=range(0, 7), metavar="[0-7]", help="Log to a local syslog facility instead of 'user'. Value must be between 0 and 7 (e.g. 0 for 'local0').")
    group_logging.add_argument("--disable-stderr", action="store_true", help="Disable stderr logger")

    # Where to store stuff
    parser.add_argument("--datadir", action="store",
                        help="Absolute path to use for database directories")
    # peers
    parser.add_argument("--maxpeers", action="store", default=5,
                        help="Max peers to use for P2P Joining")

    # host
    parser.add_argument("--host", action="store", type=str, help="Hostname ( for example 127.0.0.1)", default="0.0.0.0")

    # Now parse
    args = parser.parse_args()
    # print(args)

    if not args.port_rpc and not args.port_rest:
        print("Error: specify at least one of --port-rpc / --port-rest")
        parser.print_help()
        return

    if args.port_rpc == args.port_rest:
        print("Error: --port-rpc and --port-rest cannot be the same")
        parser.print_help()
        return

    if args.logfile and (args.syslog or args.syslog_local):
        print("Error: Cannot only use logfile or syslog at once")
        parser.print_help()
        return

    # Setup depending on command line arguments. By default, the testnet settings are already loaded.
    if args.config:
        settings.setup(args.config)
    elif args.mainnet:
        settings.setup_mainnet()
    elif args.testnet:
        settings.setup_testnet()
    elif args.privnet:
        settings.setup_privnet()
    elif args.coznet:
        settings.setup_coznet()

    if args.datadir:
        settings.set_data_dir(args.datadir)
    if args.maxpeers:
        settings.set_max_peers(args.maxpeers)

    if args.syslog or args.syslog_local is not None:
        # Setup the syslog facility
        if args.syslog_local is not None:
            print("Logging to syslog local%s facility" % args.syslog_local)
            syslog_facility = SysLogHandler.LOG_LOCAL0 + args.syslog_local
        else:
            print("Logging to syslog user facility")
            syslog_facility = SysLogHandler.LOG_USER

        # Setup logzero to only use the syslog handler
        logzero.syslog(facility=syslog_facility)
    else:
        # Setup file logging
        if args.logfile:
            logfile = os.path.abspath(args.logfile)
            if args.disable_stderr:
                print("Logging to logfile: %s" % logfile)
            else:
                print("Logging to stderr and logfile: %s" % logfile)
            logzero.logfile(logfile, maxBytes=LOGFILE_MAX_BYTES, backupCount=LOGFILE_BACKUP_COUNT, disableStderrLogger=args.disable_stderr)

        else:
            print("Logging to stdout and stderr")

    # Disable logging smart contract events
    settings.set_log_smart_contract_events(False)

    # Write a PID file to easily quit the service
    write_pid_file()

    # Setup Twisted and Klein logging to use the logzero setup
    observer = STDLibLogObserver(name=logzero.LOGZERO_DEFAULT_LOGGER)
    globalLogPublisher.addObserver(observer)

    # Instantiate the blockchain and subscribe to notifications
    blockchain = LevelDBBlockchain(settings.chain_leveldb_path)
    Blockchain.RegisterBlockchain(blockchain)
    dbloop = task.LoopingCall(Blockchain.Default().PersistBlocks)
    dbloop.start(.1)

    # Setup twisted reactor, NodeLeader and start the NotificationDB
    reactor.suggestThreadPoolSize(15)
    NodeLeader.Instance().Start()
    NotificationDB.instance().start()

    # Start a thread with custom code
    d = threading.Thread(target=custom_background_code)
    d.daemon = True  # daemonizing the thread will kill it when the main thread quits
    d.start()

    if args.port_rpc:
        logger.info("Starting json-rpc api server on http://%s:%s" % (args.host, args.port_rpc))
        api_server_rpc = JsonRpcApi(args.port_rpc)
#        endpoint_rpc = "tcp:port={0}:interface={1}".format(args.port_rpc, args.host)
#        endpoints.serverFromString(reactor, endpoint_rpc).listen(Site(api_server_rpc.app.resource()))
#        reactor.listenTCP(int(args.port_rpc), server.Site(api_server_rpc))
        api_server_rpc.app.run(args.host, args.port_rpc)

    if args.port_rest:
        logger.info("Starting REST api server on http://%s:%s" % (args.host, args.port_rest))
        api_server_rest = RestApi()
#        endpoint_rest = "tcp:port={0}:interface={1}".format(args.port_rest, args.host)
#        endpoints.serverFromString(reactor, endpoint_rest).listen(Site(api_server_rest.app.resource()))
        api_server_rest.app.run(args.host, args.port_rest)

    reactor.run()

    # After the reactor is stopped, gracefully shutdown the database.
    logger.info("Closing databases...")
    NotificationDB.close()
    Blockchain.Default().Dispose()
    NodeLeader.Instance().Shutdown()
Exemple #46
0
 def __attrs_post_init__(self) -> None:
     globalLogPublisher.addObserver(self.storeObserver)
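For context, a self-contained sketch of the pattern above (the surrounding class is not shown; this attrs-based version is an assumption):

import attr
from twisted.logger import globalLogPublisher

@attr.s
class EventStore:
    events = attr.ib(factory=list)

    def storeObserver(self, event):
        # keep every published event around for later inspection
        self.events.append(event)

    def __attrs_post_init__(self) -> None:
        globalLogPublisher.addObserver(self.storeObserver)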
Exemple #47
0
import io
from twisted.logger import jsonFileLogObserver, globalLogPublisher
from ad_hoc import AdHoc

globalLogPublisher.addObserver(jsonFileLogObserver(io.open("log.json", "a")))

AdHoc(3, 4).logMessage()
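The JSON log written by the example above can be read back as structured events with twisted.logger.eventsFromJSONLogFile, e.g.:

import io
from twisted.logger import eventsFromJSONLogFile

for event in eventsFromJSONLogFile(io.open("log.json")):
    print(event.get("log_namespace"), event.get("log_format"))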
Exemple #48
0
 def __enter__(self):
     set_global_log_level(self.desired_level)
     globalLogPublisher.addObserver(self.logs.append)
     return self
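The matching __exit__ would presumably undo both steps; a sketch (it assumes the prior level was saved in __init__, which is not shown):

 def __exit__(self, exc_type, exc_value, traceback):
     globalLogPublisher.removeObserver(self.logs.append)
     set_global_log_level(self.previous_level)  # hypothetical attribute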
Exemple #49
0
    yield task.deferLater(nats_protocol.reactor, 1, reactor.stop)


def main(reactor):

    host = "demo.nats.io"
    port = 4222

    # TODO: make a NatsClient that does this, choosing the proper endpoint

    point = TCP4ClientEndpoint(reactor, host, port)

    nats_protocol = txnats.io.NatsProtocol(verbose=False,
                                           on_msg=my_on_msg,
                                           on_connect=somePubSubbing)

    # Because NatsProtocol implements the Protocol interface, Twisted's
    # connectProtocol knows how to connected to the endpoint.
    connecting = connectProtocol(point, nats_protocol)
    # Log if there is an error making the connection.
    connecting.addErrback(lambda np: log.info("{p}", p=np))
    # Log what is returned by the connectProtocol.
    connecting.addCallback(lambda np: log.info("{p}", p=np))
    return connecting


if __name__ == '__main__':
    globalLogPublisher.addObserver(simpleObserver)
    main(reactor)
    reactor.run()
Exemple #50
0
def run():
    """
    Entry point into (native) worker processes. This wires up stuff such that
    a worker instance is talking WAMP-over-stdio to the node controller.
    """
    import os
    import sys
    import platform
    import signal

    # Ignore SIGINT so we get consistent behavior on control-C versus
    # sending SIGINT to the controller process. When the controller is
    # shutting down, it sends TERM to all its children, but ctrl-C
    # handling will send a SIGINT to all the processes in the group
    # (so the controller sends a TERM, but the child has already received,
    # or will very shortly receive, a SIGINT as well). Twisted installs
    # signal handlers, but not for SIGINT if a custom one is already
    # present.

    def ignore(sig, frame):
        log.debug("Ignoring SIGINT in worker.")
    signal.signal(signal.SIGINT, ignore)

    # create the top-level parser
    #
    import argparse
    parser = argparse.ArgumentParser()

    parser.add_argument('--reactor',
                        default=None,
                        choices=['select', 'poll', 'epoll', 'kqueue', 'iocp'],
                        help='Explicit Twisted reactor selection (optional).')

    parser.add_argument('--loglevel',
                        default="info",
                        choices=['none', 'error', 'warn', 'info', 'debug', 'trace'],
                        help='Initial log level.')

    parser.add_argument('-c',
                        '--cbdir',
                        type=str,
                        help="Crossbar.io node directory (required).")

    parser.add_argument('-n',
                        '--node',
                        type=str,
                        help='Crossbar.io node ID (required).')

    parser.add_argument('-w',
                        '--worker',
                        type=str,
                        help='Crossbar.io worker ID (required).')

    parser.add_argument('-r',
                        '--realm',
                        type=str,
                        help='Crossbar.io node (management) realm (required).')

    parser.add_argument('-t',
                        '--type',
                        choices=['router', 'container'],
                        help='Worker type (required).')

    parser.add_argument('--title',
                        type=str,
                        default=None,
                        help='Worker process title to set (optional).')

    options = parser.parse_args()

    # make sure logging to something else than stdio is setup _first_
    #
    from crossbar._logging import make_JSON_observer, cb_logging_aware, _stderr
    from crossbar._logging import make_logger, start_logging, set_global_log_level
    from twisted.logger import globalLogPublisher as log_publisher

    # Set the global log level
    set_global_log_level(options.loglevel)

    log = make_logger()

    # Print a magic phrase that tells the capturing logger that it supports
    # Crossbar's rich logging
    print(cb_logging_aware, file=_stderr)
    _stderr.flush()

    flo = make_JSON_observer(_stderr)
    log_publisher.addObserver(flo)
    start_logging()

    try:
        import setproctitle
    except ImportError:
        log.debug("Could not set worker process title (setproctitle not installed)")
    else:
        # set process title if requested to
        #
        if options.title:
            setproctitle.setproctitle(options.title)
        else:
            WORKER_TYPE_TO_TITLE = {
                'router': 'crossbar-worker [router]',
                'container': 'crossbar-worker [container]'
            }
            setproctitle.setproctitle(WORKER_TYPE_TO_TITLE[options.type].strip())

    # we use an Autobahn utility to import the "best" available Twisted reactor
    #
    from autobahn.twisted.choosereactor import install_reactor
    reactor = install_reactor(options.reactor)

    from twisted.python.reflect import qual
    log.info("Worker running under {python}-{reactor}",
             python=platform.python_implementation(),
             reactor=qual(reactor.__class__).split('.')[-1])

    options.cbdir = os.path.abspath(options.cbdir)
    os.chdir(options.cbdir)
    # log.msg("Starting from node directory {}".format(options.cbdir))

    from crossbar.worker.router import RouterWorkerSession
    from crossbar.worker.container import ContainerWorkerSession

    WORKER_TYPE_TO_CLASS = {
        'router': RouterWorkerSession,
        'container': ContainerWorkerSession
    }

    from autobahn.twisted.websocket import WampWebSocketServerProtocol

    class WorkerServerProtocol(WampWebSocketServerProtocol):

        def connectionLost(self, reason):
            try:
                # this log message is unlikely to reach the controller (unless
                # only stdin/stdout pipes were lost, but not stderr)
                log.warn("Connection to node controller lost.")
                WampWebSocketServerProtocol.connectionLost(self, reason)
            except Exception:
                # we're shutting down anyway, so ignore errors here
                pass
            finally:
                # losing the connection to the node controller is fatal:
                # stop the reactor and exit with error
                log.info("No more controller connection; shutting down.")
                reactor.addSystemEventTrigger('after', 'shutdown', os._exit, 1)
                try:
                    reactor.stop()
                except ReactorNotRunning:
                    pass

    try:
        # create a WAMP application session factory
        #
        from autobahn.twisted.wamp import ApplicationSessionFactory
        from autobahn.wamp.types import ComponentConfig

        session_config = ComponentConfig(realm=options.realm, extra=options)
        session_factory = ApplicationSessionFactory(session_config)
        session_factory.session = WORKER_TYPE_TO_CLASS[options.type]

        # create a WAMP-over-WebSocket transport server factory
        #
        from autobahn.twisted.websocket import WampWebSocketServerFactory
        transport_factory = WampWebSocketServerFactory(session_factory, "ws://localhost", debug=False, debug_wamp=False)
        transport_factory.protocol = WorkerServerProtocol
        transport_factory.setProtocolOptions(failByDrop=False)

        # create a protocol instance and wire up to stdio
        #
        from twisted.python.runtime import platform as _platform
        from twisted.internet import stdio
        proto = transport_factory.buildProtocol(None)
        if _platform.isWindows():
            stdio.StandardIO(proto)
        else:
            stdio.StandardIO(proto, stdout=3)

        # now start reactor loop
        #
        if False:  # hardwired off: flip to True to profile this worker with vmprof
            log.info("vmprof enabled.")

            import os
            import vmprof

            PROFILE_FILE = 'vmprof_{}.dat'.format(os.getpid())

            outfd = os.open(PROFILE_FILE, os.O_RDWR | os.O_CREAT | os.O_TRUNC)
            vmprof.enable(outfd, period=0.01)

            log.info("Entering event loop...")
            reactor.run()

            vmprof.disable()
        else:
            log.debug("Entering event loop...")
            reactor.run()

    except Exception as e:
        log.info("Unhandled exception: {}".format(e))
        if reactor.running:
            reactor.addSystemEventTrigger('after', 'shutdown', os._exit, 1)
            reactor.stop()
        else:
            sys.exit(1)
Exemple #51
0
def initCommandConfig():
    if CMD_STATUS['mode'] == 'foreground':
        globalLogPublisher.addObserver(logObserver)
    elif CMD_STATUS['mode'] == 'background':
        ignoreKeyboardInterrupt()
Exemple #52
0
    optParameters = []

    optFlags = [
        ['help', 'h', 'Display this help and exit.']
    ]


@provider(ILogObserver)
def importFailureObserver(event):
    if 'failure' in event and event['failure'].type is ImportError:
        log.err("ERROR: %s. Please run `pip install -U -r requirements.txt` "
                "from Cowrie's install directory and virtualenv to install "
                "the new dependency" % event['failure'].value.message)


globalLogPublisher.addObserver(importFailureObserver)


@implementer(IServiceMaker, IPlugin)
class CowrieServiceMaker(object):
    tapname = "cowrie"
    description = "She sells sea shells by the sea shore."
    options = Options
    output_plugins = None

    def makeService(self, options):
        """
        Construct a TCPServer from a factory defined in Cowrie.
        """

        if options["help"] is True:
Exemple #53
0
import os.path
import sys

from flask import Flask, render_template
from twisted.logger import globalLogPublisher

from hendrix.deploy.base import HendrixDeploy
from hendrix.experience import hey_joe
from nucypher.characters.base import Character
from nucypher.characters.lawful import Ursula
from nucypher.config.constants import GLOBAL_DOMAIN
from nucypher.network.middleware import RestMiddleware
from nucypher.network.nodes import FleetStateTracker
from nucypher.utilities.logging import SimpleObserver

websocket_service = hey_joe.WebSocketService("127.0.0.1", 9000)
globalLogPublisher.addObserver(SimpleObserver())

known_node = Ursula.from_seed_and_stake_info(seed_uri=sys.argv[1],
                                             federated_only=True,
                                             minimum_stake=0)

rest_app = Flask("fleet-monitor", root_path=os.path.dirname(__file__))


class MonitoringTracker(FleetStateTracker):
    def record_fleet_state(self, *args, **kwargs):
        new_state_or_none = super(MonitoringTracker,
                                  self).record_fleet_state(*args, **kwargs)
        if new_state_or_none:
            checksum, new_state = new_state_or_none
            hey_joe.send({checksum: self.abridged_state_details(new_state)},
Exemple #54
0
    """
    Start a Nats Protocol and connect it to a NATS endpoint over TCP4
    which subscribes to a subject.
    """
    log.info("Start client.")
    point = TCP4ClientEndpoint(reactor, host, port)
    nats_protocol = txnats.io.NatsProtocol(
        verbose=False,
        on_connect=lambda np: np.sub("happy", 6, on_msg=on_happy_msg))

    # Because NatsProtocol implements the Protocol interface, Twisted's
    # connectProtocol knows how to connected to the endpoint.
    connecting = connectProtocol(point, nats_protocol)
    # Log if there is an error making the connection.
    connecting.addErrback(lambda np: log.info("{p}", p=np))
    # Log what is returned by the connectProtocol.
    connecting.addCallback(lambda np: log.info("{p}", p=np))
    return connecting


def main(reactor):
    host = "demo.nats.io"
    port = 4222
    create_client(reactor, host, port)

if __name__ == '__main__':
    globalLogPublisher.addObserver(simpleObserver)
    main(reactor)
    reactor.run()

Exemple #55
0
    # Start the bot
    heufybot = HeufyBot(options.config)

    # Determine the logging level
    logFilter = LogLevelFilterPredicate()
    try:
        logFilter.setLogLevelForNamespace("heufybot", LogLevel.levelWithName(options.loglevel.lower()))
        invalidLogLevel = False
    except InvalidLogLevelError:
        logFilter.setLogLevelForNamespace("heufybot", LogLevel.info)
        invalidLogLevel = True

    # Set up logging
    logFile = DailyLogFile("heufybot.log", "")
    fileObserver = FileLogObserver(logFile, logFormat)
    fileFilterObserver = FilteringLogObserver(fileObserver, (logFilter,))
    consoleFilterObserver = FilteringLogObserver(consoleLogObserver, (logFilter,))
    heufybot.log = Logger("heufybot")
    globalLogPublisher.addObserver(fileFilterObserver)
    globalLogPublisher.addObserver(consoleFilterObserver)

    heufybot.log.info("Starting bot...")

    # Yell at the user if they specified an invalid log level
    if invalidLogLevel:
        heufybot.log.warn("Picked up invalid log level {invalidLevel}, level has been set to INFO instead.",
                          invalidLevel=options.loglevel.lower())

    signal(SIGINT, lambda sig, frame: heufybot.shutdown())
    heufybot.startup()
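
This snippet references a logFormat callable and a consoleLogObserver defined elsewhere in HeufyBot. FileLogObserver only needs a function that renders an event dict to a string, so a minimal stand-in could look like this (the names and format are assumptions, not HeufyBot's actual definitions):

import sys

from twisted.logger import FileLogObserver, formatEvent, formatTime

def logFormat(event):
    # Render a timestamped, single-line representation of the log event.
    return "[{time}] {message}\n".format(time=formatTime(event["log_time"]),
                                         message=formatEvent(event))

consoleLogObserver = FileLogObserver(sys.stdout, logFormat)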
Exemple #56
0
import datetime
import json
import os
import shutil

import maya

from twisted.logger import globalLogPublisher

from nucypher.characters.lawful import Bob, Ursula
from nucypher.config.characters import AliceConfiguration
from nucypher.crypto.powers import DecryptingPower, SigningPower
from nucypher.network.middleware import RestMiddleware
from nucypher.utilities.logging import SimpleObserver


def alicia_encrypt(data_fields):
    POLICY_FILENAME = "policy-metadata.json"
    globalLogPublisher.addObserver(SimpleObserver())
    TEMP_ALICE_DIR = "{}/alicia-files".format(os.path.dirname(os.path.abspath(__file__)))
    SEEDNODE_URL = '40.122.164.26:9151'

    passphrase = "TEST_ALICIA_INSECURE_DEVELOPMENT_PASSWORD"
    try:
        alice_config_file = os.path.join(TEMP_ALICE_DIR, "alice.config")
        new_alice_config = AliceConfiguration.from_configuration_file(
                filepath=alice_config_file,
                network_middleware=RestMiddleware(),
                start_learning_now=False,
                save_metadata=False,
            )
        alicia = new_alice_config(passphrase=passphrase)

    except Exception:  # No usable configuration found; create a fresh Alice
        shutil.rmtree(TEMP_ALICE_DIR, ignore_errors=True)
        ursula = Ursula.from_seed_and_stake_info(seed_uri=SEEDNODE_URL,
                                                 federated_only=True,
                                                 minimum_stake=0)
        alice_config = AliceConfiguration(
            config_root=os.path.join(TEMP_ALICE_DIR),
            is_me=True,
            known_nodes={ursula},
            start_learning_now=False,
            federated_only=True,
            learn_on_same_thread=True,
        )

        alice_config.initialize(password=passphrase)

        alice_config.keyring.unlock(password=passphrase)
        alicia = alice_config.produce()
        alice_config_file = alice_config.to_configuration_file()

    alicia.start_learning_loop(now=True)
    label = "doctor"
    label = label.encode()
    policy_pubkey = alicia.get_policy_pubkey_from_label(label)

    print("The policy public key for "
        "label '{}' is {}".format(label.decode("utf-8"), policy_pubkey.to_bytes().hex()))

    import data_ipfs
    res = data_ipfs.encrypt_patient_data(policy_pubkey, data_fields, label=label, save_as_file=True)
    print(res)

    from doctor_keys import get_doctor_pubkeys
    doctor_pubkeys = get_doctor_pubkeys()

    powers_and_material = {
        DecryptingPower: doctor_pubkeys['enc'],
        SigningPower: doctor_pubkeys['sig']
    }

    doctor_strange = Bob.from_public_keys(powers_and_material=powers_and_material,
                                          federated_only=True)


    policy_end_datetime = maya.now() + datetime.timedelta(days=5)

    m, n = 2, 3
    print("Creating access policy for the Doctor...")
    policy = alicia.grant(bob=doctor_strange,
                          label=label,
                          m=m,
                          n=n,
                          expiration=policy_end_datetime)
    print("Done!")


    policy_info = {
        "policy_pubkey": policy.public_key.to_bytes().hex(),
        "alice_sig_pubkey": bytes(alicia.stamp).hex(),
        "label": label.decode("utf-8"),
    }

    filename = POLICY_FILENAME
    with open(filename, 'w') as f:
        json.dump(policy_info, f)

    return res
Exemple #57
0
    def start(self):
        if self._filename:
            self._output = io.open(self._filename, "a", encoding="utf-8")
        if self.firehose:
            self.firehose.start()
        globalLogPublisher.addObserver(self)
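
Registering self as the observer works because a Twisted log observer is any callable that accepts an event dict. The surrounding class is not shown; assuming twisted.logger's formatEvent and globalLogPublisher are imported, a matching __call__ (and a stop() that undoes start()) might look roughly like this sketch:

    def __call__(self, event):
        # Write each formatted event to the open file, if any.
        if self._output:
            self._output.write(formatEvent(event) + u"\n")
            self._output.flush()

    def stop(self):
        # Stop observing and release what start() acquired.
        globalLogPublisher.removeObserver(self)
        if self.firehose:
            self.firehose.stop()
        if self._output:
            self._output.close()
            self._output = None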
Exemple #58
0
from __future__ import absolute_import, division, print_function

import sys

from gaspocket.bot import run

from twisted.internet.task import react

from twisted.logger import globalLogPublisher, textFileLogObserver

globalLogPublisher.addObserver(textFileLogObserver(sys.stdout))

if __name__ == '__main__':
    react(run, [])
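
react runs the reactor around a single entry point: it calls run(reactor), waits for the returned Deferred to fire, and then shuts the reactor down cleanly. gaspocket's actual run is not shown here; a react-compatible function generally has this shape (a sketch under that assumption, not gaspocket's implementation):

from twisted.internet.defer import succeed

def run(reactor):
    # Do the program's work and return a Deferred that fires when finished;
    # react() stops the reactor once it does.
    return succeed(None)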
Exemple #59
0
import io

from ad_hoc import AdHoc

from twisted.logger import globalLogPublisher, jsonFileLogObserver

globalLogPublisher.addObserver(jsonFileLogObserver(io.open("log.json", "a")))

AdHoc(3, 4).logMessage()
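
The ad_hoc module is not shown; the example only requires a class whose logMessage() emits a structured event, which the JSON observer added above serializes to log.json. A minimal stand-in (an assumption, not the original module):

# ad_hoc.py
from twisted.logger import Logger

class AdHoc(object):
    log = Logger()

    def __init__(self, a, b):
        self.a = a
        self.b = b

    def logMessage(self):
        # Emit a structured event; a and b travel as structured fields.
        self.log.info("AdHoc values: a={a}, b={b}", a=self.a, b=self.b)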