def run(components, log_level='info'):
    """
    High-level API to run a series of components.

    This will only return once all the components have stopped
    (including, possibly, after all re-connections have failed if you
    have re-connections enabled). Under the hood, this calls
    :meth:`twisted.internet.reactor.run` -- if you wish to manage the
    reactor loop yourself, use the
    :meth:`autobahn.twisted.component.Component.start` method to start
    each component yourself.

    :param components: the Component(s) you wish to run
    :type components: Component or list of Components

    :param log_level: a valid log-level (or None to avoid calling start_logging)
    :type log_level: string
    """
    # only for Twisted > 12
    # ...so this isn't in all Twisted versions we test against -- need
    # to do "something else" if we can't import .. :/ (or drop some
    # support)
    from twisted.internet.task import react

    # actually, should we even let people "not start" the logging? I'm
    # not sure that's wise... (double-check: if they already called
    # txaio.start_logging() what happens if we call it again?)
    if log_level is not None:
        txaio.start_logging(level=log_level)
    react(component._run, (components, ))
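
A minimal usage sketch for the Twisted flavour of run() shown above (the
transport URL and realm are hypothetical; assumes Autobahn's component API):

from autobahn.twisted.component import Component, run

comp = Component(
    transports=u"ws://localhost:8080/ws",  # assumed local router
    realm=u"realm1",                       # assumed realm name
)

@comp.on_join
def joined(session, details):
    print("session joined: {}".format(details))

if __name__ == '__main__':
    # blocks until the component (and any re-connections) stop
    run([comp], log_level='info')

Example #2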
def run(components, log_level='info'):
    """
    High-level API to run a series of components.

    This will only return once all the components have stopped
    (including, possibly, after all re-connections have failed if you
    have re-connections enabled). Under the hood, this runs the asyncio
    event loop (via ``loop.run_forever()``) -- if you wish to manage the
    event loop yourself, use the
    :meth:`autobahn.asyncio.component.Component.start` method to start
    each component yourself.

    :param components: the Component(s) you wish to run
    :type components: Component or list of Components

    :param log_level: a valid log-level (or None to avoid calling start_logging)
    :type log_level: string
    """

    # actually, should we even let people "not start" the logging? I'm
    # not sure that's wise... (double-check: if they already called
    # txaio.start_logging() what happens if we call it again?)
    if log_level is not None:
        txaio.start_logging(level=log_level)
    loop = asyncio.get_event_loop()
    log = txaio.make_logger()

    # see https://github.com/python/asyncio/issues/341 asyncio has
    # "odd" handling of KeyboardInterrupt when using Tasks (as
    # run_until_complete does). Another option is to just restore
    # default SIGINT handling, which is to exit:
    #   import signal
    #   signal.signal(signal.SIGINT, signal.SIG_DFL)

    @asyncio.coroutine
    def exit():
        return loop.stop()

    def nicely_exit(signal):
        log.info("Shutting down due to {signal}", signal=signal)
        for task in asyncio.Task.all_tasks():
            task.cancel()
        asyncio.ensure_future(exit())

    loop.add_signal_handler(signal.SIGINT, partial(nicely_exit, 'SIGINT'))
    loop.add_signal_handler(signal.SIGTERM, partial(nicely_exit, 'SIGTERM'))

    # returns a future; could run_until_complete() but see below
    component._run(loop, components)

    try:
        loop.run_forever()
        # this is probably more-correct, but then you always get
        # "Event loop stopped before Future completed":
        # loop.run_until_complete(f)
    except asyncio.CancelledError:
        pass
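
The docstring above mentions managing the event loop yourself via
Component.start(); a rough sketch of that alternative (hypothetical URL and
realm, and assuming Component.start() accepts the loop and returns a future
that resolves once the component has finished):

import asyncio
import txaio
from autobahn.asyncio.component import Component

comp = Component(
    transports=u"ws://localhost:8080/ws",  # assumed local router
    realm=u"realm1",                       # assumed realm name
)

if __name__ == '__main__':
    txaio.start_logging(level='info')      # run() would normally do this for us
    loop = asyncio.get_event_loop()
    # drive the loop ourselves instead of letting run() call run_forever()
    loop.run_until_complete(comp.start(loop=loop))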
Example #3
def log_started(framework):
    """
    Sets up the logging, which we can only do once per run.
    """
    early_log = txaio.make_logger()
    early_log.info("early log")

    txaio.start_logging(out=_handler, level='debug')
Example #4
def log_started():
    """
    Sets up the logging, which we can only do once per run.
    """
    early_log = txaio.make_logger()
    early_log.info("early log")

    handler = TestHandler()
    txaio.start_logging(out=handler, level='debug')
    return handler
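
TestHandler is not shown in this snippet; txaio.start_logging(out=...) only
needs a file-like object to write formatted log lines into, so a plausible
stand-in (a sketch, not the project's actual test class) could be:

class TestHandler(object):
    """Collects formatted log lines so tests can assert on them."""

    def __init__(self):
        self.messages = []

    def write(self, data):
        # txaio hands us already-formatted log output
        self.messages.append(data)

    def flush(self):
        pass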
Example #5
def main():
    args, _ = get_args()

    if args.debug:
        txaio.start_logging(level='debug')
    else:
        txaio.start_logging(level='info')

    # create and start app runner for our app component ..
    extra = {"args": args}
    runner = ApplicationRunner(url=args.router, realm=args.realm, extra=extra)
    runner.run(SubscriptionPrinter, auto_reconnect=True)
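
get_args() and SubscriptionPrinter are defined elsewhere in that project; as
an illustration only, a session of that shape (Twisted flavour, hypothetical
topic URI) might look like:

from autobahn.twisted.wamp import ApplicationSession
from twisted.internet.defer import inlineCallbacks

class SubscriptionPrinter(ApplicationSession):
    """Subscribes to a topic and prints every event it receives."""

    @inlineCallbacks
    def onJoin(self, details):
        def on_event(*args, **kwargs):
            print("event received: args={} kwargs={}".format(args, kwargs))
        # 'com.example.topic' is a placeholder topic URI
        yield self.subscribe(on_event, u'com.example.topic')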
Example #6
def init_logger():
    """Set up logging.

    Uses the standard Python logging module, although it will also set up
    ``txaio`` if ``logging.txaio`` is True in the configuration.

    """
    logging.basicConfig(level=logging.DEBUG, format='%(asctime)s - %(levelname)s - %(message)s')

    if get('logging.txaio', False):
        import txaio
        txaio.start_logging()
Example #7
def setup_logging(verbose, logfile=None):
    root_logger = logging.getLogger()
    formatter = logging.Formatter(LOG_FORMAT)
    streamhandler = logging.StreamHandler()
    streamhandler.setFormatter(formatter)
    root_logger.addHandler(streamhandler)

    if logfile:
        filehandler = logging.FileHandler(logfile)
        filehandler.setFormatter(formatter)
        root_logger.addHandler(filehandler)

    root_logger.setLevel(logging.DEBUG if verbose else logging.INFO)

    # They use txaio's logging. We use the logging module.
    # They should interplay just fine, but it's nice to be explicit.
    txaio.start_logging(level='debug' if verbose else 'info')
Example #8
    def __init__(self, host):

        signal.signal(signal.SIGINT, self.stop_handler)

        logging.register_options(CONF)

        CONF(project='iotronic')
        logging.setup(CONF, "iotronic-wamp-agent")

        if CONF.debug:
            txaio.start_logging(level="debug")

        # to be removed asap
        self.host = host
        self.dbapi = dbapi.get_instance()

        try:
            wpa = self.dbapi.register_wampagent(
                {'hostname': self.host, 'wsurl': CONF.wamp.wamp_transport_url})

        except exception.WampAgentAlreadyRegistered:
            LOG.warn(_LW("A wampagent with hostname %(hostname)s "
                         "was previously registered. Updating registration"),
                     {'hostname': self.host})

        wpa = self.dbapi.register_wampagent(
            {'hostname': self.host, 'wsurl': CONF.wamp.wamp_transport_url},
            update_existing=True)
        self.wampagent = wpa
        self.wampagent.ragent = CONF.wamp.register_agent
        self.wampagent.save()

        global AGENT_HOST
        AGENT_HOST = self.host

        self.r = RPCServer()
        self.w = WampManager()

        self.r.start()
        self.w.start()
Example #9
def main(reactor):
    component = Component(
        transports=u"ws://localhost:8080/ws",
        realm=u"crossbardemo",
    )
    app = Klein()
    webapp = WebApplication(app, component)

    # have our Web site listen on 8090
    site = Site(app.resource())
    server_ep = TCP4ServerEndpoint(reactor, 8090)
    port = yield server_ep.listen(site)
    print("Web application on {}".format(port))

    # we don't *have* to hand over control of the reactor to
    # component.run -- if we don't want to, we call .start()
    # The Deferred it returns fires when the component is "completed"
    # (or errbacks on any problems).
    comp_d = component.start(reactor)

    # When not using run() we also must start logging ourselves.
    import txaio
    txaio.start_logging(level='info')

    # If the Component raises an exception we want to exit. Note that
    # things like failing to connect will be swallowed by the
    # re-connection mechanisms already so won't reach here.

    def _failed(f):
        print("Component failed: {}".format(f))
        done.errback(f)
    comp_d.addErrback(_failed)

    # wait forever (unless the Component raises an error)
    done = Deferred()
    yield done
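
As written, main() is a generator, so the original example presumably
decorates it with @inlineCallbacks and drives it with
twisted.internet.task.react; roughly (a sketch):

from twisted.internet.defer import inlineCallbacks
from twisted.internet.task import react

if __name__ == '__main__':
    # react() starts the reactor, calls main(reactor), and shuts down when
    # the returned Deferred fires (here: only if the component errbacks)
    react(inlineCallbacks(main))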
Example #10
def run(components, start_loop=True, log_level='info'):
    """
    High-level API to run a series of components.

    This will only return once all the components have stopped
    (including, possibly, after all re-connections have failed if you
    have re-connections enabled). Under the hood, this runs the asyncio
    event loop (via ``loop.run_forever()``) -- if you wish to manage the
    event loop yourself, use the
    :meth:`autobahn.asyncio.component.Component.start` method to start
    each component yourself.

    :param components: the Component(s) you wish to run
    :type components: instance or list of :class:`autobahn.asyncio.component.Component`

    :param start_loop: When ``True`` (the default) this method
        starts the asyncio event loop.
    :type start_loop: bool

    :param log_level: a valid log-level (or None to avoid calling start_logging)
    :type log_level: string
    """

    # actually, should we even let people "not start" the logging? I'm
    # not sure that's wise... (double-check: if they already called
    # txaio.start_logging() what happens if we call it again?)
    if log_level is not None:
        txaio.start_logging(level=log_level)
    loop = asyncio.get_event_loop()
    if loop.is_closed():
        asyncio.set_event_loop(asyncio.new_event_loop())
        loop = asyncio.get_event_loop()
        txaio.config.loop = loop
    log = txaio.make_logger()

    # see https://github.com/python/asyncio/issues/341 asyncio has
    # "odd" handling of KeyboardInterrupt when using Tasks (as
    # run_until_complete does). Another option is to just restore
    # default SIGINT handling, which is to exit:
    #   import signal
    #   signal.signal(signal.SIGINT, signal.SIG_DFL)

    @asyncio.coroutine
    def nicely_exit(signal):
        log.info("Shutting down due to {signal}", signal=signal)

        tasks = asyncio.Task.all_tasks()
        for task in tasks:
            # Do not cancel the current task.
            if task is not asyncio.Task.current_task():
                task.cancel()

        def cancel_all_callback(fut):
            try:
                fut.result()
            except asyncio.CancelledError:
                log.debug("All task cancelled")
            except Exception as e:
                log.error("Error while shutting down: {exception}", exception=e)
            finally:
                loop.stop()

        fut = asyncio.gather(*tasks)
        fut.add_done_callback(cancel_all_callback)

    try:
        loop.add_signal_handler(signal.SIGINT, lambda: asyncio.ensure_future(nicely_exit("SIGINT")))
        loop.add_signal_handler(signal.SIGTERM, lambda: asyncio.ensure_future(nicely_exit("SIGTERM")))
    except NotImplementedError:
        # signals are not available on Windows
        pass

    def done_callback(loop, arg):
        loop.stop()

    # returns a future; could run_until_complete() but see below
    component._run(loop, components, done_callback)

    if start_loop:
        try:
            loop.run_forever()
            # this is probably more-correct, but then you always get
            # "Event loop stopped before Future completed":
            # loop.run_until_complete(f)
        except asyncio.CancelledError:
            pass
        # finally:
        #     signal.signal(signal.SIGINT, signal.SIG_DFL)
        #     signal.signal(signal.SIGTERM, signal.SIG_DFL)

        # Close the event loop at the end, otherwise an exception is
        # thrown. https://bugs.python.org/issue23548
        loop.close()
Example #11
               help=('autoPingInterval parameter for wamp')),

]

proxy_opts = [
    cfg.StrOpt('proxy',
               choices=[('nginx', _('nginx proxy')), ],
               help=_('Proxy for webservices')),
]

CONF = cfg.CONF
cfg.CONF.register_opts(service_opts)
cfg.CONF.register_opts(proxy_opts)
CONF.register_opts(wamp_opts, 'wamp')

txaio.start_logging(level="info")

wamp_session_caller = None
AGENT_HOST = None
LOOP = None
connected = False


async def wamp_request(kwarg):
    LOG.debug("calling: " + kwarg['wamp_rpc_call'])
    d = await wamp_session_caller.call(kwarg['wamp_rpc_call'], *kwarg['data'])
    return d


# OSLO ENDPOINT
class WampEndpoint(object):
Example #12
    def run(self,
            make,
            start_reactor=True,
            auto_reconnect=False,
            log_level='info'):
        """
        Run the application component.

        :param make: A factory that produces instances of :class:`autobahn.twisted.wamp.ApplicationSession`
           when called with an instance of :class:`autobahn.wamp.types.ComponentConfig`.
        :type make: callable

        :param start_reactor: When ``True`` (the default) this method starts
           the Twisted reactor and doesn't return until the reactor
           stops. If there are any problems starting the reactor or
           connect()-ing, we stop the reactor and raise the exception
           back to the caller.

        :returns: None is returned, unless you specify
            ``start_reactor=False`` in which case the Deferred that
            connect() returns is returned; this will callback() with
            an IProtocol instance, which will actually be an instance
            of :class:`WampWebSocketClientProtocol`
        """
        if start_reactor:
            # only select the framework, set the loop and start logging when
            # we are asked to start the reactor -- otherwise we are running in
            # a program that likely already took care of all this.
            from twisted.internet import reactor
            txaio.use_twisted()
            txaio.config.loop = reactor
            txaio.start_logging(level=log_level)

        if callable(make):
            # factory for creating our ApplicationSession
            def create():
                cfg = ComponentConfig(self.realm, self.extra)
                try:
                    session = make(cfg)
                except Exception:
                    self.log.failure(
                        'ApplicationSession could not be instantiated: {log_failure.value}'
                    )
                    if start_reactor and reactor.running:
                        reactor.stop()
                    raise
                else:
                    return session
        else:
            create = make

        if self.url.startswith(u'rs'):
            # try to parse RawSocket URL ..
            isSecure, host, port = parse_rs_url(self.url)

            # use the first configured serializer if any (which means, auto-choose "best")
            serializer = self.serializers[0] if self.serializers else None

            # create a WAMP-over-RawSocket transport client factory
            transport_factory = WampRawSocketClientFactory(
                create, serializer=serializer)

        else:
            # try to parse WebSocket URL ..
            isSecure, host, port, resource, path, params = parse_ws_url(
                self.url)

            # create a WAMP-over-WebSocket transport client factory
            transport_factory = WampWebSocketClientFactory(
                create,
                url=self.url,
                serializers=self.serializers,
                proxy=self.proxy,
                headers=self.headers)

            # client WebSocket settings - similar to:
            # - http://crossbar.io/docs/WebSocket-Compression/#production-settings
            # - http://crossbar.io/docs/WebSocket-Options/#production-settings

            # The permessage-deflate extensions offered to the server ..
            offers = [PerMessageDeflateOffer()]

            # Function to accept permessage-deflate responses from the server ..
            def accept(response):
                if isinstance(response, PerMessageDeflateResponse):
                    return PerMessageDeflateResponseAccept(response)

            # set WebSocket options for all client connections
            transport_factory.setProtocolOptions(
                maxFramePayloadSize=1048576,
                maxMessagePayloadSize=1048576,
                autoFragmentSize=65536,
                failByDrop=False,
                openHandshakeTimeout=25.,
                closeHandshakeTimeout=1.,
                tcpNoDelay=True,
                autoPingInterval=10.,
                autoPingTimeout=25.,
                autoPingSize=4,
                perMessageCompressionOffers=offers,
                perMessageCompressionAccept=accept)

        # suppress pointless log noise
        transport_factory.noisy = False

        # if the user passed ssl= but the URL isn't secure ("wss:"), the ssl
        # argument would never be used, which makes no sense.
        context_factory = None
        if self.ssl is not None:
            if not isSecure:
                raise RuntimeError(
                    'ssl= argument value passed to %s conflicts with the "ws:" '
                    'prefix of the url argument. Did you mean to use "wss:"?' %
                    self.__class__.__name__)
            context_factory = self.ssl
        elif isSecure:
            from twisted.internet.ssl import optionsForClientTLS
            context_factory = optionsForClientTLS(host)

        from twisted.internet import reactor
        if self.proxy is not None:
            from twisted.internet.endpoints import TCP4ClientEndpoint
            client = TCP4ClientEndpoint(reactor, self.proxy['host'],
                                        self.proxy['port'])
            transport_factory.contextFactory = context_factory
        elif isSecure:
            from twisted.internet.endpoints import SSL4ClientEndpoint
            assert context_factory is not None
            client = SSL4ClientEndpoint(reactor, host, port, context_factory)
        else:
            from twisted.internet.endpoints import TCP4ClientEndpoint
            client = TCP4ClientEndpoint(reactor, host, port)

        # as the reactor shuts down, we wish to wait until we've sent
        # out our "Goodbye" message; leave() returns a Deferred that
        # fires when the transport gets to STATE_CLOSED
        def cleanup(proto):
            if hasattr(proto, '_session') and proto._session is not None:
                if proto._session.is_attached():
                    return proto._session.leave()
                elif proto._session.is_connected():
                    return proto._session.disconnect()

        # when our proto was created and connected, make sure it's cleaned
        # up properly later on when the reactor shuts down for whatever reason
        def init_proto(proto):
            self._connect_successes += 1
            reactor.addSystemEventTrigger('before', 'shutdown', cleanup, proto)
            return proto

        use_service = False
        if auto_reconnect:
            try:
                # since Twisted 16.1.0
                from twisted.application.internet import ClientService
                from twisted.application.internet import backoffPolicy
                use_service = True
            except ImportError:
                use_service = False

        if use_service:
            # this code path is automatically reconnecting ..
            self.log.debug('using t.a.i.ClientService')

            default_retry = backoffPolicy()

            if False:
                # retry policy that will only try to reconnect if we connected
                # successfully at least once before (so it fails on host unreachable etc ..)
                def retry(failed_attempts):
                    if self._connect_successes > 0:
                        return default_retry(failed_attempts)
                    else:
                        self.stop()
                        return 100000000000000
            else:
                retry = default_retry

            self._client_service = ClientService(client,
                                                 transport_factory,
                                                 retryPolicy=retry)
            self._client_service.startService()

            d = self._client_service.whenConnected()

        else:
            # this code path is only connecting once!
            self.log.debug('using t.i.e.connect()')

            d = client.connect(transport_factory)

        # if we connect successfully, the arg is a WampWebSocketClientProtocol
        d.addCallback(init_proto)

        # if the user didn't ask us to start the reactor, then they
        # get to deal with any connect errors themselves.
        if start_reactor:
            # if an error happens in the connect(), we save the underlying
            # exception so that after the event-loop exits we can re-raise
            # it to the caller.

            class ErrorCollector(object):
                exception = None

                def __call__(self, failure):
                    self.exception = failure.value
                    reactor.stop()

            connect_error = ErrorCollector()
            d.addErrback(connect_error)

            # now enter the Twisted reactor loop
            reactor.run()

            # if we exited due to a connection error, raise that to the
            # caller
            if connect_error.exception:
                raise connect_error.exception

        else:
            # let the caller handle any errors
            return d
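
A typical invocation of this run() method, sketched with a hypothetical
router URL, realm and session class:

from autobahn.twisted.wamp import ApplicationRunner, ApplicationSession

class MySession(ApplicationSession):
    def onJoin(self, details):
        print("joined realm: {}".format(details.realm))

runner = ApplicationRunner(u"ws://localhost:8080/ws", u"realm1")
# start_reactor defaults to True, so this blocks until the reactor stops;
# auto_reconnect enables the ClientService-based retry path shown above
runner.run(MySession, auto_reconnect=True, log_level='info')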
Example #13
def run(components, log_level='info'):
    """
    High-level API to run a series of components.

    This will only return once all the components have stopped
    (including, possibly, after all re-connections have failed if you
    have re-connections enabled). Under the hood, this runs the asyncio
    event loop (via ``loop.run_forever()``) -- if you wish to manage the
    event loop yourself, use the
    :meth:`autobahn.asyncio.component.Component.start` method to start
    each component yourself.

    :param components: the Component(s) you wish to run
    :type components: Component or list of Components

    :param log_level: a valid log-level (or None to avoid calling start_logging)
    :type log_level: string
    """

    # actually, should we even let people "not start" the logging? I'm
    # not sure that's wise... (double-check: if they already called
    # txaio.start_logging() what happens if we call it again?)
    if log_level is not None:
        txaio.start_logging(level=log_level)
    loop = asyncio.get_event_loop()
    if loop.is_closed():
        asyncio.set_event_loop(asyncio.new_event_loop())
        loop = asyncio.get_event_loop()
        txaio.config.loop = loop
    log = txaio.make_logger()

    # see https://github.com/python/asyncio/issues/341 asyncio has
    # "odd" handling of KeyboardInterrupt when using Tasks (as
    # run_until_complete does). Another option is to just restore
    # default SIGINT handling, which is to exit:
    #   import signal
    #   signal.signal(signal.SIGINT, signal.SIG_DFL)

    @asyncio.coroutine
    def exit():
        return loop.stop()

    def nicely_exit(signal):
        log.info("Shutting down due to {signal}", signal=signal)
        for task in asyncio.Task.all_tasks():
            task.cancel()
        asyncio.ensure_future(exit())

    loop.add_signal_handler(signal.SIGINT, partial(nicely_exit, 'SIGINT'))
    loop.add_signal_handler(signal.SIGTERM, partial(nicely_exit, 'SIGTERM'))

    def done_callback(loop, arg):
        loop.stop()

    # returns a future; could run_until_complete() but see below
    component._run(loop, components, done_callback)

    try:
        loop.run_forever()
        # this is probably more-correct, but then you always get
        # "Event loop stopped before Future completed":
        # loop.run_until_complete(f)
    except asyncio.CancelledError:
        pass
    # finally:
    #     signal.signal(signal.SIGINT, signal.SIG_DFL)
    #     signal.signal(signal.SIGTERM, signal.SIG_DFL)

    # Close the event loop at the end, otherwise an exception is
    # thrown. https://bugs.python.org/issue23548
    loop.close()
Example #14
    def run(self, make):
        """
        Run the application component.

        :param make: A factory that produces instances of :class:`autobahn.asyncio.wamp.ApplicationSession`
           when called with an instance of :class:`autobahn.wamp.types.ComponentConfig`.
        :type make: callable
        """
        # 1) factory for creating our ApplicationSession
        def create():
            cfg = ComponentConfig(self.realm, self.extra)
            try:
                session = make(cfg)
            except Exception:
                self.log.failure("App session could not be created! ")
                asyncio.get_event_loop().stop()
            else:
                return session

        isSecure, host, port, resource, path, params = parse_url(self.url)

        if self.ssl is None:
            ssl = isSecure
        else:
            if self.ssl and not isSecure:
                raise RuntimeError(
                    'ssl argument value passed to %s conflicts with the "ws:" '
                    'prefix of the url argument. Did you mean to use "wss:"?' %
                    self.__class__.__name__)
            ssl = self.ssl

        # 2) create a WAMP-over-WebSocket transport client factory
        transport_factory = WampWebSocketClientFactory(create, url=self.url, serializers=self.serializers)

        # 3) start the client
        loop = asyncio.get_event_loop()
        txaio.use_asyncio()
        txaio.config.loop = loop
        coro = loop.create_connection(transport_factory, host, port, ssl=ssl)
        (transport, protocol) = loop.run_until_complete(coro)

        # start logging
        txaio.start_logging(level='info')

        try:
            loop.add_signal_handler(signal.SIGTERM, loop.stop)
        except NotImplementedError:
            # signals are not available on Windows
            pass

        # 4) now enter the asyncio event loop
        try:
            loop.run_forever()
        except KeyboardInterrupt:
            # wait until we send Goodbye if user hit ctrl-c
            # (done outside this except so SIGTERM gets the same handling)
            pass

        # give Goodbye message a chance to go through, if we still
        # have an active session
        if protocol._session:
            loop.run_until_complete(protocol._session.leave())

        loop.close()
Example #15
File: wamp.py Project: goks/Winky
    def run(self, make, start_reactor=True):
        """
        Run the application component.

        :param make: A factory that produces instances of :class:`autobahn.asyncio.wamp.ApplicationSession`
           when called with an instance of :class:`autobahn.wamp.types.ComponentConfig`.
        :type make: callable

        :param start_reactor: if True (the default) this method starts
           the Twisted reactor and doesn't return until the reactor
           stops. If there are any problems starting the reactor or
           connect()-ing, we stop the reactor and raise the exception
           back to the caller.

        :returns: None is returned, unless you specify
            ``start_reactor=False`` in which case the Deferred that
            connect() returns is returned; this will callback() with
            an IProtocol instance, which will actually be an instance
            of :class:`WampWebSocketClientProtocol`
        """
        if start_reactor:
            # only select the framework, set the loop and start logging when
            # we are asked to start the reactor -- otherwise we are running in
            # a program that likely already took care of all this.
            from twisted.internet import reactor
            txaio.use_twisted()
            txaio.config.loop = reactor

            if self.debug or self.debug_app:
                txaio.start_logging(level='debug')
            else:
                txaio.start_logging(level='info')

        isSecure, host, port, resource, path, params = parseWsUrl(self.url)

        # factory for creating our ApplicationSession
        def create():
            cfg = ComponentConfig(self.realm, self.extra)
            try:
                session = make(cfg)
            except Exception as e:
                if start_reactor:
                    # the app component could not be created .. fatal
                    self.log.error(str(e))
                    reactor.stop()
                else:
                    # if we didn't start the reactor, it's up to the
                    # caller to deal with errors
                    raise
            else:
                session.debug_app = self.debug_app
                return session

        # create a WAMP-over-WebSocket transport client factory
        transport_factory = WampWebSocketClientFactory(create, url=self.url, serializers=self.serializers,
                                                       proxy=self.proxy, debug=self.debug)

        # suppress pointless log noise like
        # "Starting factory <autobahn.twisted.websocket.WampWebSocketClientFactory object at 0x2b737b480e10>"
        transport_factory.noisy = False

        # if the user passed ssl= but the URL isn't secure ("wss:"), the ssl
        # argument would never be used, which makes no sense.
        context_factory = None
        if self.ssl is not None:
            if not isSecure:
                raise RuntimeError(
                    'ssl= argument value passed to %s conflicts with the "ws:" '
                    'prefix of the url argument. Did you mean to use "wss:"?' %
                    self.__class__.__name__)
            context_factory = self.ssl
        elif isSecure:
            from twisted.internet.ssl import optionsForClientTLS
            context_factory = optionsForClientTLS(host)

        from twisted.internet import reactor
        if self.proxy is not None:
            from twisted.internet.endpoints import TCP4ClientEndpoint
            client = TCP4ClientEndpoint(reactor, self.proxy['host'], self.proxy['port'])
            transport_factory.contextFactory = context_factory
        elif isSecure:
            from twisted.internet.endpoints import SSL4ClientEndpoint
            assert context_factory is not None
            client = SSL4ClientEndpoint(reactor, host, port, context_factory)
        else:
            from twisted.internet.endpoints import TCP4ClientEndpoint
            client = TCP4ClientEndpoint(reactor, host, port)

        d = client.connect(transport_factory)

        # as the reactor shuts down, we wish to wait until we've sent
        # out our "Goodbye" message; leave() returns a Deferred that
        # fires when the transport gets to STATE_CLOSED
        def cleanup(proto):
            if hasattr(proto, '_session') and proto._session is not None:
                if proto._session.is_attached():
                    return proto._session.leave()
                elif proto._session.is_connected():
                    return proto._session.disconnect()

        # when our proto was created and connected, make sure it's cleaned
        # up properly later on when the reactor shuts down for whatever reason
        def init_proto(proto):
            reactor.addSystemEventTrigger('before', 'shutdown', cleanup, proto)
            return proto

        # if we connect successfully, the arg is a WampWebSocketClientProtocol
        d.addCallback(init_proto)

        # if the user didn't ask us to start the reactor, then they
        # get to deal with any connect errors themselves.
        if start_reactor:
            # if an error happens in the connect(), we save the underlying
            # exception so that after the event-loop exits we can re-raise
            # it to the caller.

            class ErrorCollector(object):
                exception = None

                def __call__(self, failure):
                    self.exception = failure.value
                    reactor.stop()
            connect_error = ErrorCollector()
            d.addErrback(connect_error)

            # now enter the Twisted reactor loop
            reactor.run()

            # if we exited due to a connection error, raise that to the
            # caller
            if connect_error.exception:
                raise connect_error.exception

        else:
            # let the caller handle any errors
            return d
Example #16
from autobahn.twisted.websocket import connectWS, WebSocketClientFactory, WebSocketClientProtocol
from autobahn.websocket.compress import (
    PerMessageDeflateOffer,
    PerMessageDeflateResponse,
    PerMessageDeflateResponseAccept,
)
from requests import request
from twisted.internet import reactor
from twisted.internet.protocol import ReconnectingClientFactory
from twisted.python import log
from txaio import start_logging, use_twisted

use_twisted()

start_logging(level='debug')


class DiffusionClient(object):

    _USER_AGENT = 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/5.0 (KHTML, like Gecko) Chrome/5.0 Safari/5.0'

    def __init__(self,
                 connection_url,
                 trigger,
                 session_url=None,
                 protocol=None,
                 headers={},
                 topics=[]):
        self._message_handler = self
        self._connection_url = connection_url
        self._trigger = trigger
Example #17
def run(main=None, parser=None):
    # parse command line arguments
    parser = parser or argparse.ArgumentParser()
    parser.add_argument('--debug',
                        dest='debug',
                        action='store_true',
                        default=False,
                        help='Enable logging at level "debug".')
    parser.add_argument(
        '--url',
        dest='url',
        type=str,
        default=u'wss://fabric.crossbario.com',
        help='The Crossbar.io Fabric Center (CFC) WebSocket URL '
        '(default: wss://fabric.crossbario.com)')
    parser.add_argument('--realm',
                        dest='realm',
                        type=str,
                        help='The management realm to join on CFC')
    parser.add_argument(
        '--keyfile',
        dest='keyfile',
        type=str,
        default=u'~/.cbf/default.priv',
        help='The private client key file to use for authentication.')
    parser.add_argument('--authmethod',
                        dest='authmethod',
                        type=str,
                        default=u'cryptosign',
                        help='Authentication method: cryptosign or anonymous')
    args = parser.parse_args()

    if args.debug:
        txaio.start_logging(level='debug')
    else:
        txaio.start_logging(level='info')

    extra = None
    if args.authmethod == u'cryptosign':

        # for authenticating the management client, we need a Ed25519 public/private key pair
        # here, we are reusing the user key - so this needs to exist before
        privkey_file = os.path.expanduser(args.keyfile)
        privkey_hex = None
        user_id = None

        if not os.path.exists(privkey_file):
            raise Exception(
                'private key file {} does not exist'.format(privkey_file))
        else:
            with open(privkey_file, 'r') as f:
                data = f.read()
                for line in data.splitlines():
                    if line.startswith('private-key-ed25519'):
                        privkey_hex = line.split(':')[1].strip()
                    if line.startswith('user-id'):
                        user_id = line.split(':')[1].strip()

        if privkey_hex is None:
            raise Exception('no private key found in keyfile!')

        if user_id is None:
            raise Exception('no user ID found in keyfile!')

        key = cryptosign.SigningKey.from_key_bytes(
            binascii.a2b_hex(privkey_hex))

        extra = {
            u'args': args,
            u'key': key,
            u'authid': user_id,
            u'main': main,
            u'return_code': None
        }

    elif args.authmethod == u'anonymous':

        extra = {u'args': args, u'main': main, u'return_code': None}

    else:
        raise Exception('logic error')

    runner = ApplicationRunner(url=args.url, realm=args.realm, extra=extra)

    if args.authmethod == u'cryptosign':
        runner.run(ManagementClientSession)
    elif args.authmethod == u'anonymous':
        runner.run(ManagementAnonymousClientSession)
    else:
        raise Exception('logic error')

    return_code = extra[u'return_code']
    if isinstance(return_code, int) and return_code != 0:
        sys.exit(return_code)
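
For reference, the keyfile parsing above only looks for two "key: value"
lines; an illustrative (entirely fake) fragment of ~/.cbf/default.priv:

    user-id: alice@example.com
    private-key-ed25519: <64 hex characters of the Ed25519 private key>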
Example #18
def _run_command_exec_worker(options, reactor=None, personality=None):
    """
    Entry point into (native) worker processes. This wires up stuff such that
    a worker instance is talking WAMP-over-stdio to the node controller.
    """
    import os
    import sys
    import platform
    import signal

    # https://coverage.readthedocs.io/en/coverage-4.4.2/subprocess.html#measuring-sub-processes
    MEASURING_COVERAGE = False
    if 'COVERAGE_PROCESS_START' in os.environ:
        try:
            import coverage
        except ImportError:
            pass
        else:
            # The following will read the environment variable COVERAGE_PROCESS_START,
            # and that should be set to the .coveragerc file:
            #
            #   export COVERAGE_PROCESS_START=${PWD}/.coveragerc
            #
            coverage.process_startup()
            MEASURING_COVERAGE = True

    # we use an Autobahn utility to import the "best" available Twisted reactor
    from autobahn.twisted.choosereactor import install_reactor
    reactor = install_reactor(options.reactor)

    # make sure logging to something else than stdio is setup _first_
    from crossbar._logging import make_JSON_observer, cb_logging_aware
    from txaio import make_logger, start_logging
    from twisted.logger import globalLogPublisher
    from twisted.python.reflect import qual

    log = make_logger()

    # Print a magic phrase that tells the capturing logger that it supports
    # Crossbar's rich logging
    print(cb_logging_aware, file=sys.__stderr__)
    sys.__stderr__.flush()

    flo = make_JSON_observer(sys.__stderr__)
    globalLogPublisher.addObserver(flo)

    # Ignore SIGINT so we get consistent behavior on control-C versus
    # sending SIGINT to the controller process. When the controller is
    # shutting down, it sends TERM to all its children but ctrl-C
    # handling will send a SIGINT to all the processes in the group
    # (so then the controller sends a TERM but the child already or
    # will very shortly get a SIGINT as well). Twisted installs signal
    # handlers, but not for SIGINT if there's already a custom one
    # present.
    def ignore(sig, frame):
        log.debug("Ignoring SIGINT in worker.")
    signal.signal(signal.SIGINT, ignore)

    # actually begin logging
    start_logging(None, options.loglevel)

    # get personality klass, eg "crossbar.personality.Personality"
    l = options.personality.split('.')
    personality_module, personality_klass = '.'.join(l[:-1]), l[-1]

    # now load the personality module and class
    _mod = importlib.import_module(personality_module)
    Personality = getattr(_mod, personality_klass)

    # get worker klass, eg "crossbar.worker.container.ContainerController"
    l = options.klass.split('.')
    worker_module, worker_klass = '.'.join(l[:-1]), l[-1]

    # now load the worker module and class
    _mod = importlib.import_module(worker_module)
    klass = getattr(_mod, worker_klass)

    log.info(
        'Starting worker "{worker_id}" for node "{node_id}" with personality "{personality}" {worker_class}',
        worker_id=options.worker,
        node_id=options.node,
        personality=Personality.NAME,
        worker_class=hltype(klass),
    )
    log.info(
        'Running as PID {pid} on {python}-{reactor}',
        pid=os.getpid(),
        python=platform.python_implementation(),
        reactor=qual(reactor.__class__).split('.')[-1],
    )
    if MEASURING_COVERAGE:
        log.info(hl('Code coverage measurements enabled (coverage={coverage_version}).', color='green', bold=True),
                 coverage_version=coverage.__version__)

    # set process title if requested to
    #
    try:
        import setproctitle
    except ImportError:
        log.debug("Could not set worker process title (setproctitle not installed)")
    else:
        if options.title:
            setproctitle.setproctitle(options.title)
        else:
            setproctitle.setproctitle('crossbar-worker [{}]'.format(options.klass))

    # node directory
    #
    options.cbdir = os.path.abspath(options.cbdir)
    os.chdir(options.cbdir)
    # log.msg("Starting from node directory {}".format(options.cbdir))

    # set process title if requested to
    #
    try:
        import setproctitle
    except ImportError:
        log.debug("Could not set worker process title (setproctitle not installed)")
    else:
        if options.title:
            setproctitle.setproctitle(options.title)
        else:
            setproctitle.setproctitle(
                'crossbar-worker [{}]'.format(options.klass)
            )

    from twisted.internet.error import ConnectionDone
    from autobahn.twisted.websocket import WampWebSocketServerProtocol

    class WorkerServerProtocol(WampWebSocketServerProtocol):

        def connectionLost(self, reason):
            # the behavior here differs slightly whether we're shutting down orderly
            # or shutting down because of "issues"
            if isinstance(reason.value, ConnectionDone):
                was_clean = True
            else:
                was_clean = False

            try:
                # this log message is unlikely to reach the controller (unless
                # only stdin/stdout pipes were lost, but not stderr)
                if was_clean:
                    log.info("Connection to node controller closed cleanly")
                else:
                    log.warn("Connection to node controller lost: {reason}", reason=reason)

                # give the WAMP transport a chance to do its thing
                WampWebSocketServerProtocol.connectionLost(self, reason)
            except:
                # we're in the process of shutting down .. so ignore ..
                pass
            finally:
                # after the connection to the node controller is gone,
                # the worker is orphaned, and should exit

                # determine process exit code
                if was_clean:
                    exit_code = 0
                else:
                    exit_code = 1

                # exit the whole worker process when the reactor has stopped
                reactor.addSystemEventTrigger('after', 'shutdown', os._exit, exit_code)

                # stop the reactor
                try:
                    reactor.stop()
                except ReactorNotRunning:
                    pass

    try:
        # define a WAMP application session factory
        #
        from autobahn.wamp.types import ComponentConfig

        def make_session():
            session_config = ComponentConfig(realm=options.realm, extra=options)
            session = klass(config=session_config, reactor=reactor, personality=Personality)
            return session

        # create a WAMP-over-WebSocket transport server factory
        #
        from autobahn.twisted.websocket import WampWebSocketServerFactory
        transport_factory = WampWebSocketServerFactory(make_session, u'ws://localhost')
        transport_factory.protocol = WorkerServerProtocol
        transport_factory.setProtocolOptions(failByDrop=False)

        # create a protocol instance and wire up to stdio
        #
        from twisted.python.runtime import platform as _platform
        from twisted.internet import stdio
        proto = transport_factory.buildProtocol(None)
        if _platform.isWindows():
            stdio.StandardIO(proto)
        else:
            stdio.StandardIO(proto, stdout=3)

        # now start reactor loop
        #
        if False:
            log.info("vmprof enabled.")

            import os
            import vmprof

            PROFILE_FILE = 'vmprof_{}.dat'.format(os.getpid())

            outfd = os.open(PROFILE_FILE, os.O_RDWR | os.O_CREAT | os.O_TRUNC)
            vmprof.enable(outfd, period=0.01)

            log.info(hl('Entering event reactor ...', color='cyan', bold=True))
            reactor.run()

            vmprof.disable()
        else:
            log.info(hl('Entering event reactor ...', color='cyan', bold=True))
            reactor.run()

    except Exception as e:
        log.info("Unhandled exception: {e}", e=e)
        if reactor.running:
            reactor.addSystemEventTrigger('after', 'shutdown', os._exit, 1)
            reactor.stop()
        else:
            sys.exit(1)
Example #19
    def run(self, make, logging_level='info'):
        """
        Run the application component.

        :param make: A factory that produces instances of :class:`autobahn.asyncio.wamp.ApplicationSession`
           when called with an instance of :class:`autobahn.wamp.types.ComponentConfig`.
        :type make: callable
        """

        def create():
            cfg = ComponentConfig(self.realm, self.extra)
            try:
                session = make(cfg)
            except Exception:
                self.log.failure("App session could not be created! ")
                asyncio.get_event_loop().stop()
            else:
                return session

        parsed_url = urlparse(self.url)

        if parsed_url.scheme == 'tcp':
            is_unix = False
            if not parsed_url.hostname or not parsed_url.port:
                raise ValueError('Host and port are required in URL')
        elif parsed_url.scheme == 'unix' or parsed_url.scheme == '':
            is_unix = True
            if not parsed_url.path:
                raise ValueError('Path to unix socket must be in URL')
        else:
            # otherwise is_unix would be referenced before assignment below
            raise ValueError('Unsupported URL scheme: {}'.format(parsed_url.scheme))

        transport_factory = WampRawSocketClientFactory(create, serializer=self.serializer)

        loop = asyncio.get_event_loop()
        if logging_level == 'debug':
            loop.set_debug(True)
        txaio.use_asyncio()
        txaio.config.loop = loop

        try:
            loop.add_signal_handler(signal.SIGTERM, loop.stop)
        except NotImplementedError:
            # signals are not available on Windows
            pass

        def handle_error(loop, context):
            self.log.error('Application Error: {err}', err=context)
            loop.stop()

        loop.set_exception_handler(handle_error)

        if is_unix:
            coro = loop.create_unix_connection(transport_factory, parsed_url.path)
        else:
            coro = loop.create_connection(transport_factory, parsed_url.hostname, parsed_url.port)
        (_transport, protocol) = loop.run_until_complete(coro)

        txaio.start_logging(level=logging_level)  # @UndefinedVariable

        try:
            loop.run_forever()
        except KeyboardInterrupt:
            pass
        self.log.debug('Left main loop waiting for completion')
        # give Goodbye message a chance to go through, if we still
        # have an active session
        # this isn't working right now, because protocol.is_closed must return a Future
        if protocol._session:
            loop.run_until_complete(protocol._session.leave())

        loop.close()
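
Given the URL parsing at the top of this method, the url passed to the runner
can take (roughly) these shapes -- illustrative values only:

#   tcp://127.0.0.1:8080       -> TCP RawSocket (hostname and port required)
#   unix:///tmp/crossbar.sock  -> Unix domain socket (path required)
#   /tmp/crossbar.sock         -> empty scheme, also treated as a Unix socket path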
Example #20
def run(options, reactor=None):
    """
    Entry point into (native) worker processes. This wires up stuff such that
    a worker instance is talking WAMP-over-stdio to the node controller.
    """
    import os
    import sys
    import platform
    import signal

    # make sure logging to something else than stdio is setup _first_
    #
    from crossbar._logging import make_JSON_observer, cb_logging_aware
    from txaio import make_logger, start_logging
    from twisted.logger import globalLogPublisher
    from twisted.python.reflect import qual

    log = make_logger()

    # Print a magic phrase that tells the capturing logger that it supports
    # Crossbar's rich logging
    print(cb_logging_aware, file=sys.__stderr__)
    sys.__stderr__.flush()

    flo = make_JSON_observer(sys.__stderr__)
    globalLogPublisher.addObserver(flo)

    # Ignore SIGINT so we get consistent behavior on control-C versus
    # sending SIGINT to the controller process. When the controller is
    # shutting down, it sends TERM to all its children but ctrl-C
    # handling will send a SIGINT to all the processes in the group
    # (so then the controller sends a TERM but the child already or
    # will very shortly get a SIGINT as well). Twisted installs signal
    # handlers, but not for SIGINT if there's already a custom one
    # present.

    def ignore(sig, frame):
        log.debug("Ignoring SIGINT in worker.")

    signal.signal(signal.SIGINT, ignore)

    # actually begin logging
    start_logging(None, options.loglevel)

    # we use an Autobahn utility to import the "best" available Twisted reactor
    #
    from autobahn.twisted.choosereactor import install_reactor
    reactor = install_reactor(options.reactor)

    # eg: crossbar.worker.container.ContainerWorkerSession
    l = options.klass.split('.')
    worker_module, worker_klass = '.'.join(l[:-1]), l[-1]

    # now load the worker module and class
    _mod = importlib.import_module(worker_module)
    klass = getattr(_mod, worker_klass)

    log.info(
        'Started {worker_title} worker "{worker_id}" on node "{node_id}" [{klass} / {python}-{reactor}]',
        worker_title=klass.WORKER_TITLE,
        klass=options.klass,
        node_id=options.node,
        worker_id=options.worker,
        pid=os.getpid(),
        python=platform.python_implementation(),
        reactor=qual(reactor.__class__).split('.')[-1],
    )

    # set process title if requested to
    #
    try:
        import setproctitle
    except ImportError:
        log.debug(
            "Could not set worker process title (setproctitle not installed)")
    else:
        if options.title:
            setproctitle.setproctitle(options.title)
        else:
            setproctitle.setproctitle('crossbar-worker [{}]'.format(
                options.klass))

    # node directory
    #
    options.cbdir = os.path.abspath(options.cbdir)
    os.chdir(options.cbdir)
    # log.msg("Starting from node directory {}".format(options.cbdir))

    # set process title if requested to
    #
    try:
        import setproctitle
    except ImportError:
        log.debug(
            "Could not set worker process title (setproctitle not installed)")
    else:
        if options.title:
            setproctitle.setproctitle(options.title)
        else:
            setproctitle.setproctitle('crossbar-worker [{}]'.format(
                options.klass))

    from twisted.internet.error import ConnectionDone
    from autobahn.twisted.websocket import WampWebSocketServerProtocol

    class WorkerServerProtocol(WampWebSocketServerProtocol):
        def connectionLost(self, reason):
            # the behavior here differs slightly whether we're shutting down orderly
            # or shutting down because of "issues"
            if isinstance(reason.value, ConnectionDone):
                was_clean = True
            else:
                was_clean = False

            try:
                # this log message is unlikely to reach the controller (unless
                # only stdin/stdout pipes were lost, but not stderr)
                if was_clean:
                    log.info("Connection to node controller closed cleanly")
                else:
                    log.warn("Connection to node controller lost: {reason}",
                             reason=reason)

                # give the WAMP transport a chance to do its thing
                WampWebSocketServerProtocol.connectionLost(self, reason)
            except:
                # we're in the process of shutting down .. so ignore ..
                pass
            finally:
                # after the connection to the node controller is gone,
                # the worker is orphaned, and should exit

                # determine process exit code
                if was_clean:
                    exit_code = 0
                else:
                    exit_code = 1

                # exit the whole worker process when the reactor has stopped
                reactor.addSystemEventTrigger('after', 'shutdown', os._exit,
                                              exit_code)

                # stop the reactor
                try:
                    reactor.stop()
                except ReactorNotRunning:
                    pass

    try:
        # create a WAMP application session factory
        #
        from autobahn.twisted.wamp import ApplicationSessionFactory
        from autobahn.wamp.types import ComponentConfig

        session_config = ComponentConfig(realm=options.realm, extra=options)
        session_factory = ApplicationSessionFactory(session_config)
        session_factory.session = klass

        # create a WAMP-over-WebSocket transport server factory
        #
        from autobahn.twisted.websocket import WampWebSocketServerFactory
        transport_factory = WampWebSocketServerFactory(session_factory,
                                                       u'ws://localhost')
        transport_factory.protocol = WorkerServerProtocol
        transport_factory.setProtocolOptions(failByDrop=False)

        # create a protocol instance and wire up to stdio
        #
        from twisted.python.runtime import platform as _platform
        from twisted.internet import stdio
        proto = transport_factory.buildProtocol(None)
        if _platform.isWindows():
            stdio.StandardIO(proto)
        else:
            stdio.StandardIO(proto, stdout=3)

        # now start reactor loop
        #
        if False:
            log.info("vmprof enabled.")

            import os
            import vmprof

            PROFILE_FILE = 'vmprof_{}.dat'.format(os.getpid())

            outfd = os.open(PROFILE_FILE, os.O_RDWR | os.O_CREAT | os.O_TRUNC)
            vmprof.enable(outfd, period=0.01)

            log.info("Entering event loop...")
            reactor.run()

            vmprof.disable()
        else:
            log.debug("Entering event loop...")
            reactor.run()

    except Exception as e:
        log.info("Unhandled exception: {e}", e=e)
        if reactor.running:
            reactor.addSystemEventTrigger('after', 'shutdown', os._exit, 1)
            reactor.stop()
        else:
            sys.exit(1)
Example #21
def main(reactor):
    # Because we're using a self-signed certificate, we need to tell Twisted
    # that it is OK to trust it.
    cert_fname = (".crossbar/server_cert.pem")
    cert = crypto.load_certificate(crypto.FILETYPE_PEM,
                                   six.u(open(cert_fname, 'r').read()))
    opt = ssl.CertificateOptions(\
      trustRoot=OpenSSLCertificateAuthorities([cert]))

    # Set up our sisock component.
    component = Component(
        transports=[{
            u"type": u"websocket",
            u"url": sisock.base.WAMP_URI,
            u"endpoint": {
                u"type": u"tcp",
                u"host": sisock.base.SISOCK_HOST,
                u"port": sisock.base.SISOCK_PORT,
                u"tls": opt
            }
        }],
        authentication={
            u"wampcra": {
                u"authid": u"simonsobs",
                u"secret": u"yW4V2T^bPD&rGFwy"
            }
        },
        realm=sisock.base.REALM,
    )

    # Create our klein webserver, and then our datasource (which also connects
    # our component to the WAMP server).
    app = Klein()
    GrafanaSisockDatasrc(app, component)

    # Have our webserver listen for Grafana requests.
    # TODO: use SSL and authentication.
    site = Site(app.resource())
    #server_ep = SSL4ServerEndpoint(reactor, klein_port, opt)
    server_ep = TCP4ServerEndpoint(reactor, klein_port)
    port = yield server_ep.listen(site)
    print("Web application on {}".format(port))

    # We don't *have* to hand over control of the reactor to
    # component.run -- if we don't want to, we call .start()
    # The Deferred it returns fires when the component is "completed"
    # (or errbacks on any problems).
    comp_d = component.start(reactor)

    # When not using run() we also must start logging ourselves.
    txaio.start_logging(level=environ.get("LOGLEVEL", "info"))

    # If the Component raises an exception we want to exit. Note that
    # things like failing to connect will be swallowed by the
    # re-connection mechanisms already so won't reach here.

    def _failed(f):
        print("Component failed: {}".format(f))
        done.errback(f)

    comp_d.addErrback(_failed)

    # Wait forever (unless the Component raises an error).
    done = Deferred()
    yield done
Example #22
def test_double_start(handler, framework):
    try:
        txaio.start_logging()
    except RuntimeError:
        assert False, "shouldn't get exception"
class AppSession(ApplicationSession):

    fdb = FlyCircuitDB()
    log = Logger()
    txaio.start_logging(level='debug')
    feedback_log = Logger(observer=jsonFileLogObserver(
        io.open(
            os.path.join(os.path.dirname(__file__), 'logs',
                         'feedback_log.json'), 'a+b')),
                          namespace='feedback')
    #log = Logger(observer=jsonFileLogObserver(io.open("processor_log.json", "a")), namespace="saver")

    @inlineCallbacks
    def onJoin(self, details):

        # Processor Data
        directory = {
            'processor': {
                'autobahn': autobahn.__version__,
                'version': __version__
            },
            'nlp': {},
            'na': {},
            'nk': {}
        }

        # Email notification
        # email_dict = {
        #    "sender": "*****@*****.**",
        #    "recipients": {"John Doe": "*****@*****.**",
        #                   "Jane Who": "*****@*****.**"
        #    }
        # }
        email_dict = None
        try:
            with open('email_dict.json') as data_file:
                email_dict = json.load(data_file)
                self.log.info("Loading Email Dictionary with keys, {keys}",
                              keys=list(email_dict.keys()))
        except:
            self.log.warn(
                "Loading Email Dictionary failed, no email notification on session leave."
            )

        # Memory Management
        @inlineCallbacks
        def trigger_memory_management():
            yield self.publish(six.u('ffbo.processor.memory_manager'))
            self.log.info(
                'Memory Management ping: ffbo.processor.memory_manager')

        lc = task.LoopingCall(trigger_memory_management)
        interval = 60 * self.config.extra['clean_interval']  # in mins
        lc.start(interval)

        def get_process_info():
            # hacky way to determine whether process is inside docker
            try:
                cgroup = {}
                with open('/proc/1/cgroup') as f:
                    for line in f:
                        field = line.split(':')
                        if len(field) == 3:
                            cgroup[field[1]] = field[2]
                docker_cid = 'not inside docker'
                if 'cpu' in cgroup:
                    field = cgroup['cpu'].strip().split('/')
                    if len(field) > 2 and field[1] == 'docker':
                        docker_cid = field[2]
            except Exception:
                docker_cid = 'could not determine whether inside docker or not'

            # get process pid
            try:
                pid = os.getpid()
            except:
                pid = None

            # get host IP, messy code for cross platform support
            try:
                hostip = [
                    ip
                    for ip in socket.gethostbyname_ex(socket.gethostname())[2]
                    if not ip.startswith("127.")
                ][:1]
                hostip = hostip or [[
                    (s.connect(('8.8.8.8', 53)), s.getsockname()[0], s.close())
                    for s in
                    [socket.socket(socket.AF_INET, socket.SOCK_DGRAM)]
                ][0][1]]
                hostip = hostip[0]
            except:
                hostip = None

            process_info = "\n\n" \
                           "processor_component pid: {pid}\n" \
                           "docker container id: {cid}\n" \
                           "host ip address: {hip}\n".format(pid=str(pid),cid=str(docker_cid),hip=str(hostip))
            return process_info

        def send_email(text, subject=None, verbose=True):
            try:
                sender = email_dict['sender']
                recipients = [
                    '%s <%s>' % (k, v)
                    for k, v in email_dict['recipients'].items()
                ]
            except:
                return "Incorrect email dictionary format"

            if verbose:
                text += get_process_info()

            msg = MIMEText(text)

            msg['Subject'] = '[FFBO Server] ' + subject
            msg['From'] = sender
            msg['To'] = ','.join(recipients)

            gmail_user = ""
            gmail_pwd = ""

            try:
                s = smtplib.SMTP_SSL("smtp.gmail.com", 465)
                s.login(gmail_user, gmail_pwd)
            except Exception as e:
                print(e)
                return "Failed to connect to SMTP server on gmail"
            try:
                s.sendmail(sender, recipients, msg.as_string())
            except:
                return "Failed to send out email"
            s.quit()
            return "successfully sent out email"

        def feedback_error(request, message, exception=None):
            info = {
                'error': {
                    'request': str(request),
                    'message': str(message),
                    'exception': str(exception)
                }
            }
            return info

        def feedback_success(request, result):
            info = {
                'success': {
                    'request': str(request),
                    'result': result,
                }
            }
            return info

        @inlineCallbacks  # Progressive calls
        def process_nlp_query(request, details=None):
            """
                Process a nlp request, this request should have
                user:       session_id
                servers: {  nlp: session_id,
                            na : session_id,
                            vis: session_id
                         }
                nlp_query:  string
            """
            request['user'] = details.caller
            #user_details = yield self.call('ffbo.auth_server.get_user',details.caller)
            user_details = yield self.call(six.u('ffbo.auth_server.get_user'),
                                           details.caller)
            if user_details: request['username'] = user_details['username']
            feedback = []
            self.log.info(
                "process_nlp_query() accessed with request: {request}",
                request=request)
            # Validate that each required server is defined
            try:
                #build up server calls
                rpc_calls = {}
                for stype in ['nlp', 'na']:
                    rpc_calls[stype] = six.u("ffbo.%(s_type)s.query.%(s_id)s" % \
                    {'s_id':    request['servers'][stype],
                     's_type':  stype})
                rpc_calls['user_msg'] = six.u("ffbo.ui.receive_msg.%(s_id)s" % \
                                        {'s_id': request['user']})
            except Exception as e:
                self.log.warn(
                    "process_nlp_query() failed due to incomplete server list in {servers}",
                    servers=str(request['servers']))
                feedback = feedback_error(request,
                                          "Server list not fully defined", e)
                returnValue(feedback)

            # Natural Language Processing
            try:
                self.log.info(
                    "process_nlp_query() accessed on NLP server {server_id} with query: {query}",
                    server_id=rpc_calls['nlp'],
                    query=request['nlp_query'])

                language = "en"
                if "language" in request:
                    language = request["language"]

                nlp_res = yield self.call(rpc_calls['nlp'],
                                          request['nlp_query'], language)
                self.log.info(
                    "process_nlp_query() accessed on NLP server {server_id} with result: {result}",
                    server_id=rpc_calls['nlp'],
                    result=nlp_res)

            except ApplicationError as e:
                self.log.warn(
                    "Processor failed to access NLP server {server_id}, with error {e}",
                    server_id=rpc_calls['nlp'],
                    e=e)

                feedback = feedback_error(request, "Unable to contact server",
                                          e)
                returnValue(feedback)

            try:
                if nlp_res == {}:
                    yield self.call(rpc_calls['user_msg'], {
                        'info': {
                            'error': 'NLP module could not parse your input'
                        }
                    })
                    self.log.warn(
                        "{server_id} failed to parse query: {query}}",
                        server_id=rpc_calls['nlp'],
                        query=request['nlp_query'])
                    returnValue(None)
                yield self.call(
                    rpc_calls['user_msg'], {
                        'info': {
                            'success':
                            'NLP module successfully parsed your input'
                        }
                    })
                nlp_res['user_msg'] = rpc_calls['user_msg']
                for key in request:
                    if key not in nlp_res: nlp_res[key] = request[key]

                nlp_res['user'] = request['user']
                # HARD CODE morphology if not specified
                nlp_res['format'] = request.get('format', 'morphology')

                if 'verb' not in nlp_res or nlp_res['verb'] == 'show':
                    yield self.call(rpc_calls['user_msg'],
                                    {'commands': {
                                        'reset': ''
                                    }})

                na_res = yield self.call(rpc_calls['na'], nlp_res)
                if not na_res:
                    yield self.call(rpc_calls['user_msg'], {
                        'info': {
                            'error': 'Failed to execute query on Neuroarch'
                        }
                    })
                    returnValue(None)
                else:
                    if 'format' not in request:
                        request['format'] = 'morphology'
                    msg = {
                        'info': {
                            'success':
                            'Finished fetching all results from NeuroArch'
                        }
                    }
                    self.call(rpc_calls['user_msg'], msg)
                    returnValue(na_res)
            except ApplicationError as e:
                self.log.warn(
                    "Processor failed to access NA server {server_id}, with error {e}",
                    server_id=rpc_calls['na'],
                    e=e)
                traceback.print_exc()
                yield self.call(
                    rpc_calls['user_msg'],
                    {'info': {
                        'error': 'Unable to contact NeuroArch server'
                    }})
                returnValue(None)

        yield self.register(process_nlp_query,
                            six.u('ffbo.processor.nlp_to_visualise'),
                            RegisterOptions(details_arg='details'))
        self.log.debug("procedure process_nlp_query registered")

        @inlineCallbacks  # Progressive calls
        def process_nk_request(request, details=None):
            """

            """
            request['user'] = details.caller
            user_details = yield self.call(six.u('ffbo.auth_server.get_user'),
                                           details.caller)
            if user_details: request['username'] = user_details['username']
            feedback = []
            self.log.debug(
                "process_nk_request() accessed with request: {request}",
                request=request)
            # Validate that each required server is defined
            try:
                #build up server calls
                rpc_calls = {}
                rpc_calls['na'] = u'ffbo.na.query.{}'.format(
                    request['servers']['na'])
                rpc_calls['nk'] = u'ffbo.nk.launch.{}'.format(
                    request['servers']['nk'])

            except Exception as e:
                self.log.warn(
                    "process_nk_request() failed due to incomplete server list in {servers}",
                    servers=str(request['servers']))
                feedback = feedback_error(request,
                                          "Server list not fully defined", e)
                returnValue(feedback)

            # Retrieve circuit state from NeuroArch
            na_res = request
            try:
                na_task = {
                    'user': request['user'],
                    'command': {
                        "retrieve": {
                            "state": 0
                        }
                    },
                    'format': "nk"
                }
                self.log.info(
                    "process_nk_request() accessed on NA server {server_id} with query: {query}",
                    server_id=rpc_calls['na'],
                    query=na_task)

                #na_res =  yield self.call(rpc_calls['na'], na_task)
                na_res_update = yield self.call(rpc_calls['na'], na_task)
                na_res.update(na_res_update)
                self.log.info(
                    "process_nk_request() accessed on NA server {server_id} with result: {result}",
                    server_id=rpc_calls['na'],
                    result=na_res)

            except ApplicationError as e:
                self.log.warn(
                    "Processor failed to access NA server {server_id}, with error {e}",
                    server_id=rpc_calls['na'],
                    e=e)

                feedback = feedback_error(request, "Unable to contact server",
                                          e)
                returnValue(feedback)

            details.progress("Circuit data retrieved from NeuroArch")

            try:
                na_res['user'] = request['user']
                if 'neuron_list' in request:
                    na_res['neuron_list'] = request['neuron_list']

                progressive_result = {}

                def on_progress(p):
                    progressive_result.update(p)
                    details.progress("Receive partial result from execution.")

                details.progress("Start execution in Neurokernel")
                # Forward result to the Front End
                na_res["forward"] = six.u("ffbo.gfx.receive_partial." +
                                          str(na_res['user']))
                nk_res = yield self.call(rpc_calls['nk'],
                                         na_res,
                                         options=CallOptions(
                                             on_progress=on_progress,
                                             timeout=30000000000))

                # Did we use a progressive result
                if nk_res is None:
                    nk_res = progressive_result

                if nk_res == {}:
                    feedback = feedback_error(
                        request, "Neurokernel returned zero results")
                    returnValue(feedback)
                #details.progress("Circuit execution completed")

            except ValueError as e:
                self.log.warn(
                    "{server_id} failed to start simulation: {query}}",
                    server_id=rpc_calls['nk'],
                    query=na_res)
                feedback = feedback_error(request, "Execution Failure", e)
                returnValue(feedback)

            except ApplicationError as e:
                self.log.warn(
                    "Processor failed to access NK server {server_id}, with error {e}",
                    server_id=rpc_calls['nk'],
                    e=e)

                feedback = feedback_error(request, "Execution Failure", e)
                returnValue(feedback)

            returnValue(nk_res)
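            # NOTE: the procedure returns here, so the visualisation block
            # further below is currently unreachable.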
            #details.progress("Neurokernel Started Execution")

            try:
                vis_res = {
                    'success': {
                        'result': {
                            'user': request['user'],
                            'type': 'json'
                        },
                        'user': request['user'],
                        'reset': False
                    }
                }

                if 'error' in vis_res:
                    raise RuntimeError(
                        'Visualisation component was unable to complete the request '
                    )

            except Exception as e:
                self.log.warn(
                    "Processor failed to access complete visualisation")
                feedback = feedback_error(request,
                                          "Unable to create on visualiser", e)
                returnValue(feedback)

            #details.progress("Visualisation: Parsed result")

            self.log.debug(
                "Process_NK_Request complete with request: {request} and result: {result}",
                request=request,
                result=vis_res)

            returnValue(vis_res)

        yield self.register(process_nk_request,
                            six.u('ffbo.processor.nk_execute'),
                            RegisterOptions(details_arg='details'))
        self.log.debug("procedure process_nk_request registered")

        # Receive Feedback from the User
        @inlineCallbacks
        def log_feedback(user, feedback_message, details):
            try:
                user_details = yield self.call(
                    six.u('ffbo.auth_server.get_user'), details.caller)
                template = "-" * 70 + "Feedback recieved\n username: {user} \n feedback_message: {feedback}\n"
                yield self.feedback_log.info(
                    template.format(user=user_details['username'],
                                    feedback=feedback_message))
            except Exception as e:
                print(e)
                self.log.warn("Failed to log user feedback: {user} {feedback}",
                              user=user,
                              feedback=feedback_message)

                returnValue(False)
            returnValue(True)

        yield self.register(log_feedback, six.u('ffbo.server.log_feedback'),
                            RegisterOptions(details_arg='details'))
        self.log.debug("registered ffbo.server.log_feedback")

        # SUBSCRIBE to session leaving events to check if a server needs removing
        @inlineCallbacks
        def on_session_leave(session_id):
            self.log.info("event for 'on_session_leave' received for session: " \
                    "{session}", session=session_id)
            for stype in directory:
                if str(session_id) in directory[stype]:
                    self.log.info("Server disconnected. removing session" \
                            "{session} of type {stype}", session=session_id, stype=stype)
                    if email_dict is not None:
                        email_res = None
                        title = "Server down"
                        text = "disconnected session {session} of type {stype}".format(
                            session=session_id, stype=stype)
                        self.log.info("Sending email about disconnected session" \
                            "{session} of type {stype}",
                            session=session_id,
                            stype=stype)
                        try:
                            email_res = send_email(text, title)
                        except Exception as e:
                            self.log.info(
                                "Sending email failed with error {error}",
                                error=str(e))
                        if email_res is not None:
                            self.log.info("Tried to send out email... {msg}",
                                          msg=email_res)
                    del directory[stype][str(session_id)]
                    yield self.publish(six.u('ffbo.server.update'), directory)

        yield self.subscribe(on_session_leave, six.u('wamp.session.on_leave'))
        self.log.debug("subscribed to topic 'wamp.session.on_leave'")

        # REGISTER a procedure for registering a new server
        @inlineCallbacks
        def register_new_server(server_id, server_type, server_config):
            if server_type not in directory:
                print('not in directory')
                print(server_type)
                print(directory)
                returnValue({})

            if server_type != 'nlp':
                if server_config.get('autobahn', '0.0.0').split(
                        '.')[0] != autobahn.__version__.split('.')[0]:
                    self.log.info(
                        'autobahn version mismatch {server_type}, component autobahn version is {c_version}, and processor autobahn version is {p_version}'
                        .format(server_type=server_type,
                                c_version=server_config.get(
                                    'autobahn', '0.0.0'),
                                p_version=autobahn.__version__))
                    returnValue(
                        json.dumps(
                            'Autobahn version mismatch. Processor has version {}, and you have version {}'
                            .format(autobahn.__version__,
                                    server_config.get('autobahn', 'unknown'))))
            self.log.info("{server_type} server registered with name " \
                            "{server_name} and id {server_id} {dataset}",
                            server_name=server_config.get('name'),
                            server_id=server_id, \
                            server_type=server_type,
                            dataset = 'for dataset {}'.format(server_config.get('dataset')) \
                                      if 'dataset' in server_config else '')

            directory[server_type][str(server_id)] = server_config

            # PUBLISH updated server list after a new server registration
            yield self.publish(six.u('ffbo.server.update'), directory)
            returnValue(json.dumps({'server': directory}))

        yield self.register(register_new_server, six.u('ffbo.server.register'))
        self.log.debug("procedure register_new_server registered")

        @inlineCallbacks
        def nlp_query(server_id, query, user=None):
            """
            Call the nlp module for a nlp translation
            Input

                rpc_call: the WAMP registered RPC call
                query   : the natural language to translate
                user    : used to record state, can be None for stateless

            Return:
                A success or error dictionary
            """

            try:
                self.log.info(
                    "nlp_query() accessed on NLP server {server_id} with query: {query}",
                    server_id=server_id,
                    query=query)
                nlp_res = yield self.call("ffbo.nlp.query." + str(server_id),
                                          query)
                self.log.info(
                    "nlp_query() accessed on NLP server {server_id} with result: {result}",
                    server_id=server_id,
                    result=nlp_res)

                # First Exit Point: NLP parsing has failed
                if nlp_res == {}:
                    raise ValueError('NLP parsing could not parse string')

                returnValue(feedback_success(query, nlp_res))

            except ApplicationError as e:
                # ignore errors due to the frontend not yet having
                # registered the procedure we would like to call
                raise e
                self.log.warn(
                    "nlp_query() failed to access NLP server {server_id}, with error {e}",
                    server_id=server_id,
                    e=e)

                feedback = feedback_error(query, "Unable to contact server",
                                          e)
                returnValue(feedback)
            except ValueError as e:
                self.log.warn(
                    "nlp_query() accessed on {server_id} failed to parse query: {query}",
                    server_id=server_id,
                    query=query)
                feedback = feedback_error(
                    query, "NLP parsing could not parse string", e)
                returnValue(feedback)

        yield self.register(nlp_query, six.u('ffbo.processor.nlp_query'))
        self.log.debug("procedure ffbo.processor.nlp_query registered")

        @inlineCallbacks
        def neuroarch_query(request, details=None):
            """
            Call the neuroarch_query module with a neuroarch-json object
            The request should have
                user:       session_id for client
                server:     session_id for na server
                query:  neuroarch json query object
            """
            try:
                request['user'] = details.caller
                user_details = yield self.call(
                    six.u('ffbo.auth_server.get_user'), details.caller)
                if user_details: request['username'] = user_details['username']
                self.log.info(
                    "neuroarch_query() accessed with request: {request}",
                    request=request)

                progressive_result = {}

                def on_progress(p):
                    progressive_result.update(p)

                #print request
                result = yield self.call(
                    six.u('ffbo.na.query.' + str(request['server'])),
                    request,
                    options=CallOptions(on_progress=on_progress))
                self.log.info("na_query returned with result")

                if progressive_result:
                    result = progressive_result

                # Catch no results and return
                if result == "{}":
                    raise ValueError('Neuroarch returned zero results')
                returnValue(feedback_success(request, (result)))

            except ValueError as e:
                feedback = feedback_error(request,
                                          "Neuroarch returned zero results", e)
                returnValue(feedback)

            except ApplicationError as e:
                # re-raised; the fallback below is currently unreachable
                raise e
                feedback = feedback_error(request,
                                          "Unable to contact server", e)
                returnValue(feedback)

        yield self.register(neuroarch_query,
                            six.u('ffbo.processor.neuroarch_query'),
                            RegisterOptions(details_arg='details'))
        self.log.info("procedure ffbo.processor.neuroarch_query registered")

        @inlineCallbacks
        def flycircuit_neuron_query(neuron, details=None):
            self.log.info("Fetch the flycircuit database for neuron: {neuron}",
                          neuron=neuron)
            try:
                res = self.fdb.parse_neuron(neuron)
            except Exception as e:
                print(e)
                res = feedback_error(neuron,
                                     "Unable to fetch flycircuit database", e)
                yield res
            returnValue(res)

        yield self.register(
            flycircuit_neuron_query, six.u("ffbo.processor.fetch_flycircuit"),
            RegisterOptions(details_arg='details', concurrency=4))
        self.log.info("procedure ffbo.processor.fetch_flycircuit registered")

        @inlineCallbacks
        def neurokernel_query(request):
            """
            Call the neuroarch_query module with a neuroarch-json object
            The request should have
                user:       session_id for client
                server:     session_id for na server
                query:  neuroarch json query object
            """
            try:
                progressive_result = {}

                def on_progress(p):
                    progressive_result.update(p)

                result = yield self.call(
                    six.u('ffbo.nk.launch.' + str(request['server'])), request)
                self.log.info("nk_query returned with result")

                if result is None:
                    result = progressive_result

                # Catch no results and return
                if result == "{}":
                    raise ValueError('Neurokernel returned zero results')
                returnValue(feedback_success(request, (result)))

            except ValueError as e:
                feedback = feedback_error(request,
                                          "Neurokernel returned zero results",
                                          e)
                returnValue(feedback)

            except ApplicationError as e:
                # re-raised; the fallback below is currently unreachable
                raise e
                feedback = feedback_error(request,
                                          "Unable to contact server", e)
                returnValue(feedback)

        # REGISTER a procedure for relaying current servers to single ui
        def relay_server_information():
            self.log.debug("relay_server_information rpc called")
            return directory

        yield self.register(relay_server_information,
                            six.u('ffbo.processor.server_information'))
        self.log.debug("ffbo.processor.server_information registered")
Example #24
                        type=str,
                        help='XBR node verifications directory')

    parser.add_argument(
        '--wallet',
        dest='wallet',
        type=str,
        default='E11BA2b4D45Eaed5996Cd0823791E0C93114882d',
        help=
        'HEX encoded member wallet address (the one used to register the member in the first place).'
    )

    args = parser.parse_args()

    if args.debug:
        txaio.start_logging(level='debug')
    else:
        txaio.start_logging(level='info')

    extra = {
        'ethkey': binascii.a2b_hex(args.ethkey),
        'cskey': binascii.a2b_hex(args.cskey),
        'member_email': args.email,
        'wallet_adr': args.wallet,
        'verifications': args.verifications,
    }

    runner = ApplicationRunner(url=args.url,
                               realm=args.realm,
                               extra=extra,
                               serializers=[CBORSerializer()])
Example #25
    def run(self, make):
        """
        Run the application component.

        :param make: A factory that produces instances of :class:`autobahn.asyncio.wamp.ApplicationSession`
           when called with an instance of :class:`autobahn.wamp.types.ComponentConfig`.
        :type make: callable
        """

        # 1) factory to create our ApplicationSession
        def create():
            cfg = ComponentConfig(self.realm, self.extra)
            try:
                session = make(cfg)
            except Exception as e:
                self.log.error(
                    'ApplicationSession could not be instantiated: {}'.format(
                        e))
                loop = asyncio.get_event_loop()
                if loop.is_running():
                    loop.stop()
                raise
            else:
                return session

        isSecure, host, port, resource, path, params = parse_url(self.url)

        if self.ssl is None:
            ssl = isSecure
        else:
            if self.ssl and not isSecure:
                raise RuntimeError(
                    'ssl argument value passed to %s conflicts with the "ws:" '
                    'prefix of the url argument. Did you mean to use "wss:"?' %
                    self.__class__.__name__)
            ssl = self.ssl

        # 2) create a WAMP-over-WebSocket transport client factory
        transport_factory = WampWebSocketClientFactory(
            create, url=self.url, serializers=self.serializers)

        # 3) start the client
        loop = asyncio.get_event_loop()
        txaio.use_asyncio()
        txaio.config.loop = loop
        coro = loop.create_connection(transport_factory, host, port, ssl=ssl)
        (transport, protocol) = loop.run_until_complete(coro)

        # start logging
        txaio.start_logging(level='info')

        try:
            loop.add_signal_handler(signal.SIGTERM, loop.stop)
        except NotImplementedError:
            # signals are not available on Windows
            pass

        # 4) now enter the asyncio event loop
        try:
            loop.run_forever()
        except KeyboardInterrupt:
            # wait until we send Goodbye if user hit ctrl-c
            # (done outside this except so SIGTERM gets the same handling)
            pass

        # give Goodbye message a chance to go through, if we still
        # have an active session
        if protocol._session:
            loop.run_until_complete(protocol._session.leave())

        loop.close()
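A hedged usage sketch (not part of the original example) showing how a run(make) method like the one above is normally invoked. MySession, the URL and the realm are placeholders.

from autobahn.asyncio.wamp import ApplicationSession, ApplicationRunner

class MySession(ApplicationSession):
    def onJoin(self, details):
        print("session joined")
        self.leave()

if __name__ == '__main__':
    runner = ApplicationRunner(u"ws://127.0.0.1:8080/ws", u"realm1")
    # run() blocks here, driving the asyncio event loop until the session ends
    runner.run(MySession)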
Example #26
def test_double_start(handler, framework):
    try:
        txaio.start_logging()
    except RuntimeError:
        assert False, "shouldn't get exception"
Example #27
    def run(self, make, start_reactor=True):
        """
        Run the application component.

        :param make: A factory that produces instances of :class:`autobahn.asyncio.wamp.ApplicationSession`
           when called with an instance of :class:`autobahn.wamp.types.ComponentConfig`.
        :type make: callable

        :param start_reactor: if True (the default) this method starts
           the Twisted reactor and doesn't return until the reactor
           stops. If there are any problems starting the reactor or
           connect()-ing, we stop the reactor and raise the exception
           back to the caller.

        :returns: None is returned, unless you specify
            ``start_reactor=False`` in which case the Deferred that
            connect() returns is returned; this will callback() with
            an IProtocol instance, which will actually be an instance
            of :class:`WampWebSocketClientProtocol`
        """
        if start_reactor:
            # only select the framework, set the loop and start logging when we
            # are asked to start the reactor - otherwise we are running in a
            # program that likely already took care of all this.
            from twisted.internet import reactor
            txaio.use_twisted()
            txaio.config.loop = reactor
            txaio.start_logging(level='info')

        isSecure, host, port, resource, path, params = parseWsUrl(self.url)

        # factory to create our ApplicationSession
        def create():
            cfg = ComponentConfig(self.realm, self.extra)
            try:
                session = make(cfg)
            except Exception as e:
                if start_reactor:
                    # the app component could not be created .. fatal
                    self.log.error(str(e))
                    reactor.stop()
                else:
                    # if we didn't start the reactor, it's up to the
                    # caller to deal with errors
                    raise
            else:
                return session

        # create a WAMP-over-WebSocket transport client factory
        transport_factory = WampWebSocketClientFactory(
            create,
            url=self.url,
            serializers=self.serializers,
            proxy=self.proxy)

        # suppress pointless log noise like
        # "Starting factory <autobahn.twisted.websocket.WampWebSocketClientFactory object at 0x2b737b480e10>"
        transport_factory.noisy = False

        # if the user passed ssl= but the URL isn't secure ("wss:"), we'd
        # never use the ssl argument, which makes no sense.
        context_factory = None
        if self.ssl is not None:
            if not isSecure:
                raise RuntimeError(
                    'ssl= argument value passed to %s conflicts with the "ws:" '
                    'prefix of the url argument. Did you mean to use "wss:"?' %
                    self.__class__.__name__)
            context_factory = self.ssl
        elif isSecure:
            from twisted.internet.ssl import optionsForClientTLS
            context_factory = optionsForClientTLS(host)

        from twisted.internet import reactor
        if self.proxy is not None:
            from twisted.internet.endpoints import TCP4ClientEndpoint
            client = TCP4ClientEndpoint(reactor, self.proxy['host'],
                                        self.proxy['port'])
            transport_factory.contextFactory = context_factory
        elif isSecure:
            from twisted.internet.endpoints import SSL4ClientEndpoint
            assert context_factory is not None
            client = SSL4ClientEndpoint(reactor, host, port, context_factory)
        else:
            from twisted.internet.endpoints import TCP4ClientEndpoint
            client = TCP4ClientEndpoint(reactor, host, port)

        d = client.connect(transport_factory)

        # as the reactor shuts down, we wish to wait until we've sent
        # out our "Goodbye" message; leave() returns a Deferred that
        # fires when the transport gets to STATE_CLOSED
        def cleanup(proto):
            if hasattr(proto, '_session') and proto._session is not None:
                if proto._session.is_attached():
                    return proto._session.leave()
                elif proto._session.is_connected():
                    return proto._session.disconnect()

        # when our proto was created and connected, make sure it's cleaned
        # up properly later on when the reactor shuts down for whatever reason
        def init_proto(proto):
            reactor.addSystemEventTrigger('before', 'shutdown', cleanup, proto)
            return proto

        # if we connect successfully, the arg is a WampWebSocketClientProtocol
        d.addCallback(init_proto)

        # if the user didn't ask us to start the reactor, then they
        # get to deal with any connect errors themselves.
        if start_reactor:
            # if an error happens in the connect(), we save the underlying
            # exception so that after the event-loop exits we can re-raise
            # it to the caller.

            class ErrorCollector(object):
                exception = None

                def __call__(self, failure):
                    self.exception = failure.value
                    reactor.stop()

            connect_error = ErrorCollector()
            d.addErrback(connect_error)

            # now enter the Twisted reactor loop
            reactor.run()

            # if we exited due to a connection error, raise that to the
            # caller
            if connect_error.exception:
                raise connect_error.exception

        else:
            # let the caller handle any errors
            return d
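A hedged sketch (not part of the original example) of the start_reactor=False path described in the docstring above: the caller keeps control of the reactor and gets back the Deferred from connect(). MySession, the URL and the realm are placeholders.

from twisted.internet import reactor
from autobahn.twisted.wamp import ApplicationSession, ApplicationRunner

class MySession(ApplicationSession):
    def onJoin(self, details):
        self.log.info("session joined")

runner = ApplicationRunner(u"ws://127.0.0.1:8080/ws", u"realm1")
# start_reactor=False: we get the connect() Deferred and run the reactor ourselves
d = runner.run(MySession, start_reactor=False)
d.addErrback(lambda failure: print(failure))
reactor.run()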
Example #28
    def run(self, make, start_loop=True, log_level='info'):
        """
        Run the application component. Under the hood, this runs the event
        loop (unless `start_loop=False` is passed) so won't return
        until the program is done.

        :param make: A factory that produces instances of :class:`autobahn.asyncio.wamp.ApplicationSession`
           when called with an instance of :class:`autobahn.wamp.types.ComponentConfig`.
        :type make: callable

        :param start_loop: When ``True`` (the default) this method
            starts a new asyncio loop.
        :type start_loop: bool

        :returns: None is returned, unless you specify
            `start_loop=False` in which case the coroutine from calling
            `loop.create_connection()` is returned. This will yield the
            (transport, protocol) pair.
        """
        if callable(make):
            def create():
                cfg = ComponentConfig(self.realm, self.extra)
                try:
                    session = make(cfg)
                except Exception as e:
                    self.log.error('ApplicationSession could not be instantiated: {}'.format(e))
                    loop = asyncio.get_event_loop()
                    if loop.is_running():
                        loop.stop()
                    raise
                else:
                    return session
        else:
            create = make

        if self.url.startswith(u'rs'):
            # try to parse RawSocket URL ..
            isSecure, host, port = parse_rs_url(self.url)

            # use the first configured serializer if any (which means, auto-choose "best")
            serializer = self.serializers[0] if self.serializers else None

            # create a WAMP-over-RawSocket transport client factory
            transport_factory = WampRawSocketClientFactory(create, serializer=serializer)

        else:
            # try to parse WebSocket URL ..
            isSecure, host, port, resource, path, params = parse_ws_url(self.url)

            # create a WAMP-over-WebSocket transport client factory
            transport_factory = WampWebSocketClientFactory(create, url=self.url, serializers=self.serializers, proxy=self.proxy, headers=self.headers)

            # client WebSocket settings - similar to:
            # - http://crossbar.io/docs/WebSocket-Compression/#production-settings
            # - http://crossbar.io/docs/WebSocket-Options/#production-settings

            # The permessage-deflate extensions offered to the server ..
            offers = [PerMessageDeflateOffer()]

            # Function to accept permessage-deflate responses from the server ..
            def accept(response):
                if isinstance(response, PerMessageDeflateResponse):
                    return PerMessageDeflateResponseAccept(response)

            # set WebSocket options for all client connections
            transport_factory.setProtocolOptions(maxFramePayloadSize=1048576,
                                                 maxMessagePayloadSize=1048576,
                                                 autoFragmentSize=65536,
                                                 failByDrop=False,
                                                 openHandshakeTimeout=2.5,
                                                 closeHandshakeTimeout=1.,
                                                 tcpNoDelay=True,
                                                 autoPingInterval=10.,
                                                 autoPingTimeout=5.,
                                                 autoPingSize=4,
                                                 perMessageCompressionOffers=offers,
                                                 perMessageCompressionAccept=accept)
        # SSL context for client connection
        if self.ssl is None:
            ssl = isSecure
        else:
            if self.ssl and not isSecure:
                raise RuntimeError(
                    'ssl argument value passed to %s conflicts with the "ws:" '
                    'prefix of the url argument. Did you mean to use "wss:"?' %
                    self.__class__.__name__)
            ssl = self.ssl

        # start the client connection
        loop = asyncio.get_event_loop()
        if loop.is_closed() and start_loop:
            asyncio.set_event_loop(asyncio.new_event_loop())
            loop = asyncio.get_event_loop()
        txaio.use_asyncio()
        txaio.config.loop = loop
        coro = loop.create_connection(transport_factory, host, port, ssl=ssl)

        # start the asyncio loop, unless the caller asked us not to
        if not start_loop:
            return coro
        else:
            (transport, protocol) = loop.run_until_complete(coro)

            # start logging
            txaio.start_logging(level=log_level)

            try:
                loop.add_signal_handler(signal.SIGTERM, loop.stop)
            except NotImplementedError:
                # signals are not available on Windows
                pass

            # now enter the asyncio event loop
            try:
                loop.run_forever()
            except KeyboardInterrupt:
                # wait until we send Goodbye if user hit ctrl-c
                # (done outside this except so SIGTERM gets the same handling)
                pass

            # give Goodbye message a chance to go through, if we still
            # have an active session
            if protocol._session:
                loop.run_until_complete(protocol._session.leave())

            loop.close()
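A hedged sketch (not part of the original example) of the start_loop=False path: the caller drives the asyncio loop and awaits the (transport, protocol) pair from loop.create_connection() themselves. MySession, the URL and the realm are placeholders.

import asyncio
from autobahn.asyncio.wamp import ApplicationSession, ApplicationRunner

class MySession(ApplicationSession):
    def onJoin(self, details):
        print("session joined")

runner = ApplicationRunner(u"ws://127.0.0.1:8080/ws", u"realm1")
loop = asyncio.get_event_loop()
# start_loop=False: run() hands back the connection coroutine
coro = runner.run(MySession, start_loop=False)
(transport, protocol) = loop.run_until_complete(coro)
loop.run_forever()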
Example #29
def run():
    """
    Entry point into (native) worker processes. This wires up stuff such that
    a worker instance is talking WAMP-over-stdio to the node controller.
    """
    import os
    import sys
    import platform
    import signal

    # Ignore SIGINT so we get consistent behavior on control-C versus
    # sending SIGINT to the controller process. When the controller is
    # shutting down, it sends TERM to all its children but ctrl-C
    # handling will send a SIGINT to all the processes in the group
    # (so then the controller sends a TERM but the child already or
    # will very shortly get a SIGINT as well). Twisted installs signal
    # handlers, but not for SIGINT if there's already a custom one
    # present.

    def ignore(sig, frame):
        log.debug("Ignoring SIGINT in worker.")
    signal.signal(signal.SIGINT, ignore)

    # create the top-level parser
    #
    import argparse
    parser = argparse.ArgumentParser()

    parser.add_argument('--reactor',
                        default=None,
                        choices=['select', 'poll', 'epoll', 'kqueue', 'iocp'],
                        help='Explicit Twisted reactor selection (optional).')

    parser.add_argument('--loglevel',
                        default="info",
                        choices=['none', 'error', 'warn', 'info', 'debug', 'trace'],
                        help='Initial log level.')

    parser.add_argument('-c',
                        '--cbdir',
                        type=six.text_type,
                        help="Crossbar.io node directory (required).")

    parser.add_argument('-r',
                        '--realm',
                        type=six.text_type,
                        help='Crossbar.io node (management) realm (required).')

    parser.add_argument('-t',
                        '--type',
                        choices=['router', 'container', 'websocket-testee'],
                        help='Worker type (required).')

    parser.add_argument('-w',
                        '--worker',
                        type=six.text_type,
                        help='Crossbar.io worker ID (required).')

    parser.add_argument('--title',
                        type=six.text_type,
                        default=None,
                        help='Worker process title to set (optional).')

    parser.add_argument('--expose_controller',
                        type=bool,
                        default=False,
                        help='Expose node controller session to all components (this feature requires Crossbar.io Fabric extension).')

    parser.add_argument('--expose_shared',
                        type=bool,
                        default=False,
                        help='Expose a shared object to all components (this feature requires Crossbar.io Fabric extension).')

    options = parser.parse_args()

    # make sure logging to something other than stdio is set up _first_
    #
    from crossbar._logging import make_JSON_observer, cb_logging_aware
    from txaio import make_logger, start_logging
    from twisted.logger import globalLogPublisher

    log = make_logger()

    # Print a magic phrase that tells the log-capturing controller that this
    # worker emits Crossbar's rich logging
    print(cb_logging_aware, file=sys.__stderr__)
    sys.__stderr__.flush()

    flo = make_JSON_observer(sys.__stderr__)
    globalLogPublisher.addObserver(flo)
    start_logging(None, options.loglevel)

    # we use an Autobahn utility to import the "best" available Twisted reactor
    #
    from autobahn.twisted.choosereactor import install_reactor
    reactor = install_reactor(options.reactor)

    from twisted.python.reflect import qual
    log.info("Worker process starting ({python}-{reactor}) ..",
             python=platform.python_implementation(),
             reactor=qual(reactor.__class__).split('.')[-1])

    # set process title if requested to
    #
    try:
        import setproctitle
    except ImportError:
        log.debug("Could not set worker process title (setproctitle not installed)")
    else:
        if options.title:
            setproctitle.setproctitle(options.title)
        else:
            WORKER_TYPE_TO_TITLE = {
                'router': 'crossbar-worker [router]',
                'container': 'crossbar-worker [container]',
                'websocket-testee': 'crossbar-worker [websocket-testee]'
            }
            setproctitle.setproctitle(WORKER_TYPE_TO_TITLE[options.type].strip())

    # node directory
    #
    options.cbdir = os.path.abspath(options.cbdir)
    os.chdir(options.cbdir)
    # log.msg("Starting from node directory {}".format(options.cbdir))

    from crossbar.worker.router import RouterWorkerSession
    from crossbar.worker.container import ContainerWorkerSession
    from crossbar.worker.testee import WebSocketTesteeWorkerSession

    WORKER_TYPE_TO_CLASS = {
        'router': RouterWorkerSession,
        'container': ContainerWorkerSession,
        'websocket-testee': WebSocketTesteeWorkerSession
    }

    from twisted.internet.error import ConnectionDone, ReactorNotRunning
    from autobahn.twisted.websocket import WampWebSocketServerProtocol

    class WorkerServerProtocol(WampWebSocketServerProtocol):

        def connectionLost(self, reason):
            # the behavior here differs slightly depending on whether we're
            # shutting down in an orderly way or because of "issues"
            if isinstance(reason.value, ConnectionDone):
                was_clean = True
            else:
                was_clean = False

            try:
                # this log message is unlikely to reach the controller (unless
                # only stdin/stdout pipes were lost, but not stderr)
                if was_clean:
                    log.info("Connection to node controller closed cleanly")
                else:
                    log.warn("Connection to node controller lost: {reason}", reason=reason)

                # give the WAMP transport a chance to do its thing
                WampWebSocketServerProtocol.connectionLost(self, reason)
            except:
                # we're in the process of shutting down .. so ignore ..
                pass
            finally:
                # after the connection to the node controller is gone,
                # the worker is "orphaned" and should exit

                # determine process exit code
                if was_clean:
                    exit_code = 0
                else:
                    exit_code = 1

                # exit the whole worker process when the reactor has stopped
                reactor.addSystemEventTrigger('after', 'shutdown', os._exit, exit_code)

                # stop the reactor
                try:
                    reactor.stop()
                except ReactorNotRunning:
                    pass

    try:
        # create a WAMP application session factory
        #
        from autobahn.twisted.wamp import ApplicationSessionFactory
        from autobahn.wamp.types import ComponentConfig

        session_config = ComponentConfig(realm=options.realm, extra=options)
        session_factory = ApplicationSessionFactory(session_config)
        session_factory.session = WORKER_TYPE_TO_CLASS[options.type]

        # create a WAMP-over-WebSocket transport server factory
        #
        from autobahn.twisted.websocket import WampWebSocketServerFactory
        transport_factory = WampWebSocketServerFactory(session_factory, u'ws://localhost')
        transport_factory.protocol = WorkerServerProtocol
        transport_factory.setProtocolOptions(failByDrop=False)

        # create a protocol instance and wire up to stdio
        #
        from twisted.python.runtime import platform as _platform
        from twisted.internet import stdio
        proto = transport_factory.buildProtocol(None)
        if _platform.isWindows():
            stdio.StandardIO(proto)
        else:
            stdio.StandardIO(proto, stdout=3)

        # now start reactor loop
        #
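        # NOTE: this profiling branch is hard-coded off; change "if False"
        # to "if True" to capture a vmprof profile of the worker.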
        if False:
            log.info("vmprof enabled.")

            import os
            import vmprof

            PROFILE_FILE = 'vmprof_{}.dat'.format(os.getpid())

            outfd = os.open(PROFILE_FILE, os.O_RDWR | os.O_CREAT | os.O_TRUNC)
            vmprof.enable(outfd, period=0.01)

            log.info("Entering event loop...")
            reactor.run()

            vmprof.disable()
        else:
            log.debug("Entering event loop...")
            reactor.run()

    except Exception as e:
        log.info("Unhandled exception: {e}", e=e)
        if reactor.running:
            reactor.addSystemEventTrigger('after', 'shutdown', os._exit, 1)
            reactor.stop()
        else:
            sys.exit(1)
Example #30
    async def start(self, reactor, url=None, realm=None, profile=None):
        """
        Start main application. This will read the user configuration, potentially asking
        for a user password.

        :param reactor: Twisted reactor to use.
        :param url: Optionally override the network URL as defined in the profile.
        :param realm: Optionally override the network realm as defined in the profile.
        :param profile: User profile name to load.
        :return:
        """
        txaio.start_logging(level='info')

        self.log.info('ok, application starting for user profile "{profile}"',
                      profile=profile)

        if not os.path.isdir(self.DOTDIR):
            os.mkdir(self.DOTDIR)
            self.log.info('dotdir created: "{dotdir}"', dotdir=self.DOTDIR)

        self._config_path = config_path = os.path.join(self.DOTDIR,
                                                       self.DOTFILE)
        self._profile_name = profile or 'default'
        if not os.path.isfile(self._config_path):
            self.log.info('no config exist under "{config_path}"',
                          config_path=self._config_path)
            self._config = UserConfig(self._config_path)
            self._profile = None
        else:
            self._config = UserConfig(self._config_path)

            # FIXME: start modal dialog to get password from user

            def getpw():
                return '123secret'

            self._config.load(cb_get_password=getpw)
            if self._profile_name not in self._config.profiles:
                raise click.ClickException(
                    'no such profile "{}" in config "{}" with {} profiles'.
                    format(self._profile_name, config_path,
                           len(self._config.profiles)))
            else:
                self._profile = self._config.profiles[self._profile_name]
                self.log.info(
                    'user profile "{profile_name}" loaded from "{config_path}":\n\n',
                    config_path=self._config_path,
                    profile_name=self._profile_name)
                pprint(self._profile.marshal())
                print('\n\n')

        extra = {
            'ready': txaio.create_future(),
            'done': txaio.create_future(),
            'running': True,
            'config': self._config,
            'config_path': self._config_path,
            'profile': self._profile,
            'profile_name': self._profile_name,
        }
        # XBR network node used as a directory server and gateway to XBR smart contracts
        network_url = url or (self._profile.network_url
                              if self._profile and self._profile.network_url
                              else 'ws://localhost:8090/ws')

        # WAMP realm on network node, usually "xbrnetwork"
        network_realm = realm or (self._profile.network_realm if self._profile
                                  and self._profile.network_realm else
                                  'xbrnetwork')

        runner = ApplicationRunner(url=network_url,
                                   realm=network_realm,
                                   extra=extra,
                                   serializers=[CBORSerializer()])

        self.log.info(
            'ok, now connecting to "{network_url}", joining realm "{network_realm}" ..',
            network_url=network_url,
            network_realm=network_realm)
        await runner.run(ApplicationClient,
                         reactor=reactor,
                         auto_reconnect=True,
                         start_reactor=False)
        self.log.info('ok, application client connected!')

        session, details = await extra['ready']
        self.log.info('ok, application session joined: {details}',
                      details=details)

        def on_exit(_):
            self.log.info('exiting application ..')
            extra['running'] = False
            txaio.resolve(extra['done'], None)

        win = ApplicationWindow(reactor, session, self._config,
                                self._config_path, self._profile,
                                self._profile_name)
        win.connect("cancel", on_exit)
        win.connect("destroy", on_exit)
        win.show_all()

        await win.start()

        ticks = 0
        while extra['running']:
            ticks += 1
            self.log.info(
                'ok, application main task still running at tick {ticks}',
                ticks=ticks)
            await sleep(5)

        self.log.info('ok, application main task ended!')
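
# --- illustrative sketch (not part of the example above) --------------------
# A hedged example of driving an async "start(reactor, ...)" entry point like
# the method above from a plain Twisted script: task.react() expects a
# callable returning a Deferred, and ensureDeferred() wraps the coroutine.
# "Application" and the 'default' profile name are hypothetical here.
from twisted.internet.defer import ensureDeferred
from twisted.internet.task import react


def main(reactor):
    app = Application()  # hypothetical class exposing the start() coroutine above
    return ensureDeferred(app.start(reactor, profile='default'))


if __name__ == '__main__':
    react(main)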
Ejemplo n.º 31
0
    parser = argparse.ArgumentParser()
    parser.add_argument('-d',
                        '--debug',
                        action='store_true',
                        help='Enable debug output.')
    parser.add_argument('-p',
                        '--profile',
                        dest='profile',
                        type=str,
                        help='The name of the profile to use')
    args = parser.parse_args()
    setproctitle('xbr_auth_{}'.format(args.profile))
    #
    #   We're supporting debug or info .. for a quiet life, switch 'info' for 'warning'
    #
    txaio.start_logging(level='debug' if args.debug else 'info')
    #
    #   This is just a wrapper for our configuration, the conf object is passed through
    #   to our service in the 'extra' parameter.
    #
    auth_conf = MicroServiceConfig(args.profile, 'auth')
    logger_conf = MicroServiceConfig(args.profile, 'logger')
    #
    #   This is our connection to the logging realm for infrastructure traffic that
    #   should be partitioned off from application traffic.

    logger = ApplicationRunner(
        url=logger_conf.server_url,
        realm=logger_conf.realm,
        ssl=ssl.DefaultOpenSSLContextFactory(
            privateKeyFileName=CLIENT_KEY.format(logger_conf.name),
Ejemplo n.º 32
0
def main():
    # Start logging
    txaio.start_logging(level=os.environ.get("LOGLEVEL", "info"))

    parser = make_parser()

    # No longer used, but we don't want these arguments to break the agent if they are passed
    parser.add_argument('--fake-data', help=argparse.SUPPRESS)
    parser.add_argument('--num-channels', help=argparse.SUPPRESS)

    # Interpret options in the context of site_config.
    args = site_config.parse_args(agent_class='Lakeshore240Agent',
                                  parser=parser)

    if args.fake_data is not None:
        warnings.warn(
            "WARNING: the --fake-data parameter is deprecated, please "
            "remove from your site-config file", DeprecationWarning)

    if args.num_channels is not None:
        warnings.warn(
            "WARNING: the --num-channels parameter is deprecated, please "
            "remove from your site-config file", DeprecationWarning)

    # Automatically acquire data if requested (default)
    init_params = False
    if args.mode == 'init':
        init_params = {'auto_acquire': False}
    elif args.mode == 'acq':
        init_params = {'auto_acquire': True}

    device_port = None
    if args.port is not None:
        device_port = args.port
    else:  # Tries to find correct USB port automatically

        # This exists if udev rules are setup properly for the 240s
        if os.path.exists('/dev/{}'.format(args.serial_number)):
            device_port = "/dev/{}".format(args.serial_number)

        elif os.path.exists('/dev/serial/by-id'):
            ports = os.listdir('/dev/serial/by-id')
            for port in ports:
                if args.serial_number in port:
                    device_port = "/dev/serial/by-id/{}".format(port)
                    print("Found port {}".format(device_port))
                    break

    if device_port is None:
        print("Could not find device port for {}".format(args.serial_number))
        return

    agent, runner = ocs_agent.init_site_agent(args)

    kwargs = {'port': device_port}

    if args.sampling_frequency is not None:
        kwargs['f_sample'] = float(args.sampling_frequency)

    therm = LS240_Agent(agent, **kwargs)

    agent.register_task('init_lakeshore',
                        therm.init_lakeshore,
                        startup=init_params)
    agent.register_task('set_values', therm.set_values)
    agent.register_task('upload_cal_curve', therm.upload_cal_curve)
    agent.register_process('acq', therm.acq, therm._stop_acq)

    runner.run(agent, auto_reconnect=True)
Ejemplo n.º 33
0
                              collection at the end of a scan.")
    pgroup.add_argument('--auto-acquire',
                        type=bool,
                        default=True,
                        help='Automatically start data acquisition on startup')

    return parser


if __name__ == '__main__':
    # For logging
    txaio.use_twisted()
    LOG = txaio.make_logger()

    # Start logging
    txaio.start_logging(level=os.environ.get("LOGLEVEL", "info"))

    # Get the default ocs argument parser.
    site_parser = site_config.add_arguments()

    parser = make_parser(site_parser)

    # Parse comand line.
    args = parser.parse_args()

    # Automatically acquire data if requested (default)
    init_params = False
    if args.auto_acquire:
        init_params = {'auto_acquire': True}

    # Interpret options in the context of site_config.
Ejemplo n.º 34
0
    def run(self, make, start_loop=True):
        """
        Run the application component.

        :param make: A factory that produces instances of :class:`autobahn.asyncio.wamp.ApplicationSession`
           when called with an instance of :class:`autobahn.wamp.types.ComponentConfig`.
        :type make: callable

        :param start_loop: When ``True`` (the default) this method
            starts a new asyncio loop.
        :type start_loop: bool
        """
        if callable(make):

            def create():
                cfg = ComponentConfig(self.realm, self.extra)
                try:
                    session = make(cfg)
                except Exception as e:
                    self.log.error(
                        'ApplicationSession could not be instantiated: {}'.
                        format(e))
                    loop = asyncio.get_event_loop()
                    if loop.is_running():
                        loop.stop()
                    raise
                else:
                    return session
        else:
            create = make

        if self.url.startswith(u'rs'):
            # try to parse RawSocket URL ..
            isSecure, host, port = parse_rs_url(self.url)

            # use the first configured serializer if any (which means, auto-choose "best")
            serializer = self.serializers[0] if self.serializers else None

            # create a WAMP-over-RawSocket transport client factory
            transport_factory = WampRawSocketClientFactory(
                create, serializer=serializer)

        else:
            # try to parse WebSocket URL ..
            isSecure, host, port, resource, path, params = parse_ws_url(
                self.url)

            # create a WAMP-over-WebSocket transport client factory
            transport_factory = WampWebSocketClientFactory(
                create,
                url=self.url,
                serializers=self.serializers,
                proxy=self.proxy,
                headers=self.headers)

            # client WebSocket settings - similar to:
            # - http://crossbar.io/docs/WebSocket-Compression/#production-settings
            # - http://crossbar.io/docs/WebSocket-Options/#production-settings

            # The permessage-deflate extensions offered to the server ..
            offers = [PerMessageDeflateOffer()]

            # Function to accept permessage-deflate responses from the server ..
            def accept(response):
                if isinstance(response, PerMessageDeflateResponse):
                    return PerMessageDeflateResponseAccept(response)

            # set WebSocket options for all client connections
            transport_factory.setProtocolOptions(
                maxFramePayloadSize=1048576,
                maxMessagePayloadSize=1048576,
                autoFragmentSize=65536,
                failByDrop=False,
                openHandshakeTimeout=2.5,
                closeHandshakeTimeout=1.,
                tcpNoDelay=True,
                autoPingInterval=10.,
                autoPingTimeout=5.,
                autoPingSize=4,
                perMessageCompressionOffers=offers,
                perMessageCompressionAccept=accept)
        # SSL context for client connection
        if self.ssl is None:
            ssl = isSecure
        else:
            if self.ssl and not isSecure:
                raise RuntimeError(
                    'ssl argument value passed to %s conflicts with the "ws:" '
                    'prefix of the url argument. Did you mean to use "wss:"?' %
                    self.__class__.__name__)
            ssl = self.ssl

        # start the client connection
        loop = asyncio.get_event_loop()
        txaio.use_asyncio()
        txaio.config.loop = loop
        coro = loop.create_connection(transport_factory, host, port, ssl=ssl)
        (transport, protocol) = loop.run_until_complete(coro)

        # start an asyncio loop
        if not start_loop:
            return protocol
        else:
            # start logging
            txaio.start_logging(level='info')

            try:
                loop.add_signal_handler(signal.SIGTERM, loop.stop)
            except NotImplementedError:
                # signals are not available on Windows
                pass

            # 4) now enter the asyncio event loop
            try:
                loop.run_forever()
            except KeyboardInterrupt:
                # wait until we send Goodbye if user hit ctrl-c
                # (done outside this except so SIGTERM gets the same handling)
                pass

            # give Goodbye message a chance to go through, if we still
            # have an active session
            if protocol._session:
                loop.run_until_complete(protocol._session.leave())

            loop.close()
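
# --- illustrative sketch (not part of the example above) --------------------
# With the run() variant shown above, passing start_loop=False makes run()
# establish the connection and return the protocol, leaving loop management
# to the caller. "MySession", the URL and the realm below are assumptions.
import asyncio

from autobahn.asyncio.wamp import ApplicationSession, ApplicationRunner


class MySession(ApplicationSession):
    async def onJoin(self, details):
        self.log.info('session joined: {details}', details=details)


if __name__ == '__main__':
    runner = ApplicationRunner('ws://localhost:8080/ws', 'realm1')
    protocol = runner.run(MySession, start_loop=False)

    loop = asyncio.get_event_loop()
    try:
        loop.run_forever()
    except KeyboardInterrupt:
        pass
    finally:
        loop.close()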
Ejemplo n.º 35
0
def _startlog(options, reactor):
    """
    Start the logging in a way that all the subcommands can use it.
    """
    loglevel = getattr(options, "loglevel", "info")
    logformat = getattr(options, "logformat", "none")
    colour = getattr(options, "colour", "auto")

    set_global_log_level(loglevel)

    # The log observers (things that print to stderr, file, etc)
    observers = []

    if getattr(options, "logtofile", False):
        # We want to log to a file
        if not options.logdir:
            logdir = options.cbdir
        else:
            logdir = options.logdir

        logfile = os.path.join(logdir, "node.log")

        if loglevel in ["error", "warn", "info"]:
            show_source = False
        else:
            show_source = True

        observers.append(make_logfile_observer(logfile, show_source))
    else:
        # We want to log to stdout/stderr.

        if colour == "auto":
            if sys.__stdout__.isatty():
                colour = True
            else:
                colour = False
        elif colour == "true":
            colour = True
        else:
            colour = False

        if loglevel == "none":
            # Do no logging!
            pass
        elif loglevel in ["error", "warn", "info"]:
            # Print info to stdout, warn+ to stderr
            observers.append(make_stdout_observer(show_source=False,
                                                  format=logformat,
                                                  colour=colour))
            observers.append(make_stderr_observer(show_source=False,
                                                  format=logformat,
                                                  colour=colour))
        elif loglevel == "debug":
            # Print debug+info to stdout, warn+ to stderr, with the class
            # source
            observers.append(make_stdout_observer(show_source=True,
                                                  levels=(LogLevel.info,
                                                          LogLevel.debug),
                                                  format=logformat,
                                                  colour=colour))
            observers.append(make_stderr_observer(show_source=True,
                                                  format=logformat,
                                                  colour=colour))
        elif loglevel == "trace":
            # Print trace+, with the class source
            observers.append(make_stdout_observer(show_source=True,
                                                  levels=(LogLevel.info,
                                                          LogLevel.debug),
                                                  format=logformat,
                                                  trace=True,
                                                  colour=colour))
            observers.append(make_stderr_observer(show_source=True,
                                                  format=logformat,
                                                  colour=colour))
        else:
            assert False, "Shouldn't ever get here."

    for observer in observers:
        globalLogPublisher.addObserver(observer)

        # Make sure that it goes away
        reactor.addSystemEventTrigger('after', 'shutdown',
                                      globalLogPublisher.removeObserver, observer)

    # Actually start the logger.
    start_logging(None, loglevel)


def run():
    parser = RunnerArgumentParser()
    parser.add_argument(
        '-c',
        '--component',
        action='append',
        dest='components',
        required=True,
        help=
        'Fully-qualified path to a Component class. Can be used multiple times'
    )
    parser.add_argument(
        '-n',
        '--necromancy',
        action='store_true',
        default=False,
        help=
        'Enable necromancy. Attempts to revive Components whose connection to the WAMP router has failed'
    )
    parser.add_argument(
        '--necromancy-sleep',
        default=10,
        type=int,
        help='Configure sleep-time between transport death checks')
    args = parser.parse_args()
    extras = {}
    serializers = None
    if args.extra_file is not None:
        extras = json.load(open(args.extra_file))
    if args.serializers is not None:
        serializers = [
            get_class(serializer) for serializer in args.serializers
        ]
    components__runners = [
        (get_class(component),
         ApplicationRunner(extra=extras.get(component),
                           serializers=serializers,
                           **{
                               key: value
                               for key, value in vars(args).items()
                               if key not in ('components', 'log_level',
                                              'extra_file', 'serializers',
                                              'necromancy', 'necromancy_sleep')
                           })) for component in args.components
    ]
    loop = asyncio.get_event_loop()
    txaio.use_asyncio()
    txaio.config.loop = loop
    coros = [
        runner.run(component, start_loop=False, log_level=args.log_level)
        for component, runner in components__runners
    ]
    results = loop.run_until_complete(asyncio.gather(*coros))
    txaio.start_logging(level=args.log_level)
    logger = logging.getLogger('autobahn-python-runners')

    if args.necromancy:
        logging.info('Necromancy enabled')

        @asyncio.coroutine
        def necromancy_check():
            nonlocal results, components__runners
            while True:
                yield from asyncio.sleep(args.necromancy_sleep)
                logger.info('Checking for dead transports...')
                data = enumerate(zip(components__runners, results))
                for index, ((component, runner), (transport,
                                                  protocol)) in data:
                    if transport.is_closing() and not protocol._session:
                        logger.info(
                            'Dead transport detected. Attempting to raise the dead...'
                        )
                        results[index] = yield from runner.run(
                            component,
                            start_loop=False,
                            log_level=args.log_level)

        asyncio.ensure_future(necromancy_check(), loop=loop)

    try:
        loop.add_signal_handler(signal.SIGTERM, loop.stop)
    except NotImplementedError:
        # signals are not available on Windows
        pass

    try:
        loop.run_forever()
    except KeyboardInterrupt:
        # wait until we send Goodbye if user hit ctrl-c
        # (done outside this except so SIGTERM gets the same handling)
        pass

    # send a WAMP Goodbye for any session that is still attached
    for _transport, protocol in results:
        if protocol._session:
            loop.run_until_complete(protocol._session.leave())

    loop.close()
Ejemplo n.º 37
0
def run():
    """
    Entry point into (native) worker processes. This wires up stuff such that
    a worker instance is talking WAMP-over-stdio to the node controller.
    """
    import os
    import sys
    import platform
    import signal

    # Ignore SIGINT so we get consistent behavior on control-C versus
    # sending SIGINT to the controller process. When the controller is
    # shutting down, it sends TERM to all its children but ctrl-C
    # handling will send a SIGINT to all the processes in the group
    # (so then the controller sends a TERM but the child already or
    # will very shortly get a SIGINT as well). Twisted installs signal
    # handlers, but not for SIGINT if there's already a custom one
    # present.

    def ignore(sig, frame):
        log.debug("Ignoring SIGINT in worker.")
    signal.signal(signal.SIGINT, ignore)

    # create the top-level parser
    #
    import argparse
    parser = argparse.ArgumentParser()

    parser.add_argument('--reactor',
                        default=None,
                        choices=['select', 'poll', 'epoll', 'kqueue', 'iocp'],
                        help='Explicit Twisted reactor selection (optional).')

    parser.add_argument('--loglevel',
                        default="info",
                        choices=['none', 'error', 'warn', 'info', 'debug', 'trace'],
                        help='Initial log level.')

    parser.add_argument('-c',
                        '--cbdir',
                        type=six.text_type,
                        help="Crossbar.io node directory (required).")

    parser.add_argument('-r',
                        '--realm',
                        type=six.text_type,
                        help='Crossbar.io node (management) realm (required).')

    parser.add_argument('-t',
                        '--type',
                        choices=['router', 'container', 'websocket-testee'],
                        help='Worker type (required).')

    parser.add_argument('-w',
                        '--worker',
                        type=six.text_type,
                        help='Crossbar.io worker ID (required).')

    parser.add_argument('--title',
                        type=six.text_type,
                        default=None,
                        help='Worker process title to set (optional).')

    options = parser.parse_args()

    # make sure logging to something else than stdio is setup _first_
    #
    from crossbar._logging import make_JSON_observer, cb_logging_aware
    from txaio import make_logger, start_logging
    from twisted.logger import globalLogPublisher

    log = make_logger()

    # Print a magic phrase that tells the capturing logger that it supports
    # Crossbar's rich logging
    print(cb_logging_aware, file=sys.__stderr__)
    sys.__stderr__.flush()

    flo = make_JSON_observer(sys.__stderr__)
    globalLogPublisher.addObserver(flo)
    start_logging(None, options.loglevel)

    # we use an Autobahn utility to import the "best" available Twisted reactor
    #
    from autobahn.twisted.choosereactor import install_reactor
    reactor = install_reactor(options.reactor)

    from twisted.python.reflect import qual
    log.info("Worker process starting ({python}-{reactor}) ..",
             python=platform.python_implementation(),
             reactor=qual(reactor.__class__).split('.')[-1])

    # set process title if requested to
    #
    try:
        import setproctitle
    except ImportError:
        log.debug("Could not set worker process title (setproctitle not installed)")
    else:
        if options.title:
            setproctitle.setproctitle(options.title)
        else:
            WORKER_TYPE_TO_TITLE = {
                'router': 'crossbar-worker [router]',
                'container': 'crossbar-worker [container]',
                'websocket-testee': 'crossbar-worker [websocket-testee]'
            }
            setproctitle.setproctitle(WORKER_TYPE_TO_TITLE[options.type].strip())

    # node directory
    #
    options.cbdir = os.path.abspath(options.cbdir)
    os.chdir(options.cbdir)
    # log.msg("Starting from node directory {}".format(options.cbdir))

    from crossbar.worker.router import RouterWorkerSession
    from crossbar.worker.container import ContainerWorkerSession
    from crossbar.worker.testee import WebSocketTesteeWorkerSession

    WORKER_TYPE_TO_CLASS = {
        'router': RouterWorkerSession,
        'container': ContainerWorkerSession,
        'websocket-testee': WebSocketTesteeWorkerSession
    }

    from twisted.internet.error import ConnectionDone, ReactorNotRunning
    from autobahn.twisted.websocket import WampWebSocketServerProtocol

    class WorkerServerProtocol(WampWebSocketServerProtocol):

        def connectionLost(self, reason):
            # the behavior here differs slightly whether we're shutting down orderly
            # or shutting down because of "issues"
            if isinstance(reason.value, ConnectionDone):
                was_clean = True
            else:
                was_clean = False

            try:
                # this log message is unlikely to reach the controller (unless
                # only stdin/stdout pipes were lost, but not stderr)
                if was_clean:
                    log.info("Connection to node controller closed cleanly")
                else:
                    log.warn("Connection to node controller lost: {reason}", reason=reason)

                # give the WAMP transport a chance to do its thing
                WampWebSocketServerProtocol.connectionLost(self, reason)
            except:
                # we're in the process of shutting down .. so ignore ..
                pass
            finally:
                # after the connection to the node controller is gone,
                # the worker is "orphaned" and should exit

                # determine process exit code
                if was_clean:
                    exit_code = 0
                else:
                    exit_code = 1

                # exit the whole worker process when the reactor has stopped
                reactor.addSystemEventTrigger('after', 'shutdown', os._exit, exit_code)

                # stop the reactor
                try:
                    reactor.stop()
                except ReactorNotRunning:
                    pass

    try:
        # create a WAMP application session factory
        #
        from autobahn.twisted.wamp import ApplicationSessionFactory
        from autobahn.wamp.types import ComponentConfig

        session_config = ComponentConfig(realm=options.realm, extra=options)
        session_factory = ApplicationSessionFactory(session_config)
        session_factory.session = WORKER_TYPE_TO_CLASS[options.type]

        # create a WAMP-over-WebSocket transport server factory
        #
        from autobahn.twisted.websocket import WampWebSocketServerFactory
        transport_factory = WampWebSocketServerFactory(session_factory, u'ws://localhost')
        transport_factory.protocol = WorkerServerProtocol
        transport_factory.setProtocolOptions(failByDrop=False)

        # create a protocol instance and wire up to stdio
        #
        from twisted.python.runtime import platform as _platform
        from twisted.internet import stdio
        proto = transport_factory.buildProtocol(None)
        if _platform.isWindows():
            stdio.StandardIO(proto)
        else:
            stdio.StandardIO(proto, stdout=3)

        # now start reactor loop
        #
        if False:
            log.info("vmprof enabled.")

            import os
            import vmprof

            PROFILE_FILE = 'vmprof_{}.dat'.format(os.getpid())

            outfd = os.open(PROFILE_FILE, os.O_RDWR | os.O_CREAT | os.O_TRUNC)
            vmprof.enable(outfd, period=0.01)

            log.info("Entering event loop...")
            reactor.run()

            vmprof.disable()
        else:
            log.debug("Entering event loop...")
            reactor.run()

    except Exception as e:
        log.info("Unhandled exception: {e}", e=e)
        if reactor.running:
            reactor.addSystemEventTrigger('after', 'shutdown', os._exit, 1)
            reactor.stop()
        else:
            sys.exit(1)
Ejemplo n.º 38
0
    parser = argparse.ArgumentParser(
        description='Autobahn Testee Client (asyncio)')
    parser.add_argument('--url',
                        dest='url',
                        type=str,
                        default='ws://127.0.0.1:9001',
                        help='The WebSocket fuzzing server URL.')
    parser.add_argument('--loglevel',
                        dest='loglevel',
                        type=str,
                        default='info',
                        help='Log level, eg "info" or "debug".')

    options = parser.parse_args()

    txaio.start_logging(level=options.loglevel)

    factory = TesteeClientFactory(options.url, autobahn.asyncio.__ident__)

    _, host, port, _, _, _ = parse_url(options.url)

    loop = asyncio.get_event_loop()

    factory.resource = '/getCaseCount'
    factory.endCaseId = None
    factory.currentCaseId = 0
    factory.updateReports = True

    while True:

        factory._done = txaio.create_future()
Ejemplo n.º 39
0
import asyncio
import functools
import txaio
import sys
import docopt
import pkg_resources

import pygmalion.database
import pygmalion.models
import pygmalion.views
import pygmalion.plugins
import pygmalion.configure
from pygmalion.configure import config
import pygmalion.serializer
# assumption: the "serializer" patched below is autobahn's WAMP serializer module
from autobahn.wamp import serializer

log = txaio.make_logger()
txaio.start_logging()
running = True

serializer.JsonObjectSerializer.serialize = pygmalion.serializer.serialize
serializer.JsonObjectSerializer.unserialize = pygmalion.serializer.unserialize


def server():
    @asyncio.coroutine
    def exit():
        return loop.stop()

    def nicely_exit(signal):
        log.info("Shutting down due to {signal}", signal=signal)
        global running
        running = False
Ejemplo n.º 40
0
def _run_command_exec_worker(options, reactor=None, personality=None):
    """
    Entry point into (native) worker processes. This wires up stuff such that
    a worker instance is talking WAMP-over-stdio to the node controller.
    """
    import os
    import sys
    import platform
    import signal

    # https://coverage.readthedocs.io/en/coverage-4.4.2/subprocess.html#measuring-sub-processes
    MEASURING_COVERAGE = False
    if 'COVERAGE_PROCESS_START' in os.environ:
        try:
            import coverage
        except ImportError:
            pass
        else:
            # The following will read the environment variable COVERAGE_PROCESS_START,
            # and that should be set to the .coveragerc file:
            #
            #   export COVERAGE_PROCESS_START=${PWD}/.coveragerc
            #
            coverage.process_startup()
            MEASURING_COVERAGE = True

    # we use an Autobahn utility to import the "best" available Twisted reactor
    from autobahn.twisted.choosereactor import install_reactor
    reactor = install_reactor(options.reactor)

    # make sure logging to something else than stdio is setup _first_
    from crossbar._logging import make_JSON_observer, cb_logging_aware
    from txaio import make_logger, start_logging
    from twisted.logger import globalLogPublisher
    from twisted.python.reflect import qual

    log = make_logger()

    # Print a magic phrase that tells the capturing logger that it supports
    # Crossbar's rich logging
    print(cb_logging_aware, file=sys.__stderr__)
    sys.__stderr__.flush()

    flo = make_JSON_observer(sys.__stderr__)
    globalLogPublisher.addObserver(flo)

    # Ignore SIGINT so we get consistent behavior on control-C versus
    # sending SIGINT to the controller process. When the controller is
    # shutting down, it sends TERM to all its children but ctrl-C
    # handling will send a SIGINT to all the processes in the group
    # (so then the controller sends a TERM but the child already or
    # will very shortly get a SIGINT as well). Twisted installs signal
    # handlers, but not for SIGINT if there's already a custom one
    # present.
    def ignore(sig, frame):
        log.debug("Ignoring SIGINT in worker.")

    signal.signal(signal.SIGINT, ignore)

    # actually begin logging
    start_logging(None, options.loglevel)

    # get personality klass, eg "crossbar.personality.Personality"
    l = options.personality.split('.')
    personality_module, personality_klass = '.'.join(l[:-1]), l[-1]

    # now load the personality module and class
    _mod = importlib.import_module(personality_module)
    Personality = getattr(_mod, personality_klass)

    # get worker klass, eg "crossbar.worker.container.ContainerController"
    l = options.klass.split('.')
    worker_module, worker_klass = '.'.join(l[:-1]), l[-1]

    # now load the worker module and class
    _mod = importlib.import_module(worker_module)
    klass = getattr(_mod, worker_klass)

    log.info(
        'Starting worker "{worker_id}" for node "{node_id}" on realm "{realm}" with personality "{personality}" {worker_class}',
        worker_id=options.worker,
        node_id=options.node,
        realm=options.realm,
        personality=Personality.NAME,
        worker_class=hltype(klass),
    )
    log.info(
        'Running as PID {pid} on {python}-{reactor}',
        pid=os.getpid(),
        python=platform.python_implementation(),
        reactor=qual(reactor.__class__).split('.')[-1],
    )
    if MEASURING_COVERAGE:
        log.info(hl(
            'Code coverage measurements enabled (coverage={coverage_version}).',
            color='green',
            bold=True),
                 coverage_version=coverage.__version__)

    # set process title if requested to
    #
    try:
        import setproctitle
    except ImportError:
        log.debug(
            "Could not set worker process title (setproctitle not installed)")
    else:
        if options.title:
            setproctitle.setproctitle(options.title)
        else:
            setproctitle.setproctitle('crossbar-worker [{}]'.format(
                options.klass))

    # node directory
    #
    options.cbdir = os.path.abspath(options.cbdir)
    os.chdir(options.cbdir)
    # log.msg("Starting from node directory {}".format(options.cbdir))

    from twisted.internet.error import ConnectionDone, ReactorNotRunning
    from autobahn.twisted.websocket import WampWebSocketServerProtocol

    class WorkerServerProtocol(WampWebSocketServerProtocol):
        def connectionLost(self, reason):
            # the behavior here differs slightly whether we're shutting down orderly
            # or shutting down because of "issues"
            if isinstance(reason.value, ConnectionDone):
                was_clean = True
            else:
                was_clean = False

            try:
                # this log message is unlikely to reach the controller (unless
                # only stdin/stdout pipes were lost, but not stderr)
                if was_clean:
                    log.info("Connection to node controller closed cleanly")
                else:
                    log.warn("Connection to node controller lost: {reason}",
                             reason=reason)

                # give the WAMP transport a chance to do its thing
                WampWebSocketServerProtocol.connectionLost(self, reason)
            except:
                # we're in the process of shutting down .. so ignore ..
                pass
            finally:
                # after the connection to the node controller is gone,
                # the worker is "orphaned" and should exit

                # determine process exit code
                if was_clean:
                    exit_code = 0
                else:
                    exit_code = 1

                # exit the whole worker process when the reactor has stopped
                reactor.addSystemEventTrigger('after', 'shutdown', os._exit,
                                              exit_code)

                # stop the reactor
                try:
                    reactor.stop()
                except ReactorNotRunning:
                    pass

    # if vmprof global profiling is enabled via command line option, this will carry
    # the file where vmprof writes its profile data
    if _HAS_VMPROF:
        _vm_prof = {
            # need to put this into a dict, since FDs are ints, and python closures can't
            # write to this otherwise
            'outfd': None
        }

    # https://twistedmatrix.com/documents/current/api/twisted.internet.interfaces.IReactorCore.html
    # Each "system event" in Twisted, such as 'startup', 'shutdown', and 'persist', has 3 phases:
    # 'before', 'during', and 'after' (in that order, of course). These events will be fired
    # internally by the Reactor.

    def before_reactor_started():
        term_print('CROSSBAR[{}]:REACTOR_STARTING'.format(options.worker))

    def after_reactor_started():
        term_print('CROSSBAR[{}]:REACTOR_STARTED'.format(options.worker))

        if _HAS_VMPROF and options.vmprof:
            outfn = os.path.join(
                options.cbdir,
                '.vmprof-worker-{}-{}.dat'.format(options.worker, os.getpid()))
            _vm_prof['outfd'] = os.open(outfn,
                                        os.O_RDWR | os.O_CREAT | os.O_TRUNC)
            vmprof.enable(_vm_prof['outfd'], period=0.01)
            term_print('CROSSBAR[{}]:VMPROF_ENABLED:{}'.format(
                options.worker, outfn))

    def before_reactor_stopped():
        term_print('CROSSBAR[{}]:REACTOR_STOPPING'.format(options.worker))

        if _HAS_VMPROF and options.vmprof and _vm_prof['outfd']:
            vmprof.disable()
            term_print('CROSSBAR[{}]:VMPROF_DISABLED'.format(options.worker))

    def after_reactor_stopped():
        # FIXME: we are indeed reaching this line, however,
        # the log output does not work (it also doesn't work using
        # plain old print). Dunno why.

        # my theory about this issue is: by the time this line
        # is reached, Twisted has already closed the stdout/stderr
        # pipes. hence we do an evil trick: we directly write to
        # the process' controlling terminal
        # https://unix.stackexchange.com/a/91716/52500
        term_print('CROSSBAR[{}]:REACTOR_STOPPED'.format(options.worker))

    reactor.addSystemEventTrigger('before', 'startup', before_reactor_started)
    reactor.addSystemEventTrigger('after', 'startup', after_reactor_started)
    reactor.addSystemEventTrigger('before', 'shutdown', before_reactor_stopped)
    reactor.addSystemEventTrigger('after', 'shutdown', after_reactor_stopped)

    try:
        # define a WAMP application session factory
        #
        from autobahn.wamp.types import ComponentConfig

        def make_session():
            session_config = ComponentConfig(realm=options.realm,
                                             extra=options)
            session = klass(config=session_config,
                            reactor=reactor,
                            personality=Personality)
            return session

        # create a WAMP-over-WebSocket transport server factory
        #
        from autobahn.twisted.websocket import WampWebSocketServerFactory
        transport_factory = WampWebSocketServerFactory(make_session,
                                                       u'ws://localhost')
        transport_factory.protocol = WorkerServerProtocol
        transport_factory.setProtocolOptions(failByDrop=False)

        # create a protocol instance and wire up to stdio
        #
        from twisted.python.runtime import platform as _platform
        from twisted.internet import stdio
        proto = transport_factory.buildProtocol(None)
        if _platform.isWindows():
            stdio.StandardIO(proto)
        else:
            stdio.StandardIO(proto, stdout=3)

        # now start reactor loop
        #
        log.info(hl('Entering event reactor ...', color='green', bold=True))
        reactor.run()

    except Exception as e:
        log.info("Unhandled exception: {e}", e=e)
        if reactor.running:
            reactor.addSystemEventTrigger('after', 'shutdown', os._exit, 1)
            reactor.stop()
        else:
            sys.exit(1)
Ejemplo n.º 41
0
def server():
    @asyncio.coroutine
    def exit():
        return loop.stop()

    def nicely_exit(signal):
        log.info("Shutting down due to {signal}", signal=signal)
        global running
        running = False
        for task in asyncio.Task.all_tasks():
            task.cancel()
        asyncio.ensure_future(exit())

    # Kill the loop on Ctrl+C
    loop = asyncio.get_event_loop()
    try:
        loop.add_signal_handler(signal.SIGINT,
                                functools.partial(nicely_exit, 'SIGINT'))
        loop.add_signal_handler(signal.SIGTERM,
                                functools.partial(nicely_exit, 'SIGTERM'))
    except NotImplementedError:
        pass  # Python on windows does not implement signal handlers under asyncio

    version_num = version = pkg_resources.require("pygmalion")[0].version
    arguments = docopt.docopt(__doc__.format(sys.argv[0]),
                              version=str(version_num))
    arguments = {k.lstrip('--'): v for k, v in arguments.items()}

    verbose = arguments.get("verbose", 0)
    quiet = arguments.get("quiet", 0)
    level = "error"
    if verbose == 1:
        level = "warn"
    elif verbose == 2:
        level = "info"
    elif verbose == 3:
        level = "debug"
    elif verbose == 4:
        level = "trace"
    elif quiet == 1:
        level = "critical"
    txaio.start_logging(level=level)

    pygmalion.configure.get_config(arguments.get("config", ""))

    while running:
        try:
            task = pygmalion.database.initialize(config=config, callback=start)
            loop.run_until_complete(task)
        except concurrent.futures._base.CancelledError as e:
            if not running:
                pass
            else:
                print("Unexpected cancelled future: {}".format(e))
        except RuntimeError as e:
            if not running:
                pass
            else:
                # Usually means the connection to the crossbar server was lost.
                print("Runtime Error: {}".format(e))

    loop.close()
Ejemplo n.º 42
0
def test_invalid_level(framework):
    try:
        txaio.start_logging(level='foo')
        assert False, "should get exception"
    except RuntimeError as e:
        assert 'Invalid log level' in str(e)
Ejemplo n.º 43
0
    def run_context(self, ctx):

        if False:
            click.echo('Logging started ..')
            txaio.start_logging(level='debug', out=sys.stdout)

        # cfg contains the command lines options and arguments that
        # click collected for us
        cfg = ctx.obj

        cmd = ctx.command.name
        if cmd not in [u'auth', u'shell']:
            raise click.ClickException(
                '"{}" command can only be run in shell'.format(cmd))

        click.echo('Crossbar.io Shell: {}'.format(
            style_ok('v{}'.format(__version__))))

        # load user profile and key for given profile name
        key, profile = self._load_profile(profile=cfg.profile)

        # set the Fabric URL to connect to from the profile or default
        url = profile.url or u'wss://fabric.crossbario.com'

        # users always authenticate with the user_id from the key, which
        # is filled from the email the user provided
        authid = key.user_id

        # the realm can be set from command line, env var, the profile
        # or can be None, which means the user will be joined to the global
        # Crossbar.io Fabric users realm (u'com.crossbario.fabric')
        realm = cfg.realm or profile.realm or None

        # the authrole can be set from command line, env var, the profile
        # or can be None, in which case the role is chosen automatically
        # from the list of roles the user is authorized for
        authrole = cfg.role or profile.role or None

        # this will be fired when the ShellClient below actually has joined
        # the respective realm on Crossbar.io Fabric (either the global users
        # realm, or a management realm the user has a role on)
        ready = asyncio.Future()  # type: ignore

        extra = {
            # these are forwarded on the actual client connection
            u'authid': authid,
            u'authrole': authrole,

            # these are native Python objects and are only used client-side
            u'key': key.key,
            u'done': ready
        }

        # for the "auth" command, forward additional command line options
        if ctx.command.name == u'auth':
            # user provides authentication code to verify
            extra[u'activation_code'] = cfg.code

            # user requests sending of a new authentication code (while an old one is still pending)
            extra[u'request_new_activation_code'] = cfg.new_code

        # this is the WAMP ApplicationSession that connects the CLI to Crossbar.io Fabric
        self.session = client.ShellClient(ComponentConfig(realm, extra))

        loop = asyncio.get_event_loop()
        runner = ApplicationRunner(url, realm)

        # this might fail eg when the transport connection cannot be established
        try:
            click.echo('Connecting to {} ..'.format(url))
            _res = runner.run(self.session, start_loop=False)
        except socket.gaierror as e:
            click.echo(
                style_error('Could not connect to {}: {}'.format(url, e)))
            loop.close()
            sys.exit(1)

        exit_code = 0
        try:
            # "connected" will complete when the WAMP session to Fabric
            # has been established and is ready
            click.echo('Entering event loop ..')
            transport, protocol = loop.run_until_complete(_res)
            # click.echo('transport, protocol: {} {}'.format(transport, protocol))
            # loop.run_forever()
            session_details = loop.run_until_complete(ready)
            # click.echo('SessionDetails: {}'.format(session_details))

        except ApplicationError as e:

            # some ApplicationErrors are actually signaling progress
            # in the authentication flow, some are real errors

            if e.error.startswith(u'fabric.auth-failed.'):
                error = e.error.split(u'.')[2]
                message = e.args[0]

                if error == u'new-user-auth-code-sent':

                    click.echo('\nThanks for registering! {}'.format(message))
                    click.echo(
                        style_ok(
                            'Please check your inbox and run "cbsh auth --code <THE CODE YOU GOT BY EMAIL>".\n'
                        ))

                elif error == u'registered-user-auth-code-sent':

                    click.echo('\nWelcome back! {}'.format(message))
                    click.echo(
                        style_ok(
                            'Please check your inbox and run "cbsh auth --code <THE CODE YOU GOT BY EMAIL>".\n'
                        ))

                elif error == u'pending-activation':

                    click.echo()
                    click.echo(style_ok(message))
                    click.echo()
                    click.echo(
                        'Tip: to activate, run "cbsh auth --code <THE CODE YOU GOT BY EMAIL>"'
                    )
                    click.echo(
                        'Tip: you can request sending a new code with "cbsh auth --new-code"'
                    )
                    click.echo()

                elif error == u'no-pending-activation':

                    exit_code = 1
                    click.echo()
                    click.echo(style_error('{} [{}]'.format(message, e.error)))
                    click.echo()

                elif error == u'email-failure':

                    exit_code = 1
                    click.echo()
                    click.echo(style_error('{} [{}]'.format(message, e.error)))
                    click.echo()

                elif error == u'invalid-activation-code':

                    exit_code = 1
                    click.echo()
                    click.echo(style_error('{} [{}]'.format(message, e.error)))
                    click.echo()

                else:

                    # we should not arrive here! otherwise, add a new clause above and handle the situation
                    exit_code = 1
                    click.echo(
                        style_error(
                            'Internal error: unprocessed error type {}:'.
                            format(error)))
                    click.echo(style_error(message))

            elif e.error.startswith(u'crossbar.error.'):

                error = e.error.split(u'.')[2]
                message = e.args[0]

                if error == u'invalid_configuration':

                    click.echo()
                    click.echo(style_error('{} [{}]'.format(message, e.error)))
                    click.echo()
                else:

                    # we should not arrive here! otherwise, add a new clause above and handle the situation
                    exit_code = 1
                    click.echo(
                        style_error(
                            'Internal error: unprocessed error type {}:'.
                            format(error)))
                    click.echo(style_error(message))

            else:

                click.echo(style_error('{}'.format(e)))
                exit_code = 1
                raise

        else:

            if cmd == u'auth':

                self._print_welcome(url, session_details)

            elif cmd == 'shell':

                click.clear()
                try:
                    self._print_welcome(url, session_details)
                except Exception as e:
                    click.echo('err: {}'.format(e))

                prompt_kwargs = {
                    'history': self._history,
                }

                shell_task = loop.create_task(
                    repl.repl(
                        ctx,
                        get_bottom_toolbar_tokens=self.
                        _get_bottom_toolbar_tokens,
                        # get_prompt_tokens=self._get_prompt_tokens,
                        style=self._style,
                        prompt_kwargs=prompt_kwargs))

                loop.run_until_complete(shell_task)

            else:
                # should not arrive here, as we checked cmd in the beginning
                raise Exception('logic error')

        finally:
            loop.close()
            sys.exit(exit_code)
Ejemplo n.º 44
0
        if USE_STREAMING_TESTEE:
            self.setProtocolOptions(failByDrop=True)  # needed for streaming mode
        else:
            # enable permessage-deflate WebSocket protocol extension
            def accept(offers):
                for offer in offers:
                    if isinstance(offer, PerMessageDeflateOffer):
                        return PerMessageDeflateOfferAccept(offer)

            self.setProtocolOptions(perMessageCompressionAccept=accept)


if __name__ == '__main__':

    txaio.start_logging(level='info')

    factory = TesteeServerFactory(u"ws://127.0.0.1:9001")

    loop = asyncio.get_event_loop()
    coro = loop.create_server(factory, port=9001)
    server = loop.run_until_complete(coro)

    try:
        loop.run_forever()
    except KeyboardInterrupt:
        pass
    finally:
        server.close()
        loop.close()
Ejemplo n.º 45
0
def run(components, log_level='info'):
    """
    High-level API to run a series of components.

    This will only return once all the components have stopped
    (including, possibly, after all re-connections have failed if you
    have re-connections enabled). Under the hood, this calls

    XXX fixme for asyncio

    -- if you wish to manage the loop yourself, use the
    :meth:`autobahn.asyncio.component.Component.start` method to start
    each component yourself.

    :param components: the Component(s) you wish to run
    :type components: Component or list of Components

    :param log_level: a valid log-level (or None to avoid calling start_logging)
    :type log_level: string
    """

    # actually, should we even let people "not start" the logging? I'm
    # not sure that's wise... (double-check: if they already called
    # txaio.start_logging() what happens if we call it again?)
    if log_level is not None:
        txaio.start_logging(level=log_level)
    loop = asyncio.get_event_loop()
    if loop.is_closed():
        asyncio.set_event_loop(asyncio.new_event_loop())
        loop = asyncio.get_event_loop()
        txaio.config.loop = loop
    log = txaio.make_logger()

    # see https://github.com/python/asyncio/issues/341 asyncio has
    # "odd" handling of KeyboardInterrupt when using Tasks (as
    # run_until_complete does). Another option is to just restore
    # default SIGINT handling, which is to exit:
    #   import signal
    #   signal.signal(signal.SIGINT, signal.SIG_DFL)

    @asyncio.coroutine
    def nicely_exit(signal):
        log.info("Shutting down due to {signal}", signal=signal)

        tasks = asyncio.Task.all_tasks()
        for task in tasks:
            # Do not cancel the current task.
            if task is not asyncio.Task.current_task():
                task.cancel()

        def cancel_all_callback(fut):
            try:
                fut.result()
            except asyncio.CancelledError:
                log.debug("All task cancelled")
            except Exception as e:
                log.error("Error while shutting down: {exception}", exception=e)
            finally:
                loop.stop()

        fut = asyncio.gather(*tasks)
        fut.add_done_callback(cancel_all_callback)

    try:
        loop.add_signal_handler(signal.SIGINT, lambda: asyncio.ensure_future(nicely_exit("SIGINT")))
        loop.add_signal_handler(signal.SIGTERM, lambda: asyncio.ensure_future(nicely_exit("SIGTERM")))
    except NotImplementedError:
        # signals are not available on Windows
        pass

    def done_callback(loop, arg):
        loop.stop()

    # returns a future; could run_until_complete() but see below
    component._run(loop, components, done_callback)

    try:
        loop.run_forever()
        # this is probably more-correct, but then you always get
        # "Event loop stopped before Future completed":
        # loop.run_until_complete(f)
    except asyncio.CancelledError:
        pass
    # finally:
    #     signal.signal(signal.SIGINT, signal.SIG_DFL)
    #     signal.signal(signal.SIGTERM, signal.SIG_DFL)

    # Close the event loop at the end, otherwise an exception is
    # thrown. https://bugs.python.org/issue23548
    loop.close()
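
# --- illustrative sketch (not part of the example above) --------------------
# Typical use of this high-level helper with a Component; the transport URL
# and realm below are assumptions for illustration.
from autobahn.asyncio.component import Component, run

comp = Component(
    transports='ws://localhost:8080/ws',
    realm='realm1',
)


@comp.on_join
async def joined(session, details):
    print('session joined:', details)


if __name__ == '__main__':
    run([comp])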
Ejemplo n.º 46
0
                for offer in offers:
                    if isinstance(offer, PerMessageDeflateOffer):
                        return PerMessageDeflateOfferAccept(offer)

            self.setProtocolOptions(perMessageCompressionAccept=accept)


if __name__ == '__main__':

    parser = argparse.ArgumentParser(description='Autobahn Testee Server (Twisted)')
    parser.add_argument('--url', dest='url', type=str, default=u'ws://127.0.0.1:9001', help='The WebSocket fuzzing server URL.')
    parser.add_argument('--loglevel', dest='loglevel', type=str, default=u'info', help='Log level, eg "info" or "debug".')

    options = parser.parse_args()

    txaio.start_logging(level=options.loglevel)

    factory = TesteeServerFactory(options.url)

    _, _, port, _, _, _ = parse_url(options.url)

    loop = asyncio.get_event_loop()
    coro = loop.create_server(factory, port=port)
    server = loop.run_until_complete(coro)

    try:
        loop.run_forever()
    except KeyboardInterrupt:
        pass
    finally:
        server.close()
Ejemplo n.º 47
0
    com.werewolf.{game}.close_phase                 {"killed": "" or null,
                                                     "resurrected": "" or null,
                                                     "winner": "" or null}
"""

import asyncio
from typing import Dict, List, Optional

import txaio
from autobahn.asyncio.component import Component
from autobahn.wamp.interfaces import ISession

from .models import Game, Notifier, Player

txaio.use_asyncio()
txaio.start_logging(level="debug")  # pylint: disable=no-member


class WampNotifier(Notifier):
    def __init__(self, session):
        self._session = session

    def send_to_players(self, game_name: str, players: List[Player],
                        subject: str, message):
        """
        Send message to player.
        """
        for player in players:
            self._session.publish(
                f"com.werewolf.{game_name}.user.{player.name}.{subject}",
                message)
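
# --- illustrative sketch (not part of the class above) ----------------------
# How a session might use the notifier to announce the end of a phase, with
# the payload shape described in the module docstring above; Player's
# constructor arguments and the game name are hypothetical here.
def announce_close_phase(session: ISession) -> None:
    notifier = WampNotifier(session)
    players = [Player(name='alice'), Player(name='bob')]  # hypothetical kwargs
    notifier.send_to_players('village1', players, 'close_phase',
                             {'killed': None, 'resurrected': None, 'winner': None})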
Ejemplo n.º 48
0
def test_invalid_level(framework):
    try:
        txaio.start_logging(level='foo')
        assert False, "should get exception"
    except RuntimeError as e:
        assert 'Invalid log level' in str(e)
Ejemplo n.º 49
0
    parser.add_argument('-v',
                        '--verbose',
                        action='store_true',
                        help='Enable verbose processing output.')
    parser.add_argument('-d',
                        '--debug',
                        action='store_true',
                        help='Enable debug output.')

    options = parser.parse_args()

    import txaio
    txaio.use_twisted()

    log = txaio.make_logger()
    txaio.start_logging(level='debug' if options.debug else 'info')

    infile_path = os.path.abspath(options.infile)
    with open(infile_path, 'rb') as f:
        buf = f.read()

    log.info('Loading FlatBuffers JSON schema ({} bytes) ...'.format(len(buf)))

    try:
        schema = json.loads(buf, encoding='utf8')
    except Exception as e:
        log.error(e)

    if options.verbose:
        log.info('Schema metadata:')
        schema_meta_str = pprint.pformat(schema['meta'])
Ejemplo n.º 50
0
def _startlog(options, reactor):
    """
    Start the logging in a way that all the subcommands can use it.
    """
    from twisted.logger import globalLogPublisher
    from txaio import start_logging, set_global_log_level

    loglevel = getattr(options, "loglevel", "info")
    logformat = getattr(options, "logformat", "none")
    colour = getattr(options, "colour", "auto")

    set_global_log_level(loglevel)

    # The log observers (things that print to stderr, file, etc)
    observers = []

    if getattr(options, "logtofile", False):
        # We want to log to a file
        from crossbar._logging import make_logfile_observer

        if not options.logdir:
            logdir = options.cbdir
        else:
            logdir = options.logdir

        logfile = os.path.join(logdir, "node.log")

        if loglevel in ["error", "warn", "info"]:
            show_source = False
        else:
            show_source = True

        observers.append(make_logfile_observer(logfile, show_source))
    else:
        # We want to log to stdout/stderr.
        from crossbar._logging import make_stdout_observer
        from crossbar._logging import make_stderr_observer
        from crossbar._logging import LogLevel

        if colour == "auto":
            if sys.__stdout__.isatty():
                colour = True
            else:
                colour = False
        elif colour == "true":
            colour = True
        else:
            colour = False

        if loglevel == "none":
            # Do no logging!
            pass
        elif loglevel in ["error", "warn", "info"]:
            # Print info to stdout, warn+ to stderr
            observers.append(make_stdout_observer(show_source=False,
                                                  format=logformat,
                                                  colour=colour))
            observers.append(make_stderr_observer(show_source=False,
                                                  format=logformat,
                                                  colour=colour))
        elif loglevel == "debug":
            # Print debug+info to stdout, warn+ to stderr, with the class
            # source
            observers.append(make_stdout_observer(show_source=True,
                                                  levels=(LogLevel.info,
                                                          LogLevel.debug),
                                                  format=logformat,
                                                  colour=colour))
            observers.append(make_stderr_observer(show_source=True,
                                                  format=logformat,
                                                  colour=colour))
        elif loglevel == "trace":
            # Print trace+, with the class source
            observers.append(make_stdout_observer(show_source=True,
                                                  levels=(LogLevel.info,
                                                          LogLevel.debug),
                                                  format=logformat,
                                                  trace=True,
                                                  colour=colour))
            observers.append(make_stderr_observer(show_source=True,
                                                  format=logformat,
                                                  colour=colour))
        else:
            assert False, "Shouldn't ever get here."

    for observer in observers:
        globalLogPublisher.addObserver(observer)

        # Make sure that it goes away
        reactor.addSystemEventTrigger('after', 'shutdown',
                                      globalLogPublisher.removeObserver, observer)

    # Actually start the logger.
    start_logging(None, loglevel)
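

# A minimal driver sketch (an addition, not part of crossbar; the helper name
# is made up): _startlog() only reads plain attributes off the options object,
# so a bare namespace from a small harness is enough to exercise it. The
# values below are illustrative defaults.
def _startlog_demo(reactor):
    import argparse
    options = argparse.Namespace(
        loglevel='debug',    # none, error, warn, info, debug or trace
        logformat='none',
        colour='auto',
        logtofile=False,     # log to stdout/stderr instead of a node.log file
        logdir=None,
        cbdir='.',
    )
    _startlog(options, reactor)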
Ejemplo n.º 51
0
def run(main=None, parser=None):

    # parse command line arguments
    parser = parser or argparse.ArgumentParser()

    parser.add_argument('--debug',
                        dest='debug',
                        action='store_true',
                        default=False,
                        help='Enable logging at level "debug".')
    parser.add_argument('--url',
                        dest='url',
                        type=str,
                        default='wss://master.xbr.network/ws',
                        help='Management service of the XBR Network '
                        '(default: wss://master.xbr.network/ws)')
    parser.add_argument(
        '--realm',
        dest='realm',
        type=str,
        default=None,
        help='The (management) realm to join on the management server')
    parser.add_argument(
        '--keyfile',
        dest='keyfile',
        type=str,
        default=None,
        help='The private client key file to use for authentication.')
    parser.add_argument('--authmethod',
                        dest='authmethod',
                        type=str,
                        default='cryptosign',
                        help='Authentication method: cryptosign or anonymous')

    args = parser.parse_args()

    if args.debug:
        txaio.start_logging(level='debug')
    else:
        txaio.start_logging(level='info')

    args.keyfile = os.path.abspath(os.path.expanduser(args.keyfile))
    print('using keyfile from', args.keyfile)

    extra = None
    if args.authmethod == 'cryptosign':

        # for authenticating the management client, we need a Ed25519 public/private key pair
        # here, we are reusing the user key - so this needs to exist before
        privkey_file = os.path.expanduser(args.keyfile)
        privkey_hex = None
        user_id = None

        if not os.path.exists(privkey_file):
            raise Exception(
                'private key file {} does not exist'.format(privkey_file))
        else:
            with open(privkey_file, 'r') as f:
                data = f.read()
                for line in data.splitlines():
                    if line.startswith('private-key-ed25519'):
                        privkey_hex = line.split(':')[1].strip()
                    if line.startswith('user-id'):
                        user_id = line.split(':')[1].strip()

        if privkey_hex is None:
            raise Exception('no private key found in keyfile!')

        if user_id is None:
            raise Exception('no user ID found in keyfile!')

        key = cryptosign.CryptosignKey.from_bytes(
            binascii.a2b_hex(privkey_hex))

        extra = {
            'args': args,
            'key': key,
            'authid': user_id,
            'main': main,
            'return_code': None
        }

    elif args.authmethod == 'anonymous':

        extra = {'args': args, 'main': main, 'return_code': None}

    else:
        raise Exception('logic error')

    runner = ApplicationRunner(url=args.url, realm=args.realm, extra=extra)

    runner.run(ShellClient)

    return_code = extra['return_code']
    if isinstance(return_code, int) and return_code != 0:
        sys.exit(return_code)
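
    # For reference (an illustrative addition, not from the original module):
    # the keyfile parsing above expects a plain-text file of "key: value"
    # lines, at minimum something like the following (hex string and user id
    # are placeholders):
    #
    #     user-id: alice
    #     private-key-ed25519: 4d57d97a00000000000000000000000000000000000000000000000000000000
    #
    # Lines other than the two keys read above are simply ignored.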
Ejemplo n.º 52
0
        self.disconnect()

    def onDisconnect(self):
        self.log.info('transport disconnected')
        # this is to clean up stuff. it is not our business to
        # possibly reconnect the underlying connection
        self._countdown -= 1
        if self._countdown <= 0:
            try:
                reactor.stop()
            except ReactorNotRunning:
                pass


if __name__ == '__main__':
    txaio.start_logging(level='debug')

    # create a WAMP session object. this is reused across multiple
    # reconnects (if automatically reconnected)
    session = MyAppSession(ComponentConfig(u'crossbardemo', {}))

    # use WAMP-over-RawSocket
    # runner = ApplicationRunner(u'rs://localhost:8080', u'crossbardemo')

    # alternatively, use WAMP-over-WebSocket
    runner = ApplicationRunner(
        u'ws://localhost:8080',
        u'crossbardemo',
    )

    # alternatively, use WAMP-over-WebSocket plain (standalone, not hooked in under Twisted Web)
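
    # The original example is truncated above; a plausible final step (an
    # assumption, mirroring the auto_reconnect parameter of the Twisted
    # ApplicationRunner.run() shown in the next example) would be:
    runner.run(session, auto_reconnect=True)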
Ejemplo n.º 53
0
    def run(self, make, logging_level='info'):
        """
        Run the application component.

        :param make: A factory that produces instances of :class:`autobahn.asyncio.wamp.ApplicationSession`
           when called with an instance of :class:`autobahn.wamp.types.ComponentConfig`.
        :type make: callable
        """
        def create():
            cfg = ComponentConfig(self.realm, self.extra)
            try:
                session = make(cfg)
            except Exception:
                self.log.failure("App session could not be created! ")
                asyncio.get_event_loop().stop()
            else:
                return session

        parsed_url = urlparse(self.url)

        if parsed_url.scheme == 'tcp':
            is_unix = False
            if not parsed_url.hostname or not parsed_url.port:
                raise ValueError('Host and port are required in URL')
        elif parsed_url.scheme == 'unix' or parsed_url.scheme == '':
            is_unix = True
            if not parsed_url.path:
                raise ValueError('Path to unix socket must be in URL')
        else:
            # without this, is_unix would be undefined further down
            raise ValueError('Unsupported URL scheme: {}'.format(parsed_url.scheme))

        transport_factory = WampRawSocketClientFactory(
            create, serializer=self.serializer)

        loop = asyncio.get_event_loop()
        if logging_level == 'debug':
            loop.set_debug(True)
        txaio.use_asyncio()
        txaio.config.loop = loop

        try:
            loop.add_signal_handler(signal.SIGTERM, loop.stop)
        except NotImplementedError:
            # signals are not available on Windows
            pass

        def handle_error(loop, context):
            self.log.error('Application Error: {err}', err=context)
            loop.stop()

        loop.set_exception_handler(handle_error)

        if is_unix:
            coro = loop.create_unix_connection(transport_factory,
                                               parsed_url.path)
        else:
            coro = loop.create_connection(transport_factory,
                                          parsed_url.hostname, parsed_url.port)
        (_transport, protocol) = loop.run_until_complete(coro)

        txaio.start_logging(level=logging_level)  # @UndefinedVariable

        try:
            loop.run_forever()
        except KeyboardInterrupt:
            pass
        self.log.debug('Left main loop waiting for completion')
        # give the Goodbye message a chance to go through, if we still
        # have an active session; this is not fully working yet, because
        # protocol.is_closed would have to return a Future
        if protocol._session:
            loop.run_until_complete(protocol._session.leave())

        loop.close()
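
    # A usage sketch (an addition; the enclosing runner class is not shown in
    # this fragment, so its name and constructor are assumptions based on the
    # self.url / self.realm / self.extra / self.serializer attributes used above):
    #
    #     runner = ApplicationRunner(url='unix:///tmp/router.sock', realm='realm1')
    #     runner.run(MySession, logging_level='debug')
    #
    #     # or, over TCP:
    #     # runner = ApplicationRunner(url='tcp://localhost:8080', realm='realm1')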
Ejemplo n.º 54
0
    def run(self, make, start_reactor=True, auto_reconnect=False, log_level='info'):
        """
        Run the application component.

        :param make: A factory that produces instances of :class:`autobahn.twisted.wamp.ApplicationSession`
           when called with an instance of :class:`autobahn.wamp.types.ComponentConfig`.
        :type make: callable

        :param start_reactor: if True (the default) this method starts
           the Twisted reactor and doesn't return until the reactor
           stops. If there are any problems starting the reactor or
           connect()-ing, we stop the reactor and raise the exception
           back to the caller.

        :returns: None is returned, unless you specify
            ``start_reactor=False`` in which case the Deferred that
            connect() returns is returned; this will callback() with
            an IProtocol instance, which will actually be an instance
            of :class:`WampWebSocketClientProtocol`
        """
        if start_reactor:
            # only select the framework, set the loop and start logging when we
            # are asked to start the reactor - otherwise we are running inside a
            # program that likely already took care of all this.
            from twisted.internet import reactor
            txaio.use_twisted()
            txaio.config.loop = reactor
            txaio.start_logging(level=log_level)

        if callable(make):
            # factory for creating the ApplicationSession
            def create():
                cfg = ComponentConfig(self.realm, self.extra)
                try:
                    session = make(cfg)
                except Exception:
                    self.log.failure('ApplicationSession could not be instantiated: {log_failure.value}')
                    if start_reactor and reactor.running:
                        reactor.stop()
                    raise
                else:
                    return session
        else:
            create = make

        if self.url.startswith(u'rs'):
            # try to parse RawSocket URL ..
            isSecure, host, port = parse_rs_url(self.url)

            # create a WAMP-over-RawSocket transport client factory
            transport_factory = WampRawSocketClientFactory(create)

        else:
            # try to parse WebSocket URL ..
            isSecure, host, port, resource, path, params = parse_ws_url(self.url)

            # create a WAMP-over-WebSocket transport client factory
            transport_factory = WampWebSocketClientFactory(create, url=self.url, serializers=self.serializers, proxy=self.proxy, headers=self.headers)

            # client WebSocket settings - similar to:
            # - http://crossbar.io/docs/WebSocket-Compression/#production-settings
            # - http://crossbar.io/docs/WebSocket-Options/#production-settings

            # The permessage-deflate extensions offered to the server ..
            offers = [PerMessageDeflateOffer()]

            # Function to accept permessage-deflate responses from the server ..
            def accept(response):
                if isinstance(response, PerMessageDeflateResponse):
                    return PerMessageDeflateResponseAccept(response)

            # set WebSocket options for all client connections
            transport_factory.setProtocolOptions(maxFramePayloadSize=1048576,
                                                 maxMessagePayloadSize=1048576,
                                                 autoFragmentSize=65536,
                                                 failByDrop=False,
                                                 openHandshakeTimeout=2.5,
                                                 closeHandshakeTimeout=1.,
                                                 tcpNoDelay=True,
                                                 autoPingInterval=10.,
                                                 autoPingTimeout=5.,
                                                 autoPingSize=4,
                                                 perMessageCompressionOffers=offers,
                                                 perMessageCompressionAccept=accept)

        # suppress pointless log noise
        transport_factory.noisy = False

        # if the user passed ssl= but the URL isn't secure (isSecure is False),
        # the ssl argument would never be used, which makes no sense.
        context_factory = None
        if self.ssl is not None:
            if not isSecure:
                raise RuntimeError(
                    'ssl= argument value passed to %s conflicts with the "ws:" '
                    'prefix of the url argument. Did you mean to use "wss:"?' %
                    self.__class__.__name__)
            context_factory = self.ssl
        elif isSecure:
            from twisted.internet.ssl import optionsForClientTLS
            context_factory = optionsForClientTLS(host)

        from twisted.internet import reactor
        if self.proxy is not None:
            from twisted.internet.endpoints import TCP4ClientEndpoint
            client = TCP4ClientEndpoint(reactor, self.proxy['host'], self.proxy['port'])
            transport_factory.contextFactory = context_factory
        elif isSecure:
            from twisted.internet.endpoints import SSL4ClientEndpoint
            assert context_factory is not None
            client = SSL4ClientEndpoint(reactor, host, port, context_factory)
        else:
            from twisted.internet.endpoints import TCP4ClientEndpoint
            client = TCP4ClientEndpoint(reactor, host, port)

        # as the reactor shuts down, we wish to wait until we've sent
        # out our "Goodbye" message; leave() returns a Deferred that
        # fires when the transport gets to STATE_CLOSED
        def cleanup(proto):
            if hasattr(proto, '_session') and proto._session is not None:
                if proto._session.is_attached():
                    return proto._session.leave()
                elif proto._session.is_connected():
                    return proto._session.disconnect()

        # when our proto was created and connected, make sure it's cleaned
        # up properly later on when the reactor shuts down for whatever reason
        def init_proto(proto):
            reactor.addSystemEventTrigger('before', 'shutdown', cleanup, proto)
            return proto

        use_service = False
        if auto_reconnect:
            try:
                # since Twisted 16.1.0
                from twisted.application.internet import ClientService
                use_service = True
            except ImportError:
                use_service = False

        if use_service:
            self.log.debug('using t.a.i.ClientService')
            # this is automatically reconnecting
            service = ClientService(client, transport_factory)
            service.startService()
            d = service.whenConnected()
        else:
            # this is only connecting once!
            self.log.debug('using t.i.e.connect()')
            d = client.connect(transport_factory)

        # if we connect successfully, the arg is a WampWebSocketClientProtocol
        d.addCallback(init_proto)

        # if the user didn't ask us to start the reactor, then they
        # get to deal with any connect errors themselves.
        if start_reactor:
            # if an error happens in the connect(), we save the underlying
            # exception so that after the event-loop exits we can re-raise
            # it to the caller.

            class ErrorCollector(object):
                exception = None

                def __call__(self, failure):
                    self.exception = failure.value
                    reactor.stop()
            connect_error = ErrorCollector()
            d.addErrback(connect_error)

            # now enter the Twisted reactor loop
            reactor.run()

            # if we exited due to a connection error, raise that to the
            # caller
            if connect_error.exception:
                raise connect_error.exception

        else:
            # let the caller handle any errors
            return d
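
    # A usage sketch (added for illustration; MySession is a made-up
    # ApplicationSession subclass):
    #
    #     runner = ApplicationRunner(u'ws://localhost:8080/ws', u'realm1')
    #
    #     # blocking form: starts the reactor, re-raises any connect() error
    #     runner.run(MySession, auto_reconnect=True)
    #
    #     # non-blocking form: the caller deals with errors on the Deferred
    #     d = runner.run(MySession, start_reactor=False)
    #     d.addErrback(lambda f: print('connect failed:', f.value))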
Ejemplo n.º 55
0
			#break?

	def gen_range(x):
		#yield from range(x) # python3 only
		for i in range(x):
			yield i

	try:
		res = yield conn._call_with_streams("print_stream", 1337, gen_range(3)).addTimeout(5, reactor)
		assert res == 1337
	except defer.TimeoutError:
		print("Error: Timed out after 5 seconds")

	#while False:
	#	result = yield conn.echo("0123456789"*100000)
	#	#print(result)
	yield sleep(5)
	yield conn._lose()
	cerpcerus.stopreactor(reactor)

if __name__ == "__main__":

	import logging
	logging.basicConfig(level=logging.DEBUG, format="%(levelname)s\t%(name)s\t%(funcName)s\t%(message)s")

	import txaio
	txaio.start_logging(level="debug")

	reactor.callWhenRunning(Task)
	reactor.run()
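
# The sleep() helper yielded above is not defined in this fragment; a common
# Twisted implementation (an assumption, not necessarily what cerpcerus ships)
# returns a Deferred that fires after the given delay:
#
#	from twisted.internet import task
#
#	def sleep(seconds):
#		return task.deferLater(reactor, seconds, lambda: None)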