Example #1
def test_component_cryptosign_auth(reactor, component_crossbar):

    joined = Deferred()

    def main(reactor, session):
        joined.callback(session)
        return session.leave()

    component = Component(
        transports=[
            {
                u"url": u"ws://*****:*****@example.com",
                u"authrole": u"authenticated",
            }
        ],
        realm=u"auth_realm",
        main=main,
    )

    yield component.start(reactor)
    yield joined
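
The snippet above is redacted where the cryptosign credentials would normally appear. For orientation, here is a minimal sketch of what a cryptosign authentication block on a Component usually looks like; the transport URL, authid and private key are placeholders, not values from the original test:

# Hypothetical sketch of a cryptosign-authenticated Component; the transport
# URL, authid and private key are placeholders, not from the test above.
from autobahn.twisted.component import Component

component = Component(
    transports=[{
        u"type": u"websocket",
        u"url": u"ws://localhost:8080/auth_ws",      # placeholder URL
    }],
    authentication={
        u"cryptosign": {
            u"authid": u"alice",                     # placeholder
            u"authrole": u"authenticated",
            # 64-character hex encoding of the ed25519 private key (placeholder)
            u"privkey": u"a" * 64,
        },
    },
    realm=u"auth_realm",
    main=main,
)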
        def test_cancel(self, fake_sleep):
            """
            if we start a component but call .stop before it connects, ever,
            it should still exit properly
            """
            endpoint = Mock()
            directlyProvides(endpoint, IStreamClientEndpoint)
            component = Component(
                transports={
                    "type": "websocket",
                    "url": "ws://127.0.0.1/ws",
                    "endpoint": endpoint,
                })

            def connect(factory, **kw):
                return Deferred()

            endpoint.connect = connect

            # XXX it would actually be nicer if we *could* support
            # passing a reactor in here, but the _batched_timer =
            # make_batched_timer() stuff (slash txaio in general)
            # makes this "hard".
            reactor = Clock()
            with replace_loop(reactor):
                d = component.start(reactor=reactor)
                component.stop()
                yield d
        def test_connect_no_auth_method(self, fake_sleep):
            endpoint = Mock()

            directlyProvides(endpoint, IStreamClientEndpoint)
            component = Component(
                transports={
                    "type": "websocket",
                    "url": "ws://127.0.0.1/ws",
                    "endpoint": endpoint,
                },
                is_fatal=lambda e: True,
            )

            def connect(factory, **kw):
                proto = factory.buildProtocol('boom')
                proto.makeConnection(Mock())

                from autobahn.websocket.protocol import WebSocketProtocol
                from base64 import b64encode
                from hashlib import sha1
                key = proto.websocket_key + WebSocketProtocol._WS_MAGIC
                proto.data = (b"HTTP/1.1 101 Switching Protocols\x0d\x0a"
                              b"Upgrade: websocket\x0d\x0a"
                              b"Connection: upgrade\x0d\x0a"
                              b"Sec-Websocket-Protocol: wamp.2.json\x0d\x0a"
                              b"Sec-Websocket-Accept: " +
                              b64encode(sha1(key).digest()) +
                              b"\x0d\x0a\x0d\x0a")
                proto.processHandshake()

                from autobahn.wamp import role
                subrole = role.RoleSubscriberFeatures()

                msg = Hello("realm",
                            roles=dict(subscriber=subrole),
                            authmethods=["anonymous"])
                serializer = JsonSerializer()
                data, is_binary = serializer.serialize(msg)
                proto.onMessage(data, is_binary)

                msg = Abort(reason="wamp.error.no_auth_method")
                proto.onMessage(*serializer.serialize(msg))
                proto.onClose(False, 100, "wamp.error.no_auth_method")

                return succeed(proto)

            endpoint.connect = connect

            # XXX it would actually be nicer if we *could* support
            # passing a reactor in here, but the _batched_timer =
            # make_batched_timer() stuff (slash txaio in general)
            # makes this "hard".
            reactor = Clock()
            with replace_loop(reactor):
                with self.assertRaises(RuntimeError) as ctx:
                    d = component.start(reactor=reactor)
                    # make sure we fire all our time-outs
                    reactor.advance(3600)
                    yield d
            self.assertIn("Exhausted all transport", str(ctx.exception))
        def test_connect_no_auth_method(self, fake_sleep):
            endpoint = Mock()

            directlyProvides(endpoint, IStreamClientEndpoint)
            component = Component(
                transports={
                    "type": "websocket",
                    "url": "ws://127.0.0.1/ws",
                    "endpoint": endpoint,
                },
                is_fatal=lambda e: True,
            )

            def connect(factory, **kw):
                proto = factory.buildProtocol('boom')
                proto.makeConnection(Mock())

                from autobahn.websocket.protocol import WebSocketProtocol
                from base64 import b64encode
                from hashlib import sha1
                key = proto.websocket_key + WebSocketProtocol._WS_MAGIC
                proto.data = (
                    b"HTTP/1.1 101 Switching Protocols\x0d\x0a"
                    b"Upgrade: websocket\x0d\x0a"
                    b"Connection: upgrade\x0d\x0a"
                    b"Sec-Websocket-Protocol: wamp.2.json\x0d\x0a"
                    b"Sec-Websocket-Accept: " + b64encode(sha1(key).digest()) + b"\x0d\x0a\x0d\x0a"
                )
                proto.processHandshake()

                from autobahn.wamp import role
                subrole = role.RoleSubscriberFeatures()

                msg = Hello(u"realm", roles=dict(subscriber=subrole), authmethods=[u"anonymous"])
                serializer = JsonSerializer()
                data, is_binary = serializer.serialize(msg)
                proto.onMessage(data, is_binary)

                msg = Abort(reason=u"wamp.error.no_auth_method")
                proto.onMessage(*serializer.serialize(msg))
                proto.onClose(False, 100, u"wamp.error.no_auth_method")

                return succeed(proto)
            endpoint.connect = connect

            # XXX it would actually be nicer if we *could* support
            # passing a reactor in here, but the _batched_timer =
            # make_batched_timer() stuff (slash txaio in general)
            # makes this "hard".
            reactor = Clock()
            with replace_loop(reactor):
                with self.assertRaises(RuntimeError) as ctx:
                    d = component.start(reactor=reactor)
                    # make sure we fire all our time-outs
                    reactor.advance(3600)
                    yield d
            self.assertIn(
                "Exhausted all transport",
                str(ctx.exception)
            )
        def test_cancel(self, fake_sleep):
            """
            if we start a component but call .stop before it connects, ever,
            it should still exit properly
            """
            endpoint = Mock()
            directlyProvides(endpoint, IStreamClientEndpoint)
            component = Component(
                transports={
                    "type": "websocket",
                    "url": "ws://127.0.0.1/ws",
                    "endpoint": endpoint,
                }
            )

            def connect(factory, **kw):
                return Deferred()
            endpoint.connect = connect

            # XXX it would actually be nicer if we *could* support
            # passing a reactor in here, but the _batched_timer =
            # make_batched_timer() stuff (slash txaio in general)
            # makes this "hard".
            reactor = Clock()
            with replace_loop(reactor):
                d = component.start(reactor=reactor)
                component.stop()
                yield d
def test_reconnect_on_handshake_timeout(request, temp_dir, crossbar, reactor,
                                        virtualenv):
    """
    """

    comp = Component(transports=[{
        "type": "websocket",
        "url": "ws://localhost:6565/ws",
        "max_retries": 2,
        "options": {
            "open_handshake_timeout": .1,
        }
    }])

    errors = []

    @comp.on_connectfailure
    def error(component, e):
        errors.append(e)

    @comp.on_join
    def joined(session, details):
        import time
        time.sleep(2.0)
        print(f"joined: {session} {details}")

    @comp.on_leave
    def left(session, reason):
        print(f"left: {session} {reason}")

    try:
        yield comp.start()
    except Exception as e:
        # will fail, because can't connect
        print(e)
Example #7
def test_component_wrong_auth(reactor, component_crossbar):
    """
    a component which can't authenticate connects; it should get errors
    """
    def main(reactor, session):
        assert False, "should not have joined the session"

    component = Component(
        transports=[
            {
                u"url": u"ws://localhost:7171/auth_ws",
                u"endpoint": {
                    u"type": u"tcp",
                    u"host": u"localhost",
                    u"port": 7171,
                },
                u"max_retries": 1,
            },
        ],
        authentication={
            u"anonymous": {},
        },
        realm=u"auth_realm",
        main=main,
    )

    try:
        yield component.start(reactor)
        assert False, "should fail"
    except Exception as e:
        assert "Exhausted all transport connect attempts" in str(e)
        def test_successful_proxy_connect(self, fake_sleep):
            endpoint = Mock()
            directlyProvides(endpoint, IStreamClientEndpoint)
            component = Component(
                transports={
                    "type": "websocket",
                    "url": "ws://127.0.0.1/ws",
                    "endpoint": endpoint,
                    "proxy": {
                        "host": "10.0.0.0",
                        "port": 65000,
                    },
                    "max_retries": 0,
                },
                is_fatal=lambda _: True,
            )

            @component.on_join
            def joined(session, details):
                return session.leave()

            def connect(factory, **kw):
                return succeed(Mock())

            endpoint.connect = connect

            # XXX it would actually be nicer if we *could* support
            # passing a reactor in here, but the _batched_timer =
            # make_batched_timer() stuff (slash txaio in general)
            # makes this "hard".
            reactor = Clock()

            got_proxy_connect = Deferred()

            def _tcp(host, port, factory, **kw):
                self.assertEqual("10.0.0.0", host)
                self.assertEqual(port, 65000)
                got_proxy_connect.callback(None)
                return endpoint.connect(factory._wrappedFactory)

            reactor.connectTCP = _tcp

            with replace_loop(reactor):
                d = component.start(reactor=reactor)

                def done(x):
                    if not got_proxy_connect.called:
                        got_proxy_connect.callback(x)

                # make sure we fire all our time-outs
                d.addCallbacks(done, done)
                reactor.advance(3600)
                return got_proxy_connect
Example #9
def test_component_start_twice(reactor, component_crossbar):
    """
    a component which start()s twice
    """

    sessions = []

    def main(reactor, session):
        sessions.append(session)
        return session.leave()

    component = Component(
        transports=[
            {
                u"url": u"ws://*****:*****@example.com",
                u"authrole": u"authenticated",
            }
        ],
        realm=u"auth_realm",
        main=main,
    )

    d0 = component.start(reactor)
    yield d0
    d1 = component.start(reactor)
    yield d1
    assert len(sessions) == 2
        def test_successful_proxy_connect(self, fake_sleep):
            endpoint = Mock()
            directlyProvides(endpoint, IStreamClientEndpoint)
            component = Component(
                transports={
                    u"type": u"websocket",
                    u"url": u"ws://127.0.0.1/ws",
                    u"endpoint": endpoint,
                    u"proxy": {
                        u"host": u"10.0.0.0",
                        u"port": 65000,
                    },
                    u"max_retries": 0,
                },
                is_fatal=lambda _: True,
            )

            @component.on_join
            def joined(session, details):
                return session.leave()

            def connect(factory, **kw):
                return succeed(Mock())
            endpoint.connect = connect

            # XXX it would actually be nicer if we *could* support
            # passing a reactor in here, but the _batched_timer =
            # make_batched_timer() stuff (slash txaio in general)
            # makes this "hard".
            reactor = Clock()

            got_proxy_connect = Deferred()

            def _tcp(host, port, factory, **kw):
                self.assertEqual("10.0.0.0", host)
                self.assertEqual(port, 65000)
                got_proxy_connect.callback(None)
                return endpoint.connect(factory._wrappedFactory)
            reactor.connectTCP = _tcp

            with replace_loop(reactor):
                d = component.start(reactor=reactor)

                def done(x):
                    if not got_proxy_connect.called:
                        got_proxy_connect.callback(x)
                # make sure we fire all our time-outs
                d.addCallbacks(done, done)
                reactor.advance(3600)
                return got_proxy_connect
Example #11
def test_verification_fails(reactor, crypto_crossbar, request,
                            self_signed_cert):
    """
    TLS verification should fail when connecting to a server that presents a
    self-signed cert
    """

    tls_client = Component(
        transports=u"wss://localhost:6464/tls_ws",
        is_fatal=lambda _: True,
    )
    d = tls_client.start(reactor)
    try:
        session = yield d
        assert False, "Connection should fail due to certificate error"
    except Exception as e:
        print("failed (we wanted this): {}".format(e))
        def test_cancel_while_waiting(self):
            """
            if we start a component but call .stop before it connects, ever,
            it should still exit properly -- even if we're 'between'
            connection attempts
            """
            endpoint = Mock()
            directlyProvides(endpoint, IStreamClientEndpoint)
            component = Component(transports={
                "type": "websocket",
                "url": "ws://127.0.0.1/ws",
                "endpoint": endpoint,
                "max_retries": 0,
                "max_retry_delay": 5,
                "initial_retry_delay": 5,
            }, )

            # XXX it would actually be nicer if we *could* support
            # passing a reactor in here, but the _batched_timer =
            # make_batched_timer() stuff (slash txaio in general)
            # makes this "hard".
            reactor = Clock()
            with replace_loop(reactor):

                def connect(factory, **kw):
                    d = Deferred()
                    reactor.callLater(
                        10, d.errback, RuntimeError("no connect for you"))
                    return d

                endpoint.connect = connect

                d0 = component.start(reactor=reactor)
                assert component._delay_f is not None
                assert not component._done_f.called

                d1 = component.stop()
                assert component._done_f is None
                assert d0.called

                yield d1
                yield d0
        def test_cancel_while_waiting(self):
            """
            if we start a component but call .stop before it connects, ever,
            it should still exit properly -- even if we're 'between'
            connection attempts
            """
            endpoint = Mock()
            directlyProvides(endpoint, IStreamClientEndpoint)
            component = Component(
                transports={
                    "type": "websocket",
                    "url": "ws://127.0.0.1/ws",
                    "endpoint": endpoint,
                    u"max_retries": 0,
                    u"max_retry_delay": 5,
                    u"initial_retry_delay": 5,
                },
            )

            # XXX it would actually be nicer if we *could* support
            # passing a reactor in here, but the _batched_timer =
            # make_batched_timer() stuff (slash txaio in general)
            # makes this "hard".
            reactor = Clock()
            with replace_loop(reactor):

                def connect(factory, **kw):
                    d = Deferred()
                    reactor.callLater(10, d.errback, RuntimeError("no connect for you"))
                    return d
                endpoint.connect = connect

                d0 = component.start(reactor=reactor)
                assert component._delay_f is not None
                assert not component._done_f.called

                d1 = component.stop()
                assert component._done_f is None
                assert d0.called

                yield d1
                yield d0
Example #14
def main(reactor):
    component = Component(
        transports=u"ws://localhost:8080/ws",
        realm=u"crossbardemo",
    )
    app = Klein()
    webapp = WebApplication(app, component)

    # have our Web site listen on 8090
    site = Site(app.resource())
    server_ep = TCP4ServerEndpoint(reactor, 8090)
    port = yield server_ep.listen(site)
    print("Web application on {}".format(port))

    # we don't *have* to hand over control of the reactor to
    # component.run -- if we don't want to, we call .start()
    # The Deferred it returns fires when the component is "completed"
    # (or errbacks on any problems).
    comp_d = component.start(reactor)

    # When not using run() we also must start logging ourselves.
    import txaio
    txaio.start_logging(level='info')

    # If the Component raises an exception we want to exit. Note that
    # things like failing to connect will be swallowed by the
    # re-connection mechanisms already so won't reach here.

    def _failed(f):
        print("Component failed: {}".format(f))
        done.errback(f)

    comp_d.addErrback(_failed)

    # wait forever (unless the Component raises an error)
    done = Deferred()
    yield done
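
For comparison with the .start() approach above, run() from autobahn.twisted.component hands control of the reactor over and also starts logging for you. A minimal sketch reusing the same transport and realm; the on_join handler is illustrative only:

# Sketch: letting autobahn drive the reactor instead of calling .start() ourselves.
from autobahn.twisted.component import Component, run

component = Component(
    transports=u"ws://localhost:8080/ws",
    realm=u"crossbardemo",
)

@component.on_join
def joined(session, details):
    print("session joined: {}".format(details))

if __name__ == "__main__":
    # run() starts txaio logging, runs the reactor, and stops it again once
    # the component(s) finish.
    run([component], log_level='info')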
Example #15
def main(reactor):
    component = Component(
        transports=u"ws://localhost:8080/ws",
        realm=u"crossbardemo",
    )
    app = Klein()
    webapp = WebApplication(app, component)

    # have our Web site listen on 8090
    site = Site(app.resource())
    server_ep = TCP4ServerEndpoint(reactor, 8090)
    port = yield server_ep.listen(site)
    print("Web application on {}".format(port))

    # we don't *have* to hand over control of the reactor to
    # component.run -- if we don't want to, we call .start()
    # The Deferred it returns fires when the component is "completed"
    # (or errbacks on any problems).
    comp_d = component.start(reactor)

    # When not using run() we also must start logging ourselves.
    import txaio
    txaio.start_logging(level='info')

    # If the Component raises an exception we want to exit. Note that
    # things like failing to connect will be swallowed by the
    # re-connection mechanisms already so won't reach here.

    def _failed(f):
        print("Component failed: {}".format(f))
        done.errback(f)
    comp_d.addErrback(_failed)

    # wait forever (unless the Component raises an error)
    done = Deferred()
    yield done
Example #16
def main(reactor):
    # Because we're using a self-signed certificate, we need to tell Twisted
    # that it is OK to trust it.
    cert_fname = ".crossbar/server_cert.pem"
    cert = crypto.load_certificate(crypto.FILETYPE_PEM,
                                   six.u(open(cert_fname, 'r').read()))
    opt = ssl.CertificateOptions(
        trustRoot=OpenSSLCertificateAuthorities([cert]))

    # Set up our sisock component.
    component = Component(
        transports=[{
            u"type": u"websocket",
            u"url": sisock.base.WAMP_URI,
            u"endpoint": {
                u"type": u"tcp",
                u"host": sisock.base.SISOCK_HOST,
                u"port": sisock.base.SISOCK_PORT,
                u"tls": opt
            }
        }],
        authentication={
            u"wampcra": {
                u"authid": u"simonsobs",
                u"secret": u"yW4V2T^bPD&rGFwy"
            }
        },
        realm=sisock.base.REALM,
    )

    # Create our klein webserver, and then our datasource (which also connects
    # our component to the WAMP server).
    app = Klein()
    GrafanaSisockDatasrc(app, component)

    # Have our webserver listen for Grafana requests.
    # TODO: use SSL and authentication.
    site = Site(app.resource())
    #server_ep = SSL4ServerEndpoint(reactor, klein_port, opt)
    server_ep = TCP4ServerEndpoint(reactor, klein_port)
    port = yield server_ep.listen(site)
    print("Web application on {}".format(port))

    # We don't *have* to hand over control of the reactor to
    # component.run -- if we don't want to, we call .start()
    # The Deferred it returns fires when the component is "completed"
    # (or errbacks on any problems).
    comp_d = component.start(reactor)

    # When not using run() we also must start logging ourselves.
    txaio.start_logging(level=environ.get("LOGLEVEL", "info"))

    # If the Component raises an exception we want to exit. Note that
    # things like failing to connect will be swallowed by the
    # re-connection mechanisms already so won't reach here.

    def _failed(f):
        print("Component failed: {}".format(f))
        done.errback(f)

    comp_d.addErrback(_failed)

    # Wait forever (unless the Component raises an error).
    done = Deferred()
    yield done
def test_r2r_three(request, reactor, virtualenv, session_temp):
    """
    Test of 'full-mesh' r2r links with three nodes.

    - node0, node1, node2 all have realm1
    - node0, node1, node2 all have "rlink" role for rlink
    - node0 makes rlink connection to node1, node2
    - node1 makes rlink connection to node0, node2
    - node2 makes rlink connection to node0, node1
    - (full-mesh)

    - alice connects to node0
    - alice registers "test.echo"
    - alice subscribes to "test.event"

    - bob connects to node1
    - bob calls "test.echo"
    - bob publishes to "test.event"

    - claire connects to node2
    - claire calls "test.echo"
    - claire publishes to "test.event"

    So, we should receive two calls at "test.echo" and receive two
    events to "test.event" for a successful test.
    """

    node0_dir = join(session_temp, "node0")
    if not os.path.exists(node0_dir):
        os.mkdir(node0_dir)
    node1_dir = join(session_temp, "node1")
    if not os.path.exists(node1_dir):
        os.mkdir(node1_dir)
    node2_dir = join(session_temp, "node2")
    if not os.path.exists(node2_dir):
        os.mkdir(node2_dir)

    # burn in keys so they can match in the configs
    node_keys = [
        (join(node0_dir, "key.pub"), node0_pubkey),
        (join(node0_dir, "key.priv"), node0_privkey),
        (join(node1_dir, "key.pub"), node1_pubkey),
        (join(node1_dir, "key.priv"), node1_privkey),
        (join(node2_dir, "key.pub"), node2_pubkey),
        (join(node2_dir, "key.priv"), node2_privkey),
    ]
    for fname, keydata in node_keys:
        with open(fname, "w") as f:
            f.write(keydata)

    # we start the three nodes in parallel because they all have to
    # connect to the other before they're "started" but we don't know
    # which one will "win" and connect first
    node0_d = start_node(request, reactor, virtualenv, session_temp,
                         node0_config, node0_dir)
    node1_d = start_node(request, reactor, virtualenv, session_temp,
                         node1_config, node1_dir)
    node2_d = start_node(request, reactor, virtualenv, session_temp,
                         node2_config, node2_dir)
    results = yield DeferredList([node0_d, node1_d, node2_d])
    nodes = []
    for ok, res in results:
        if not ok:
            raise res
        nodes.append(res)
    protocol0, protocol1, protocol2 = nodes

    print("Started rlink'd nodes:")

    print("  0: {}".format(protocol0))
    print("  1: {}".format(protocol1))
    print("  2: {}".format(protocol2))

    print("-" * 80)

    # we could wait to see text of each node successfully connecting
    # to the other .. or we just wait a bit.
    yield sleep(10)
    # XXX we could rig this up with crossbar master and then use the
    # management API to determine when all nodes + their rlinks are
    # up..

    # track the events and invocations we get
    events = []
    calls = []
    subscribed_d = Deferred()
    rpc_call_d = Deferred()

    print("start alice")  # run alice first

    alice = Component(
        transports=[
            {
                "url": "ws://localhost:9080/ws",
                "type": "websocket"
            },  # node0
        ],
        realm="realm1",
    )

    @alice.on_join
    @inlineCallbacks
    def alice_join(session, details):
        print("\n\nalice joined\n")

        def a_thing(*args, **kw):
            print("received: a_thing: args={} kw={}".format(args, kw))
            events.append((args, kw))
            if len(events) >= 2:
                reactor.callLater(1, session.leave)

        yield session.subscribe(a_thing, "test.a_thing")

        def rpc(*args, **kw):
            print("call: rpc: args={} kw={}".format(args, kw))
            calls.append((args, kw))
            if len(calls) >= 2:
                reactor.callLater(1, rpc_call_d.callback, None)
            return "rpc return"

        yield session.register(rpc, "test.rpc")
        # XXX we don't know when the rlink registration goes all the way through...
        reactor.callLater(2.0, subscribed_d.callback, None)

    alice_done = alice.start(reactor)

    # wait until Alice actually subscribes before starting bob
    yield subscribed_d
    print("alice is subscribed + registered")

    print("start bob")

    bob = Component(
        transports=[{
            "url": "ws://localhost:9081/ws",  # node1
            "type": "websocket",
        }],
        realm="realm1",
    )

    @bob.on_join
    @inlineCallbacks
    def bob_join(session, details):
        print("bob joined: PID={x_cb_pid}".format(**details.authextra))
        print("publishing 'test.a_thing'")
        p = yield session.publish(
            "test.a_thing",
            3,
            2,
            1,
            options=types.PublishOptions(acknowledge=True))
        print("published {}".format(p))
        res = yield session.call("test.rpc", 1, 2, 3)
        print("test.rpc returned: {}".format(res))
        reactor.callLater(2, session.leave)

    bob_done = bob.start(reactor)
    print("bob is starting", bob_done, alice_done)

    print("start claire")

    claire = Component(
        transports=[{
            "url": "ws://localhost:9081/ws",  # node1
            "type": "websocket",
        }],
        realm="realm1",
    )

    @claire.on_join
    @inlineCallbacks
    def claire_join(session, details):
        print("claire joined: PID={x_cb_pid}".format(**details.authextra))
        print("publishing 'test.a_thing'")
        p = yield session.publish(
            "test.a_thing",
            3,
            2,
            1,
            options=types.PublishOptions(acknowledge=True))
        print("published {}".format(p))
        res = yield session.call("test.rpc", 1, 2, 3)
        print("test.rpc returned: {}".format(res))
        reactor.callLater(2, session.leave)

    claire_done = claire.start(reactor)
    print("claire is starting", claire_done)

    yield rpc_call_d
    yield bob_done
    yield alice_done
    yield claire_done
        def test_successful_connect(self, fake_sleep):
            endpoint = Mock()
            joins = []

            def joined(session, details):
                joins.append((session, details))
                return session.leave()

            directlyProvides(endpoint, IStreamClientEndpoint)
            component = Component(
                transports={
                    "type": "websocket",
                    "url": "ws://127.0.0.1/ws",
                    "endpoint": endpoint,
                })
            component.on('join', joined)

            def connect(factory, **kw):
                proto = factory.buildProtocol('ws://localhost/')
                transport = FakeTransport()
                proto.makeConnection(transport)

                from autobahn.websocket.protocol import WebSocketProtocol
                from base64 import b64encode
                from hashlib import sha1
                key = proto.websocket_key + WebSocketProtocol._WS_MAGIC
                proto.data = (b"HTTP/1.1 101 Switching Protocols\x0d\x0a"
                              b"Upgrade: websocket\x0d\x0a"
                              b"Connection: upgrade\x0d\x0a"
                              b"Sec-Websocket-Protocol: wamp.2.json\x0d\x0a"
                              b"Sec-Websocket-Accept: " +
                              b64encode(sha1(key).digest()) +
                              b"\x0d\x0a\x0d\x0a")
                proto.processHandshake()

                from autobahn.wamp import role
                features = role.RoleBrokerFeatures(
                    publisher_identification=True,
                    pattern_based_subscription=True,
                    session_meta_api=True,
                    subscription_meta_api=True,
                    subscriber_blackwhite_listing=True,
                    publisher_exclusion=True,
                    subscription_revocation=True,
                    payload_transparency=True,
                    payload_encryption_cryptobox=True,
                )

                msg = Welcome(123456, dict(broker=features), realm='realm')
                serializer = JsonSerializer()
                data, is_binary = serializer.serialize(msg)
                proto.onMessage(data, is_binary)

                msg = Goodbye()
                proto.onMessage(*serializer.serialize(msg))
                proto.onClose(True, 100, "some old reason")

                return succeed(proto)

            endpoint.connect = connect

            # XXX it would actually be nicer if we *could* support
            # passing a reactor in here, but the _batched_timer =
            # make_batched_timer() stuff (slash txaio in general)
            # makes this "hard".
            reactor = Clock()
            with replace_loop(reactor):
                yield component.start(reactor=reactor)
                self.assertEqual(len(joins), 1)
                # make sure we fire all our time-outs
                reactor.advance(3600)
Example #19
def test_deflate_lots(reactor, crossbar):
    """
    """
    # XXX turning on Deflate seems .. hard
    # XXX also, why do I have to specify max_... in at least 2 places?

    # The extensions offered to the server ..
    offers = [PerMessageDeflateOffer()]

    # Function to accept responses from the server ..
    def accept(response):
        if isinstance(response, PerMessageDeflateResponse):
            return PerMessageDeflateResponseAccept(response,
                                                   max_message_size=1500)

    # we have two components here: one that has a limit on payloads
    # (component0) and one that doesn't (component1). component0 subscribes
    # and then component1 sends it one "small enough" and then one "too big"
    # message (the second should cause component0 to drop its connection).
    component0 = Component(
        transports=[
            {
                u"url": u"ws://localhost:6565/ws",
                u"options": {
                    u"max_frame_payload_size": 1500,
                    u"per_message_compression_offers": offers,
                    u"per_message_compression_accept": accept,
                }
            },
        ],
        realm=u"functest_realm1",
    )
    component1 = Component(
        transports=[
            {
                u"url": u"ws://localhost:6565/ws",
                u"options": {
                    u"per_message_compression_offers": offers,
                    u"per_message_compression_accept": accept,
                }
            },
        ],
        realm=u"functest_realm1",
    )

    listening = Deferred()  # component1 waits until component0 subscribes
    connection_dropped = Deferred()  # we want component0 to do this
    connections = [0]  # how many times component0 has connected

    @component0.on_join
    @inlineCallbacks
    def listen(session, details):
        connections[0] += 1
        if connections[0] == 2:
            print("comp0: re-connected!")
        elif connections[0] == 1:
            # we await (potentially) two messages; if we get the second, the
            # test should fail
            messages = [Deferred(), Deferred()]
            yield session.subscribe(lambda x: messages.pop(0).callback(x),
                                    u"foo")
            listening.callback(None)
            while len(messages):
                msg = yield messages[0]
                print("comp0: message: {}".format(msg))
        print("comp0: done listening")

    @component0.on_disconnect
    def gone(session, was_clean=False):
        print("comp0: session dropped".format(session, was_clean))
        connection_dropped.callback(session)

    @component1.on_join
    @inlineCallbacks
    def send(session, details):
        yield listening
        # this one should be small enough to go through
        yield session.publish(u"foo",
                              u"a" * 20,
                              options=types.PublishOptions(acknowledge=True))

        # this will definitely be over 1500 and should fail (due to the other
        # side's decoder dropping it because the payload is too big). We can't
        # get an error here because the router accepts it, but the other
        # *client* will reject...
        yield session.publish(u"foo",
                              u"a" * 2000,
                              options=types.PublishOptions(acknowledge=True))

    # fail-safe if the test doesn't fail for some other reason, it'll fail
    # after 15s
    timeout = sleep(15)

    done = DeferredList([
        component0.start(reactor),
        component1.start(reactor),
    ])
    yield DeferredList([timeout, done, connection_dropped],
                       fireOnOneErrback=True,
                       fireOnOneCallback=True)

    assert not timeout.called, "shouldn't time out"
    assert connection_dropped.called, "component0 should have dropped connection"
        def test_successful_connect(self, fake_sleep):
            endpoint = Mock()
            joins = []

            def joined(session, details):
                joins.append((session, details))
                return session.leave()
            directlyProvides(endpoint, IStreamClientEndpoint)
            component = Component(
                transports={
                    "type": "websocket",
                    "url": "ws://127.0.0.1/ws",
                    "endpoint": endpoint,
                }
            )
            component.on('join', joined)

            def connect(factory, **kw):
                proto = factory.buildProtocol('boom')
                proto.makeConnection(Mock())

                from autobahn.websocket.protocol import WebSocketProtocol
                from base64 import b64encode
                from hashlib import sha1
                key = proto.websocket_key + WebSocketProtocol._WS_MAGIC
                proto.data = (
                    b"HTTP/1.1 101 Switching Protocols\x0d\x0a"
                    b"Upgrade: websocket\x0d\x0a"
                    b"Connection: upgrade\x0d\x0a"
                    b"Sec-Websocket-Protocol: wamp.2.json\x0d\x0a"
                    b"Sec-Websocket-Accept: " + b64encode(sha1(key).digest()) + b"\x0d\x0a\x0d\x0a"
                )
                proto.processHandshake()

                from autobahn.wamp import role
                features = role.RoleBrokerFeatures(
                    publisher_identification=True,
                    pattern_based_subscription=True,
                    session_meta_api=True,
                    subscription_meta_api=True,
                    subscriber_blackwhite_listing=True,
                    publisher_exclusion=True,
                    subscription_revocation=True,
                    payload_transparency=True,
                    payload_encryption_cryptobox=True,
                )

                msg = Welcome(123456, dict(broker=features), realm=u'realm')
                serializer = JsonSerializer()
                data, is_binary = serializer.serialize(msg)
                proto.onMessage(data, is_binary)

                msg = Goodbye()
                proto.onMessage(*serializer.serialize(msg))
                proto.onClose(True, 100, "some old reason")

                return succeed(proto)
            endpoint.connect = connect

            # XXX it would actually be nicer if we *could* support
            # passing a reactor in here, but the _batched_timer =
            # make_batched_timer() stuff (slash txaio in general)
            # makes this "hard".
            reactor = Clock()
            with replace_loop(reactor):
                yield component.start(reactor=reactor)
                self.assertEqual(len(joins), 1)
                # make sure we fire all our time-outs
                reactor.advance(3600)
def test_roundrobin_proxy(request, reactor, virtualenv):
    """
    Confirm that a proxy with two connections does connections to both
    backends.

    Two nodes each with a router-worker for 'realm1'
    Each node rlink-connects to the other.
    One node has a proxy
    """

    tempdir = _create_temp(request)

    # burn in hard-coded keys so we can refer to the public parts in
    # configs more easily.
    node_keys = [
        (node0_pubkey, node0_privkey),
        (node1_pubkey, node1_privkey),
        (node2_pubkey, node2_privkey),
        (node3_pubkey, node3_privkey),
    ]
    for node_num in range(4):
        node_dir = join(tempdir, "node{}".format(node_num))
        os.mkdir(node_dir)

        pub, priv = node_keys[node_num]
        with open(join(node_dir, "key.pub"), "w") as f:
            f.write(pub)
        with open(join(node_dir, "key.priv"), "w") as f:
            f.write(priv)

    # we start the nodes in parallel because we don't know which one
    # will "win" and connect first
    node_setup = [
        (node0_config, join(tempdir, "node0")),
        (node1_config, join(tempdir, "node1")),
        (node2_config, join(tempdir, "node2")),
        (node3_config, join(tempdir, "node3")),
    ]
    node_starts = []
    for node_config, node_dir in node_setup:
        node_d = start_node(request, reactor, virtualenv, node_config, node_dir)
        node_starts.append(node_d)
    print("-" * 80)
    print(node_starts)
    results = yield DeferredList(node_starts)
    print("-" * 80)
    print(results)
    print("-" * 80)
    nodes = []
    for ok, res in results:
        if not ok:
            raise res
        nodes.append(res)
    protocol0, protocol1, protocol2, protocol3 = nodes

    print("Started rlink'd nodes:")

    print("  0: {}".format(protocol0))
    print("  1: {}".format(protocol1))
    print("  2: {}".format(protocol2))
    print("  3: {}".format(protocol3))

    print("-" * 80)

    # we could wait to see text of each node successfully connecting
    # to the other .. or we just wait a bit.
    yield sleep(5)

    subscribed_d = Deferred()
    rpc_call_d = Deferred()
    print("start alice")
    # run alice first

    alice = Component(
        transports=[
            {"url": "ws://localhost:7070/ws", "type": "websocket"},  # proxy0
        ],
        realm="realm1",
    )

    @alice.on_join
    @inlineCallbacks
    def alice_join(session, details):
        print("\n\nalice joined\n")

        def a_thing(*args, **kw):
            print("received: a_thing: args={} kw={}".format(args, kw))
            reactor.callLater(3, session.leave)
        yield session.subscribe(a_thing, "test.a_thing")

        def rpc(*args, **kw):
            print("call: rpc: args={} kw={}".format(args, kw))
            reactor.callLater(1, rpc_call_d.callback, None)
            return "rpc return"
        yield session.register(rpc, "test.rpc")
        # XXX we don't know when the rlink registration goes all the way through...
        reactor.callLater(2.0, subscribed_d.callback, None)

    alice_done = alice.start(reactor)

    # wait until Alice actually subscribes (and thus is also registered) before starting bob
    yield subscribed_d
    print("alice is subscribed + registered")

    print("start bob")

    bob = Component(
        transports=[{
            "url": "ws://localhost:7070/ws",  # node0 XXX should be node1
            "type": "websocket",
        }],
        realm="realm1",
    )

    @bob.on_join
    @inlineCallbacks
    def bob_join(session, details):
        print("bob joined: PID={x_cb_pid}".format(**details.authextra))
        print("publishing 'test.a_thing'")
        p = yield session.publish("test.a_thing", 3, 2, 1, options=types.PublishOptions(acknowledge=True))
        print("published {}".format(p))
        res = yield session.call("test.rpc", 1, 2, 3)
        print("test.rpc returned: {}".format(res))
        reactor.callLater(2, session.leave)

    bob_done = bob.start(reactor)
    print("bob is starting", bob_done, alice_done)
    yield rpc_call_d
    yield bob_done
    yield alice_done

    # do a bunch of pubs in different sessions to prove we're hitting
    # different proxies and different router processes.

    received = []
    connects = []

    carol = Component(
        transports=[{
            "url": "ws://*****:*****@carol.subscribe("multiverse", types.SubscribeOptions(details=True))
    def _(*args, **kwargs):
        print("SUB: {}".format(kwargs.get('details', None)))
        received.append((args, kwargs))

    carol_ready = Deferred()
    carol.on('ready', carol_ready.callback)
    carol.start()

    yield sleep(3)
    yield carol_ready

    GROUPS = 10
    CONNECTS = 5

    for g in range(GROUPS):
        group = []
        for m in range(CONNECTS):
            client = Component(
                transports=[{
                    "url": "ws://localhost:7070/ws",  # proxy0
                    "type": "websocket",
                }],
                realm="realm1",
            )

            @client.on_join
            @inlineCallbacks
            def _(session, details):
                connects.append(details)
                yield session.publish(
                    u"multiverse", group=g, member=m,
                    options=types.PublishOptions(acknowledge=True)
                )
                yield session.leave()

            group.append(client.start())
        res = yield DeferredList(group)
        for ok, value in res:
            if not ok:
                raise value
    print("-" * 80)
    print("Received {} events".format(len(received)))
    for r in received:
        print(r[1]['details'])

    # some client should get each publish() that we sent

    # FIXME: AssertionError: assert 49 == (10 * 5)
    assert len(received) == GROUPS * CONNECTS
    print("-" * 80)

    # figure out which nodes and proxies we've contacted
    workers = set()
    proxies = set()
    for c in connects:
        workers.add(c.authextra['x_cb_worker'])
        proxies.add(c.authextra['x_cb_proxy_worker'])
        print(c.authextra['x_cb_worker'])
    print("workers: {}".format(workers))
    print("proxies: {}".format(proxies))
    print("-" * 80)
    assert workers == set([
        "node0_worker0",
        "node1_worker0",
        "node2_worker0",
        "node3_worker0",
    ])
    assert proxies == set(["node0_proxy0"])
Example #22
def test_proxy(request, virtualenv, reactor, session_temp):
    '''
    Run a router worker plus two proxy workers sharing one listening port,
    then check that calls succeed and that both proxy processes serve traffic.
    '''

    cbdir = join(session_temp, "proxy_cb")
    os.mkdir(cbdir)

    # XXX could pytest.mark.paramtrize on transports, for example, to
    # test both websocket and rawsocket -- but then would need to
    # provide the configuration onwards somehow...
    crossbar_config = {
        "version": 2,
        "controller": {
            "id": "node1",
        },
        "workers": [
            {
                "type": "router",
                "realms": [
                    {
                        "name": "foo",
                        "roles": [
                            {
                                "name": "anonymous",
                                "permissions": [
                                    {
                                        "uri": "",
                                        "match": "prefix",
                                        "allow": {
                                            "call": True,
                                            "register": True,
                                            "publish": True,
                                            "subscribe": True
                                        },
                                        "disclose": {
                                            "caller": True,
                                            "publisher": True
                                        },
                                        "cache": True
                                    }
                                ]
                            },
                            {
                                "name": "quux",
                                "permissions": [
                                    {
                                        "uri": "",
                                        "match": "prefix",
                                        "allow": {
                                            "call": True,
                                            "register": True,
                                            "publish": True,
                                            "subscribe": True
                                        },
                                        "disclose": {
                                            "caller": True,
                                            "publisher": True
                                        },
                                        "cache": True
                                    }
                                ]
                            }
                        ]
                    }
                ],
                "transports": [
                    {
                        "type": "rawsocket",
                        "endpoint": {
                            "type": "unix",
                            "path": "router.sock"
                        },
                        "options": {
                            "max_message_size": 1048576
                        },
                        "serializers": ["cbor"],
                        "auth": {
                            "anonymous-proxy": {
                                "type": "static",
                                "role": "quux"
                            }
                        }
                    }
                ]
            },
            {
                "type": "proxy",
                "id": "first_proxy",
                "routes": {
                    "foo": {
                        "quux": "backend_zero",
                        "anonymous": "backend_zero"
                    }
                },
                "connections": {
                    "backend_zero": {
                        "realm": "foo",
                        "transport": {
                            "type": "rawsocket",
                            "endpoint": {
                                "type": "unix",
                                "path": "router.sock",
                                "serializer": "cbor"
                            }
                        },
                        "url": "rs://localhost"
                    }
                },
                "transports": [
                    {
                        "type": "web",
                        "id": "ws_test_0",
                        "endpoint": {
                            "type": "tcp",
                            "port": 8443,
                            "shared": True,
                        },
                        "paths": {
                            "autobahn": {
                                "type": "archive",
                                "archive": "autobahn.zip",
                                "origin": "https://github.com/crossbario/autobahn-js-browser/archive/master.zip",
                                "download": True,
                                "cache": True,
                                "mime_types": {
                                    ".min.js": "text/javascript",
                                    ".jgz": "text/javascript"
                                }
                            },
                            "ws": {
                                "type": "websocket",
                                "serializers": [
                                    "cbor", "msgpack", "json"
                                ],
                                "auth": {
                                    "anonymous": {
                                        "type": "static",
                                        "role": "quux"
                                    }
                                },
                                "options": {
                                    "allowed_origins": ["*"],
                                    "allow_null_origin": True,
                                    "enable_webstatus": True,
                                    "max_frame_size": 1048576,
                                    "max_message_size": 1048576,
                                    "auto_fragment_size": 65536,
                                    "fail_by_drop": True,
                                    "open_handshake_timeout": 2500,
                                    "close_handshake_timeout": 1000,
                                    "auto_ping_interval": 10000,
                                    "auto_ping_timeout": 5000,
                                    "auto_ping_size": 4,
                                    "compression": {
                                        "deflate": {
                                            "request_no_context_takeover": False,
                                            "request_max_window_bits": 13,
                                            "no_context_takeover": False,
                                            "max_window_bits": 13,
                                            "memory_level": 5
                                        }
                                    }
                                }
                            },
                            "info": {
                                "type": "nodeinfo"
                            },
                            "/": {
                                "type": "static",
                                "directory": join(cbdir, "web"),
                                "options": {
                                    "enable_directory_listing": False
                                }
                            }
                        }
                    }
                ]
            },
            {
                "type": "proxy",
                "id": "second_proxy",
                "routes": {
                    "foo": {
                        "quux": "backend_zero",
                        "anonymous": "backend_zero"
                    }
                },
                "connections": {
                    "backend_zero": {
                        "realm": "foo",
                        "transport": {
                            "type": "rawsocket",
                            "endpoint": {
                                "type": "unix",
                                "path": "router.sock",
                                "serializer": "cbor"
                            }
                        },
                        "url": "rs://localhost"
                    }
                },
                "transports": [
                    {
                        "type": "web",
                        "endpoint": {
                            "type": "tcp",
                            "port": 8443,
                            "shared": True,
                        },
                        "paths": {
                            "autobahn": {
                                "type": "archive",
                                "archive": "autobahn.zip",
                                "origin": "https://github.com/crossbario/autobahn-js-browser/archive/master.zip",
                                "download": True,
                                "cache": True,
                                "mime_types": {
                                    ".min.js": "text/javascript",
                                    ".jgz": "text/javascript"
                                }
                            },
                            "ws": {
                                "type": "websocket",
                                "serializers": [
                                    "cbor", "msgpack", "json"
                                ],
                                "options": {
                                    "allowed_origins": ["*"],
                                    "allow_null_origin": True,
                                    "enable_webstatus": True,
                                    "max_frame_size": 1048576,
                                    "max_message_size": 1048576,
                                    "auto_fragment_size": 65536,
                                    "fail_by_drop": True,
                                    "open_handshake_timeout": 2500,
                                    "close_handshake_timeout": 1000,
                                    "auto_ping_interval": 10000,
                                    "auto_ping_timeout": 5000,
                                    "auto_ping_size": 4,
                                    "compression": {
                                        "deflate": {
                                            "request_no_context_takeover": False,
                                            "request_max_window_bits": 13,
                                            "no_context_takeover": False,
                                            "max_window_bits": 13,
                                            "memory_level": 5
                                        }
                                    }
                                }
                            },
                            "info": {
                                "type": "nodeinfo"
                            },
                            "/": {
                                "type": "static",
                                "directory": join(cbdir, "web"),
                                "options": {
                                    "enable_directory_listing": False
                                }
                            },
                        }
                    }
                ]
            }
        ]
    }

    class WaitForTransportAndProxy(object):
        """
        Super hacky, but ... other suggestions? Could busy-wait for ports
        to become connect()-able? Better text to search for?
        """
        def __init__(self, done):
            self.data = ''
            self.done = done
            # found: transport, proxy0, proxy1
            self._found = [False, False, False]

        def write(self, data):
            print(data, end='')
            if self.done.called:
                return

            # in case it's not line-buffered for some crazy reason
            self.data = self.data + data
            if not self._found[0] and "started Transport ws_test_0" in self.data:
                print("Detected transport starting up")
                self._found[0] = True
            if not self._found[1] and "Proxy first_proxy has started" in self.data:
                print("first proxy started")
                self._found[1] = True
            if not self._found[2] and "Proxy second_proxy has started" in self.data:
                print("second proxy started")
                self._found[2] = True
            if all(self._found) and not self.done.called:
                self.done.callback(None)
            if "Address already in use" in self.data:
                self.done.errback(RuntimeError("Address already in use"))
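    # A hedged sketch of the busy-wait alternative mentioned in the docstring
    # above: poll until the transport's port accepts TCP connections. It is
    # not used by this test, and the host/port defaults are assumptions that
    # would have to match the node config.
    @inlineCallbacks
    def _wait_for_port(host="127.0.0.1", port=8443, attempts=300):
        from twisted.internet.endpoints import TCP4ClientEndpoint, connectProtocol
        from twisted.internet.protocol import Protocol
        for _ in range(attempts):
            try:
                proto = yield connectProtocol(TCP4ClientEndpoint(reactor, host, port), Protocol())
                proto.transport.loseConnection()
                return
            except Exception:
                yield sleep(0.1)
        raise RuntimeError("{}:{} never became connect()-able".format(host, port))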

    listening = Deferred()
    protocol = yield start_crossbar(
            reactor, virtualenv,
            cbdir, crossbar_config,
            stdout=WaitForTransportAndProxy(listening),
            stderr=WaitForTransportAndProxy(listening),
            log_level='debug' if request.config.getoption('logdebug', False) else False,
    )
    request.addfinalizer(partial(_cleanup_crossbar, protocol))

    static_content = "<html><body>it worked</body></html>\n"
    os.mkdir(join(cbdir, "web"))
    fname = join(cbdir, "web", "index.html")
    with open(fname, "w") as f:
        f.write(static_content)

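    # race a 40-second timeout against the "listening" deferred: the
    # DeferredList below fires as soon as either of them does (or on the
    # first error), and we then check whether it was the timeout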
    timeout = sleep(40)
    results = yield DeferredList([timeout, listening], fireOnOneErrback=True, fireOnOneCallback=True)

    if timeout.called:
        raise RuntimeError("Timeout waiting for crossbar to start")

    # test static resource web responses
    for _ in range(200):
        response = yield treq.get('http://*****:*****@callee.register(u"test.callable")
    def call_test(*args, **kw):
        # print("called: {} {}".format(args, kw))
        return args, kw

    @callee.on_ready
    def _(session):
        callee_ready.callback(None)
    callee.start()

    yield callee_ready

    num_callers = 10
    caller_sessions = []
    results = []
    for _ in range(num_callers):

        @inlineCallbacks
        def main(reactor, session):
            # print("main: {} {}".format(reactor, session))
            r = yield session.call(u"test.callable", "arg0", "arg1", keyword="keyword")
            results.append(r)
            yield session.leave()

        caller = Component(
            transports=[{
                "url": "ws://localhost:8443/ws",
                "type": "websocket"
            }],
            realm="foo",
            authentication={
                "anonymous": {
                    "authrole": "quux"
                }
            },
            main=main,
        )
        caller_sessions.append(caller.start())

    # all calls should complete, and not error
    done = yield DeferredList(caller_sessions)
    for ok, res in done:
        assert ok, "some caller session failed"

    # all calls should have succeeded with the same result
    assert len(results) == num_callers
    for result in results:
        # note: original return-value would have been a tuple, but
        # WAMP only supports lists
        assert result == [["arg0", "arg1"], dict(keyword="keyword")]
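        # (the same conversion happens in a plain JSON round-trip, since JSON
        #  has no tuple type: json.loads(json.dumps(("a", 1))) == ["a", 1])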

    # this just checks that we see "any log lines at all" from both
    # Proxy processes; a more specific check for particular log text is
    # sketched at the end of this test
    proxy_pids = set()
    m = re.compile(" \\[Proxy\\s*([0-9]+)\\] ")
    for line in protocol.logs.getvalue().splitlines():
        x = m.search(line)
        if x:
            # print("PROXY: {}".format(line))
            proxy_pids.add(int(x.group(1)))

    # we should see log-lines from both proxy processes
    assert len(proxy_pids) == 2, "Expected two different proxy processes to log"
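    # If we ever want the more specific check hinted at above, a hedged
    # sketch (not used by this test; the expected log text is purely
    # hypothetical):
    #
    #     expected = "connection accepted"  # hypothetical phrase
    #     remaining = set(proxy_pids)
    #     for line in protocol.logs.getvalue().splitlines():
    #         x = m.search(line)
    #         if x and expected in line:
    #             remaining.discard(int(x.group(1)))
    #     assert not remaining, "some proxy never logged {!r}".format(expected)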
Example #23
0
async def _test_remote_rlink(request, cfx_master, cfx_edge1, cfx_edge2,
                             cfx_edge3):

    mrealm = 'mrealm1'
    worker_id = 'rlink_worker'
    worker_type = 'router'
    realm_id = 'realm1'
    management_session, management_session_details = await functest_management_session(
        realm=mrealm)

    def clean():
        reactor.callLater(0, management_session.leave)

    request.addfinalizer(clean)
    print(hl(management_session_details, bold=True))

    mgmt = Management(management_session)
    nodes = await mgmt.get_nodes()
    assert len(nodes) == 3, "Should have exactly 3 nodes"
    rlink_coros = []

    # Set up some roles + rlinks (these will be torn down after this test)
    for node in nodes:
        worker = await node.realm_worker(request, worker_id, worker_type)
        realm = await worker.realm(request, realm_id, realm_config(realm_id))
        for role in ["anonymous", "rlink"]:
            await realm.role(request, role, role_config(role))

        # transport for client-style sessions
        await worker.transport(request, "ws000",
                               web_transport_config(node.data["authid"]))

        # rlink transport
        await worker.transport(request, "rlink",
                               rlink_transport_config(node.data["authid"]))

        # note: some of these will try to connect to rlink transports that
        # don't exist yet; that's okay because we don't await them here, and
        # by the time we do (after this loop) every node's transports are up,
        # so they will eventually succeed. That's why the rlink deferreds are
        # awaited separately below.

        for to_node in nodes:
            if node == to_node:  # no rlink to ourself
                continue
            rlink_coros.append(
                ensureDeferred(
                    realm.rlink_to(
                        to_node,
                        rlink_config(realm_id, node.data["authid"],
                                     to_node.data["authid"]),
                    )))

    # now we await all rlink connections (all listening transports on
    # all nodes are set up now)
    rlinks = await DeferredList(rlink_coros)
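    # DeferredList gives one (success, result) pair per rlink; with three
    # nodes each linking to the other two, that's six entries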
    assert len(rlinks) == 6, "Expected exactly 6 rlinks"
    for ok, res in rlinks:
        if not ok:
            raise res
        print(json.dumps(res, indent=4))

    # test some client-type connections

    alice_ready = Deferred()
    alice_got_pub = Deferred()

    alice = Component(
        transports=[client_node_transport("node1")],
        realm=realm_id,
    )

    @alice.register("test.foo")
    def foo(*args, **kw):
        return (args, kw)

    @alice.subscribe("test.pub")
    def bar(*args, **kw):
        alice_got_pub.callback(None)
        return (args, kw)

    alice.on_ready(alice_ready.callback)

    bob_got_result = Deferred()
    bob = Component(
        transports=[client_node_transport("node2")],
        realm=realm_id,
    )

    @bob.on_join
    async def joined(session, details):
        session.publish(u"test.pub", 1, "two", three=4)
        result = await session.call(u"test.foo", 1, 2, 3, "four", key="word")
        # note: tuples become lists in WAMP
        assert result == [[1, 2, 3, "four"], {"key": "word"}]
        bob_got_result.callback(None)

    print("register / call test")
    print("starting alice (to node1)")
    alice.start()
    await alice_ready

    print("starting bob (to node2)")
    bob.start()
    await bob_got_result

    print("waiting for alice to get publication")
    await alice_got_pub

    print("successful register + call test")
Example #24
0
def test_r2r(request, reactor, virtualenv):
    """
    Basic test of r2r links: two nodes, two clients.

    - node0 + node1 both have realm1
    - node0 + node1 both have "rlink" role for rlink
    - node0 makes rlink connection to node1
    - node1 makes rlink connection to node0 (full mesh)

    - alice connects to node0
    - alice registers "test.echo"
    - alice subscribes to "test.event"

    - bob connects to node1
    - bob calls "test.echo"
    - bob publishes to "test.event"
    """

    tempdir = _create_temp(request)

    node0_dir = join(tempdir, "node0")
    os.mkdir(node0_dir)
    node1_dir = join(tempdir, "node1")
    os.mkdir(node1_dir)

    # burn in keys so they can match in the configs
    node_keys = [
        (join(node0_dir, "key.pub"), node0_pubkey),
        (join(node0_dir, "key.priv"), node0_privkey),
        (join(node1_dir, "key.pub"), node1_pubkey),
        (join(node1_dir, "key.priv"), node1_privkey),
    ]
    for fname, keydata in node_keys:
        with open(fname, "w") as f:
            f.write(keydata)

    # we start the two nodes in parallel because each has to connect to the
    # other before it counts as "started", and we don't know which one will
    # "win" and connect first
    node0_d = start_node(request, reactor, virtualenv, node0_config, node0_dir)
    node1_d = start_node(request, reactor, virtualenv, node1_config, node1_dir)
    results = yield DeferredList([node0_d, node1_d])
    nodes = []
    for ok, res in results:
        if not ok:
            raise res
        nodes.append(res)
    protocol0, protocol1 = nodes

    print("Started rlink'd nodes:")

    print("  0: {}".format(protocol0))
    print("  1: {}".format(protocol1))

    print("-" * 80)

    # we could watch for log text showing each node successfully connecting
    # to the other (see the sketch below) .. or we just wait a bit.
    yield sleep(5)
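    # A hedged sketch of the "watch the logs" alternative mentioned above,
    # assuming the node protocols expose their captured output via `.logs`
    # (as in the proxy example earlier); the text to search for would also
    # be an assumption. Not used below:
    @inlineCallbacks
    def _wait_for_node_log(proto, text, attempts=60):
        for _ in range(attempts):
            if text in proto.logs.getvalue():
                return
            yield sleep(0.5)
        raise RuntimeError("never saw {!r} in node output".format(text))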

    subscribed_d = Deferred()
    rpc_call_d = Deferred()
    print("start alice")
    # run alice first

    alice = Component(
        transports=[
            {"url": "ws://localhost:9080/ws", "type": "websocket"},  # node0
        ],
        realm="realm1",
    )

    @alice.on_join
    @inlineCallbacks
    def alice_join(session, details):
        print("\n\nalice joined\n")

        def a_thing(*args, **kw):
            print("received: a_thing: args={} kw={}".format(args, kw))
            reactor.callLater(3, session.leave)
        yield session.subscribe(a_thing, "test.a_thing")

        def rpc(*args, **kw):
            print("call: rpc: args={} kw={}".format(args, kw))
            reactor.callLater(1, rpc_call_d.callback, None)
            return "rpc return"
        yield session.register(rpc, "test.rpc")
        # XXX we don't know when the rlink'd registration has propagated all
        # the way through; a retry-based alternative is sketched below, after
        # bob_join
        reactor.callLater(2.0, subscribed_d.callback, None)

    alice_done = alice.start(reactor)

    # wait until Alice actually subscribes before starting bob
    yield subscribed_d
    print("alice is subscribed + registered")

    print("start bob")

    bob = Component(
        transports=[{
            "url": "ws://localhost:9081/ws",  # node1
            "type": "websocket",
        }],
        realm="realm1",
    )

    @bob.on_join
    @inlineCallbacks
    def bob_join(session, details):
        print("bob joined: PID={x_cb_pid}".format(**details.authextra))
        print("publishing 'test.a_thing'")
        p = yield session.publish("test.a_thing", 3, 2, 1, options=types.PublishOptions(acknowledge=True))
        print("published {}".format(p))
        res = yield session.call("test.rpc", 1, 2, 3)
        print("test.rpc returned: {}".format(res))
        reactor.callLater(2, session.leave)

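    # A more deterministic alternative to the fixed callLater() delays above:
    # retry the call until the rlink'd registration actually becomes visible.
    # This is only a hedged sketch and is not wired into the test below.
    from autobahn.wamp.exception import ApplicationError

    @inlineCallbacks
    def _call_with_retry(session, procedure, *args, **kw):
        for _ in range(20):
            try:
                res = yield session.call(procedure, *args, **kw)
                return res
            except ApplicationError as e:
                if e.error != u"wamp.error.no_such_procedure":
                    raise
                yield sleep(0.25)
        raise RuntimeError("{} never became callable".format(procedure))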
    bob_done = bob.start(reactor)
    print("bob is starting", bob_done, alice_done)
    yield rpc_call_d
    yield bob_done
    yield alice_done