def test_batched_cancel(framework_aio):
    '''
    we can cancel uncalled call_laters
    '''
    # Trollius doesn't come with this, so won't work on py2
    pytest.importorskip('asyncio.test_utils')
    from asyncio.test_utils import TestLoop

    def time_gen():
        yield
        yield
        yield
    fake_loop = TestLoop(time_gen)

    fired = []

    def recorder(*args, **kw):
        fired.append((args, kw))

    with replace_loop(fake_loop):
        timer = txaio.make_batched_timer(1)
        handle = timer.call_later(2, recorder, "a call")

        # advance clock a bit; shouldn't have fired anything yet
        fake_loop.advance_time(1.2)
        fake_loop._run_once()
        handle.cancel()

        # advancing clock past where we "should" get the call, if it
        # were still active.
        fake_loop.advance_time(4.0)
        fake_loop._run_once()

        # the cancelled call must never run
        assert len(fired) == 0
def test_conflict_SSLContext_with_ws_url(self):
    '''
    ApplicationRunner must raise an exception if given an ssl value
    that is an instance of SSLContext, but only a "ws:" URL.
    '''
    import ssl
    try:
        # Try to create an SSLContext, to be as rigorous as we can be
        # by avoiding making assumptions about the ApplicationRunner
        # implementation. If we happen to be on a Python that has no
        # SSLContext, we pass ssl=True, which will simply cause this
        # test to degenerate to the behavior of
        # test_conflict_SSL_True_with_ws_url (above). In fact, at the
        # moment (2015-05-10), none of this matters because the
        # ApplicationRunner implementation does not check to require
        # that its ssl argument is either a bool or an SSLContext. But
        # that may change, so we should be careful.
        ssl.create_default_context
    except AttributeError:
        context = True
    else:
        context = ssl.create_default_context()

    with replace_loop(Mock()) as loop:
        loop.run_until_complete = Mock(return_value=(Mock(), Mock()))
        runner = ApplicationRunner(u'ws://127.0.0.1:8080/wss', u'realm',
                                   ssl=context)
        # raw strings: '\.' and '\?' are regex escapes, not string
        # escapes -- a plain string here triggers an invalid-escape
        # DeprecationWarning/SyntaxWarning on modern Pythons.
        error = (r'^ssl argument value passed to ApplicationRunner '
                 r'conflicts with the "ws:" prefix of the url '
                 r'argument\. Did you mean to use "wss:"\?$')
        self._assertRaisesRegex(Exception, error, runner.run, '_unused_')
def test_batched_cancel_too_late(framework_aio):
    '''
    nothing bad happens if we cancel() after the callbacks
    '''
    # Trollius doesn't come with this, so won't work on py2
    pytest.importorskip('asyncio.test_utils')
    from asyncio.test_utils import TestLoop

    def time_gen():
        yield
        yield
    fake_loop = TestLoop(time_gen)

    fired = []

    def recorder(*args, **kw):
        fired.append((args, kw))

    with replace_loop(fake_loop):
        timer = txaio.make_batched_timer(1)
        handle = timer.call_later(2, recorder, "a call")

        # tick past the bucket: the callback runs exactly once
        fake_loop.advance_time(2.1)
        fake_loop._run_once()
        assert len(fired) == 1

        # cancelling after the fact must be a harmless no-op
        handle.cancel()
        assert len(fired) == 1

        # more time passing changes nothing either
        fake_loop.advance_time(1)
        fake_loop._run_once()
        assert len(fired) == 1
def test_connect_no_auth_method(self, fake_sleep):
    # Drive a full fake WebSocket handshake + WAMP Hello, then have
    # the "server" side Abort with wamp.error.no_auth_method; because
    # is_fatal always says True, the Component should give up and
    # surface a RuntimeError mentioning exhausted transports.
    endpoint = Mock()
    directlyProvides(endpoint, IStreamClientEndpoint)
    component = Component(
        transports={
            "type": "websocket",
            "url": "ws://127.0.0.1/ws",
            "endpoint": endpoint,
        },
        is_fatal=lambda e: True,
    )

    def connect(factory, **kw):
        # build the client protocol and feed it a canned, *valid*
        # WebSocket handshake reply (Sec-Websocket-Accept computed
        # from the protocol's own websocket_key).
        proto = factory.buildProtocol('boom')
        proto.makeConnection(Mock())

        from autobahn.websocket.protocol import WebSocketProtocol
        from base64 import b64encode
        from hashlib import sha1
        key = proto.websocket_key + WebSocketProtocol._WS_MAGIC
        proto.data = (
            b"HTTP/1.1 101 Switching Protocols\x0d\x0a"
            b"Upgrade: websocket\x0d\x0a"
            b"Connection: upgrade\x0d\x0a"
            b"Sec-Websocket-Protocol: wamp.2.json\x0d\x0a"
            b"Sec-Websocket-Accept: " + b64encode(sha1(key).digest()) + b"\x0d\x0a\x0d\x0a"
        )
        proto.processHandshake()

        from autobahn.wamp import role
        subrole = role.RoleSubscriberFeatures()

        # inject a Hello, then the server's Abort and close
        msg = Hello(u"realm", roles=dict(subscriber=subrole), authmethods=[u"anonymous"])
        serializer = JsonSerializer()
        data, is_binary = serializer.serialize(msg)
        proto.onMessage(data, is_binary)

        msg = Abort(reason=u"wamp.error.no_auth_method")
        proto.onMessage(*serializer.serialize(msg))
        proto.onClose(False, 100, u"wamp.error.no_auth_method")
        return succeed(proto)
    endpoint.connect = connect

    # XXX it would actually be nicer if we *could* support
    # passing a reactor in here, but the _batched_timer =
    # make_batched_timer() stuff (slash txaio in general)
    # makes this "hard".
    reactor = Clock()
    with replace_loop(reactor):
        with self.assertRaises(RuntimeError) as ctx:
            d = component.start(reactor=reactor)
            # make sure we fire all our time-outs
            reactor.advance(3600)
            yield d
    self.assertIn(
        "Exhausted all transport",
        str(ctx.exception)
    )
def test_batched_cancel_too_late(framework_aio):
    '''
    nothing bad happens if we cancel() after the callbacks
    '''
    # Trollius doesn't come with this, so won't work on py2
    pytest.importorskip('asyncio.test_utils')
    from asyncio.test_utils import TestLoop

    def time_gen():
        yield
        yield
        yield
    fake = TestLoop(time_gen)

    fired = []

    def record(*a, **kw):
        fired.append((a, kw))

    with replace_loop(fake):
        timer = txaio.make_batched_timer(1)
        handle = timer.call_later(2, record, "a call")

        # advance past the bucket: exactly one callback fires
        fake.advance_time(2.1)
        fake._run_once()
        assert len(fired) == 1

        # a late cancel() must not raise nor un-fire anything
        handle.cancel()
        assert len(fired) == 1

        fake.advance_time(1)
        fake._run_once()
        assert len(fired) == 1
def test_call_later_aio(framework_aio):
    '''
    Wait for two Futures.
    '''
    # Trollius doesn't come with this, so won't work on py2
    pytest.importorskip('asyncio.test_utils')

    def time_gen():
        when = yield
        assert when == 1
        # even though we only do one call, I guess TestLoop needs
        # a "trailing" yield? "or something"
        when = yield 0
    from asyncio.test_utils import TestLoop
    new_loop = TestLoop(time_gen)

    calls = []
    with replace_loop(new_loop) as fake_loop:
        def foo(*args, **kw):
            calls.append((args, kw))

        # schedule, confirm nothing fired yet and that we got a
        # cancellable handle back
        delay = txaio.call_later(1, foo, 5, 6, 7, foo="bar")
        assert len(calls) == 0
        assert hasattr(delay, 'cancel')
        fake_loop.advance_time(2)
        fake_loop._run_once()

        # fired exactly once, with the positional and keyword args
        assert len(calls) == 1
        assert calls[0][0] == (5, 6, 7)
        assert calls[0][1] == dict(foo="bar")
def test_cancel(self, fake_sleep):
    """
    if we start a component but call .stop before it connects, ever,
    it should still exit properly
    """
    endpoint = Mock()
    directlyProvides(endpoint, IStreamClientEndpoint)
    component = Component(
        transports={
            "type": "websocket",
            "url": "ws://127.0.0.1/ws",
            "endpoint": endpoint,
        })

    # a connect() whose Deferred never fires: the component would sit
    # "connecting" forever unless stop() unwinds it
    endpoint.connect = lambda factory, **kw: Deferred()

    # XXX it would actually be nicer if we *could* support
    # passing a reactor in here, but the _batched_timer =
    # make_batched_timer() stuff (slash txaio in general)
    # makes this "hard".
    reactor = Clock()
    with replace_loop(reactor):
        d = component.start(reactor=reactor)
        component.stop()
        yield d
def test_unclean_timeout(self): """ make a delayed call to drop the connection """ # first we have to drive the protocol to STATE_CLOSING # ... which we achieve by sendCloseFrame after we're in # STATE_OPEN # XXX double-check this is the correct code-path to get here # "normally"? # get to STATE_OPEN self.proto.data = mock_handshake_client self.proto.processHandshake() self.assertTrue( self.proto.state == WebSocketServerProtocol.STATE_OPEN) with replace_loop(Clock()) as reactor: # now 'do the test' and transition to CLOSING self.proto.sendCloseFrame() # check we scheduled a call self.assertEqual(len(reactor.calls), 1) # now, advance the clock past the call (and thereby # execute it) reactor.advance(self.proto.closeHandshakeTimeout + 1) # we should have called abortConnection self.assertEqual("call.abortConnection()", str(self.proto.transport.method_calls[-1])) self.assertTrue(self.proto.transport.abortConnection.called) # ...too "internal" for an assert? self.assertEqual(self.proto.state, WebSocketServerProtocol.STATE_CLOSED)
def test_connect_no_auth_method(self, fake_sleep):
    # Simulate a complete websocket handshake followed by a WAMP
    # Abort (wamp.error.no_auth_method); with is_fatal=True the
    # Component must stop retrying and raise RuntimeError.
    endpoint = Mock()
    directlyProvides(endpoint, IStreamClientEndpoint)
    component = Component(
        transports={
            "type": "websocket",
            "url": "ws://127.0.0.1/ws",
            "endpoint": endpoint,
        },
        is_fatal=lambda e: True,
    )

    def connect(factory, **kw):
        proto = factory.buildProtocol('boom')
        proto.makeConnection(Mock())

        from autobahn.websocket.protocol import WebSocketProtocol
        from base64 import b64encode
        from hashlib import sha1
        # compute the Accept header the client-side protocol expects
        key = proto.websocket_key + WebSocketProtocol._WS_MAGIC
        proto.data = (b"HTTP/1.1 101 Switching Protocols\x0d\x0a"
                      b"Upgrade: websocket\x0d\x0a"
                      b"Connection: upgrade\x0d\x0a"
                      b"Sec-Websocket-Protocol: wamp.2.json\x0d\x0a"
                      b"Sec-Websocket-Accept: " + b64encode(sha1(key).digest()) + b"\x0d\x0a\x0d\x0a")
        proto.processHandshake()

        from autobahn.wamp import role
        subrole = role.RoleSubscriberFeatures()

        # Hello from "us", then Abort + close from the fake router
        msg = Hello("realm", roles=dict(subscriber=subrole), authmethods=["anonymous"])
        serializer = JsonSerializer()
        data, is_binary = serializer.serialize(msg)
        proto.onMessage(data, is_binary)

        msg = Abort(reason="wamp.error.no_auth_method")
        proto.onMessage(*serializer.serialize(msg))
        proto.onClose(False, 100, "wamp.error.no_auth_method")
        return succeed(proto)
    endpoint.connect = connect

    # XXX it would actually be nicer if we *could* support
    # passing a reactor in here, but the _batched_timer =
    # make_batched_timer() stuff (slash txaio in general)
    # makes this "hard".
    reactor = Clock()
    with replace_loop(reactor):
        with self.assertRaises(RuntimeError) as ctx:
            d = component.start(reactor=reactor)
            # make sure we fire all our time-outs
            reactor.advance(3600)
            yield d
    self.assertIn("Exhausted all transport", str(ctx.exception))
def test_unclean_timeout_client(self):
    """
    make a delayed call to drop the connection (client-side)
    """
    # flip to True locally when debugging this test
    if False:
        self.proto.debug = True
        self.proto.factory._log = print

    # get to STATE_OPEN
    self.proto.websocket_key = b64decode('6Jid6RgXpH0RVegaNSs/4g==')
    self.proto.data = mock_handshake_server
    self.proto.processHandshake()
    self.assertEqual(self.proto.state, WebSocketServerProtocol.STATE_OPEN)
    self.assertTrue(self.proto.serverConnectionDropTimeout > 0)

    with replace_loop(Clock()) as reactor:
        # now 'do the test' and transition to CLOSING
        self.proto.sendCloseFrame()
        self.proto.onCloseFrame(1000, "raw reason")

        # check we scheduled a call
        self.assertEqual(len(reactor.calls), 1)
        self.assertEqual(reactor.calls[0].func, self.proto.onServerConnectionDropTimeout)
        self.assertEqual(reactor.calls[0].getTime(), self.proto.serverConnectionDropTimeout)

        # now, advance the clock past the call (and thereby
        # execute it)
        reactor.advance(self.proto.closeHandshakeTimeout + 1)

        # we should have called abortConnection
        self.assertEqual("call.abortConnection()", str(self.proto.transport.method_calls[-1]))
        self.assertTrue(self.proto.transport.abortConnection.called)
        # ...too "internal" for an assert?
        self.assertEqual(self.proto.state, WebSocketServerProtocol.STATE_CLOSED)
def test_batched_successful_call(framework_tx):
    '''
    '''
    from twisted.internet.task import Clock
    fake_reactor = Clock()
    fired = []

    def record(*a, **kw):
        fired.append((a, kw))

    with replace_loop(fake_reactor):
        timer = txaio.make_batched_timer(5)

        # add 3 calls: first 2 should be in the same bucket, 3rd in
        # another bucket
        timer.call_later(5.1, record, "first call")
        timer.call_later(9.9, record, "second call")
        timer.call_later(10.1, record, "third call")

        # advancing 4.9 seconds: shouldn't have expired from a bucket
        fake_reactor.advance(4.9)
        assert fired == []

        # tick over past first bucket; first two calls should happen
        # (the "5s -> 10s" bucket)
        fake_reactor.advance(0.2)
        assert fired == [
            (("first call", ), dict()),
            (("second call", ), dict()),
        ]

        # tick into next bucket
        fake_reactor.advance(5)
        assert len(fired) == 3
        assert fired[2] == (("third call", ), dict())
def test_batched_chunks_with_errors(framework_tx):
    '''
    errors from batched calls are reported
    '''
    from twisted.internet.task import Clock

    scheduled = []

    class RecordingClock(Clock):
        # remember every callLater so we can see the chunking behavior
        def callLater(self, *args, **kw):  # noqa
            scheduled.append((args, kw))
            Clock.callLater(self, *args, **kw)

    fake_reactor = RecordingClock()
    fired = []

    def record(*a, **kw):
        fired.append((a, kw))

    def boom(*a, **kw):
        raise RuntimeError("sadness")

    with replace_loop(fake_reactor):
        timer = txaio.make_batched_timer(1, chunk_size=2)
        timer.call_later(2, record, "call0")
        timer.call_later(2, record, "call1")
        timer.call_later(2, record, "call2")
        timer.call_later(2, boom)

        # notify everything, causing an error from the second batch
        try:
            fake_reactor.advance(2)
            fake_reactor.advance(1)
            assert False, "Should get exception"
        except RuntimeError as e:
            assert "processing call_later" in str(e)
def test_create_future_explicit_loop(framework):
    """
    process events on alternate loop= for create_future later
    """
    pytest.importorskip('asyncio')
    if txaio.using_twisted:
        pytest.skip()

    import asyncio
    other_loop = asyncio.new_event_loop()
    txa = txaio.with_config(loop=other_loop)
    f = txa.create_future()

    seen = []
    f.add_done_callback(lambda fut: seen.append(fut.result()))
    assert seen == []

    txaio.resolve(f, 'some result')

    # run_once() runs the txaio.config.loop so we shouldn't get any
    # results until we spin alt_loop
    assert seen == []
    run_once()
    assert seen == []

    with replace_loop(other_loop):
        run_once()
        assert seen == ['some result']
def test_create_future_failure_explicit_loop(framework):
    """
    process events on alternate loop= for create_future later
    """
    pytest.importorskip('asyncio')
    if txaio.using_twisted:
        pytest.skip()

    import asyncio
    other_loop = asyncio.new_event_loop()
    the_exception = Exception('bad')
    txa = txaio.with_config(loop=other_loop)
    f = txa.create_future_error(the_exception)

    seen = []

    def collect(fut):
        # a failed future raises from .result(); record either outcome
        try:
            seen.append(fut.result())
        except Exception as e:
            seen.append(e)
    f.add_done_callback(collect)

    # run_once() runs the txaio.config.loop so we shouldn't get any
    # results until we spin alt_loop
    assert seen == []
    run_once()
    assert seen == []

    with replace_loop(other_loop):
        run_once()
        assert seen == [the_exception]
def test_call_later_aio(framework_aio):
    '''
    Wait for two Futures.
    '''
    # Trollius doesn't come with this, so won't work on py2
    pytest.importorskip('asyncio.test_utils')

    def time_gen():
        when = yield
        assert when == 1
        # even though we only do one call, I guess TestLoop needs
        # a "trailing" yield? "or something"
        when = yield 0
    from asyncio.test_utils import TestLoop
    fake = TestLoop(time_gen)

    fired = []
    with replace_loop(fake) as fake_loop:
        def record(*a, **kw):
            fired.append((a, kw))

        # schedule one call; nothing happens until the loop advances
        handle = txaio.call_later(1, record, 5, 6, 7, foo="bar")
        assert fired == []
        assert hasattr(handle, 'cancel')
        fake_loop.advance_time(2)
        fake_loop._run_once()

        # fired once with both positional and keyword arguments
        assert len(fired) == 1
        assert fired[0] == ((5, 6, 7), dict(foo="bar"))
def test_unclean_timeout_client(self):
    """
    make a delayed call to drop the connection (client-side)
    """
    # flip to True locally when debugging this test
    if False:
        self.proto.factory._log = print

    # get to STATE_OPEN
    self.proto.websocket_key = b64decode('6Jid6RgXpH0RVegaNSs/4g==')
    self.proto.data = mock_handshake_server
    self.proto.processHandshake()
    self.assertEqual(self.proto.state, WebSocketServerProtocol.STATE_OPEN)
    self.assertTrue(self.proto.serverConnectionDropTimeout > 0)

    with replace_loop(Clock()) as reactor:
        # now 'do the test' and transition to CLOSING
        self.proto.sendCloseFrame()
        self.proto.onCloseFrame(1000, b"raw reason")

        # check we scheduled a call
        self.assertEqual(len(reactor.calls), 1)
        self.assertEqual(reactor.calls[0].func, self.proto.onServerConnectionDropTimeout)
        self.assertEqual(reactor.calls[0].getTime(), self.proto.serverConnectionDropTimeout)

        # now, advance the clock past the call (and thereby
        # execute it)
        reactor.advance(self.proto.closeHandshakeTimeout + 1)

        # we should have called abortConnection
        self.assertTrue(self.proto.transport.abort_called())
        # ...too "internal" for an assert?
        self.assertEqual(self.proto.state, WebSocketServerProtocol.STATE_CLOSED)
def test_cancel(self, fake_sleep):
    """
    if we start a component but call .stop before it connects, ever,
    it should still exit properly
    """
    endpoint = Mock()
    directlyProvides(endpoint, IStreamClientEndpoint)
    component = Component(
        transports={
            "type": "websocket",
            "url": "ws://127.0.0.1/ws",
            "endpoint": endpoint,
        }
    )

    def never_connect(factory, **kw):
        # a Deferred that never fires: connection is perpetually pending
        return Deferred()
    endpoint.connect = never_connect

    # XXX it would actually be nicer if we *could* support
    # passing a reactor in here, but the _batched_timer =
    # make_batched_timer() stuff (slash txaio in general)
    # makes this "hard".
    reactor = Clock()
    with replace_loop(reactor):
        d = component.start(reactor=reactor)
        component.stop()
        yield d
def test_unclean_timeout(self): """ make a delayed call to drop the connection """ # first we have to drive the protocol to STATE_CLOSING # ... which we achieve by sendCloseFrame after we're in # STATE_OPEN # XXX double-check this is the correct code-path to get here # "normally"? # get to STATE_OPEN self.proto.data = mock_handshake_client self.proto.processHandshake() self.assertTrue(self.proto.state == WebSocketServerProtocol.STATE_OPEN) with replace_loop(Clock()) as reactor: # now 'do the test' and transition to CLOSING self.proto.sendCloseFrame() # check we scheduled a call self.assertEqual(len(reactor.calls), 1) # now, advance the clock past the call (and thereby # execute it) reactor.advance(self.proto.closeHandshakeTimeout + 1) # we should have called abortConnection self.assertEqual("call.abortConnection()", str(self.proto.transport.method_calls[-1])) self.assertTrue(self.proto.transport.abortConnection.called) # ...too "internal" for an assert? self.assertEqual(self.proto.state, WebSocketServerProtocol.STATE_CLOSED)
def test_batched_cancel(framework_aio):
    '''
    we can cancel uncalled call_laters
    '''
    # Trollius doesn't come with this, so won't work on py2
    pytest.importorskip('asyncio.test_utils')
    from asyncio.test_utils import TestLoop

    def time_gen():
        yield
        yield
    fake = TestLoop(time_gen)

    fired = []

    def record(*a, **kw):
        fired.append((a, kw))

    with replace_loop(fake):
        timer = txaio.make_batched_timer(1)
        handle = timer.call_later(2, record, "a call")

        # advance clock a bit; shouldn't have fired anything yet
        fake.advance_time(1.2)
        fake._run_once()
        handle.cancel()

        # advancing clock past where we "should" get the call, if it
        # were still active.
        fake.advance_time(4.0)
        fake._run_once()

        # cancelled call never fires
        assert fired == []
def test_auto_pingpong_timeout(self):
    """
    autoping and autoping-timeout timing
    """
    # flip to True locally when debugging this test
    if False:
        self.proto.debug = True
        self.proto.factory._log = print
        self.proto.debugCodePaths = True

    # options are evaluated in succeedHandshake, called below
    self.proto.autoPingInterval = 5
    self.proto.autoPingTimeout = 2

    with replace_loop(Clock()) as reactor:
        # get to STATE_OPEN
        self.proto.data = mock_handshake_client
        self.proto.processHandshake()
        self.assertTrue(
            self.proto.state == WebSocketServerProtocol.STATE_OPEN)

        # we should have scheduled an autoPing
        self.assertEqual(1, len(reactor.calls))
        self.assertEqual(self.proto._sendAutoPing, reactor.calls[0].func)
        # ^^ un-unit-testy to assert on internal method?

        # advance past first auto-ping timeout
        reactor.advance(5)

        # first element from args tuple from transport.write()
        # call is our data
        self.assertTrue(self.transport.write.called)
        data = self.transport.write.call_args[0][0]

        # indexing bytes yields int on py3, a 1-char str on py2
        if _PY3:
            _data = bytes([data[0]])
        else:
            _data = data[0]

        # the opcode is the lower 7 bits of the first byte.
        (opcode, ) = struct.unpack("B", _data)
        opcode = opcode & (~0x80)

        # ... and should be "9" for ping
        self.assertEqual(9, opcode)

        # Because we have autoPingTimeout there should be
        # another delayed-called created now
        self.assertEqual(1, len(reactor.calls))
        self.assertEqual(self.proto.onAutoPingTimeout, reactor.calls[0].func)
        self.assertNotEqual(self.proto.state, self.proto.STATE_CLOSED)

        # ...which we'll now cause to trigger, aborting the connection
        reactor.advance(3)
        self.assertEqual(self.proto.state, self.proto.STATE_CLOSED)
def test_successful_proxy_connect(self, fake_sleep):
    # Configure a websocket transport with an explicit proxy; the
    # component must dial the proxy host/port, not the URL's host.
    endpoint = Mock()
    directlyProvides(endpoint, IStreamClientEndpoint)
    component = Component(
        transports={
            "type": "websocket",
            "url": "ws://127.0.0.1/ws",
            "endpoint": endpoint,
            "proxy": {
                "host": "10.0.0.0",
                "port": 65000,
            },
            "max_retries": 0,
        },
        is_fatal=lambda _: True,
    )

    @component.on_join
    def joined(session, details):
        return session.leave()

    def connect(factory, **kw):
        return succeed(Mock())
    endpoint.connect = connect

    # XXX it would actually be nicer if we *could* support
    # passing a reactor in here, but the _batched_timer =
    # make_batched_timer() stuff (slash txaio in general)
    # makes this "hard".
    reactor = Clock()
    got_proxy_connect = Deferred()

    def _tcp(host, port, factory, **kw):
        # assert the *proxy* address is what gets dialled
        self.assertEqual("10.0.0.0", host)
        self.assertEqual(port, 65000)
        got_proxy_connect.callback(None)
        return endpoint.connect(factory._wrappedFactory)
    reactor.connectTCP = _tcp

    with replace_loop(reactor):
        d = component.start(reactor=reactor)

        def done(x):
            # if the component finished without dialling the proxy,
            # fail got_proxy_connect with whatever we got
            if not got_proxy_connect.called:
                got_proxy_connect.callback(x)
        # make sure we fire all our time-outs
        d.addCallbacks(done, done)
        reactor.advance(3600)
        return got_proxy_connect
def test_auto_pingpong_timeout(self):
    """
    autoping and autoping-timeout timing
    """
    # flip to True locally when debugging this test
    if False:
        self.proto.debug = True
        self.proto.factory._log = print
        self.proto.debugCodePaths = True

    # options are evaluated in succeedHandshake, called below
    self.proto.autoPingInterval = 5
    self.proto.autoPingTimeout = 2

    with replace_loop(Clock()) as reactor:
        # get to STATE_OPEN
        self.proto.data = mock_handshake_client
        self.proto.processHandshake()
        self.assertTrue(self.proto.state == WebSocketServerProtocol.STATE_OPEN)

        # we should have scheduled an autoPing
        self.assertEqual(1, len(reactor.calls))
        self.assertEqual(self.proto._sendAutoPing, reactor.calls[0].func)
        # ^^ un-unit-testy to assert on internal method?

        # advance past first auto-ping timeout
        reactor.advance(5)

        # first element from args tuple from transport.write()
        # call is our data
        self.assertTrue(self.transport.write.called)
        data = self.transport.write.call_args[0][0]

        # indexing bytes yields int on py3, a 1-char str on py2
        if _PY3:
            _data = bytes([data[0]])
        else:
            _data = data[0]

        # the opcode is the lower 7 bits of the first byte.
        (opcode,) = struct.unpack("B", _data)
        opcode = opcode & (~0x80)

        # ... and should be "9" for ping
        self.assertEqual(9, opcode)

        # Because we have autoPingTimeout there should be
        # another delayed-called created now
        self.assertEqual(1, len(reactor.calls))
        self.assertEqual(self.proto.onAutoPingTimeout, reactor.calls[0].func)
        self.assertNotEqual(self.proto.state, self.proto.STATE_CLOSED)

        # ...which we'll now cause to trigger, aborting the connection
        reactor.advance(3)
        self.assertEqual(self.proto.state, self.proto.STATE_CLOSED)
def test_omitted_SSLContext_secure(self):
    '''
    Ensure that loop.create_connection is called with ssl=True if no
    ssl argument is passed to the __init__ method of
    ApplicationRunner and the websocket URL starts with "wss:".
    '''
    with replace_loop(Mock()) as loop:
        loop.run_until_complete = Mock(return_value=(Mock(), Mock()))
        with patch.object(asyncio, 'get_event_loop', return_value=loop):
            runner = ApplicationRunner(u'wss://127.0.0.1:8080/wss', u'realm')
            runner.run(self.fail)
            # the "wss:" scheme alone must have turned TLS on
            ssl_arg = loop.create_connection.call_args[1]['ssl']
            self.assertIs(True, ssl_arg)
def test_conflict_SSL_True_with_ws_url(self):
    '''
    ApplicationRunner must raise an exception if given an ssl value of
    True but only a "ws:" URL.
    '''
    with replace_loop(Mock()) as loop:
        loop.run_until_complete = Mock(return_value=(Mock(), Mock()))
        runner = ApplicationRunner(u'ws://127.0.0.1:8080/wss', u'realm',
                                   ssl=True)
        # raw strings: '\.' and '\?' are regex escapes, not string
        # escapes -- a plain string here triggers an invalid-escape
        # DeprecationWarning/SyntaxWarning on modern Pythons.
        error = (r'^ssl argument value passed to ApplicationRunner '
                 r'conflicts with the "ws:" prefix of the url '
                 r'argument\. Did you mean to use "wss:"\?$')
        self._assertRaisesRegex(Exception, error, runner.run, '_unused_')
def test_explicit_SSLContext(self):
    '''
    Ensure that loop.create_connection is called with the exact SSL
    context object that is passed (as ssl) to the __init__ method of
    ApplicationRunner.
    '''
    with replace_loop(Mock()) as loop:
        loop.run_until_complete = Mock(return_value=(Mock(), Mock()))
        with patch.object(asyncio, 'get_event_loop', return_value=loop):
            # any object will do -- we only check identity pass-through
            ssl = {}
            runner = ApplicationRunner(u'ws://127.0.0.1:8080/ws', u'realm',
                                       ssl=ssl)
            runner.run('_unused_')
            self.assertIs(ssl, loop.create_connection.call_args[1]['ssl'])
def test_successful_proxy_connect(self, fake_sleep):
    # Configure a websocket transport with an explicit proxy; the
    # component must dial the proxy host/port, not the URL's host.
    endpoint = Mock()
    directlyProvides(endpoint, IStreamClientEndpoint)
    component = Component(
        transports={
            u"type": u"websocket",
            u"url": u"ws://127.0.0.1/ws",
            u"endpoint": endpoint,
            u"proxy": {
                u"host": u"10.0.0.0",
                u"port": 65000,
            },
            u"max_retries": 0,
        },
        is_fatal=lambda _: True,
    )

    @component.on_join
    def joined(session, details):
        return session.leave()

    def connect(factory, **kw):
        return succeed(Mock())
    endpoint.connect = connect

    # XXX it would actually be nicer if we *could* support
    # passing a reactor in here, but the _batched_timer =
    # make_batched_timer() stuff (slash txaio in general)
    # makes this "hard".
    reactor = Clock()
    got_proxy_connect = Deferred()

    def _tcp(host, port, factory, **kw):
        # assert the *proxy* address is what gets dialled
        self.assertEqual("10.0.0.0", host)
        self.assertEqual(port, 65000)
        got_proxy_connect.callback(None)
        return endpoint.connect(factory._wrappedFactory)
    reactor.connectTCP = _tcp

    with replace_loop(reactor):
        d = component.start(reactor=reactor)

        def done(x):
            # if the component finished without dialling the proxy,
            # fire got_proxy_connect with whatever we got
            if not got_proxy_connect.called:
                got_proxy_connect.callback(x)
        # make sure we fire all our time-outs
        d.addCallbacks(done, done)
        reactor.advance(3600)
        return got_proxy_connect
def test_auto_ping_got_pong(self):
    """
    auto-ping with correct reply cancels timeout
    """
    # flip to True locally when debugging this test
    if False:
        self.proto.debug = True
        self.proto.factory._log = print
        self.proto.debugCodePaths = True

    # options are evaluated in succeedHandshake, called below
    self.proto.autoPingInterval = 5
    self.proto.autoPingTimeout = 2

    with replace_loop(Clock()) as reactor:
        # get to STATE_OPEN
        self.proto.data = mock_handshake_client
        self.proto.processHandshake()
        self.assertTrue(
            self.proto.state == WebSocketServerProtocol.STATE_OPEN)

        # we should have scheduled an autoPing
        self.assertEqual(1, len(reactor.calls))
        self.assertEqual(self.proto._sendAutoPing, reactor.calls[0].func)
        # ^^ un-unit-testy to assert on internal method?

        # advance past first auto-ping timeout
        reactor.advance(5)

        # should have an auto-ping timeout scheduled, and we
        # save it for later (to check it got cancelled)
        self.assertEqual(1, len(reactor.calls))
        self.assertEqual(self.proto.onAutoPingTimeout, reactor.calls[0].func)
        timeout_call = reactor.calls[0]

        # elsewhere we check that we actually send an opcode-9
        # message; now we just blindly inject our own reply
        # with a PONG frame
        frame = create_client_frame(opcode=10, payload=self.proto.autoPingPending)
        self.proto.data = frame
        # really needed twice; does header first, then rest
        self.proto.processData()
        self.proto.processData()

        # which should have cancelled the call
        self.assertTrue(timeout_call.cancelled)
def test_batched_successful_call(framework_aio):
    '''
    batched calls really happen in batches
    '''
    # Trollius doesn't come with this, so won't work on py2
    pytest.importorskip('asyncio.test_utils')
    from asyncio.test_utils import TestLoop

    # XXX I *really* don't get the point of these generators...
    def time_gen():
        yield
        yield
        yield
    fake = TestLoop(time_gen)

    fired = []

    with replace_loop(fake):
        def record(*a, **kw):
            fired.append((a, kw))

        timer = txaio.make_batched_timer(5)

        # add 3 calls: first 2 should be in the same bucket, 3rd in
        # another bucket
        timer.call_later(5.1, record, "first call")
        timer.call_later(9.9, record, "second call")
        timer.call_later(10.1, record, "third call")

        # advancing 4.9 seconds: shouldn't have expired from a bucket
        fake.advance_time(4.9)
        fake._run_once()
        assert fired == []

        # tick over past first bucket; first two calls should happen
        # (the "5s -> 10s" bucket)
        fake.advance_time(0.2)
        fake._run_once()
        assert fired == [
            (("first call", ), dict()),
            (("second call", ), dict()),
        ]

        # tick into next bucket
        fake.advance_time(5)
        fake._run_once()
        assert len(fired) == 3
        assert fired[2] == (("third call", ), dict())
def test_call_later():
    '''
    Wait for two Futures.
    '''
    # set up a test reactor or event-loop depending on asyncio or
    # Twisted
    twisted = False
    try:
        from twisted.internet.task import Clock
        new_loop = Clock()
        twisted = True
    except ImportError:
        # Trollius doesn't come with this, so won't work on py2
        pytest.importorskip('asyncio.test_utils')

        def time_gen():
            when = yield
            assert when == 1
            # even though we only do one call, I guess TestLoop needs
            # a "trailing" yield? "or something"
            when = yield 0
        from asyncio.test_utils import TestLoop
        new_loop = TestLoop(time_gen)

    calls = []
    with replace_loop(new_loop) as fake_loop:
        def foo(*args, **kw):
            calls.append((args, kw))

        # schedule, confirm nothing fired yet and that we got a
        # cancellable handle back
        delay = txaio.call_later(1, foo, 5, 6, 7, foo="bar")
        assert len(calls) == 0
        assert hasattr(delay, 'cancel')
        if twisted:
            fake_loop.advance(2)
        else:
            # XXX maybe we monkey-patch a ".advance()" onto asyncio
            # loops that does both of these?
            fake_loop.advance_time(2)
            fake_loop._run_once()

        # fired exactly once, with the positional and keyword args
        assert len(calls) == 1
        assert calls[0][0] == (5, 6, 7)
        assert calls[0][1] == dict(foo="bar")
def test_auto_ping_got_pong(self):
    """
    auto-ping with correct reply cancels timeout
    """
    # flip to True locally when debugging this test
    if False:
        self.proto.debug = True
        self.proto.factory._log = print
        self.proto.debugCodePaths = True

    # options are evaluated in succeedHandshake, called below
    self.proto.autoPingInterval = 5
    self.proto.autoPingTimeout = 2

    with replace_loop(Clock()) as reactor:
        # get to STATE_OPEN
        self.proto.data = mock_handshake_client
        self.proto.processHandshake()
        self.assertTrue(self.proto.state == WebSocketServerProtocol.STATE_OPEN)

        # we should have scheduled an autoPing
        self.assertEqual(1, len(reactor.calls))
        self.assertEqual(self.proto._sendAutoPing, reactor.calls[0].func)
        # ^^ un-unit-testy to assert on internal method?

        # advance past first auto-ping timeout
        reactor.advance(5)

        # should have an auto-ping timeout scheduled, and we
        # save it for later (to check it got cancelled)
        self.assertEqual(1, len(reactor.calls))
        self.assertEqual(self.proto.onAutoPingTimeout, reactor.calls[0].func)
        timeout_call = reactor.calls[0]

        # elsewhere we check that we actually send an opcode-9
        # message; now we just blindly inject our own reply
        # with a PONG frame
        frame = create_client_frame(opcode=10, payload=self.proto.autoPingPending)
        self.proto.data = frame
        # really needed twice; does header first, then rest
        self.proto.processData()
        self.proto.processData()

        # which should have cancelled the call
        self.assertTrue(timeout_call.cancelled)
def test_batched_close_to_now(framework_tx):
    '''
    if our current time is fractional, and we make a call_later with a
    tiny delay that's still within the same second, we'll produce a
    negative call_later when adding a bucket; see issue #81
    '''
    from twisted.internet.task import Clock

    class AssertingClock(Clock):
        def callLater(self, delay, *args, **kw):  # noqa
            # 'real' reactors do this, but Clock doesn't assert on
            # this.
            assert delay >= 0
            return Clock.callLater(self, delay, *args, **kw)

    with replace_loop(AssertingClock()) as clock:
        # fractional "now" + sub-second delay used to yield delay < 0
        clock.advance(0.5)
        timer = txaio.make_batched_timer(1, chunk_size=2)
        timer.call_later(0.1, lambda: None)
def test_cancel_while_waiting(self):
    """
    if we start a component but call .stop before it connects, ever,
    it should still exit properly -- even if we're 'between'
    connection attempts
    """
    endpoint = Mock()
    directlyProvides(endpoint, IStreamClientEndpoint)
    component = Component(transports={
        "type": "websocket",
        "url": "ws://127.0.0.1/ws",
        "endpoint": endpoint,
        "max_retries": 0,
        "max_retry_delay": 5,
        "initial_retry_delay": 5,
    },
    )

    # XXX it would actually be nicer if we *could* support
    # passing a reactor in here, but the _batched_timer =
    # make_batched_timer() stuff (slash txaio in general)
    # makes this "hard".
    reactor = Clock()
    with replace_loop(reactor):

        def connect(factory, **kw):
            d = Deferred()
            # NOTE(review): this passes the *result* of
            # d.errback(...) (i.e. None) to callLater, so the
            # Deferred actually fails immediately rather than after
            # 10 seconds -- looks like a bug, but the assertions
            # below depend on the component already being in its
            # retry delay; confirm intent before "fixing".
            reactor.callLater(
                10, d.errback(RuntimeError("no connect for yo")))
            return d
        endpoint.connect = connect

        d0 = component.start(reactor=reactor)
        # the failed connect should have put us "between attempts"
        assert component._delay_f is not None
        assert not component._done_f.called
        d1 = component.stop()
        assert component._done_f is None
        assert d0.called
        yield d1
        yield d0
def test_cancel_while_waiting(self):
    """
    if we start a component but call .stop before it connects, ever,
    it should still exit properly -- even if we're 'between'
    connection attempts
    """
    endpoint = Mock()
    directlyProvides(endpoint, IStreamClientEndpoint)
    component = Component(
        transports={
            "type": "websocket",
            "url": "ws://127.0.0.1/ws",
            "endpoint": endpoint,
            u"max_retries": 0,
            u"max_retry_delay": 5,
            u"initial_retry_delay": 5,
        },
    )

    # XXX it would actually be nicer if we *could* support
    # passing a reactor in here, but the _batched_timer =
    # make_batched_timer() stuff (slash txaio in general)
    # makes this "hard".
    reactor = Clock()
    with replace_loop(reactor):

        def connect(factory, **kw):
            d = Deferred()
            # NOTE(review): this passes the *result* of
            # d.errback(...) (i.e. None) to callLater, so the
            # Deferred actually fails immediately rather than after
            # 10 seconds -- looks like a bug, but the assertions
            # below depend on the component already being in its
            # retry delay; confirm intent before "fixing".
            reactor.callLater(10, d.errback(RuntimeError("no connect for you")))
            return d
        endpoint.connect = connect

        d0 = component.start(reactor=reactor)
        # the failed connect should have put us "between attempts"
        assert component._delay_f is not None
        assert not component._done_f.called
        d1 = component.stop()
        assert component._done_f is None
        assert d0.called
        yield d1
        yield d0
def test_call_later_tx(framework_tx):
    '''
    txaio.call_later() fires the callback with its args/kwargs only
    after the requested delay has elapsed.
    '''
    # (previous docstring "Wait for two Futures." was a copy-paste
    # error -- this test exercises call_later, not future-gathering)
    from twisted.internet.task import Clock

    new_loop = Clock()
    calls = []
    with replace_loop(new_loop) as fake_loop:
        def foo(*args, **kw):
            calls.append((args, kw))

        delay = txaio.call_later(1, foo, 5, 6, 7, foo="bar")
        # nothing fires until the (fake) clock advances...
        assert len(calls) == 0
        # ...and the returned handle is cancellable
        assert hasattr(delay, 'cancel')
        fake_loop.advance(2)

        # the callback ran exactly once, with the args/kwargs we gave
        assert len(calls) == 1
        assert calls[0][0] == (5, 6, 7)
        assert calls[0][1] == dict(foo="bar")
def test_batched_chunks(framework_tx):
    '''
    should yield to reactor every chunk
    '''
    from twisted.internet.task import Clock

    scheduled = []

    class RecordingClock(Clock):
        # remember every "real" callLater so we can count how many
        # underlying delayed calls the batched timer creates
        def callLater(self, *args, **kw):  # noqa
            scheduled.append((args, kw))
            Clock.callLater(self, *args, **kw)

    clock = RecordingClock()
    fired = []

    def cb(*args, **kw):
        fired.append((args, kw))

    with replace_loop(clock):
        batched = txaio.make_batched_timer(1, chunk_size=2)
        batched.call_later(2, cb, "call0")
        batched.call_later(2, cb, "call1")
        batched.call_later(2, cb, "call2")

        # all three calls land in one bucket, so just a single "real"
        # delayed call should be outstanding...
        assert len(scheduled) == 1
        # ...and that call-later should be 2 seconds from now
        assert scheduled[0][0][0] == 2

        # chunk-size is 2: advancing to the bucket's deadline notifies
        # two of the callers and adds another call-later, spread over
        # the bucket-size (i.e. 0.5 seconds out) for the remainder
        clock.advance(2)
        clock.advance(1)
        assert len(fired) == 3
        assert len(scheduled) == 2
        # second call-later is half the interval in the future (0.5s)
        assert scheduled[1][0][0] == 0.5
def test_batched_cancel_too_late(framework_tx):
    '''
    nothing bad happens if we cancel() after the callbacks
    '''
    from twisted.internet.task import Clock

    clock = Clock()
    received = []

    def collector(*args, **kw):
        received.append((args, kw))

    with replace_loop(clock):
        timer = txaio.make_batched_timer(1)
        handle = timer.call_later(2, collector, "a call")

        # the callback fires once we pass the 2-second mark
        clock.advance(2.1)
        assert len(received) == 1

        # cancelling an already-fired call is a harmless no-op
        handle.cancel()
        assert len(received) == 1

        # ...and nothing further fires as time keeps advancing
        clock.advance(1)
        assert len(received) == 1
def test_batched_cancel(framework_tx):
    '''
    we can cancel uncalled call_laters
    '''
    # (docstring restored to match the asyncio twin of this test --
    # it was left empty here)
    from twisted.internet.task import Clock

    new_loop = Clock()
    calls = []

    def foo(*args, **kw):
        calls.append((args, kw))

    with replace_loop(new_loop):
        batched = txaio.make_batched_timer(1)
        call = batched.call_later(2, foo, "a call")

        # advance clock a bit; shouldn't have fired anything yet
        new_loop.advance(1.2)
        call.cancel()

        # advancing clock past where we "should" get the call, if it
        # were still active.
        new_loop.advance(4.0)
        assert len(calls) == 0
def test_publish_traced_events_batched(self): """ with two subscribers and message tracing the last event should have a magic flag """ # we want to trigger a deeply-nested condition in # processPublish in class Broker -- lets try w/o refactoring # anything first... class TestSession(ApplicationSession): pass session0 = TestSession() session1 = TestSession() session2 = TestSession() session3 = TestSession() session4 = TestSession() # NOTE! We ensure that "session0" (the publishing session) is # *last* in the observation-list to trigger a (now fixed) # edge-case) sessions = [session1, session2, session3, session4, session0] router = mock.MagicMock() router.send = mock.Mock() router.new_correlation_id = lambda: u'fake correlation id' router.is_traced = True clock = Clock() with replace_loop(clock): broker = Broker(router, clock) broker._options.event_dispatching_chunk_size = 2 # to ensure we get "session0" last, we turn on ordering in # the observations broker._subscription_map._ordered = 1 # let's just "cheat" our way a little to the right state by # injecting our subscription "directly" (e.g. instead of # faking out an entire Subscribe etc. 
flow # ...so we need _subscriptions_map to have at least one # subscription (our test one) for the topic we'll publish to for session in sessions: broker._subscription_map.add_observer(session, u'test.topic') for i, sess in enumerate(sessions): sess._session_id = 1000 + i sess._transport = mock.MagicMock() sess._transport.get_channel_id = mock.MagicMock(return_value=b'deadbeef') # here's the main "cheat"; we're faking out the # router.authorize because we need it to callback immediately router.authorize = mock.MagicMock(return_value=txaio.create_future_success(dict(allow=True, cache=False, disclose=True))) # now we scan call "processPublish" such that we get to the # condition we're interested in; should go to all sessions # except session0 pubmsg = message.Publish(123, u'test.topic') broker.processPublish(session0, pubmsg) clock.advance(1) clock.advance(1) # extract all the event calls events = [ call[1][1] for call in router.send.mock_calls if call[1][0] in [session0, session1, session2, session3, session4] ] # all except session0 should have gotten an event, and # session4's should have the "last" flag set self.assertEqual(4, len(events)) self.assertFalse(events[0].correlation_is_last) self.assertFalse(events[1].correlation_is_last) self.assertFalse(events[2].correlation_is_last) self.assertTrue(events[3].correlation_is_last)
def test_publish_traced_events_batched(self): """ with two subscribers and message tracing the last event should have a magic flag """ # we want to trigger a deeply-nested condition in # processPublish in class Broker -- lets try w/o refactoring # anything first... class TestSession(ApplicationSession): pass session0 = TestSession() session1 = TestSession() session2 = TestSession() session3 = TestSession() session4 = TestSession() # NOTE! We ensure that "session0" (the publishing session) is # *last* in the observation-list to trigger a (now fixed) # edge-case) sessions = [session1, session2, session3, session4, session0] router = mock.MagicMock() router.send = mock.Mock() router.new_correlation_id = lambda: 'fake correlation id' router.is_traced = True clock = Clock() with replace_loop(clock): broker = Broker(router, clock) broker._options.event_dispatching_chunk_size = 2 # to ensure we get "session0" last, we turn on ordering in # the observations broker._subscription_map._ordered = 1 # let's just "cheat" our way a little to the right state by # injecting our subscription "directly" (e.g. instead of # faking out an entire Subscribe etc. 
flow # ...so we need _subscriptions_map to have at least one # subscription (our test one) for the topic we'll publish to for session in sessions: broker._subscription_map.add_observer(session, 'test.topic') for i, sess in enumerate(sessions): sess._session_id = 1000 + i sess._transport = mock.MagicMock() sess._transport.get_channel_id = mock.MagicMock( return_value=b'deadbeef') # here's the main "cheat"; we're faking out the # router.authorize because we need it to callback immediately router.authorize = mock.MagicMock( return_value=txaio.create_future_success( dict(allow=True, cache=False, disclose=True))) # now we scan call "processPublish" such that we get to the # condition we're interested in; should go to all sessions # except session0 pubmsg = message.Publish(123, 'test.topic') broker.processPublish(session0, pubmsg) clock.advance(1) clock.advance(1) # extract all the event calls events = [ call[1][1] for call in router.send.mock_calls if call[1][0] in [session0, session1, session2, session3, session4] ] # all except session0 should have gotten an event, and # session4's should have the "last" flag set self.assertEqual(4, len(events)) self.assertFalse(events[0].correlation_is_last) self.assertFalse(events[1].correlation_is_last) self.assertFalse(events[2].correlation_is_last) self.assertTrue(events[3].correlation_is_last)
def test_successful_connect(self, fake_sleep):
    """
    a transport whose connect() drives a complete WebSocket + WAMP
    handshake should fire the component's 'join' event exactly once
    """
    endpoint = Mock()
    joins = []

    def joined(session, details):
        joins.append((session, details))
        return session.leave()

    directlyProvides(endpoint, IStreamClientEndpoint)
    component = Component(
        transports={
            "type": "websocket",
            "url": "ws://127.0.0.1/ws",
            "endpoint": endpoint,
        })
    component.on('join', joined)

    def connect(factory, **kw):
        proto = factory.buildProtocol('ws://localhost/')
        transport = FakeTransport()
        proto.makeConnection(transport)

        from autobahn.websocket.protocol import WebSocketProtocol
        from base64 import b64encode
        from hashlib import sha1

        # fake a valid server handshake reply; the accept-value is
        # derived from the client's key + the WS magic GUID
        key = proto.websocket_key + WebSocketProtocol._WS_MAGIC
        proto.data = (
            b"HTTP/1.1 101 Switching Protocols\x0d\x0a"
            b"Upgrade: websocket\x0d\x0a"
            b"Connection: upgrade\x0d\x0a"
            b"Sec-Websocket-Protocol: wamp.2.json\x0d\x0a"
            b"Sec-Websocket-Accept: " + b64encode(sha1(key).digest()) + b"\x0d\x0a\x0d\x0a"
        )
        proto.processHandshake()

        from autobahn.wamp import role
        features = role.RoleBrokerFeatures(
            publisher_identification=True,
            pattern_based_subscription=True,
            session_meta_api=True,
            subscription_meta_api=True,
            subscriber_blackwhite_listing=True,
            publisher_exclusion=True,
            subscription_revocation=True,
            payload_transparency=True,
            payload_encryption_cryptobox=True,
        )

        # complete the WAMP-level join, then shut the session down
        msg = Welcome(123456, dict(broker=features), realm='realm')
        serializer = JsonSerializer()
        data, is_binary = serializer.serialize(msg)
        proto.onMessage(data, is_binary)

        msg = Goodbye()
        proto.onMessage(*serializer.serialize(msg))
        proto.onClose(True, 100, "some old reason")

        return succeed(proto)
    endpoint.connect = connect

    # XXX it would actually be nicer if we *could* support
    # passing a reactor in here, but the _batched_timer =
    # make_batched_timer() stuff (slash txaio in general)
    # makes this "hard".
    reactor = Clock()
    with replace_loop(reactor):
        yield component.start(reactor=reactor)
        # fix: was assertTrue(len(joins), 1), which only checked
        # truthiness (the "1" was the msg argument) -- we want
        # exactly one join
        self.assertEqual(len(joins), 1)
        # make sure we fire all our time-outs
        reactor.advance(3600)
def test_successful_connect(self, fake_sleep):
    """
    a transport whose connect() drives a complete WebSocket + WAMP
    handshake should fire the component's 'join' event exactly once
    """
    endpoint = Mock()
    joins = []

    def joined(session, details):
        joins.append((session, details))
        return session.leave()

    directlyProvides(endpoint, IStreamClientEndpoint)
    component = Component(
        transports={
            "type": "websocket",
            "url": "ws://127.0.0.1/ws",
            "endpoint": endpoint,
        }
    )
    component.on('join', joined)

    def connect(factory, **kw):
        proto = factory.buildProtocol('boom')
        proto.makeConnection(Mock())

        from autobahn.websocket.protocol import WebSocketProtocol
        from base64 import b64encode
        from hashlib import sha1

        # fake a valid server handshake reply; the accept-value is
        # derived from the client's key + the WS magic GUID
        key = proto.websocket_key + WebSocketProtocol._WS_MAGIC
        proto.data = (
            b"HTTP/1.1 101 Switching Protocols\x0d\x0a"
            b"Upgrade: websocket\x0d\x0a"
            b"Connection: upgrade\x0d\x0a"
            b"Sec-Websocket-Protocol: wamp.2.json\x0d\x0a"
            b"Sec-Websocket-Accept: " + b64encode(sha1(key).digest()) + b"\x0d\x0a\x0d\x0a"
        )
        proto.processHandshake()

        from autobahn.wamp import role
        features = role.RoleBrokerFeatures(
            publisher_identification=True,
            pattern_based_subscription=True,
            session_meta_api=True,
            subscription_meta_api=True,
            subscriber_blackwhite_listing=True,
            publisher_exclusion=True,
            subscription_revocation=True,
            payload_transparency=True,
            payload_encryption_cryptobox=True,
        )

        # complete the WAMP-level join, then shut the session down
        msg = Welcome(123456, dict(broker=features), realm=u'realm')
        serializer = JsonSerializer()
        data, is_binary = serializer.serialize(msg)
        proto.onMessage(data, is_binary)

        msg = Goodbye()
        proto.onMessage(*serializer.serialize(msg))
        proto.onClose(True, 100, "some old reason")

        return succeed(proto)
    endpoint.connect = connect

    # XXX it would actually be nicer if we *could* support
    # passing a reactor in here, but the _batched_timer =
    # make_batched_timer() stuff (slash txaio in general)
    # makes this "hard".
    reactor = Clock()
    with replace_loop(reactor):
        yield component.start(reactor=reactor)
        # fix: was assertTrue(len(joins), 1), which only checked
        # truthiness (the "1" was the msg argument) -- we want
        # exactly one join
        self.assertEqual(len(joins), 1)
        # make sure we fire all our time-outs
        reactor.advance(3600)