def test_keepalive(self):
    """
    A client that connects with a keepalive interval and then sends no
    data for keep_alive * 1.5 seconds is disconnected.

    Compliance statement MQTT-3.1.2-24
    """
    handler = BasicHandler()
    clock = Clock()
    transport = StringTransport()
    protocol = MQTTServerTwistedProtocol(handler, clock, {})
    protocol.makeConnection(transport)

    # CONNECT packet carrying a keepalive of 2 seconds.
    connect = unhexlify(b"101300044d51545404020002000774657374313233")

    # Feed the packet to the protocol one byte at a time.
    for byte in iterbytes(connect):
        protocol.dataReceived(byte)

    # Exactly one disconnect timer should be pending, set to fire at
    # keep_alive * 1.5 = 3.0 seconds.
    self.assertEqual(len(clock.calls), 1)
    self.assertEqual(clock.calls[0].func, protocol._lose_connection)
    self.assertEqual(clock.calls[0].getTime(), 3.0)
    self.assertFalse(transport.disconnecting)

    # Just before the deadline the connection is still up...
    clock.advance(2.9)
    self.assertFalse(transport.disconnecting)

    # ...and it is dropped once the deadline passes.
    clock.advance(0.1)
    self.assertTrue(transport.disconnecting)
def test_keepalive_requires_full_packet(self):
    """
    Only a complete packet resets the keepalive deadline: a client
    that sends nothing but a partial packet within keep_alive * 1.5
    seconds is still disconnected.

    Compliance statement MQTT-3.1.2-24
    """
    handler = BasicHandler()
    clock = Clock()
    transport = StringTransport()
    protocol = MQTTServerTwistedProtocol(handler, clock, {})
    protocol.makeConnection(transport)

    # CONNECT packet carrying a keepalive of 2 seconds, delivered
    # byte by byte.
    connect = unhexlify(b"101300044d51545404020002000774657374313233")
    for byte in iterbytes(connect):
        protocol.dataReceived(byte)

    # One disconnect timer pending at keep_alive * 1.5 = 3.0 seconds.
    self.assertEqual(len(clock.calls), 1)
    self.assertEqual(clock.calls[0].func, protocol._lose_connection)
    self.assertEqual(clock.calls[0].getTime(), 3.0)
    self.assertFalse(transport.disconnecting)

    clock.advance(2.9)
    self.assertFalse(transport.disconnecting)

    # Send only the PINGREQ header with no body -- an incomplete packet.
    for byte in iterbytes(unhexlify(b"c0")):
        protocol.dataReceived(byte)

    # The deadline must be unchanged. If raw data (rather than a full
    # packet) reset the timeout, the delayed call would instead fire
    # at 2.9 + 3.
    self.assertEqual(len(clock.calls), 1)
    self.assertEqual(clock.calls[0].func, protocol._lose_connection)
    self.assertEqual(clock.calls[0].getTime(), 3.0)

    clock.advance(0.1)
    self.assertTrue(transport.disconnecting)
def test_keepalive_full_packet_resets_timeout(self):
    """
    A client that delivers a complete packet within keep_alive * 1.5
    seconds keeps its connection, and the disconnect deadline is
    pushed back.
    """
    handler = BasicHandler()
    clock = Clock()
    transport = StringTransport()
    protocol = MQTTServerTwistedProtocol(handler, clock, {})
    protocol.makeConnection(transport)

    # CONNECT packet carrying a keepalive of 2 seconds, delivered
    # byte by byte.
    connect = unhexlify(b"101300044d51545404020002000774657374313233")
    for byte in iterbytes(connect):
        protocol.dataReceived(byte)

    # One disconnect timer pending at keep_alive * 1.5 = 3.0 seconds.
    self.assertEqual(len(clock.calls), 1)
    self.assertEqual(clock.calls[0].func, protocol._lose_connection)
    self.assertEqual(clock.calls[0].getTime(), 3.0)
    self.assertFalse(transport.disconnecting)

    clock.advance(2.9)
    self.assertFalse(transport.disconnecting)

    # Send a complete PINGREQ packet (header + zero-length body).
    for byte in iterbytes(unhexlify(b"c000")):
        protocol.dataReceived(byte)

    # The deadline moved: it is now the receive time (2.9) plus the
    # 3-second keepalive window.
    self.assertEqual(len(clock.calls), 1)
    self.assertEqual(clock.calls[0].func, protocol._lose_connection)
    self.assertEqual(clock.calls[0].getTime(), 2.9 + 3.0)

    clock.advance(0.1)
    self.assertFalse(transport.disconnecting)
def test_publish_traced_events_batched(self):
    """
    With four subscribers, message tracing enabled, and events
    dispatched in batches, the final event delivered for a traced
    publish carries the ``correlation_is_last`` flag.
    """
    # We want to hit a deeply-nested condition in Broker.processPublish
    # directly, without refactoring anything first.

    class TestSession(ApplicationSession):
        pass

    publisher = TestSession()
    subscribers = [TestSession() for _ in range(4)]

    # NOTE: the publishing session is deliberately placed *last* in the
    # observation list to trigger a (now fixed) edge case.
    all_sessions = subscribers + [publisher]

    router = mock.MagicMock()
    router.send = mock.Mock()
    router.new_correlation_id = lambda: 'fake correlation id'
    router.is_traced = True
    clock = Clock()

    with replace_loop(clock):
        broker = Broker(router, clock)
        broker._options.event_dispatching_chunk_size = 2

        # Ordered observations guarantee the publisher really comes last.
        broker._subscription_map._ordered = 1

        # "Cheat" by injecting the subscriptions directly instead of
        # driving a full Subscribe exchange; the subscription map just
        # needs observers for the topic we publish to.
        for sess in all_sessions:
            broker._subscription_map.add_observer(sess, 'test.topic')

        for idx, sess in enumerate(all_sessions):
            sess._session_id = 1000 + idx
            sess._transport = mock.MagicMock()
            sess._transport.get_channel_id = mock.MagicMock(
                return_value=b'deadbeef')

        # Fake out router.authorize with an already-resolved future so
        # the publish proceeds immediately.
        router.authorize = mock.MagicMock(
            return_value=txaio.create_future_success(
                dict(allow=True, cache=False, disclose=True)))

        # Publish; every session except the publisher should receive an
        # event.
        pubmsg = message.Publish(123, 'test.topic')
        broker.processPublish(publisher, pubmsg)
        clock.advance(1)
        clock.advance(1)

        # Collect the events router.send delivered to our sessions.
        events = [
            call[1][1]
            for call in router.send.mock_calls
            if call[1][0] in all_sessions
        ]

        # Four events (publisher excluded); only the final one is
        # flagged as the last correlated event.
        self.assertEqual(4, len(events))
        for event in events[:-1]:
            self.assertFalse(event.correlation_is_last)
        self.assertTrue(events[-1].correlation_is_last)
def test_publish_traced_events_batched(self):
    """
    When message tracing is on and events go out in batches, the last
    event sent for a traced publish has the ``correlation_is_last``
    flag set.

    NOTE(review): this method appears to share its name with an
    earlier ``test_publish_traced_events_batched``; if both live in
    the same test class, this later definition shadows the earlier one
    and only this one runs -- confirm, and rename one of them.
    """
    # Exercise the deeply-nested batching condition in
    # Broker.processPublish without any refactoring.

    class TestSession(ApplicationSession):
        pass

    session0 = TestSession()  # the publishing session
    session1 = TestSession()
    session2 = TestSession()
    session3 = TestSession()
    session4 = TestSession()

    # The publisher is deliberately *last* in the observation list, to
    # exercise a (now fixed) edge case.
    sessions = [session1, session2, session3, session4, session0]

    router = mock.MagicMock()
    router.send = mock.Mock()
    router.new_correlation_id = lambda: u'fake correlation id'
    router.is_traced = True
    clock = Clock()

    with replace_loop(clock):
        broker = Broker(router, clock)
        broker._options.event_dispatching_chunk_size = 2

        # Ordered observations make sure session0 really is last.
        broker._subscription_map._ordered = 1

        # Inject subscriptions directly rather than running a full
        # Subscribe exchange -- the map just needs observers for the
        # topic being published to.
        for sess in sessions:
            broker._subscription_map.add_observer(sess, u'test.topic')

        next_id = 1000
        for sess in sessions:
            sess._session_id = next_id
            next_id += 1
            sess._transport = mock.MagicMock()
            sess._transport.get_channel_id = mock.MagicMock(
                return_value=b'deadbeef')

        # Stub router.authorize with an already-fired future so the
        # publish can proceed immediately.
        router.authorize = mock.MagicMock(
            return_value=txaio.create_future_success(
                dict(allow=True, cache=False, disclose=True)))

        broker.processPublish(session0, message.Publish(123, u'test.topic'))
        clock.advance(1)
        clock.advance(1)

        # Pull out the events router.send delivered to our sessions.
        watched = [session0, session1, session2, session3, session4]
        events = [
            call[1][1] for call in router.send.mock_calls
            if call[1][0] in watched
        ]

        # Everyone except the publisher got an event; only the final
        # one (session4's) carries the last-flag.
        self.assertEqual(4, len(events))
        self.assertFalse(events[0].correlation_is_last)
        self.assertFalse(events[1].correlation_is_last)
        self.assertFalse(events[2].correlation_is_last)
        self.assertTrue(events[3].correlation_is_last)