Code Example #1
        def test_connect_no_auth_method(self, fake_sleep):
            endpoint = Mock()

            directlyProvides(endpoint, IStreamClientEndpoint)
            component = Component(
                transports={
                    "type": "websocket",
                    "url": "ws://127.0.0.1/ws",
                    "endpoint": endpoint,
                },
                is_fatal=lambda e: True,
            )

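            # The fake endpoint below completes a WebSocket + WAMP handshake and then
            # aborts the session with 'wamp.error.no_auth_method', so component.start()
            # is expected to give up with the "Exhausted all transport" error asserted below.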
            def connect(factory, **kw):
                proto = factory.buildProtocol('boom')
                proto.makeConnection(Mock())

                from autobahn.websocket.protocol import WebSocketProtocol
                from base64 import b64encode
                from hashlib import sha1
                key = proto.websocket_key + WebSocketProtocol._WS_MAGIC
                proto.data = (
                    b"HTTP/1.1 101 Switching Protocols\x0d\x0a"
                    b"Upgrade: websocket\x0d\x0a"
                    b"Connection: upgrade\x0d\x0a"
                    b"Sec-Websocket-Protocol: wamp.2.json\x0d\x0a"
                    b"Sec-Websocket-Accept: " + b64encode(sha1(key).digest()) + b"\x0d\x0a\x0d\x0a"
                )
                proto.processHandshake()

                from autobahn.wamp import role
                subrole = role.RoleSubscriberFeatures()

                msg = Hello(u"realm", roles=dict(subscriber=subrole), authmethods=[u"anonymous"])
                serializer = JsonSerializer()
                data, is_binary = serializer.serialize(msg)
                proto.onMessage(data, is_binary)

                msg = Abort(reason=u"wamp.error.no_auth_method")
                proto.onMessage(*serializer.serialize(msg))
                proto.onClose(False, 100, u"wamp.error.no_auth_method")

                return succeed(proto)
            endpoint.connect = connect

            # XXX it would actually be nicer if we *could* support
            # passing a reactor in here, but the _batched_timer =
            # make_batched_timer() stuff (slash txaio in general)
            # makes this "hard".
            reactor = Clock()
            with replace_loop(reactor):
                with self.assertRaises(RuntimeError) as ctx:
                    d = component.start(reactor=reactor)
                    # make sure we fire all our time-outs
                    reactor.advance(3600)
                    yield d
            self.assertIn(
                "Exhausted all transport",
                str(ctx.exception)
            )
Code Example #2
File: test_protocol.py Project: deepakhajare/maas
class SuccessfulAsyncDispatch(unittest.TestCase):

    def setUp(self):
        self.clock = Clock()
        self.tmp_dir_path = tempfile.mkdtemp()
        with FilePath(self.tmp_dir_path).child('nonempty').open('w') as fd:
            fd.write('Something uninteresting')
        self.backend = FilesystemAsyncBackend(self.tmp_dir_path, self.clock)
        self.tftp = TFTP(self.backend, self.clock)

    def test_get_reader_defers(self):
        rrq_datagram = RRQDatagram('nonempty', 'NetASCiI', {})
        rrq_addr = ('127.0.0.1', 1069)
        rrq_mode = "octet"
        d = self.tftp._startSession(rrq_datagram, rrq_addr, rrq_mode)
        self.assertFalse(d.called)
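        # The backend was given the test Clock, so the reader is only
        # delivered after the clock advances.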
        self.clock.advance(1)
        self.assertTrue(d.called)
        self.assertTrue(IReader.providedBy(d.result.backend))

    def test_get_writer_defers(self):
        wrq_datagram = WRQDatagram('foobar', 'NetASCiI', {})
        wrq_addr = ('127.0.0.1', 1069)
        wrq_mode = "octet"
        d = self.tftp._startSession(wrq_datagram, wrq_addr, wrq_mode)
        self.assertFalse(d.called)
        self.clock.advance(1)
        self.assertTrue(d.called)
        self.assertTrue(IWriter.providedBy(d.result.backend))
Code Example #3
File: test_loop.py Project: punalpatel/flocker
    def test_convergence_error_start_new_iteration(self, logger):
        """
        Even if the convergence fails, a new iteration is started anyway.
        """
        local_state = NodeState(hostname=u'192.0.2.123')
        configuration = Deployment(nodes=frozenset([to_node(local_state)]))
        state = DeploymentState(nodes=[local_state])
        action = ControllableAction(result=fail(RuntimeError()))
        # First discovery succeeds, leading to failing action; second
        # discovery will just wait for Deferred to fire. Thus we expect to
        # finish test in discovery state.
        deployer = ControllableDeployer(
            local_state.hostname,
            [succeed(local_state), Deferred()],
            [action])
        client = self.make_amp_client([local_state])
        reactor = Clock()
        loop = build_convergence_loop_fsm(reactor, deployer)
        self.patch(loop, "logger", logger)
        loop.receive(_ClientStatusUpdate(
            client=client, configuration=configuration, state=state))
        reactor.advance(1.0)
        # Calculating actions happened, result was run and caused error...
        # but we started on loop again and are thus in discovery state,
        # which we can tell because all faked local states have been
        # consumed:
        self.assertEqual(len(deployer.local_states), 0)
Code Example #4
    def test_get_probe_timeout(self):
        """
        CreateContainer probe times out if get_probe runs too long.
        """
        clock = Clock()

        node_id = uuid4()
        node = Node(uuid=node_id, public_address=IPAddress('10.0.0.1'))
        control_service = FakeFlockerClient([node], node_id)

        cluster = BenchmarkCluster(
            IPAddress('10.0.0.1'),
            lambda reactor: control_service,
            {},
            None,
        )
        operation = CreateContainer(clock, cluster)
        d = operation.get_probe()

        clock.advance(DEFAULT_TIMEOUT.total_seconds())

        # No control_service.synchronize_state() call, so cluster state
        # never shows container is created.

        # The Deferred fails if container not created within 10 minutes.
        self.failureResultOf(d)
Code Example #5
File: test_session.py Project: isaacm/mimic
    def test_impersonation(self):
        """
        SessionStore.session_for_impersonation will return a session that can
        be retrieved by token_id but not username.
        """
        clock = Clock()
        sessions = SessionStore(clock)
        A_LITTLE = 1234
        clock.advance(A_LITTLE)
        A_LOT = 65432
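        # The session is created at clock time A_LITTLE with a lifetime of A_LOT
        # seconds, so it should expire at A_LITTLE + A_LOT (asserted below).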
        a = sessions.session_for_impersonation("pretender", A_LOT)
        a_prime = sessions.session_for_impersonation("pretender", A_LOT)
        self.assertIdentical(a, a_prime)
        b = sessions.session_for_token(a.token)
        self.assertEqual(
            a.expires, datetime.utcfromtimestamp(A_LITTLE + A_LOT))
        self.assertIdentical(a, b)
        c = sessions.session_for_username_password("pretender",
                                                   "not a password")
        self.assertNotIdentical(a, c)
        self.assertEqual(a.username, c.username)
        self.assertEqual(a.tenant_id, c.tenant_id)

        # Right now all data_for_api cares about is hashability; this may need
        # to change if it comes to rely upon its argument actually being an API
        # mock.
        same_api = 'not_an_api'

        username_data = c.data_for_api(same_api, list)
        token_data = b.data_for_api(same_api, list)
        impersonation_data = a.data_for_api(same_api, list)

        self.assertIs(username_data, impersonation_data)
        self.assertIs(token_data, impersonation_data)
Code Example #6
File: test_loop.py Project: punalpatel/flocker
    def test_convergence_done_unchanged_notify(self):
        """
        An FSM doing convergence that discovers state unchanged from the last
        state acknowledged by the control service does not re-send that state.
        """
        local_state = NodeState(hostname=u'192.0.2.123')
        configuration = Deployment(nodes=[to_node(local_state)])
        state = DeploymentState(nodes=[local_state])
        deployer = ControllableDeployer(
            local_state.hostname,
            [succeed(local_state), succeed(local_state.copy())],
            [no_action(), no_action()]
        )
        client = self.make_amp_client([local_state])
        reactor = Clock()
        loop = build_convergence_loop_fsm(reactor, deployer)
        loop.receive(_ClientStatusUpdate(
            client=client, configuration=configuration, state=state))
        reactor.advance(1.0)

        # Calculating actions happened, result was run... and then we did
        # whole thing again:
        self.assertEqual(
            (deployer.calculate_inputs, client.calls),
            (
                # Check that the loop has run twice
                [(local_state, configuration, state),
                 (local_state, configuration, state)],
                # But that state was only sent once.
                [(NodeStateCommand, dict(state_changes=(local_state,)))],
            )
        )
Code Example #7
File: test_loop.py Project: punalpatel/flocker
    def test_convergence_sent_state_fail_resends(self):
        """
        If sending state to the control node fails, the next iteration will send
        state even if the state hasn't changed.
        """
        local_state = NodeState(hostname=u'192.0.2.123')
        configuration = Deployment(nodes=[to_node(local_state)])
        state = DeploymentState(nodes=[local_state])
        deployer = ControllableDeployer(
            local_state.hostname,
            [succeed(local_state), succeed(local_state.copy())],
            [no_action(), no_action()])
        client = self.make_amp_client(
            [local_state, local_state.copy()], succeed=False
        )
        reactor = Clock()
        loop = build_convergence_loop_fsm(reactor, deployer)
        loop.receive(_ClientStatusUpdate(
            client=client, configuration=configuration, state=state))
        reactor.advance(1.0)

        # Calculating actions happened, result was run... and then we did
        # whole thing again:
        self.assertTupleEqual(
            (deployer.calculate_inputs, client.calls),
            (
                # Check that the loop has run twice
                [(local_state, configuration, state),
                 (local_state, configuration, state)],
                # And that state was re-sent even though it remained unchanged
                [(NodeStateCommand, dict(state_changes=(local_state,))),
                 (NodeStateCommand, dict(state_changes=(local_state,)))],
            )
        )
Code Example #8
    def test_sendHeartbeat(self):

        xmppConfig = Config(PListConfigProvider(DEFAULT_CONFIG))
        xmppConfig.Notifications["Services"]["XMPPNotifier"]["Enabled"] = True
        xmppConfig.ServerHostName = "server.example.com"
        xmppConfig.HTTPPort = 80

        clock = Clock()
        xmlStream = StubXmlStream()
        settings = { "ServiceAddress" : "pubsub.example.com", "JID" : "jid",
            "Password" : "password", "KeepAliveSeconds" : 5,
            "NodeConfiguration" : { "pubsub#deliver_payloads" : "1" },
            "HeartbeatMinutes" : 30 }
        notifier = XMPPNotifier(settings, reactor=clock, heartbeat=True,
            roster=False, configOverride=xmppConfig)
        factory = XMPPNotificationFactory(notifier, settings, reactor=clock,
            keepAlive=False)
        factory.connected(xmlStream)
        factory.authenticated(xmlStream)

        self.assertEquals(len(xmlStream.elements), 1)
        heartbeat = xmlStream.elements[0]
        self.assertEquals(heartbeat.name, "iq")

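        # HeartbeatMinutes is 30, so advancing the clock 1800 seconds should
        # produce a second heartbeat iq.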
        clock.advance(1800)

        self.assertEquals(len(xmlStream.elements), 2)
        heartbeat = xmlStream.elements[1]
        self.assertEquals(heartbeat.name, "iq")

        factory.disconnected(xmlStream)
        clock.advance(1800)
        self.assertEquals(len(xmlStream.elements), 2)
Code Example #9
    def test_sendPresence(self):
        clock = Clock()
        xmlStream = StubXmlStream()
        settings = { "ServiceAddress" : "pubsub.example.com", "JID" : "jid",
            "NodeConfiguration" : { "pubsub#deliver_payloads" : "1" },
            "Password" : "password", "KeepAliveSeconds" : 5 }
        notifier = XMPPNotifier(settings, reactor=clock, heartbeat=False)
        factory = XMPPNotificationFactory(notifier, settings, reactor=clock)
        factory.connected(xmlStream)
        factory.authenticated(xmlStream)

        self.assertEquals(len(xmlStream.elements), 2)
        presence = xmlStream.elements[0]
        self.assertEquals(presence.name, "presence")
        iq = xmlStream.elements[1]
        self.assertEquals(iq.name, "iq")

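        # KeepAliveSeconds is 5, so advancing the clock 5 seconds should send
        # another presence.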
        clock.advance(5)

        self.assertEquals(len(xmlStream.elements), 3)
        presence = xmlStream.elements[2]
        self.assertEquals(presence.name, "presence")

        factory.disconnected(xmlStream)
        clock.advance(5)
        self.assertEquals(len(xmlStream.elements), 3)
Code Example #10
File: collector.py Project: inkhey/mmc
    def test04_get_only_valid_requests(self):
        """
        - create a lot of requests marked as 'expired'
        - wait some time
        - create another lot of requests marked as 'valid'
        -> check if only 'valid' requests present
        """

        clock = Clock()

        sessions = Sessions(False, 10, clock)
        collector = Collector(sessions)

        dl = []
        for i in xrange(10):
            d = collector.queue_and_process("192.168.45.12", "expired")
            dl.append(d)

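        # Advancing past the session timeout expires the first batch of 'expired'
        # requests; only the 'valid' ones created afterwards should remain.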
        clock.advance(15)

        for i in xrange(10):
            d = collector.queue_and_process("192.168.45.12", "valid")
            dl.append(d)

        dfl = DeferredList(dl)
        @dfl.addCallback
        def get_result(ignored):

            for i in xrange(10):
                uid, ip, request = collector.get()
                self.assertEqual(request, "valid")
Code Example #11
File: test_xml_over_tcp.py Project: AndrewCvekl/vumi
    def test_timeout(self):
        request_body = (
            "<ENQRequest>"
            "<requestId>0</requestId>"
            "<enqCmd>ENQUIRELINK</enqCmd>"
            "</ENQRequest>")
        expected_request_packet = utils.mk_packet('0', request_body)

        clock = Clock()
        self.client.clock = clock
        self.client.enquire_link_interval = 120
        self.client.timeout_period = 20
        self.client.authenticated = True
        self.client.start_periodic_enquire_link()

        # wait for the first enquire link request
        received_request_packet = yield self.server.wait_for_data()
        self.assertEqual(expected_request_packet, received_request_packet)

        # advance to just before the timeout should occur
        clock.advance(19.9)
        self.assertFalse(self.client.disconnected)

        # advance to just after the timeout should occur
        clock.advance(0.1)
        self.assertTrue(self.client.disconnected)
        self.assert_in_log(
            'msg',
            "No enquire link response received after 20 seconds, "
            "disconnecting")
Code Example #12
File: test_furnace.py Project: dkkline/bravo
    def test_timer_mega_drift(self):
        # Patch the clock.
        clock = Clock()
        self.tile.burning.clock = clock

        # we have more wood than we need and we can process 2 blocks
        # but we have space only for one
        self.tile.inventory.fuel[0] = Slot(blocks['sapling'].slot, 0, 10)
        self.tile.inventory.crafting[0] = Slot(blocks['sand'].slot, 0, 2)
        self.tile.inventory.crafted[0] = Slot(blocks['glass'].slot, 0, 63)
        self.tile.changed(self.factory, coords)

        # Pump the clock. Burn time is 20s.
        clock.advance(20)

        self.assertEqual(self.factory.world.chunk.states[0],
                         blocks["burning-furnace"].slot) # it was started...
        self.assertEqual(self.factory.world.chunk.states[1],
                         blocks["furnace"].slot) # ...and stopped at the end
        self.assertEqual(self.tile.inventory.fuel[0], (blocks['sapling'].slot, 0, 8))
        self.assertEqual(self.tile.inventory.crafting[0], (blocks['sand'].slot, 0, 1))
        self.assertEqual(self.tile.inventory.crafted[0], (blocks['glass'].slot, 0, 64))
        headers = [header[0] for header, params in self.protocol.write_packet_calls]
        # 2 updates for the fuel slot (2 saplings burned)
        # 1 update for the crafting slot (1 sand block melted)
        # 1 update for the crafted slot (1 glass block crafted)
        self.assertEqual(headers.count('window-slot'), 4)
Code Example #13
    def test_scenario_throws_exception_when_rate_drops(self):
        """
        ReadRequestLoadScenario raises RequestRateTooLow if the rate
        drops below the requested rate.

        Establish the requested rate by having the FakeFlockerClient
        respond to all requests, then lower the rate by dropping
        alternate requests. This should result in RequestRateTooLow
        being raised.
        """
        c = Clock()

        cluster = self.make_cluster(RequestDroppingFakeFlockerClient)
        sample_size = 5
        s = ReadRequestLoadScenario(c, cluster, sample_size=sample_size)

        s.start()

        # Advance the clock by `sample_size` seconds to establish the
        # requested rate.
        c.pump(repeat(1, sample_size))

        cluster.get_control_service(c).drop_requests = True

        # Advance the clock by 2 seconds so that a request is dropped
        # and a new rate which is below the target can be established.
        c.advance(2)

        failure = self.failureResultOf(s.maintained())
        self.assertIsInstance(failure.value, RequestRateTooLow)
Code Example #14
    def test_inboxReplyFailedDelete(self):
        """
        When an inbox item that contains a reply is seen by the client, it
        deletes it immediately.  If the delete fails, the appropriate response
        code is returned.
        """
        userNumber = 1
        clock = Clock()
        inboxURL = '/some/inbox/'
        vevent = Component.fromString(INBOX_REPLY)
        inbox = Calendar(
            caldavxml.schedule_inbox, set(), u'the inbox', inboxURL, None)
        client = StubClient(userNumber, self.mktemp())
        client._calendars[inboxURL] = inbox

        inboxEvent = Event(client.serializeLocation(), inboxURL + u'4321.ics', None, vevent)
        client._setEvent(inboxEvent.url, inboxEvent)
        client._failDeleteWithObject(inboxEvent.url, IncorrectResponseCode(
            NO_CONTENT,
            Response(
                ('HTTP', 1, 1), PRECONDITION_FAILED,
                'Precondition Failed', None, None))
        )
        accepter = Accepter(clock, self.sim, client, userNumber)
        accepter.eventChanged(inboxEvent.url)
        clock.advance(3)
        self.assertNotIn(inboxEvent.url, client._events)
        self.assertNotIn('4321.ics', inbox.events)
Code Example #15
    def test_changeEventAttendeePreconditionFailed(self):
        """
        If the attempt to accept an invitation fails because of an
        unmet precondition (412), the event is re-retrieved and the
        PUT is re-issued with the new data.
        """
        clock = Clock()
        userNumber = 2
        client = StubClient(userNumber, self.mktemp())
        randomDelay = 3

        calendarURL = '/some/calendar/'
        calendar = Calendar(
            caldavxml.calendar, set(('VEVENT',)), u'calendar', calendarURL, None)
        client._calendars[calendarURL] = calendar

        vevent = Component.fromString(INVITED_EVENT)
        event = Event(client.serializeLocation(), calendarURL + u'1234.ics', None, vevent)
        client._setEvent(event.url, event)

        accepter = Accepter(clock, self.sim, client, userNumber)
        accepter.setParameters(acceptDelayDistribution=Deterministic(randomDelay))

        client.rescheduled.add(event.url)

        accepter.eventChanged(event.url)
        clock.advance(randomDelay)
Code Example #16
File: test_loop.py Project: aminembarki/flocker
    def test_convergence_done_start_new_iteration(self):
        """
        After a short delay, an FSM completing the changes from one convergence
        iteration starts another iteration.
        """
        local_state = NodeState(hostname=b'192.0.2.123')
        local_state2 = NodeState(hostname=b'192.0.2.123')
        configuration = Deployment(nodes=frozenset([to_node(local_state)]))
        state = DeploymentState(nodes=[local_state])
        action = ControllableAction(result=succeed(None))
        # Because the second action result is unfired Deferred, the second
        # iteration will never finish; applying its changes waits for this
        # Deferred to fire.
        action2 = ControllableAction(result=Deferred())
        deployer = ControllableDeployer(
            [succeed(local_state), succeed(local_state2)],
            [action, action2])
        client = self.successful_amp_client([local_state, local_state2])
        reactor = Clock()
        loop = build_convergence_loop_fsm(reactor, deployer)
        loop.receive(_ClientStatusUpdate(
            client=client, configuration=configuration, state=state))
        reactor.advance(1.0)
        # Calculating actions happened, result was run... and then we did
        # whole thing again:
        self.assertEqual((deployer.calculate_inputs, client.calls),
                         ([(local_state, configuration, state),
                           (local_state2, configuration, state)],
                          [(NodeStateCommand, dict(node_state=local_state)),
                           (NodeStateCommand, dict(node_state=local_state2))]))
Code Example #17
    def test_too_many_iterations(self):
        """
        If ``retry_failure`` fails more times than there are steps provided, it
        errors back with the last failure.
        """
        steps = [0.1]

        result = object()
        failure = Failure(ValueError("really bad value"))

        results = [
            Failure(ValueError("bad value")),
            failure,
            succeed(result),
        ]

        def function():
            return results.pop(0)

        clock = Clock()

        d = retry_failure(clock, function, steps=steps)
        self.assertNoResult(d)

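        # Only one retry step (0.1s) was provided, so after it elapses the next
        # failure becomes the final result instead of being retried.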
        clock.advance(0.1)
        self.assertEqual(self.failureResultOf(d), failure)
Code Example #18
    def test_limited_exceptions(self):
        """
        By default, ``retry_failure`` retries on any exception. However, if
        it's given an iterable of expected exception types (exactly as one
        might pass to ``Failure.check``), then it will only retry if one of
        *those* exceptions is raised.
        """
        steps = [0.1, 0.2]

        result = object()
        type_error = Failure(TypeError("bad type"))

        results = [
            Failure(ValueError("bad value")),
            type_error,
            succeed(result),
        ]

        def function():
            return results.pop(0)

        clock = Clock()

        d = retry_failure(clock, function, expected=[ValueError], steps=steps)
        self.assertNoResult(d)

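        # TypeError is not in the expected exceptions, so after the first retry
        # it is returned as the final failure rather than being retried.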
        clock.advance(0.1)
        self.assertEqual(self.failureResultOf(d), type_error)
Code Example #19
File: test_task.py Project: vitaminmoo/unnaturalcode
    def test_taskProductionFailed_deferred_doesnt_delay_polling(self):
        # If taskProductionFailed returns a deferred, we don't wait for it to
        # fire before polling again.
        class DeferredFailingConsumer(NoopTaskConsumer):
            def taskProductionFailed(self, reason):
                failures.append(reason)
                return Deferred()

        interval = self.factory.getUniqueInteger()
        clock = Clock()
        produced = []
        failures = []

        def producer():
            exc = RuntimeError()
            produced.append(exc)
            raise exc

        task_source = self.makeTaskSource(task_producer=producer, interval=interval, clock=clock)
        consumer = DeferredFailingConsumer()
        task_source.start(consumer)
        # The call to start polls once and taskProductionFailed is called.
        self.assertEqual((1, 1), (len(produced), len(failures)))
        # Even though taskProductionFailed returned a deferred which has not
        # yet fired, we poll again after 'interval' seconds.
        clock.advance(interval)
        self.assertEqual((2, 2), (len(produced), len(failures)))
Code Example #20
File: test_loop.py Project: uedzen/flocker
    def test_convergence_done_delays_new_iteration_ack(self):
        """
        A state update isn't sent if the control node hasn't acknowledged the
        last state update.
        """
        self.local_state = local_state = NodeState(hostname=u'192.0.2.123')
        self.configuration = configuration = Deployment()
        self.cluster_state = received_state = DeploymentState(nodes=[])
        self.action = action = ControllableAction(result=succeed(None))
        deployer = ControllableDeployer(
            local_state.hostname, [succeed(local_state)], [action]
        )
        client = self.make_amp_client([local_state])
        reactor = Clock()
        loop = build_convergence_loop_fsm(reactor, deployer)
        loop.receive(_ClientStatusUpdate(
            # We don't want to receive the acknowledgment of the
            # state update.
            client=DelayedAMPClient(client),
            configuration=configuration,
            state=received_state))

        # Wait for the delay in the convergence loop to pass.  This won't do
        # anything, since we are also waiting for state to be acknowledged.
        reactor.advance(1.0)

        # Only one status update was sent.
        self.assertListEqual(
            client.calls,
            [(NodeStateCommand, dict(state_changes=(local_state,)))],
        )
Code Example #21
File: test_task.py Project: vitaminmoo/unnaturalcode
    def test_only_one_producer_call_at_once(self):
        # If the task producer returns a Deferred, it will not be called again
        # until that deferred has fired, even if takes longer than the
        # interval we're polling at.
        tasks_called = []
        produced_deferreds = []

        def producer():
            deferred = Deferred()
            produced_deferreds.append(deferred)
            return deferred

        clock = Clock()
        interval = self.factory.getUniqueInteger()
        task_source = self.makeTaskSource(task_producer=producer, interval=interval, clock=clock)
        task_source.start(AppendingTaskConsumer(tasks_called))
        # The call to start calls producer.  It returns a deferred which has
        # not been fired.
        self.assertEqual(len(produced_deferreds), 1)
        # If 'interval' seconds passes and the deferred has still not fired
        # the producer is not called again.
        clock.advance(interval)
        self.assertEqual(len(produced_deferreds), 1)
        # If the task-getting deferred is fired and more time passes, we poll
        # again.
        produced_deferreds[0].callback(None)
        clock.advance(interval)
        self.assertEqual(len(produced_deferreds), 2)
Code Example #22
File: test_task.py Project: vitaminmoo/unnaturalcode
    def test_taskStarted_deferred_doesnt_delay_polling(self):
        # If taskStarted returns a deferred, we don't wait for it to fire
        # before polling again.
        class DeferredStartingConsumer(NoopTaskConsumer):
            def taskStarted(self, task):
                started.append(task)
                return Deferred()

        interval = self.factory.getUniqueInteger()
        clock = Clock()
        produced = []
        started = []

        def producer():
            value = self.factory.getUniqueInteger()
            produced.append(value)
            return value

        task_source = self.makeTaskSource(task_producer=producer, interval=interval, clock=clock)
        consumer = DeferredStartingConsumer()
        task_source.start(consumer)
        # The call to start polls once and taskStarted is called.
        self.assertEqual((1, 1), (len(produced), len(started)))
        # Even though taskStarted returned a deferred which has not yet fired,
        # we poll again after 'interval' seconds.
        clock.advance(interval)
        self.assertEqual((2, 2), (len(produced), len(started)))
Code Example #23
    def test_ignoreAlreadyAccepting(self):
        """
        If the client sees an event change a second time before
        responding to an invitation found on it during the first
        change notification, the second change notification does not
        generate another accept attempt.
        """
        clock = Clock()
        randomDelay = 7
        vevent = Component.fromString(INVITED_EVENT)
        attendees = tuple(vevent.mainComponent().properties('ATTENDEE'))
        userNumber = int(attendees[1].parameterValue('CN').split(None, 1)[1])
        calendarURL = '/some/calendar/'
        calendar = Calendar(
            caldavxml.calendar, set(('VEVENT',)), u'calendar', calendarURL, None)
        client = StubClient(userNumber, self.mktemp())
        client._calendars[calendarURL] = calendar
        event = Event(client.serializeLocation(), calendarURL + u'1234.ics', None, vevent)
        client._events[event.url] = event
        accepter = Accepter(clock, self.sim, client, userNumber)
        accepter.random = Deterministic()

        def _gauss(mu, sigma):
            return randomDelay
        accepter.random.gauss = _gauss
        accepter.eventChanged(event.url)
        accepter.eventChanged(event.url)
        clock.advance(randomDelay)
Code Example #24
    def test_wait_for_active_errors(self, server_details):
        """
        wait_for_active will errback its Deferred if it encounters a non-active
        state transition.
        """
        clock = Clock()

        server_status = ['BUILD', 'ERROR']

        def _server_status(*args, **kwargs):
            return succeed({'server': {'status': server_status.pop(0)}})

        server_details.side_effect = _server_status

        d = wait_for_active(self.log,
                            'http://url/', 'my-auth-token', 'serverId',
                            clock=clock)

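        # The server status is polled every 5 seconds; advancing the clock fetches
        # the next status, 'ERROR', which should errback the Deferred.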
        clock.advance(5)

        failure = self.failureResultOf(d)
        self.assertTrue(failure.check(UnexpectedServerStatus))

        self.assertEqual(failure.value.server_id, 'serverId')
        self.assertEqual(failure.value.status, 'ERROR')
        self.assertEqual(failure.value.expected_status, 'ACTIVE')
Code Example #25
    def test_intervals_can_be_an_iterable(self):
        # Take control of time.
        clock = Clock()
        # Use intervals of 1s, 2s, 3, and then back to 1s.
        intervals = cycle((1.0, 2.0, 3.0))

        gen_retries = utils.retries(5, intervals, time=clock.seconds)
        # No time has passed, 5 seconds remain, and it suggests sleeping
        # for 1 second, then 2, then 3, then 1 again.
        self.assertRetry(clock, next(gen_retries), 0, 5, 1)
        self.assertRetry(clock, next(gen_retries), 0, 5, 2)
        self.assertRetry(clock, next(gen_retries), 0, 5, 3)
        self.assertRetry(clock, next(gen_retries), 0, 5, 1)
        # Mimic sleeping for 3.5 seconds, more than the suggested.
        clock.advance(3.5)
        # Now 3.5 seconds have passed, 1.5 seconds remain, and it suggests
        # sleeping for 1.5 seconds, 0.5 less than the next expected interval
        # of 2.0 seconds.
        self.assertRetry(clock, next(gen_retries), 3.5, 1.5, 1.5)
        # Don't sleep, ask again immediately, and the same answer is given.
        self.assertRetry(clock, next(gen_retries), 3.5, 1.5, 1.5)
        # Don't sleep, ask again immediately, and 1.0 seconds is given,
        # because we're back to the 1.0 second interval.
        self.assertRetry(clock, next(gen_retries), 3.5, 1.5, 1.0)
        # Mimic sleeping for 100 seconds, much more than the suggested.
        clock.advance(100)
        # There's always a final chance to try something, but the elapsed and
        # remaining figures are still calculated with reference to the current
        # time. The wait time never goes below zero.
        self.assertRetry(clock, next(gen_retries), 103.5, -98.5, 0.0)
        # All done.
        self.assertRaises(StopIteration, next, gen_retries)
Code Example #26
    def test_call_later_once(self):
        self.called = 0
        reactor = Clock()
        call = CallLaterOnce(self.call_function, reactor=reactor)
        call.schedule(delay=1)
        reactor.advance(1)
        assert self.called == 1
Code Example #27
class SuccessfulAsyncDispatch(unittest.TestCase):
    def setUp(self):
        self.clock = Clock()
        self.temp_dir = FilePath(tempfile.mkdtemp()).asBytesMode()
        with self.temp_dir.child(b"nonempty").open("w") as fd:
            fd.write(b"Something uninteresting")
        self.backend = FilesystemAsyncBackend(self.temp_dir, self.clock)
        self.tftp = TFTP(self.backend, self.clock)

    def test_get_reader_defers(self):
        rrq_datagram = RRQDatagram(b"nonempty", b"NetASCiI", {})
        rrq_addr = ("127.0.0.1", 1069)
        rrq_mode = b"octet"
        d = self.tftp._startSession(rrq_datagram, rrq_addr, rrq_mode)
        self.assertFalse(d.called)
        self.clock.advance(1)
        self.assertTrue(d.called)
        self.assertTrue(IReader.providedBy(d.result.backend))

    def test_get_writer_defers(self):
        wrq_datagram = WRQDatagram(b"foobar", b"NetASCiI", {})
        wrq_addr = ("127.0.0.1", 1069)
        wrq_mode = b"octet"
        d = self.tftp._startSession(wrq_datagram, wrq_addr, wrq_mode)
        self.assertFalse(d.called)
        self.clock.advance(1)
        self.assertTrue(d.called)
        self.assertTrue(IWriter.providedBy(d.result.backend))
Code Example #28
File: test_sql.py Project: nunb/calendarserver
    def test_logWaitsAndTxnTimeout(self):
        """
        CommonStoreTransactionMonitor logs waiting transactions and terminates long transactions.
        """

        c = Clock()
        self.patch(CommonStoreTransactionMonitor, "callLater", c.callLater)

        # Patch config to turn on log waits then rebuild the store
        self.patch(self.store, "logTransactionWaits", 1)
        self.patch(self.store, "timeoutTransactions", 2)

        ctr = [0, 0]
        def counter(logStr, *args, **kwargs):
            if "wait" in logStr:
                ctr[0] += 1
            elif "abort" in logStr:
                ctr[1] += 1
        self.patch(log, "error", counter)

        txn = self.transactionUnderTest()

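        # Advancing 2 seconds passes both the 1-second log-wait interval and the
        # 2-second transaction timeout patched above.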
        c.advance(2)
        self.assertNotEqual(ctr[0], 0)
        self.assertNotEqual(ctr[1], 0)
        self.assertTrue(txn._sqlTxn._completed)
Code Example #29
    def test_wait_for_active_stops_looping_on_success(self, server_details):
        """
        wait_for_active stops looping when it encounters the active state.
        """
        clock = Clock()
        server_status = ['BUILD', 'ACTIVE']

        def _server_status(*args, **kwargs):
            return succeed({'server': {'status': server_status.pop(0)}})

        server_details.side_effect = _server_status

        d = wait_for_active(self.log,
                            'http://url/', 'my-auth-token', 'serverId',
                            clock=clock)

        # This gets called once immediately then every 5 seconds.
        self.assertEqual(server_details.call_count, 1)

        clock.advance(5)

        self.assertEqual(server_details.call_count, 2)

        clock.advance(5)

        # This has not been called a 3rd time because we encountered the active
        # state and the looping call stopped.
        self.assertEqual(server_details.call_count, 2)

        self.successResultOf(d)
Code Example #30
    def test_iterates(self, logger):
        """
        If the predicate returns something falsey followed by something truthy,
        then ``loop_until`` returns it immediately.
        """
        result = object()
        results = [None, result]

        def predicate():
            return results.pop(0)
        clock = Clock()

        d = loop_until(clock, predicate)

        self.assertNoResult(d)

        clock.advance(0.1)
        self.assertEqual(
            self.successResultOf(d),
            result)

        action = LoggedAction.of_type(logger.messages, LOOP_UNTIL_ACTION)[0]
        assertContainsFields(self, action.start_message, {
            'predicate': predicate,
        })
        assertContainsFields(self, action.end_message, {
            'result': result,
        })
        self.assertTrue(action.succeeded)
        message = LoggedMessage.of_type(
            logger.messages, LOOP_UNTIL_ITERATION_MESSAGE)[0]
        self.assertEqual(action.children, [message])
        assertContainsFields(self, message.message, {
            'result': None,
        })
Code Example #31
File: test_protocol.py Project: pavvyb/proj3rdyear
class AMQClientTest(TestCase):
    """Unit tests for the AMQClient protocol."""

    def setUp(self):
        super(AMQClientTest, self).setUp()
        self.delegate = TwistedDelegate()
        self.clock = Clock()
        self.heartbeat = 1
        self.protocol = AMQClient(
            self.delegate, "/", load(DEFAULT_SPEC), clock=self.clock,
            heartbeat=self.heartbeat)
        self.transport = AMQPump(Logger())
        self.transport.connect(self.protocol)

    def test_connection_close(self):
        """Test handling a connection-close method sent by the broker."""
        self.transport.channel(0).connection_close()
        # We send close-ok before shutting down the connection
        frame = self.transport.outgoing[0][0]
        self.assertEqual("close-ok", frame.payload.method.name)
        self.assertTrue(self.protocol.closed)
        channel0 = self.successResultOf(self.protocol.channel(0))
        self.assertTrue(channel0.closed)

    def test_connection_close_raises_error(self):
        """Test receiving a connection-close method raises ConnectionClosed."""
        channel = self.successResultOf(self.protocol.channel(0))
        d = channel.basic_consume(queue="test-queue")
        self.transport.channel(0).connection_close(reply_code=320)
        failure = self.failureResultOf(d)
        self.assertIsInstance(failure.value, ConnectionClosed)

    def test_close(self):
        """Test explicitly closing a client."""
        d = self.protocol.close()

        # Since 'within' defaults to 0, no outgoing 'close' frame is there.
        self.assertEqual({}, self.transport.outgoing)

        self.assertIsNone(self.successResultOf(d))
        self.assertTrue(self.protocol.closed)

    def test_close_within(self):
        """Test closing a client cleanly."""
        d = self.protocol.close(within=1)

        # Since we passed within=1, we have an outgoing 'close' frame.
        frame = self.transport.outgoing[0][0]
        self.assertEqual("close", frame.payload.method.name)

        # At this point the client is not yet closed, since we're waiting for
        # the 'close-ok' acknowledgement from the broker.
        self.assertFalse(self.protocol.closed)

        self.transport.channel(0).connection_close_ok()
        self.assertIsNone(self.successResultOf(d))
        self.assertTrue(self.protocol.closed)
        self.assertEqual([], self.clock.calls)

    def test_close_within_hits_timeout(self):
        """Test trying to close a client cleanly but hitting the timeout."""
        d = self.protocol.close(within=1)

        self.clock.advance(1)
        self.assertIsNone(self.successResultOf(d))
        self.assertTrue(self.protocol.closed)

    def test_close_closes_channels(self):
        """Test closing a client also closes channels."""
        channel = self.successResultOf(self.protocol.channel(0))
        self.protocol.close()
        d = channel.basic_consume(queue="test-queue")
        failure = self.failureResultOf(d)
        self.assertIsInstance(failure.value, Closed)

    def test_close_closes_queues(self):
        """Test closing a client also closes queues."""
        queue = self.successResultOf(self.protocol.queue("tag"))
        d = queue.get()
        self.protocol.close()
        failure = self.failureResultOf(d)
        self.assertIsInstance(failure.value, QueueClosed)

    def test_hearbeat_check_failure(self):
        """Test closing a client after a heartbeat check failure."""
        self.protocol.started.fire()
        channel = self.successResultOf(self.protocol.channel(0))
        d = channel.basic_consume(queue="test-queue")
        self.clock.advance(self.heartbeat * AMQClient.MAX_UNSEEN_HEARTBEAT)
        self.assertTrue(self.protocol.closed)
        failure = self.failureResultOf(d)
        self.assertIsInstance(failure.value, Closed)
        self.assertTrue(self.transport.aborted)

    def test_connection_lost(self):
        """Test closing a client after the connection is lost."""
        channel = self.successResultOf(self.protocol.channel(0))
        d = channel.basic_consume(queue="test-queue")
        self.transport.abortConnection()
        self.assertTrue(self.protocol.closed)
        failure = self.failureResultOf(d)
        self.assertIsInstance(failure.value, Closed)
        self.assertIsInstance(failure.value.args[0].value, ConnectionLost)

    def test_channel_close(self):
        """Test receiving a channel-close method raises ChannelClosed."""
        channel = self.successResultOf(self.protocol.channel(0))
        d = channel.basic_consume(queue="non-existing-queue")
        self.transport.channel(0).channel_close(reply_code=404)
        failure = self.failureResultOf(d)
        self.assertIsInstance(failure.value, ChannelClosed)

    def test_sending_method_on_closed_channel(self):
        """Sending a method on a closed channel fails immediately."""
        channel = self.successResultOf(self.protocol.channel(0))
        self.transport.channel(0).connection_close(reply_code=320)
        self.transport.outgoing.clear()
        d = channel.basic_consume(queue="test-queue")
        # No frames were sent
        self.assertEqual({}, self.transport.outgoing)
        failure = self.failureResultOf(d)
        self.assertIsInstance(failure.value, ConnectionClosed)

    def test_disconnected_event(self):
        """Test disconnected event fired after the connection is lost."""
        deferred = self.protocol.disconnected.wait()
        self.protocol.close()
        self.assertTrue(self.successResultOf(deferred))
Code Example #32
class TestComponentManagerProperStart(unittest.TestCase):
    def setUp(self):
        self.reactor = Clock()
        mocks.mock_conf_settings(self)
        self.component_manager = ComponentManager(
            skip_components=[
                DATABASE_COMPONENT, DHT_COMPONENT, HASH_ANNOUNCER_COMPONENT,
                STREAM_IDENTIFIER_COMPONENT, PEER_PROTOCOL_SERVER_COMPONENT,
                REFLECTOR_COMPONENT, UPNP_COMPONENT, HEADERS_COMPONENT,
                PAYMENT_RATE_COMPONENT, RATE_LIMITER_COMPONENT,
                EXCHANGE_RATE_MANAGER_COMPONENT
            ],
            reactor=self.reactor,
            wallet=mocks.FakeDelayedWallet,
            file_manager=mocks.FakeDelayedFileManager,
            blob_manager=mocks.FakeDelayedBlobManager)

    def tearDown(self):
        pass

    def test_proper_starting_of_components(self):
        self.component_manager.setup()
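        # The fake wallet starts immediately; each dependent fake component finishes
        # starting one reactor.advance(1) later, as the assertions below check.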
        self.assertTrue(self.component_manager.get_component('wallet').running)
        self.assertFalse(
            self.component_manager.get_component('blob_manager').running)
        self.assertFalse(
            self.component_manager.get_component('file_manager').running)

        self.reactor.advance(1)
        self.assertTrue(self.component_manager.get_component('wallet').running)
        self.assertTrue(
            self.component_manager.get_component('blob_manager').running)
        self.assertFalse(
            self.component_manager.get_component('file_manager').running)

        self.reactor.advance(1)
        self.assertTrue(self.component_manager.get_component('wallet').running)
        self.assertTrue(
            self.component_manager.get_component('blob_manager').running)
        self.assertTrue(
            self.component_manager.get_component('file_manager').running)

    def test_proper_stopping_of_components(self):
        self.component_manager.setup()
        self.reactor.advance(1)
        self.reactor.advance(1)
        self.component_manager.stop()
        self.assertFalse(
            self.component_manager.get_component('file_manager').running)
        self.assertTrue(
            self.component_manager.get_component('blob_manager').running)
        self.assertTrue(self.component_manager.get_component('wallet').running)

        self.reactor.advance(1)
        self.assertFalse(
            self.component_manager.get_component('file_manager').running)
        self.assertFalse(
            self.component_manager.get_component('blob_manager').running)
        self.assertTrue(self.component_manager.get_component('wallet').running)

        self.reactor.advance(1)
        self.assertFalse(
            self.component_manager.get_component('file_manager').running)
        self.assertFalse(
            self.component_manager.get_component('blob_manager').running)
        self.assertFalse(
            self.component_manager.get_component('wallet').running)
Code Example #33
class MemCacheTestCase(CommandMixin, TestCase):
    """
    Test client protocol class L{MemCacheProtocol}.
    """
    def setUp(self):
        """
        Create a memcache client, connect it to a string protocol, and make it
        use a deterministic clock.
        """
        self.proto = MemCacheProtocol()
        self.clock = Clock()
        self.proto.callLater = self.clock.callLater
        self.transport = StringTransportWithDisconnection()
        self.transport.protocol = self.proto
        self.proto.makeConnection(self.transport)

    def _test(self, d, send, recv, result):
        """
        Implementation of C{_test} which checks that the command sends C{send}
        data, and that upon reception of C{recv} the result is C{result}.

        @param d: the resulting deferred from the memcache command.
        @type d: C{Deferred}

        @param send: the expected data to be sent.
        @type send: C{str}

        @param recv: the data to simulate as reception.
        @type recv: C{str}

        @param result: the expected result.
        @type result: C{any}
        """
        def cb(res):
            self.assertEquals(res, result)

        self.assertEquals(self.transport.value(), send)
        d.addCallback(cb)
        self.proto.dataReceived(recv)
        return d

    def test_invalidGetResponse(self):
        """
        If the value returned doesn't match the expected key of the current
        C{get} command, an error is raised in L{MemCacheProtocol.dataReceived}.
        """
        self.proto.get("foo")
        s = "spamegg"
        self.assertRaises(RuntimeError, self.proto.dataReceived,
                          "VALUE bar 0 %s\r\n%s\r\nEND\r\n" % (len(s), s))

    def test_invalidMultipleGetResponse(self):
        """
        If the value returned doesn't match one of the expected keys of the
        current multiple C{get} command, an error is raised in
        L{MemCacheProtocol.dataReceived}.
        """
        self.proto.getMultiple(["foo", "bar"])
        s = "spamegg"
        self.assertRaises(RuntimeError, self.proto.dataReceived,
                          "VALUE egg 0 %s\r\n%s\r\nEND\r\n" % (len(s), s))

    def test_timeOut(self):
        """
        Test the timeout on outgoing requests: when timeout is detected, all
        current commands fail with a L{TimeoutError}, and the connection is
        closed.
        """
        d1 = self.proto.get("foo")
        d2 = self.proto.get("bar")
        d3 = Deferred()
        self.proto.connectionLost = d3.callback

        self.clock.advance(self.proto.persistentTimeOut)
        self.assertFailure(d1, TimeoutError)
        self.assertFailure(d2, TimeoutError)

        def checkMessage(error):
            self.assertEquals(str(error), "Connection timeout")

        d1.addCallback(checkMessage)
        return gatherResults([d1, d2, d3])

    def test_timeoutRemoved(self):
        """
        When a request gets a response, no pending timeout call remains around.
        """
        d = self.proto.get("foo")

        self.clock.advance(self.proto.persistentTimeOut - 1)
        self.proto.dataReceived("VALUE foo 0 3\r\nbar\r\nEND\r\n")

        def check(result):
            self.assertEquals(result, (0, "bar"))
            self.assertEquals(len(self.clock.calls), 0)

        d.addCallback(check)
        return d

    def test_timeOutRaw(self):
        """
        Test the timeout when raw mode was started: the timeout is not reset
        until all the data has been received, so we can have a L{TimeoutError}
        when waiting for raw data.
        """
        d1 = self.proto.get("foo")
        d2 = Deferred()
        self.proto.connectionLost = d2.callback

        self.proto.dataReceived("VALUE foo 0 10\r\n12345")
        self.clock.advance(self.proto.persistentTimeOut)
        self.assertFailure(d1, TimeoutError)
        return gatherResults([d1, d2])

    def test_timeOutStat(self):
        """
        Test the timeout when stat command has started: the timeout is not
        reset until the final B{END} is received.
        """
        d1 = self.proto.stats()
        d2 = Deferred()
        self.proto.connectionLost = d2.callback

        self.proto.dataReceived("STAT foo bar\r\n")
        self.clock.advance(self.proto.persistentTimeOut)
        self.assertFailure(d1, TimeoutError)
        return gatherResults([d1, d2])

    def test_timeoutPipelining(self):
        """
        When two requests are sent, a timeout call remains around for the
        second request, and its timeout time is correct.
        """
        d1 = self.proto.get("foo")
        d2 = self.proto.get("bar")
        d3 = Deferred()
        self.proto.connectionLost = d3.callback

        self.clock.advance(self.proto.persistentTimeOut - 1)
        self.proto.dataReceived("VALUE foo 0 3\r\nbar\r\nEND\r\n")

        def check(result):
            self.assertEquals(result, (0, "bar"))
            self.assertEquals(len(self.clock.calls), 1)
            for i in range(self.proto.persistentTimeOut):
                self.clock.advance(1)
            return self.assertFailure(d2, TimeoutError).addCallback(checkTime)

        def checkTime(ignored):
            # Check that the timeout happened C{self.proto.persistentTimeOut}
            # after the last response
            self.assertEquals(self.clock.seconds(),
                              2 * self.proto.persistentTimeOut - 1)

        d1.addCallback(check)
        return d1

    def test_timeoutNotReset(self):
        """
        Check that the timeout is not reset for every command, but kept from
        the first command without a response.
        """
        d1 = self.proto.get("foo")
        d3 = Deferred()
        self.proto.connectionLost = d3.callback

        self.clock.advance(self.proto.persistentTimeOut - 1)
        d2 = self.proto.get("bar")
        self.clock.advance(1)
        self.assertFailure(d1, TimeoutError)
        self.assertFailure(d2, TimeoutError)
        return gatherResults([d1, d2, d3])

    def test_timeoutCleanDeferreds(self):
        """
        C{timeoutConnection} cleans the list of commands that it fires with
        C{TimeoutError}: C{connectionLost} doesn't try to fire them again, but
        sets the disconnected state so that future commands fail with a
        C{RuntimeError}.
        """
        d1 = self.proto.get("foo")
        self.clock.advance(self.proto.persistentTimeOut)
        self.assertFailure(d1, TimeoutError)
        d2 = self.proto.get("bar")
        self.assertFailure(d2, RuntimeError)
        return gatherResults([d1, d2])

    def test_connectionLost(self):
        """
        When disconnection occurs while commands are still outstanding, the
        commands fail.
        """
        d1 = self.proto.get("foo")
        d2 = self.proto.get("bar")
        self.transport.loseConnection()
        done = DeferredList([d1, d2], consumeErrors=True)

        def checkFailures(results):
            for success, result in results:
                self.assertFalse(success)
                result.trap(ConnectionDone)

        return done.addCallback(checkFailures)

    def test_tooLongKey(self):
        """
        An error is raised when trying to use a too long key: the called
        command returns a L{Deferred} which fails with a L{ClientError}.
        """
        d1 = self.assertFailure(self.proto.set("a" * 500, "bar"), ClientError)
        d2 = self.assertFailure(self.proto.increment("a" * 500), ClientError)
        d3 = self.assertFailure(self.proto.get("a" * 500), ClientError)
        d4 = self.assertFailure(self.proto.append("a" * 500, "bar"),
                                ClientError)
        d5 = self.assertFailure(self.proto.prepend("a" * 500, "bar"),
                                ClientError)
        d6 = self.assertFailure(self.proto.getMultiple(["foo", "a" * 500]),
                                ClientError)
        return gatherResults([d1, d2, d3, d4, d5, d6])

    def test_invalidCommand(self):
        """
        When an unknown command is sent directly (not through public API), the
        server answers with an B{ERROR} token, and the command fails with
        L{NoSuchCommand}.
        """
        d = self.proto._set("egg", "foo", "bar", 0, 0, "")
        self.assertEquals(self.transport.value(), "egg foo 0 0 3\r\nbar\r\n")
        self.assertFailure(d, NoSuchCommand)
        self.proto.dataReceived("ERROR\r\n")
        return d

    def test_clientError(self):
        """
        Test the L{ClientError} error: when the server sends a B{CLIENT_ERROR}
        token, the originating command fails with L{ClientError}, and the error
        contains the text sent by the server.
        """
        a = "eggspamm"
        d = self.proto.set("foo", a)
        self.assertEquals(self.transport.value(),
                          "set foo 0 0 8\r\neggspamm\r\n")
        self.assertFailure(d, ClientError)

        def check(err):
            self.assertEquals(str(err), "We don't like egg and spam")

        d.addCallback(check)
        self.proto.dataReceived("CLIENT_ERROR We don't like egg and spam\r\n")
        return d

    def test_serverError(self):
        """
        Test the L{ServerError} error: when the server sends a B{SERVER_ERROR}
        token, the originating command fails with L{ServerError}, and the error
        contains the text sent by the server.
        """
        a = "eggspamm"
        d = self.proto.set("foo", a)
        self.assertEquals(self.transport.value(),
                          "set foo 0 0 8\r\neggspamm\r\n")
        self.assertFailure(d, ServerError)

        def check(err):
            self.assertEquals(str(err), "zomg")

        d.addCallback(check)
        self.proto.dataReceived("SERVER_ERROR zomg\r\n")
        return d

    def test_unicodeKey(self):
        """
        Using a non-string key as argument to commands raises an error.
        """
        d1 = self.assertFailure(self.proto.set(u"foo", "bar"), ClientError)
        d2 = self.assertFailure(self.proto.increment(u"egg"), ClientError)
        d3 = self.assertFailure(self.proto.get(1), ClientError)
        d4 = self.assertFailure(self.proto.delete(u"bar"), ClientError)
        d5 = self.assertFailure(self.proto.append(u"foo", "bar"), ClientError)
        d6 = self.assertFailure(self.proto.prepend(u"foo", "bar"), ClientError)
        d7 = self.assertFailure(self.proto.getMultiple(["egg", 1]),
                                ClientError)
        return gatherResults([d1, d2, d3, d4, d5, d6, d7])

    def test_unicodeValue(self):
        """
        Using a non-string value raises an error.
        """
        return self.assertFailure(self.proto.set("foo", u"bar"), ClientError)

    def test_pipelining(self):
        """
        Multiple requests can be sent successively to the server, and the
        protocol orders the responses correctly and dispatches them to the
        corresponding client command.
        """
        d1 = self.proto.get("foo")
        d1.addCallback(self.assertEquals, (0, "bar"))
        d2 = self.proto.set("bar", "spamspamspam")
        d2.addCallback(self.assertEquals, True)
        d3 = self.proto.get("egg")
        d3.addCallback(self.assertEquals, (0, "spam"))
        self.assertEquals(
            self.transport.value(),
            "get foo\r\nset bar 0 0 12\r\nspamspamspam\r\nget egg\r\n")
        self.proto.dataReceived("VALUE foo 0 3\r\nbar\r\nEND\r\n"
                                "STORED\r\n"
                                "VALUE egg 0 4\r\nspam\r\nEND\r\n")
        return gatherResults([d1, d2, d3])

    def test_getInChunks(self):
        """
        If the value retrieved by a C{get} arrives in chunks, the protocol
        is able to reconstruct it and produce the correct value.
        """
        d = self.proto.get("foo")
        d.addCallback(self.assertEquals, (0, "0123456789"))
        self.assertEquals(self.transport.value(), "get foo\r\n")
        self.proto.dataReceived("VALUE foo 0 10\r\n0123456")
        self.proto.dataReceived("789")
        self.proto.dataReceived("\r\nEND")
        self.proto.dataReceived("\r\n")
        return d

    def test_append(self):
        """
        L{MemCacheProtocol.append} behaves like a L{MemCacheProtocol.set}
        method: it returns a L{Deferred} which is called back with C{True} when
        the operation succeeds.
        """
        return self._test(self.proto.append("foo", "bar"),
                          "append foo 0 0 3\r\nbar\r\n", "STORED\r\n", True)

    def test_prepend(self):
        """
        L{MemCacheProtocol.prepend} behaves like a L{MemCacheProtocol.set}
        method: it returns a L{Deferred} which is called back with C{True} when
        the operation succeeds.
        """
        return self._test(self.proto.prepend("foo", "bar"),
                          "prepend foo 0 0 3\r\nbar\r\n", "STORED\r\n", True)

    def test_gets(self):
        """
        L{MemCacheProtocol.get} handles an additional cas result when
        C{withIdentifier} is C{True} and forwards it in the resulting
        L{Deferred}.
        """
        return self._test(self.proto.get("foo", True), "gets foo\r\n",
                          "VALUE foo 0 3 1234\r\nbar\r\nEND\r\n",
                          (0, "1234", "bar"))

    def test_emptyGets(self):
        """
        Test getting a non-available key with gets: it succeeds but returns
        C{None} as value, C{0} as flag, and an empty cas value.
        """
        return self._test(self.proto.get("foo", True), "gets foo\r\n",
                          "END\r\n", (0, "", None))

    def test_getsMultiple(self):
        """
        L{MemCacheProtocol.getMultiple} handles an additional cas field in the
        returned tuples if C{withIdentifier} is C{True}.
        """
        return self._test(
            self.proto.getMultiple(["foo", "bar"], True), "gets foo bar\r\n",
            "VALUE foo 0 3 1234\r\negg\r\nVALUE bar 0 4 2345\r\nspam\r\nEND\r\n",
            {
                'bar': (0, '2345', 'spam'),
                'foo': (0, '1234', 'egg')
            })

    def test_getsMultipleWithEmpty(self):
        """
        When getting a non-available key with L{MemCacheProtocol.getMultiple}
        with C{withIdentifier} set to C{True}, the other keys are retrieved
        correctly, and the non-available key gets a tuple of C{0} as flag,
        C{None} as value, and an empty cas value.
        """
        return self._test(
            self.proto.getMultiple(["foo", "bar"], True), "gets foo bar\r\n",
            "VALUE foo 0 3 1234\r\negg\r\nEND\r\n",
            {
                'bar': (0, '', None),
                'foo': (0, '1234', 'egg')
            })

    def test_checkAndSet(self):
        """
        L{MemCacheProtocol.checkAndSet} passes an additional cas identifier
        that the server uses to check whether the data should be updated.
        """
        return self._test(self.proto.checkAndSet("foo", "bar", cas="1234"),
                          "cas foo 0 0 3 1234\r\nbar\r\n", "STORED\r\n", True)

    def test_casUnknowKey(self):
        """
        When L{MemCacheProtocol.checkAndSet} response is C{EXISTS}, the
        resulting L{Deferred} fires with C{False}.
        """
        return self._test(self.proto.checkAndSet("foo", "bar", cas="1234"),
                          "cas foo 0 0 3 1234\r\nbar\r\n", "EXISTS\r\n", False)
Code example #34
class TestTrueAfricanUssdTransport(VumiTestCase):

    SESSION_INIT_BODY = {
        'session': '1',
        'msisdn': '+27724385170',
        'shortcode': '*23#'
    }

    @inlineCallbacks
    def setUp(self):
        self.tx_helper = self.add_helper(
            TransportHelper(TrueAfricanUssdTransport))
        self.clock = Clock()
        self.patch(TrueAfricanUssdTransport, 'get_clock', lambda _: self.clock)
        self.transport = yield self.tx_helper.get_transport({
            'interface': '127.0.0.1',
            'port': 0,
            'request_timeout': 10,
        })
        self.service_url = self.get_service_url(self.transport)

    def get_service_url(self, transport):
        """
        Get the URL for the HTTP resource. Requires the worker to be started.
        """
        addr = transport.web_resource.getHost()
        return "http://%s:%s/" % (addr.host, addr.port)

    def web_client(self):
        return Proxy(self.service_url)

    @inlineCallbacks
    def test_session_new(self):
        client = self.web_client()
        resp_d = client.callRemote('USSD.INIT', self.SESSION_INIT_BODY)

        [msg] = yield self.tx_helper.wait_for_dispatched_inbound(1)
        self.tx_helper.make_dispatch_reply(msg, "Oh Hai!")

        # verify the transport -> application message
        self.assertEqual(msg['transport_name'], self.tx_helper.transport_name)
        self.assertEqual(msg['transport_type'], "ussd")
        self.assertEqual(msg['session_event'],
                         TransportUserMessage.SESSION_NEW)
        self.assertEqual(msg['from_addr'], '+27724385170')
        self.assertEqual(msg['to_addr'], '*23#')
        self.assertEqual(msg['content'], None)

        resp = yield resp_d
        self.assertEqual(resp, {
            'message': 'Oh Hai!',
            'session': '1',
            'type': 'cont'
        })

    @inlineCallbacks
    def test_session_resume(self):
        client = self.web_client()

        # initiate session
        resp_d = client.callRemote('USSD.INIT', self.SESSION_INIT_BODY)

        [msg] = yield self.tx_helper.wait_for_dispatched_inbound(1)
        self.tx_helper.make_dispatch_reply(msg, "pong")

        yield resp_d
        yield self.tx_helper.clear_dispatched_inbound()

        # resume session
        resp_d = client.callRemote('USSD.CONT', {
            'session': '1',
            'response': 'pong'
        })

        [msg] = yield self.tx_helper.wait_for_dispatched_inbound(1)
        self.tx_helper.make_dispatch_reply(msg, "ping")

        # verify the dispatched inbound message
        self.assertEqual(msg['transport_name'], self.tx_helper.transport_name)
        self.assertEqual(msg['transport_type'], "ussd")
        self.assertEqual(msg['session_event'],
                         TransportUserMessage.SESSION_RESUME)
        self.assertEqual(msg['from_addr'], '+27724385170')
        self.assertEqual(msg['to_addr'], '*23#')
        self.assertEqual(msg['content'], 'pong')

        resp = yield resp_d
        self.assertEqual(resp, {
            'message': 'ping',
            'session': '1',
            'type': 'cont'
        })

    @inlineCallbacks
    def test_session_end_user_initiated(self):
        client = self.web_client()

        # initiate session
        resp_d = client.callRemote('USSD.INIT', self.SESSION_INIT_BODY)

        [msg] = yield self.tx_helper.wait_for_dispatched_inbound(1)
        self.tx_helper.make_dispatch_reply(msg, "ping")

        yield resp_d
        yield self.tx_helper.clear_dispatched_inbound()

        # user initiated session termination
        resp_d = client.callRemote('USSD.END', {'session': '1'})

        [msg] = yield self.tx_helper.wait_for_dispatched_inbound(1)
        self.assertEqual(msg['transport_name'], self.tx_helper.transport_name)
        self.assertEqual(msg['transport_type'], "ussd")
        self.assertEqual(msg['session_event'],
                         TransportUserMessage.SESSION_CLOSE)
        self.assertEqual(msg['from_addr'], '+27724385170')
        self.assertEqual(msg['to_addr'], '*23#')
        self.assertEqual(msg['content'], None)

        resp = yield resp_d
        self.assertEqual(resp, {})

    @inlineCallbacks
    def test_session_end_application_initiated(self):
        client = self.web_client()

        # initiate session
        resp_d = client.callRemote('USSD.INIT', self.SESSION_INIT_BODY)
        [msg] = yield self.tx_helper.wait_for_dispatched_inbound(1)

        self.tx_helper.make_dispatch_reply(msg, "ping")
        yield resp_d
        yield self.tx_helper.clear_dispatched_inbound()

        # end session
        resp_d = client.callRemote('USSD.CONT', {
            'session': '1',
            'response': 'o rly?'
        })

        [msg] = yield self.tx_helper.wait_for_dispatched_inbound(1)
        self.tx_helper.make_dispatch_reply(msg,
                                           "kthxbye",
                                           continue_session=False)

        resp = yield resp_d
        self.assertEqual(resp, {
            'message': 'kthxbye',
            'session': '1',
            'type': 'end'
        })

    @inlineCallbacks
    def test_ack_for_outbound_message(self):
        client = self.web_client()

        # initiate session
        resp_d = client.callRemote('USSD.INIT', self.SESSION_INIT_BODY)

        # send response
        [msg] = yield self.tx_helper.wait_for_dispatched_inbound(1)
        rep = yield self.tx_helper.make_dispatch_reply(msg, "ping")
        yield resp_d

        [ack] = yield self.tx_helper.wait_for_dispatched_events(1)
        self.assertEqual(ack['event_type'], 'ack')
        self.assertEqual(ack['user_message_id'], rep['message_id'])
        self.assertEqual(ack['sent_message_id'], rep['message_id'])

    @inlineCallbacks
    def test_nack_for_outbound_message(self):
        client = self.web_client()

        # initiate session
        resp_d = client.callRemote('USSD.INIT', self.SESSION_INIT_BODY)

        [msg] = yield self.tx_helper.wait_for_dispatched_inbound(1)

        # cancel the request and mute the resulting error.
        request = self.transport._requests[msg['message_id']]
        request.http_request.connectionLost(ConnectionLost())
        resp_d.cancel()
        resp_d.addErrback(lambda f: None)

        # send response
        rep = yield self.tx_helper.make_dispatch_reply(msg, "ping")
        [nack] = yield self.tx_helper.wait_for_dispatched_events(1)

        self.assertEqual(nack['event_type'], 'nack')
        self.assertEqual(nack['user_message_id'], rep['message_id'])
        self.assertEqual(nack['sent_message_id'], rep['message_id'])

    @inlineCallbacks
    def test_nack_for_request_timeout(self):
        client = self.web_client()

        # initiate session
        resp_d = client.callRemote('USSD.INIT', self.SESSION_INIT_BODY)

        [msg] = yield self.tx_helper.wait_for_dispatched_inbound(1)

        self.clock.advance(10.1)  # .1 second after timeout

        rep = yield self.tx_helper.make_dispatch_reply(msg, "ping")
        yield resp_d

        [nack] = yield self.tx_helper.wait_for_dispatched_events(1)
        self.assertEqual(nack['event_type'], 'nack')
        self.assertEqual(nack['user_message_id'], rep['message_id'])
        self.assertEqual(nack['sent_message_id'], rep['message_id'])
        self.assertEqual(nack['nack_reason'], 'Exceeded request timeout')

    @inlineCallbacks
    def test_nack_for_invalid_outbound_message(self):
        msg = yield self.tx_helper.make_dispatch_outbound("outbound")
        [nack] = yield self.tx_helper.wait_for_dispatched_events(1)
        self.assertEqual(nack['user_message_id'], msg['message_id'])
        self.assertEqual(nack['sent_message_id'], msg['message_id'])
        self.assertEqual(nack['nack_reason'],
                         'Missing in_reply_to, content or session_id fields')

    @inlineCallbacks
    def test_timeout(self):
        client = self.web_client()

        # initiate session
        resp_d = client.callRemote('USSD.INIT', self.SESSION_INIT_BODY)

        [msg] = yield self.tx_helper.wait_for_dispatched_inbound(1)
        with LogCatcher(message='Timing out') as lc:
            self.assertTrue(msg['message_id'] in self.transport._requests)
            self.clock.advance(10.1)  # .1 second after timeout
            self.assertFalse(msg['message_id'] in self.transport._requests)
            [warning] = lc.messages()
            self.assertEqual(warning,
                             'Timing out on response for +27724385170')
            resp = yield resp_d
            self.assertEqual(resp, {
                'message': ('We encountered an error while processing'
                            ' your message'),
                'type': 'end'
            })

    @inlineCallbacks
    def test_request_tracking(self):
        """
        Verify that the transport cleans up after finishing a request
        """
        client = self.web_client()
        resp_d = client.callRemote('USSD.INIT', self.SESSION_INIT_BODY)

        [msg] = yield self.tx_helper.wait_for_dispatched_inbound(1)
        self.tx_helper.make_dispatch_reply(msg, "pong")
        self.assertTrue(msg['message_id'] in self.transport._requests)
        yield resp_d
        self.assertFalse(msg['message_id'] in self.transport._requests)

    @inlineCallbacks
    def test_missing_session(self):
        """
        Verify that the transport handles missing session data in a
        graceful manner
        """
        client = self.web_client()
        resp_d = client.callRemote('USSD.INIT', self.SESSION_INIT_BODY)

        [msg] = yield self.tx_helper.wait_for_dispatched_inbound(1)
        self.tx_helper.make_dispatch_reply(msg, "pong")
        yield resp_d
        yield self.tx_helper.clear_dispatched_inbound()

        # simulate Redis falling over
        yield self.transport.session_manager.redis._purge_all()

        # resume
        resp_d = client.callRemote('USSD.CONT', {
            'session': '1',
            'response': 'o rly?'
        })

        resp = yield resp_d
        self.assertEqual(resp, {
            'message': ('We encountered an error while processing'
                        ' your message'),
            'type': 'end'
        })
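
The timeout and request-tracking tests above work because the transport schedules its per-request timeout on an injectable clock (hence the `get_clock` patch in setUp). A minimal standalone sketch of that pattern, with hypothetical names rather than the actual vumi implementation, looks like this:

from twisted.internet.task import Clock

class TimedRequests(object):
    """Track pending requests and time them out on an injected clock."""

    def __init__(self, clock, timeout):
        self._clock = clock
        self._timeout = timeout
        self._requests = {}

    def add(self, request_id, on_timeout):
        # On a Clock, this only fires when a test calls clock.advance().
        self._requests[request_id] = self._clock.callLater(
            self._timeout, on_timeout, request_id)

    def finish(self, request_id):
        call = self._requests.pop(request_id, None)
        if call is not None and call.active():
            call.cancel()

# Usage in a test, mirroring test_timeout above:
# clock = Clock()
# requests = TimedRequests(clock, 10)
# requests.add('msg-1', timed_out)
# clock.advance(10.1)   # fires timed_out('msg-1')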
Code example #35
File: test_client.py Project: exarkun/bloc
class BlocClientTests(SynchronousTestCase):
    """
    Tests for :obj:`client.BlocClient`
    """

    def setUp(self):
        self.clock = Clock()
        self.client = BlocClient(self.clock, 'server:8989', 3, session_id='sid')

    def setup_treq(self, code=200, body={}):
        self.async_failures = []
        self.stubs = RequestSequence(
            [((b"get", "http://server:8989/index", {},
               HasHeaders({"Bloc-Session-ID": ["sid"]}), b''),
              (code, {}, json.dumps(body).encode("utf-8")))],
            self.async_failures.append)
        self.client.treq = StubTreq(StringStubbingResource(self.stubs))

    def test_settled(self):
        """
        When getting the index returns SETTLED, the index is stored and
        returned by `get_index_total`
        """
        self.setup_treq(body={"status": "SETTLED", "index": 1, "total": 1})
        self.client.startService()
        with self.stubs.consume(self.fail):
            self.assertEqual(self.client.get_index_total(), (1, 1))
            self.assertTrue(self.client._settled)
        self.assertEqual(self.async_failures, [])

    def test_settling(self):
        """
        When getting the index returns SETTLING, `get_index_total` returns None
        """
        self.setup_treq(body={"status": "SETTLING"})
        self.client.startService()
        with self.stubs.consume(self.fail):
            self.assertIsNone(self.client.get_index_total())
        self.assertEqual(self.async_failures, [])

    def test_get_errors(self):
        """
        If getting the index fails, `get_index_total` returns None
        """
        self.setup_treq(code=500)
        self.client.startService()
        with self.stubs.consume(self.fail):
            self.assertIsNone(self.client.get_index_total())
        self.assertEqual(self.async_failures, [])

    def test_get_times_out(self):
        """
        If getting the index times out, `get_index_total` returns None
        """
        # lets start with settled
        self.test_settled()
        # setup client that does not return
        self.client.treq = StubTreq(DeferredResource(Deferred()))
        # next heartbeat to get index again
        self.clock.advance(3)
        # no response
        self.clock.advance(5)
        self.assertIsNone(self.client.get_index_total())

    def test_sequence(self):
        """
        Test a sequence of state changes from the server:
        SETTLING -> SETTLED -> ERRORS -> SETTLING -> SETTLED
        TODO: this should probably be done via hypothesis
        """
        # settling
        self.setup_treq(body={"status": "SETTLING"})
        self.client.startService()
        with self.stubs.consume(self.fail):
            self.assertIsNone(self.client.get_index_total())

        # settled
        self.setup_treq(body={"status": "SETTLED", "index": 1, "total": 3})
        self.clock.advance(3)
        with self.stubs.consume(self.fail):
            self.assertEqual(self.client.get_index_total(), (1, 3))

        # errors
        self.setup_treq(code=500)
        self.clock.advance(3)
        with self.stubs.consume(self.fail):
            self.assertIsNone(self.client.get_index_total())

        # settling
        self.setup_treq(body={"status": "SETTLING"})
        self.clock.advance(3)
        with self.stubs.consume(self.fail):
            self.assertIsNone(self.client.get_index_total())

        # settled
        self.setup_treq(body={"status": "SETTLED", "index": 3, "total": 4})
        self.clock.advance(3)
        with self.stubs.consume(self.fail):
            self.assertEqual(self.client.get_index_total(), (3, 4))
        self.assertEqual(self.async_failures, [])

    def test_stopservice_deletes_session(self):
        """
        :func:`stopService` deletes the session and stops the loop
        """
        self.test_settled()
        stubs = RequestSequence(
            [((b"delete", "http://server:8989/session", {},
               HasHeaders({"Bloc-Session-ID": ["sid"]}), b''),
              (200, {}, b''))],
            self.fail)
        self.client.treq = StubTreq(StringStubbingResource(stubs))
        with stubs.consume(self.fail):
            d = self.client.stopService()
            self.assertIsNone(self.successResultOf(d))
            # Moving time would fail treq if it tried to heartbeat
            self.clock.advance(4)

    def test_stopservice_ignores_delete_session(self):
        """
        :func:`stopService` will try deleting the session for up to 1 second
        and then stop the loop
        """
        self.test_settled()
        self.client.treq = StubTreq(DeferredResource(Deferred()))
        d = self.client.stopService()
        self.assertNoResult(d)
        self.clock.advance(1)
        self.assertIsNone(self.successResultOf(d))
        # Moving time would fail treq if it tried to heartbeat
        self.clock.advance(4)
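
The `clock.advance(3)` calls above make sense because the client polls the server on a fixed heartbeat interval (the `3` passed to `BlocClient` appears to be that interval). A minimal sketch of such a polling loop on an injected clock, using hypothetical names rather than the real bloc client, is:

from twisted.internet.task import LoopingCall

class Heartbeat(object):
    """Poll `poll()` every `interval` seconds on the given clock."""

    def __init__(self, clock, interval, poll):
        self._loop = LoopingCall(poll)
        self._loop.clock = clock          # tests pass a task.Clock here
        self._interval = interval

    def start(self):
        # now=True issues the first poll immediately, like startService above.
        return self._loop.start(self._interval, now=True)

    def stop(self):
        if self._loop.running:
            self._loop.stop()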
Code example #36
File: protocol.py Project: rebeccaroisin/meduse
def test_leader_heartbeat():
    clock = Clock()

    factory = MeduseFactory("node0", reactor=clock)
    factory.others.append(("127.0.0.1", 8080, "foo1"))

    ## Put in candidate state
    factory.start_leader_election()
    #client_factory = LeaderClientFactory(factory, ("127.0.0.1", 8080, "foo1"))
    client_factory = factory.conn[0]

    instance = client_factory.buildProtocol(None)
    tr = proto_helpers.StringTransport()
    instance.makeConnection(tr)

    assert factory.state == CANDIDATE

    instance.dataReceived(package_data(("ReplyVote", 0, True)))
    print "Votes", factory.votes
    assert factory.state == LEADER

    print factory.conn
    for c in factory.conn:
        print c.client_info
    assert len(factory.conn) == 1
    print factory.election_timeout, factory.match_index, factory.next_index

    tr.clear()
    clock.pump([0.005] * 200)

    import re
    assert len(re.findall("AppendEntries", tr.value())) > 5

    assert factory.state == LEADER

    ## Now we need to test that the leader sends the correct messages

    ## Test message sent after log updated
    tr.clear()
    assert len(factory.log) == 1
    assert factory.current_term == 1

    # add some dummy entries to the log
    factory.log["2"] = (1, "NEXT")
    factory.log["3"] = (1, "PROCHAIN")
    assert len(factory.log) == 3

    # check the message that is sent out
    clock.advance(30 / 1000.0)
    (msg_type, current_term, name, log_index, log_term, entries,
     msg_commit_index) = unpackage_data(tr.value())[0]
    assert msg_type == "AppendEntries"
    assert current_term == 1
    assert len(entries) == 2
    assert log_index == 0
    assert log_term == 0
    assert msg_commit_index == 1
    tr.clear()

    # add another entry to the log
    factory.log["4"] = (1, "LAST")
    assert len(factory.log) == 4
    clock.advance(30 / 1000.0)
    (msg_type, current_term, name, log_index, log_term, entries,
     msg_commit_index) = unpackage_data(tr.value())[0]
    assert len(entries) == 3

    factory.debug_cleanup()
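
The `clock.pump([0.005] * 200)` call above advances the fake reactor in 200 steps of 5 ms (one simulated second), firing the leader's heartbeat timer each time it re-arms; a single `advance(1.0)` would fire a self-rescheduling heartbeat only once, because the re-armed call lands beyond the advanced time. A tiny self-contained illustration of the difference, with an assumed 30 ms heartbeat:

from twisted.internet.task import Clock

clock = Clock()
ticks = []

def beat():
    ticks.append(clock.seconds())
    clock.callLater(0.03, beat)   # re-arm, like a leader heartbeat timer

clock.callLater(0.03, beat)
clock.pump([0.005] * 200)         # 200 * 5 ms = 1 simulated second
assert len(ticks) > 5             # many heartbeats fired along the way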
Code example #37
class KademliaProtocolTest(unittest.TestCase):
    """ Test case for the Protocol class """

    udpPort = 9182

    def setUp(self):
        self._reactor = Clock()
        self.node = Node(node_id=b'1' * 48,
                         udpPort=self.udpPort,
                         externalIP="127.0.0.1",
                         listenUDP=listenUDP,
                         resolve=resolve,
                         clock=self._reactor,
                         callLater=self._reactor.callLater)
        self.remote_node = Node(node_id=b'2' * 48,
                                udpPort=self.udpPort,
                                externalIP="127.0.0.2",
                                listenUDP=listenUDP,
                                resolve=resolve,
                                clock=self._reactor,
                                callLater=self._reactor.callLater)
        self.remote_contact = self.node.contact_manager.make_contact(
            b'2' * 48, '127.0.0.2', 9182, self.node._protocol)
        self.us_from_them = self.remote_node.contact_manager.make_contact(
            b'1' * 48, '127.0.0.1', 9182, self.remote_node._protocol)
        self.node.start_listening()
        self.remote_node.start_listening()

    @defer.inlineCallbacks
    def tearDown(self):
        yield self.node.stop()
        yield self.remote_node.stop()
        del self._reactor

    @defer.inlineCallbacks
    def testReactor(self):
        """ Tests if the reactor can start/stop the protocol correctly """

        d = defer.Deferred()
        self._reactor.callLater(1, d.callback, True)
        self._reactor.advance(1)
        result = yield d
        self.assertTrue(result)

    @defer.inlineCallbacks
    def testRPCTimeout(self):
        """ Tests if a RPC message sent to a dead remote node times out correctly """
        yield self.remote_node.stop()
        self._reactor.pump([1 for _ in range(10)])
        self.node.addContact(self.remote_contact)

        @rpcmethod
        def fake_ping(*args, **kwargs):
            time.sleep(lbrynet.dht.constants.rpcTimeout + 1)
            return 'pong'

        real_ping = self.node.ping
        real_timeout = lbrynet.dht.constants.rpcTimeout
        real_attempts = lbrynet.dht.constants.rpcAttempts
        lbrynet.dht.constants.rpcAttempts = 1
        lbrynet.dht.constants.rpcTimeout = 1

        self.node.ping = fake_ping
        # Make sure the contact was added
        self.assertFalse(
            self.remote_contact not in self.node.contacts,
            'Contact not added to fake node (error in test code)')
        self.node.start_listening()

        # Run the PING RPC (which should raise a timeout error)
        df = self.remote_contact.ping()

        def check_timeout(err):
            self.assertEqual(err.type, TimeoutError)

        df.addErrback(check_timeout)

        def reset_values():
            self.node.ping = real_ping
            lbrynet.dht.constants.rpcTimeout = real_timeout
            lbrynet.dht.constants.rpcAttempts = real_attempts

        # See if the contact was removed due to the timeout
        def check_removed_contact():
            self.assertFalse(
                self.remote_contact in self.node.contacts,
                'Contact was not removed after RPC timeout; check exception types.'
            )

        df.addCallback(lambda _: reset_values())

        # Stop the reactor if a result arrives (timeout or not)
        df.addCallback(lambda _: check_removed_contact())
        self._reactor.pump([1 for _ in range(20)])

    @defer.inlineCallbacks
    def testRPCRequest(self):
        """ Tests if a valid RPC request is executed and responded to correctly """

        yield self.node.addContact(self.remote_contact)

        self.error = None

        def handleError(f):
            self.error = 'An RPC error occurred: %s' % f.getErrorMessage()

        def handleResult(result):
            expectedResult = b'pong'
            if result != expectedResult:
                self.error = 'Result from RPC is incorrect; expected "%s", got "%s"' \
                             % (expectedResult, result)

        # Simulate the RPC
        df = self.remote_contact.ping()
        df.addCallback(handleResult)
        df.addErrback(handleError)

        self._reactor.advance(2)
        yield df

        self.assertFalse(self.error, self.error)
        # The list of sent RPC messages should be empty at this stage
        self.assertEqual(
            len(self.node._protocol._sentMessages), 0,
            'The protocol is still waiting for a RPC result, '
            'but the transaction is already done!')

    def testRPCAccess(self):
        """ Tests invalid RPC requests
        Verifies that an RPC request for an existing but unpublished
        method is denied, and that the associated (remote) exception gets
        raised locally """

        self.assertRaises(AttributeError, getattr, self.remote_contact,
                          "not_a_rpc_function")

    def testRPCRequestArgs(self):
        """ Tests if an RPC requiring arguments is executed correctly """

        self.node.addContact(self.remote_contact)
        self.error = None

        def handleError(f):
            self.error = 'An RPC error occurred: %s' % f.getErrorMessage()

        def handleResult(result):
            expectedResult = b'pong'
            if result != expectedResult:
                self.error = 'Result from RPC is incorrect; expected "%s", got "%s"' % \
                             (expectedResult, result)

        # Publish the "local" node on the network
        self.node.start_listening()
        # Simulate the RPC
        df = self.remote_contact.ping()
        df.addCallback(handleResult)
        df.addErrback(handleError)
        self._reactor.pump([1 for _ in range(10)])
        self.assertFalse(self.error, self.error)
        # The list of sent RPC messages should be empty at this stage
        self.assertEqual(
            len(self.node._protocol._sentMessages), 0,
            'The protocol is still waiting for a RPC result, '
            'but the transaction is already done!')

    @defer.inlineCallbacks
    def testDetectProtocolVersion(self):
        original_findvalue = self.remote_node.findValue
        fake_blob = unhexlify("AB" * 48)

        @rpcmethod
        def findValue(contact, key):
            result = original_findvalue(contact, key)
            result.pop(b'protocolVersion')
            return result

        self.remote_node.findValue = findValue
        d = self.remote_contact.findValue(fake_blob)
        self._reactor.advance(3)
        find_value_response = yield d
        self.assertEqual(self.remote_contact.protocolVersion, 0)
        self.assertTrue('protocolVersion' not in find_value_response)

        self.remote_node.findValue = original_findvalue
        d = self.remote_contact.findValue(fake_blob)
        self._reactor.advance(3)
        find_value_response = yield d
        self.assertEqual(self.remote_contact.protocolVersion, 1)
        self.assertTrue('protocolVersion' not in find_value_response)

        self.remote_node.findValue = findValue
        d = self.remote_contact.findValue(fake_blob)
        self._reactor.advance(3)
        find_value_response = yield d
        self.assertEqual(self.remote_contact.protocolVersion, 0)
        self.assertTrue('protocolVersion' not in find_value_response)

    @defer.inlineCallbacks
    def testStoreToPre_0_20_0_Node(self):
        def _dont_migrate(contact, method, *args):
            return args, {}

        self.remote_node._protocol._migrate_incoming_rpc_args = _dont_migrate

        original_findvalue = self.remote_node.findValue
        original_store = self.remote_node.store

        @rpcmethod
        def findValue(contact, key):
            result = original_findvalue(contact, key)
            if b'protocolVersion' in result:
                result.pop(b'protocolVersion')
            return result

        @rpcmethod
        def store(contact,
                  key,
                  value,
                  originalPublisherID=None,
                  self_store=False,
                  **kwargs):
            self.assertTrue(len(key) == 48)
            self.assertSetEqual(set(value.keys()),
                                {b'token', b'lbryid', b'port'})
            self.assertFalse(self_store)
            self.assertDictEqual(kwargs, {})
            return original_store(  # pylint: disable=too-many-function-args
                contact, key, value[b'token'], value[b'port'],
                originalPublisherID, 0)

        self.remote_node.findValue = findValue
        self.remote_node.store = store

        fake_blob = unhexlify("AB" * 48)

        d = self.remote_contact.findValue(fake_blob)
        self._reactor.advance(3)
        find_value_response = yield d
        self.assertEqual(self.remote_contact.protocolVersion, 0)
        self.assertTrue(b'protocolVersion' not in find_value_response)
        token = find_value_response[b'token']
        d = self.remote_contact.store(fake_blob, token, 3333,
                                      self.node.node_id, 0)
        self._reactor.advance(3)
        response = yield d
        self.assertEqual(response, b'OK')
        self.assertEqual(self.remote_contact.protocolVersion, 0)
        self.assertTrue(self.remote_node._dataStore.hasPeersForBlob(fake_blob))
        self.assertEqual(len(self.remote_node._dataStore.getStoringContacts()),
                         1)

    @defer.inlineCallbacks
    def testStoreFromPre_0_20_0_Node(self):
        def _dont_migrate(contact, method, *args):
            return args

        self.remote_node._protocol._migrate_outgoing_rpc_args = _dont_migrate

        us_from_them = self.remote_node.contact_manager.make_contact(
            b'1' * 48, '127.0.0.1', self.udpPort, self.remote_node._protocol)

        fake_blob = unhexlify("AB" * 48)

        d = us_from_them.findValue(fake_blob)
        self._reactor.advance(3)
        find_value_response = yield d
        self.assertEqual(self.remote_contact.protocolVersion, 0)
        self.assertTrue(b'protocolVersion' not in find_value_response)
        token = find_value_response[b'token']
        us_from_them.update_protocol_version(0)
        d = self.remote_node._protocol.sendRPC(
            us_from_them, b"store", (fake_blob, {
                b'lbryid': self.remote_node.node_id,
                b'token': token,
                b'port': 3333
            }))
        self._reactor.advance(3)
        response = yield d
        self.assertEqual(response, b'OK')
        self.assertEqual(self.remote_contact.protocolVersion, 0)
        self.assertTrue(self.node._dataStore.hasPeersForBlob(fake_blob))
        self.assertEqual(len(self.node._dataStore.getStoringContacts()), 1)
        self.assertIs(self.node._dataStore.getStoringContacts()[0],
                      self.remote_contact)

    @defer.inlineCallbacks
    def test_find_node(self):
        self.node.addContact(
            self.node.contact_manager.make_contact(self.remote_contact.id,
                                                   self.remote_contact.address,
                                                   self.remote_contact.port,
                                                   self.node._protocol))
        result = self.node.findContact(b'0' * 48)
        for _ in range(6):
            self._reactor.advance(1)
        self.assertEqual((yield result), None)
        result = self.node.findContact(self.remote_contact.id)
        for _ in range(6):
            self._reactor.advance(1)
        self.assertEqual((yield result).id, self.remote_contact.id)
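
The Node constructor above accepts `clock` and `callLater` arguments, which is what lets these tests drive DHT timeouts with `advance()` and `pump()` instead of real sleeps. A generic sketch of that dependency-injection pattern (hypothetical class, not the lbrynet code):

from twisted.internet import reactor
from twisted.internet.task import Clock

class PingScheduler(object):
    """Schedule pings via an injectable callLater (real reactor by default)."""

    def __init__(self, callLater=reactor.callLater):
        self._callLater = callLater

    def schedule_ping(self, delay, ping):
        return self._callLater(delay, ping)

# In a test:
clock = Clock()
scheduler = PingScheduler(callLater=clock.callLater)
fired = []
scheduler.schedule_ping(1, lambda: fired.append(True))
clock.advance(1)
assert fired == [True]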
Code example #38
class TestSmppService(VumiTestCase):
    @inlineCallbacks
    def setUp(self):
        self.clock = Clock()
        self.persistence_helper = self.add_helper(PersistenceHelper())
        self.redis = yield self.persistence_helper.get_redis_manager()
        self.fake_smsc = FakeSMSC(auto_accept=False)
        self.default_config = {
            'transport_name': 'sphex_transport',
            'twisted_endpoint': self.fake_smsc.endpoint,
            'system_id': 'system_id',
            'password': '******',
        }

    def get_service(self, config={}, bind_type='TRX', start=True):
        """
        Create and optionally start a new service object.
        """
        cfg = self.default_config.copy()
        cfg.update(config)
        dummy_transport = DummySmppTransport(self.clock, self.redis, cfg)
        service = SmppService(self.fake_smsc.endpoint, bind_type,
                              dummy_transport)
        service.clock = self.clock

        d = succeed(service)
        if start:
            d.addCallback(self.start_service)
        return d

    def start_service(self, service, accept_connection=True):
        """
        Start the given service.
        """
        service.startService()
        self.clock.advance(0)
        d = self.fake_smsc.await_connecting()
        if accept_connection:
            d.addCallback(lambda _: self.fake_smsc.accept_connection())
        return d.addCallback(lambda _: service)

    def lookup_message_ids(self, service, seq_nums):
        """
        Find vumi message ids associated with SMPP sequence numbers.
        """
        lookup_func = service.message_stash.get_sequence_number_message_id
        return gatherResults([lookup_func(seq_num) for seq_num in seq_nums])

    def set_sequence_number(self, service, seq_nr):
        return service.sequence_generator.redis.set(
            'smpp_last_sequence_number', seq_nr)

    @inlineCallbacks
    def test_start_sequence(self):
        """
        The service goes through several states while starting.
        """
        # New service, never started.
        service = yield self.get_service(start=False)
        self.assertEqual(service.running, False)
        self.assertEqual(service.get_bind_state(), EsmeProtocol.CLOSED_STATE)

        # Start, but don't connect.
        yield self.start_service(service, accept_connection=False)
        self.assertEqual(service.running, True)
        self.assertEqual(service.get_bind_state(), EsmeProtocol.CLOSED_STATE)

        # Connect, but don't bind.
        yield self.fake_smsc.accept_connection()
        self.assertEqual(service.running, True)
        self.assertEqual(service.get_bind_state(), EsmeProtocol.OPEN_STATE)
        bind_pdu = yield self.fake_smsc.await_pdu()
        self.assertEqual(command_id(bind_pdu), 'bind_transceiver')

        # Bind.
        yield self.fake_smsc.bind(bind_pdu)
        self.assertEqual(service.running, True)
        self.assertEqual(service.get_bind_state(),
                         EsmeProtocol.BOUND_STATE_TRX)

    @inlineCallbacks
    def test_connect_retries(self):
        """
        If we fail to connect, we retry.
        """
        service = yield self.get_service(start=False)
        self.assertEqual(self.fake_smsc.has_pending_connection(), False)

        # Start, but don't connect.
        yield self.start_service(service, accept_connection=False)
        self.assertEqual(self.fake_smsc.has_pending_connection(), True)
        self.assertEqual(service._protocol, None)
        self.assertEqual(service.retries, 1)

        # Reject the connection.
        yield self.fake_smsc.reject_connection()
        self.assertEqual(service._protocol, None)
        self.assertEqual(service.retries, 2)

        # Advance to the next connection attempt.
        self.clock.advance(service.delay)
        self.assertEqual(self.fake_smsc.has_pending_connection(), True)
        self.assertEqual(service._protocol, None)
        self.assertEqual(service.retries, 2)

        # Accept the connection.
        yield self.fake_smsc.accept_connection()
        self.assertEqual(service.running, True)
        self.assertNotEqual(service._protocol, None)

    @inlineCallbacks
    def test_submit_sm(self):
        """
        When bound, we can send a message.
        """
        service = yield self.get_service()
        yield self.fake_smsc.bind()

        seq_nums = yield service.submit_sm('abc123',
                                           'dest_addr',
                                           short_message='foo')
        submit_sm = yield self.fake_smsc.await_pdu()
        self.assertEqual(command_id(submit_sm), 'submit_sm')
        stored_ids = yield self.lookup_message_ids(service, seq_nums)
        self.assertEqual(['abc123'], stored_ids)

    @inlineCallbacks
    def test_submit_sm_unbound(self):
        """
        When unbound, we can't send a message.
        """
        service = yield self.get_service()

        self.assertRaises(EsmeProtocolError,
                          service.submit_sm,
                          'abc123',
                          'dest_addr',
                          short_message='foo')

    @inlineCallbacks
    def test_submit_sm_not_connected(self):
        """
        When not connected, we can't send a message.
        """
        service = yield self.get_service(start=False)
        yield self.start_service(service, accept_connection=False)

        self.assertRaises(EsmeProtocolError,
                          service.submit_sm,
                          'abc123',
                          'dest_addr',
                          short_message='foo')

    @skiptest("FIXME: We don't actually unbind and disconnect yet.")
    @inlineCallbacks
    def test_handle_unbind(self):
        """
        If the SMSC sends an unbind command, we respond and disconnect.
        """
        service = yield self.get_service()
        yield self.fake_smsc.bind()

        self.assertEqual(service.is_bound(), True)
        self.fake_smsc.send_pdu(Unbind(7))
        unbind_resp_pdu = yield self.fake_smsc.await_pdu()
        self.assertEqual(command_id(unbind_resp_pdu), 'unbind_resp')
        self.assertEqual(service.is_bound(), False)

    @inlineCallbacks
    def test_csm_split_message(self):
        """
        A multipart message is split into chunks such that the smallest number
        of message parts is required.
        """
        service = yield self.get_service()

        split = lambda msg: service.csm_split_message(msg.encode('utf-8'))

        # these are fine because they're in the 7-bit character set
        self.assertEqual(1, len(split(u'&' * 140)))
        self.assertEqual(1, len(split(u'&' * 160)))
        # ± is not in the 7-bit character set so it should utf-8 encode it
        # which bumps it over the 140 bytes
        self.assertEqual(2, len(split(u'±' + u'1' * 139)))

    @inlineCallbacks
    def test_submit_sm_long(self):
        """
        A long message can be sent in a single PDU using the optional
        `message_payload` PDU field.
        """
        service = yield self.get_service()
        yield self.fake_smsc.bind()

        long_message = 'This is a long message.' * 20
        seq_nums = yield service.submit_sm_long('abc123', 'dest_addr',
                                                long_message)
        submit_sm = yield self.fake_smsc.await_pdu()
        pdu_opts = unpacked_pdu_opts(submit_sm)

        self.assertEqual('submit_sm', submit_sm['header']['command_id'])
        self.assertEqual(
            None, submit_sm['body']['mandatory_parameters']['short_message'])
        self.assertEqual(''.join('%02x' % ord(c) for c in long_message),
                         pdu_opts['message_payload'])
        stored_ids = yield self.lookup_message_ids(service, seq_nums)
        self.assertEqual(['abc123'], stored_ids)

    @inlineCallbacks
    def test_submit_csm_sar(self):
        """
        A long message can be sent in multiple PDUs with SAR fields set to
        instruct the SMSC to build user data headers.
        """
        service = yield self.get_service({'send_multipart_sar': True})
        yield self.fake_smsc.bind()

        long_message = 'This is a long message.' * 20
        seq_nums = yield service.submit_csm_sar('abc123',
                                                'dest_addr',
                                                short_message=long_message)
        pdus = yield self.fake_smsc.await_pdus(4)
        # seq no 1 == bind_transceiver, 2 == enquire_link, 3 == sar_msg_ref_num
        self.assertEqual([4, 5, 6, 7], seq_nums)
        msg_parts = []
        msg_refs = []

        for i, sm in enumerate(pdus):
            pdu_opts = unpacked_pdu_opts(sm)
            mandatory_parameters = sm['body']['mandatory_parameters']

            self.assertEqual('submit_sm', sm['header']['command_id'])
            msg_parts.append(mandatory_parameters['short_message'])
            self.assertTrue(len(mandatory_parameters['short_message']) <= 130)
            msg_refs.append(pdu_opts['sar_msg_ref_num'])
            self.assertEqual(i + 1, pdu_opts['sar_segment_seqnum'])
            self.assertEqual(4, pdu_opts['sar_total_segments'])

        self.assertEqual(long_message, ''.join(msg_parts))
        self.assertEqual([3, 3, 3, 3], msg_refs)

        stored_ids = yield self.lookup_message_ids(service, seq_nums)
        self.assertEqual(['abc123'] * len(seq_nums), stored_ids)

    @inlineCallbacks
    def test_submit_csm_sar_ref_num_limit(self):
        """
        The SAR reference number is set correctly when the generated reference
        number is larger than 0xFFFF.
        """
        service = yield self.get_service({'send_multipart_sar': True})
        yield self.fake_smsc.bind()
        # forward until we go past 0xFFFF
        yield self.set_sequence_number(service, 0x10000)

        long_message = 'This is a long message.' * 20
        seq_nums = yield service.submit_csm_sar('abc123',
                                                'dest_addr',
                                                short_message=long_message)
        pdus = yield self.fake_smsc.await_pdus(4)
        msg_parts = []
        msg_refs = []

        for i, sm in enumerate(pdus):
            pdu_opts = unpacked_pdu_opts(sm)
            mandatory_parameters = sm['body']['mandatory_parameters']

            self.assertEqual('submit_sm', sm['header']['command_id'])
            msg_parts.append(mandatory_parameters['short_message'])
            self.assertTrue(len(mandatory_parameters['short_message']) <= 130)
            msg_refs.append(pdu_opts['sar_msg_ref_num'])
            self.assertEqual(i + 1, pdu_opts['sar_segment_seqnum'])
            self.assertEqual(4, pdu_opts['sar_total_segments'])

        self.assertEqual(long_message, ''.join(msg_parts))
        self.assertEqual([2, 2, 2, 2], msg_refs)

        stored_ids = yield self.lookup_message_ids(service, seq_nums)
        self.assertEqual(['abc123'] * len(seq_nums), stored_ids)

    @inlineCallbacks
    def test_submit_csm_sar_single_part(self):
        """
        If the content fits in a single message, all the multipart madness is
        avoided.
        """
        service = yield self.get_service({'send_multipart_sar': True})
        yield self.fake_smsc.bind()

        content = 'a' * 160
        seq_numbers = yield service.submit_csm_sar('abc123',
                                                   'dest_addr',
                                                   short_message=content)
        self.assertEqual(len(seq_numbers), 1)
        submit_sm_pdu = yield self.fake_smsc.await_pdu()

        self.assertEqual(command_id(submit_sm_pdu), 'submit_sm')
        self.assertEqual(short_message(submit_sm_pdu), content)
        self.assertEqual(unpacked_pdu_opts(submit_sm_pdu), {})

    @inlineCallbacks
    def test_submit_csm_udh(self):
        """
        A long message can be sent in multiple PDUs with carefully handcrafted
        user data headers.
        """
        service = yield self.get_service({'send_multipart_udh': True})
        yield self.fake_smsc.bind()

        long_message = 'This is a long message.' * 20
        seq_numbers = yield service.submit_csm_udh('abc123',
                                                   'dest_addr',
                                                   short_message=long_message)
        pdus = yield self.fake_smsc.await_pdus(4)
        self.assertEqual(len(seq_numbers), 4)

        msg_parts = []
        msg_refs = []

        for i, sm in enumerate(pdus):
            mandatory_parameters = sm['body']['mandatory_parameters']
            self.assertEqual('submit_sm', sm['header']['command_id'])
            msg = mandatory_parameters['short_message']

            udh_hlen, udh_tag, udh_len, udh_ref, udh_tot, udh_seq = [
                ord(octet) for octet in msg[:6]
            ]
            self.assertEqual(5, udh_hlen)
            self.assertEqual(0, udh_tag)
            self.assertEqual(3, udh_len)
            msg_refs.append(udh_ref)
            self.assertEqual(4, udh_tot)
            self.assertEqual(i + 1, udh_seq)
            self.assertTrue(len(msg) <= 136)
            msg_parts.append(msg[6:])
            self.assertEqual(0x40, mandatory_parameters['esm_class'])

        self.assertEqual(long_message, ''.join(msg_parts))
        self.assertEqual(1, len(set(msg_refs)))

        stored_ids = yield self.lookup_message_ids(service, seq_numbers)
        self.assertEqual(['abc123'] * len(seq_numbers), stored_ids)

    @inlineCallbacks
    def test_submit_csm_udh_ref_num_limit(self):
        """
        User data headers are crafted correctly when the generated reference
        number is larger than 0xFF.
        """
        service = yield self.get_service({'send_multipart_udh': True})
        yield self.fake_smsc.bind()
        # forward until we go past 0xFF
        yield self.set_sequence_number(service, 0x100)

        long_message = 'This is a long message.' * 20
        seq_numbers = yield service.submit_csm_udh('abc123',
                                                   'dest_addr',
                                                   short_message=long_message)
        pdus = yield self.fake_smsc.await_pdus(4)
        self.assertEqual(len(seq_numbers), 4)

        msg_parts = []
        msg_refs = []

        for i, sm in enumerate(pdus):
            mandatory_parameters = sm['body']['mandatory_parameters']
            self.assertEqual('submit_sm', sm['header']['command_id'])
            msg = mandatory_parameters['short_message']

            udh_hlen, udh_tag, udh_len, udh_ref, udh_tot, udh_seq = [
                ord(octet) for octet in msg[:6]
            ]
            self.assertEqual(5, udh_hlen)
            self.assertEqual(0, udh_tag)
            self.assertEqual(3, udh_len)
            msg_refs.append(udh_ref)
            self.assertEqual(4, udh_tot)
            self.assertEqual(i + 1, udh_seq)
            self.assertTrue(len(msg) <= 136)
            msg_parts.append(msg[6:])
            self.assertEqual(0x40, mandatory_parameters['esm_class'])

        self.assertEqual(long_message, ''.join(msg_parts))
        self.assertEqual(1, len(set(msg_refs)))

        stored_ids = yield self.lookup_message_ids(service, seq_numbers)
        self.assertEqual(['abc123'] * len(seq_numbers), stored_ids)

    @inlineCallbacks
    def test_submit_csm_udh_single_part(self):
        """
        If the content fits in a single message, all the multipart madness is
        avoided.
        """
        service = yield self.get_service({'send_multipart_udh': True})
        yield self.fake_smsc.bind()

        content = 'a' * 160
        seq_numbers = yield service.submit_csm_udh('abc123',
                                                   'dest_addr',
                                                   short_message=content)
        self.assertEqual(len(seq_numbers), 1)
        submit_sm_pdu = yield self.fake_smsc.await_pdu()

        self.assertEqual(command_id(submit_sm_pdu), 'submit_sm')
        self.assertEqual(short_message(submit_sm_pdu), content)
        self.assertEqual(
            submit_sm_pdu['body']['mandatory_parameters']['esm_class'], 0)

    @inlineCallbacks
    def test_pdu_cache_persistence(self):
        """
        A cached PDU has an appropriate TTL and can be deleted.
        """
        service = yield self.get_service()

        message_stash = service.message_stash
        config = service.get_config()

        pdu = SubmitSM(1337, short_message="foo")
        yield message_stash.cache_pdu("vumi0", pdu)

        ttl = yield message_stash.redis.ttl(pdu_key(1337))
        self.assertTrue(0 < ttl <= config.submit_sm_expiry)

        pdu_data = yield message_stash.get_cached_pdu(1337)
        self.assertEqual(pdu_data.vumi_message_id, "vumi0")
        self.assertEqual(pdu_data.pdu.get_hex(), pdu.get_hex())

        yield message_stash.delete_cached_pdu(1337)
        deleted_pdu_data = yield message_stash.get_cached_pdu(1337)
        self.assertEqual(deleted_pdu_data, None)
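
The UDH assertions in `test_submit_csm_udh` above decode the first six octets of each part as `05 00 03 <ref> <total> <seq>`, the standard concatenated-SMS user data header. A standalone sketch of building such parts follows; it is not the vumi implementation, and the 134-byte chunk size and the 8-bit masking of the reference number are assumptions:

def build_udh(ref, total, seq):
    # UDHL=5, IEI 0x00 (concatenated SM, 8-bit reference), IEDL=3
    return bytes(bytearray([0x05, 0x00, 0x03, ref & 0xFF, total, seq]))

def split_with_udh(payload, ref, chunk_size=134):
    # A 140-byte SMS payload minus the 6-byte header leaves 134 bytes per part.
    parts = [payload[i:i + chunk_size]
             for i in range(0, len(payload), chunk_size)]
    return [build_udh(ref, len(parts), seq + 1) + part
            for seq, part in enumerate(parts)]

# parts = split_with_udh(b'x' * 300, ref=0x100)  # 3 parts, ref wraps to 0x00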
Code example #39
File: test_web.py Project: jaimematsuda/BolsaVL
class SessionTests(unittest.TestCase):
    """
    Tests for L{server.Session}.
    """
    def setUp(self):
        """
        Create a site with one active session using a deterministic, easily
        controlled clock.
        """
        self.clock = Clock()
        self.uid = b'unique'
        self.site = server.Site(resource.Resource())
        self.session = server.Session(self.site, self.uid, self.clock)
        self.site.sessions[self.uid] = self.session


    def test_defaultReactor(self):
        """
        If no value is passed to L{server.Session.__init__}, the global
        reactor is used.
        """
        session = server.Session(server.Site(resource.Resource()), b'123')
        self.assertIdentical(session._reactor, reactor)


    def test_startCheckingExpiration(self):
        """
        L{server.Session.startCheckingExpiration} causes the session to expire
        after L{server.Session.sessionTimeout} seconds without activity.
        """
        self.session.startCheckingExpiration()

        # Advance to almost the timeout - nothing should happen.
        self.clock.advance(self.session.sessionTimeout - 1)
        self.assertIn(self.uid, self.site.sessions)

        # Advance to the timeout, the session should expire.
        self.clock.advance(1)
        self.assertNotIn(self.uid, self.site.sessions)

        # There should be no calls left over, either.
        self.assertFalse(self.clock.calls)


    def test_expire(self):
        """
        L{server.Session.expire} expires the session.
        """
        self.session.expire()
        # It should be gone from the session dictionary.
        self.assertNotIn(self.uid, self.site.sessions)
        # And there should be no pending delayed calls.
        self.assertFalse(self.clock.calls)


    def test_expireWhileChecking(self):
        """
        L{server.Session.expire} expires the session even if the timeout call
        isn't due yet.
        """
        self.session.startCheckingExpiration()
        self.test_expire()


    def test_notifyOnExpire(self):
        """
        A function registered with L{server.Session.notifyOnExpire} is called
        when the session expires.
        """
        callbackRan = [False]
        def expired():
            callbackRan[0] = True
        self.session.notifyOnExpire(expired)
        self.session.expire()
        self.assertTrue(callbackRan[0])


    def test_touch(self):
        """
        L{server.Session.touch} updates L{server.Session.lastModified} and
        delays session timeout.
        """
        # Make sure it works before startCheckingExpiration
        self.clock.advance(3)
        self.session.touch()
        self.assertEqual(self.session.lastModified, 3)

        # And after startCheckingExpiration
        self.session.startCheckingExpiration()
        self.clock.advance(self.session.sessionTimeout - 1)
        self.session.touch()
        self.clock.advance(self.session.sessionTimeout - 1)
        self.assertIn(self.uid, self.site.sessions)

        # It should have advanced it by just sessionTimeout, no more.
        self.clock.advance(1)
        self.assertNotIn(self.uid, self.site.sessions)
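
These session tests revolve around a single delayed call: `startCheckingExpiration` schedules expiry `sessionTimeout` seconds out, `touch` pushes that call back, and `expire` cancels it. A minimal standalone sketch of that scheme (a hypothetical class, simplified from what L{server.Session} actually does):

class ExpiringSession(object):
    """Expire after sessionTimeout seconds of inactivity on a given reactor."""

    sessionTimeout = 900

    def __init__(self, reactor, on_expire):
        self._reactor = reactor
        self._on_expire = on_expire
        self._call = None

    def startCheckingExpiration(self):
        self._call = self._reactor.callLater(
            self.sessionTimeout, self._on_expire)

    def touch(self):
        # Postpone expiry by a full sessionTimeout from "now".
        if self._call is not None and self._call.active():
            self._call.reset(self.sessionTimeout)

    def expire(self):
        if self._call is not None and self._call.active():
            self._call.cancel()
        self._on_expire()

# With clock = Clock(): advance(899); touch(); advance(899) keeps the session
# alive, and the next advance(1) finally fires on_expire, as in test_touch.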
Code example #40
class TestIntervalListener(unittest.TestCase):
    audio_frequency = 1234
    wsprd_path = '/here/is/wsprd'

    def setUp(self):
        self.directory = self.mktemp()
        os.mkdir(self.directory)
        self.context = FakeContext()
        self.clockAndSpawn = Clock()
        self.clockAndSpawn.spawnProcess = self.spawnProcess
        self.spawned = []

        self.listener = WAVIntervalListener(self.directory,
                                            self.context,
                                            self.audio_frequency,
                                            _find_wsprd=self.find_wsprd,
                                            _time=self.time,
                                            _reactor=self.clockAndSpawn)

    def time(self):
        """Return some unix timestamp."""
        return 123987901.7

    def find_wsprd(self):
        return self.wsprd_path

    def spawnProcess(self, processProtocol, executable, args, env, path):
        self.spawned.append((processProtocol, executable, args, env, path))

    def test_interface(self):
        verifyObject(IWAVIntervalListener, self.listener)

    def test_filename(self):
        # Mon Jun  5 00:54:00 UTC 2017
        test_time = 1496624040.0
        filename = self.listener.filename(test_time)

        # frequency_YYMMDD_HHMM.wav
        self.assertEqual(
            filename, os.path.join(self.directory, '12345678_170605_0054.wav'))

    def test_fileOpened(self):
        self.assertEqual(self.listener.get_status(), _STATUS_IDLE)
        self.listener.fileOpened('some file')
        self.assertEqual(self.listener.get_status(), _STATUS_RECEIVING)

    def test_fileClosed(self):
        self.listener.fileOpened('some file')
        self.assertEqual(self.listener.get_status(), _STATUS_RECEIVING)
        self.listener.fileClosed('some file')
        self.assertEqual(self.listener.get_status(), _STATUS_DECODING)
        self.assertEqual(len(self.spawned), 1)

        protocol, executable, args, _, path = self.spawned[0]

        self.assertEqual(protocol.wav_filename, 'some file')
        self.assertEqual(protocol.decode_time, self.time())

        self.assertEqual(executable, self.wsprd_path)

        rx_frequency = self.context.get_absolute_frequency_cell().get()
        dial_frequency = (rx_frequency - self.audio_frequency) / 1e6
        self.assertIn(str(dial_frequency), args)

        self.assertEqual(path, self.directory)

        protocol.processEnded(None)
        self.assertEqual(self.listener.get_status(), _STATUS_IDLE)

    def test_frequency_change(self):
        """If the frequency changes during the recording, don't decode it.

        We especially wouldn't want to upload spots for one band when they
        happened on another.
        """
        self.listener.fileOpened('some file')
        self.assertEqual(self.listener.get_status(), _STATUS_RECEIVING)
        self.context.get_absolute_frequency_cell().set(654321)
        self.clockAndSpawn.advance(1)  # Allow cell subscription to fire.
        self.assertEqual(self.listener.get_status(), _STATUS_IDLE)
        self.listener.fileClosed('some file')
        self.assertFalse(self.spawned)
        self.assertEqual(self.listener.get_status(), _STATUS_IDLE)

    def test_decode_runs_long_status(self):
        """Confirm expected behavior if decode and receive overlap."""
        self.listener.fileOpened('some file')
        self.assertEqual(self.listener.get_status(), _STATUS_RECEIVING)

        self.listener.fileClosed('some file')
        self.assertEqual(self.listener.get_status(), _STATUS_DECODING)

        self.listener.fileOpened('some file')
        self.assertEqual(self.listener.get_status(),
                         _STATUS_DECODING_AND_RECEIVING)

        protocol, _, _, _, _ = self.spawned[0]
        protocol.processEnded(None)
        self.assertEqual(self.listener.get_status(), _STATUS_RECEIVING)
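
The setUp above relies on a handy property of twisted.internet.task.Clock: it only has to look like a reactor to the code under test, so extra attributes such as spawnProcess can simply be attached to it. A minimal, self-contained sketch of that idiom follows; the Decoder class and its names are invented for illustration and are not part of the project under test.

from twisted.internet.task import Clock

class Decoder(object):
    """Toy stand-in for something that spawns an external process via its reactor."""
    def __init__(self, reactor):
        self._reactor = reactor

    def wav_done(self, filename):
        # A real implementation would launch a decoder binary such as wsprd here.
        self._reactor.spawnProcess(
            None, '/usr/bin/true', ['true', filename], env={}, path='/tmp')

clock = Clock()
spawned = []
clock.spawnProcess = lambda *a, **kw: spawned.append((a, kw))  # record, never run
Decoder(clock).wav_done('capture.wav')
assert len(spawned) == 1
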
Code example #41
File: test_tracers.py Project: bbotte/tryfer-zipkin
class BufferingTracerTests(TestCase):
    def setUp(self):
        self.mock_tracer = mock.Mock()
        self.clock = Clock()
        self.tracer = BufferingTracer(self.mock_tracer,
                                      _reactor=self.clock,
                                      max_traces=5)

    def test_verifyObject(self):
        verifyObject(ITracer, self.tracer)

    def test_buffers_traces(self):
        self.tracer.record([(mock.Mock(), [mock.Mock()])])

        self.assertEqual(self.mock_tracer.record.call_count, 0)

    def test_flushes_buffer_on_max_traces(self):
        mockTrace = mock.Mock()
        mockAnnotation = mock.Mock()

        traces = [(mockTrace, [mockAnnotation]) for x in xrange(5)]

        self.tracer.record(traces)

        self.clock.advance(1)

        self.mock_tracer.record.assert_called_once_with(traces)

    def test_flushes_buffer_on_max_idle_time(self):
        mockTrace = mock.Mock()
        mockAnnotation = mock.Mock()

        traces = [(mockTrace, [mockAnnotation])]

        self.tracer.record(traces)

        self.clock.advance(10)

        self.mock_tracer.record.assert_called_once_with(traces)

    def test_new_traces_extend_idle_time(self):
        mockTrace = mock.Mock()
        mockAnnotation = mock.Mock()

        traces = [(mockTrace, [mockAnnotation])]

        # Record one trace and advance the clock by 9 seconds.
        self.tracer.record(traces)

        self.clock.advance(9)

        # We have not reached the idle time so nothing has been traced
        self.assertEqual(self.mock_tracer.record.call_count, 0)

        # Record a new trace which should reset the idle time.
        self.tracer.record(traces)

        # Advance the clock by 5 seconds.  We have now exceeded 10 seconds
        # since the original trace but we should not flush the buffer.
        self.clock.advance(5)

        self.assertEqual(self.mock_tracer.record.call_count, 0)

        # Advance the clock another 5 seconds.  And assert that both recorded
        # traces have been flushed from the buffer.
        self.clock.advance(5)

        self.mock_tracer.record.assert_called_once_with(traces + traces)

    @mock.patch('tryfer.tracers.reactor')
    def test_default_reactor(self, mock_reactor):
        tracer = BufferingTracer(mock.Mock())
        tracer.record([(mock.Mock(), [mock.Mock()])])
        self.assertEqual(mock_reactor.callLater.call_count, 1)

    def test_timer_reset_after_flush(self):
        trace = (mock.Mock(), [mock.Mock()])

        self.tracer.record([trace for x in xrange(5)])
        self.clock.advance(1)
        self.tracer.record([trace])
        self.clock.advance(1)

        self.mock_tracer.record.assert_called_once_with(
            [trace for x in xrange(5)])

    def test_complete_buffer_flushed(self):
        trace = (mock.Mock(), [mock.Mock()])
        self.tracer.record([trace for x in xrange(5)])
        self.tracer.record([trace for x in xrange(3)])

        self.clock.advance(1)

        self.mock_tracer.record.assert_called_once_with(
            [trace for x in xrange(8)])
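
The tests above drive BufferingTracer's flush behaviour purely through Clock.advance(). Below is a hedged sketch, not tryfer's actual implementation, of the flush-on-idle rule being asserted; TinyBuffer and its parameters are invented for illustration.

from twisted.internet.task import Clock

class TinyBuffer(object):
    """Collect items; flush to `sink` after `max_idle` seconds with no new items."""
    # Invented for illustration; not tryfer's BufferingTracer.
    def __init__(self, sink, reactor, max_idle=10):
        self._sink, self._reactor, self._max_idle = sink, reactor, max_idle
        self._items, self._call = [], None

    def record(self, items):
        self._items.extend(items)
        if self._call is not None and self._call.active():
            self._call.cancel()                      # new data extends the idle time
        self._call = self._reactor.callLater(self._max_idle, self._flush)

    def _flush(self):
        items, self._items = self._items, []
        self._sink(items)

clock = Clock()
flushed = []
buf = TinyBuffer(flushed.append, clock)
buf.record(['a'])
clock.advance(9)
buf.record(['b'])          # resets the idle timer, as in test_new_traces_extend_idle_time
clock.advance(9)
assert flushed == []       # 10 seconds since the *latest* record not yet reached
clock.advance(1)
assert flushed == [['a', 'b']]
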
Code example #42
File: test_protocol.py Project: ienliven/flocker
class ControlAMPTests(ControlTestCase):
    """
    Tests for ``ControlAMP`` and ``ControlServiceLocator``.
    """
    def setUp(self):
        super(ControlAMPTests, self).setUp()
        self.reactor = Clock()
        self.control_amp_service = build_control_amp_service(
            self,
            self.reactor,
        )
        self.protocol = ControlAMP(self.reactor, self.control_amp_service)
        self.client = LoopbackAMPClient(self.protocol.locator)

    def test_connection_stays_open_on_activity(self):
        """
        The AMP connection remains open when communication is received at
        any time up to the timeout limit.
        """
        self.protocol.makeConnection(StringTransportWithAbort())
        initially_aborted = self.protocol.transport.aborted
        advance_some(self.reactor)
        self.client.callRemote(NoOp)
        self.reactor.advance(PING_INTERVAL.seconds * 1.9)
        # This NoOp will reset the timeout.
        self.client.callRemote(NoOp)
        self.reactor.advance(PING_INTERVAL.seconds * 1.9)
        later_aborted = self.protocol.transport.aborted
        self.assertEqual(
            dict(initially=initially_aborted, later=later_aborted),
            dict(initially=False, later=False))

    def test_connection_closed_on_no_activity(self):
        """
        If no communication has been received for long enough that we expire
        cluster state, the silent connection is forcefully closed.
        """
        self.protocol.makeConnection(StringTransportWithAbort())
        advance_some(self.reactor)
        self.client.callRemote(NoOp)
        self.assertFalse(self.protocol.transport.aborted)
        self.reactor.advance(PING_INTERVAL.seconds * 2)
        self.assertEqual(self.protocol.transport.aborted, True)

    def test_connection_made(self):
        """
        When a connection is made the ``ControlAMP`` is added to the services
        set of connections.
        """
        marker = object()
        self.control_amp_service.connections.add(marker)
        current = self.control_amp_service.connections.copy()
        self.protocol.makeConnection(StringTransportWithAbort())
        self.assertEqual((current, self.control_amp_service.connections),
                         ({marker}, {marker, self.protocol}))

    @capture_logging(assertHasAction, AGENT_CONNECTED, succeeded=True)
    def test_connection_made_send_cluster_status(self, logger):
        """
        When a connection is made the cluster status is sent to the new client.
        """
        sent = []
        self.patch_call_remote(sent, self.protocol)
        self.control_amp_service.configuration_service.save(TEST_DEPLOYMENT)
        self.control_amp_service.cluster_state.apply_changes([NODE_STATE])

        self.protocol.makeConnection(StringTransportWithAbort())
        cluster_state = self.control_amp_service.cluster_state.as_deployment()
        self.assertEqual(
            sent[0],
            (((ClusterStatusCommand, ),
              dict(configuration=TEST_DEPLOYMENT, state=cluster_state))))

    def test_connection_lost(self):
        """
        When a connection is lost the ``ControlAMP`` is removed from the
        service's set of connections.
        """
        marker = object()
        self.control_amp_service.connections.add(marker)
        # Patching is bad.
        # https://clusterhq.atlassian.net/browse/FLOC-1603
        self.patch(self.protocol, "callRemote",
                   lambda *args, **kwargs: succeed(None))
        self.protocol.makeConnection(StringTransportWithAbort())
        self.protocol.connectionLost(Failure(ConnectionLost()))
        self.assertEqual(self.control_amp_service.connections, {marker})

    def test_version(self):
        """
        ``VersionCommand`` to the control service returns the current internal
        protocol version.
        """
        self.assertEqual(
            self.successResultOf(self.client.callRemote(VersionCommand)),
            {"major": 1})

    def test_nodestate_updates_node_state(self):
        """
        ``NodeStateCommand`` updates the node state.
        """
        changes = (NODE_STATE, NONMANIFEST)
        self.successResultOf(
            self.client.callRemote(NodeStateCommand,
                                   state_changes=changes,
                                   eliot_context=TEST_ACTION))
        self.assertEqual(
            DeploymentState(
                nodes={NODE_STATE},
                nonmanifest_datasets=NONMANIFEST.datasets,
            ),
            self.control_amp_service.cluster_state.as_deployment(),
        )

    def test_activity_refreshes_node_state(self):
        """
        Any time commands are dispatched by ``ControlAMP`` its activity
        timestamp is refreshed to prevent previously applied state from
        expiring.
        """
        self.protocol.makeConnection(StringTransportWithAbort())
        cluster_state = self.control_amp_service.cluster_state

        # Deliver some initial state (T1) which can be expected to be
        # preserved.
        self.successResultOf(
            self.client.callRemote(
                NodeStateCommand,
                state_changes=(SIMPLE_NODE_STATE, ),
                eliot_context=TEST_ACTION,
            ))
        # Let a little time pass (T2) and then cause some activity.
        advance_some(self.reactor)
        self.client.callRemote(NoOp)

        # Let enough time pass (T3) to reach EXPIRATION_TIME from T1
        advance_rest(self.reactor)
        before_wipe_state = cluster_state.as_deployment()

        # Let enough time pass (T4) to reach EXPIRATION_TIME from T2
        advance_some(self.reactor)
        after_wipe_state = cluster_state.as_deployment()

        # The state from T1 should not have been wiped at T3 but it should have
        # been wiped at T4.
        self.assertEqual(
            (before_wipe_state, after_wipe_state),
            (DeploymentState(nodes={SIMPLE_NODE_STATE}), DeploymentState()),
        )

    def test_nodestate_notifies_all_connected(self):
        """
        ``NodeStateCommand`` results in all connected ``ControlAMP``
        connections getting the updated cluster state along with the
        desired configuration.
        """
        self.control_amp_service.configuration_service.save(TEST_DEPLOYMENT)

        agents = [FakeAgent(), FakeAgent()]
        clients = list(AgentAMP(Clock(), agent) for agent in agents)
        servers = list(LoopbackAMPClient(client.locator) for client in clients)

        for server in servers:
            delayed = DelayedAMPClient(server)
            self.control_amp_service.connected(delayed)
            delayed.respond()

        self.successResultOf(
            self.client.callRemote(NodeStateCommand,
                                   state_changes=(NODE_STATE, ),
                                   eliot_context=TEST_ACTION))

        cluster_state = self.control_amp_service.cluster_state.as_deployment()
        expected = dict(configuration=TEST_DEPLOYMENT, state=cluster_state)
        self.assertEqual(
            [expected] * len(agents),
            list(
                dict(configuration=agent.desired, state=agent.actual)
                for agent in agents),
        )

    def test_too_long_node_state(self):
        """
        AMP protocol can transmit node states with 800 applications.
        """
        node_prototype = NodeState(
            hostname=u"192.0.3.13",
            uuid=uuid4(),
            applications=[],
        )
        node = huge_node(node_prototype)
        d = self.client.callRemote(
            NodeStateCommand,
            state_changes=(node, ),
            eliot_context=TEST_ACTION,
        )
        self.successResultOf(d)
        self.assertEqual(
            DeploymentState(nodes=[node]),
            self.control_amp_service.cluster_state.as_deployment(),
        )

    def test_set_node_era(self):
        """
        A ``SetNodeEraCommand`` results in the node's era being
        updated.
        """
        node_uuid = uuid4()
        era = uuid4()
        d = self.client.callRemote(SetNodeEraCommand,
                                   node_uuid=unicode(node_uuid),
                                   era=unicode(era))
        self.successResultOf(d)
        self.assertEqual(
            DeploymentState(node_uuid_to_era={node_uuid: era}),
            self.control_amp_service.cluster_state.as_deployment(),
        )
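
The first two tests in this class pin down a keep-alive rule: any traffic resets the timeout, and a long enough silence aborts the transport. As a rough sketch under assumed semantics (IdleWatchdog is not a flocker class), the same pattern reduces to one callLater plus DelayedCall.reset, driven by a Clock exactly as in the tests.

from twisted.internet.task import Clock

class IdleWatchdog(object):
    """Call `on_timeout` if `activity()` is not called for `timeout` seconds."""
    # Invented for illustration only.
    def __init__(self, reactor, timeout, on_timeout):
        self._timeout = timeout
        self._call = reactor.callLater(timeout, on_timeout)

    def activity(self):
        if self._call.active():
            self._call.reset(self._timeout)   # any traffic pushes the deadline out

clock = Clock()
aborted = []
dog = IdleWatchdog(clock, timeout=10, on_timeout=lambda: aborted.append(True))
clock.advance(9)
dog.activity()          # traffic just before the deadline keeps the link open
clock.advance(9)
assert aborted == []
clock.advance(1)        # a full quiet period after the last activity
assert aborted == [True]
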
Code example #43
File: test_protocol.py Project: miamitops/vumi
class EsmeTestCase(VumiTestCase):

    @inlineCallbacks
    def setUp(self):
        self.tx_helper = self.add_helper(TransportHelper(DummySmppTransport))
        self.persistence_helper = self.add_helper(PersistenceHelper())
        self.redis = yield self.persistence_helper.get_redis_manager()
        self.clock = Clock()
        self.patch(EsmeTransceiver, 'clock', self.clock)

    @inlineCallbacks
    def get_protocol(self, config={},
                     deliver_sm_processor=None, dr_processor=None,
                     factory_class=None):

        factory_class = factory_class or EsmeTransceiverFactory

        default_config = {
            'transport_name': 'sphex_transport',
            'twisted_endpoint': 'tcp:host=127.0.0.1:port=0',
            'system_id': 'system_id',
            'password': '******',
            'smpp_bind_timeout': 30,
        }

        if deliver_sm_processor:
            default_config['deliver_short_message_processor'] = (
                deliver_sm_processor)

        if dr_processor:
            default_config['delivery_report_processor'] = (
                dr_processor)

        default_config.update(config)

        smpp_transport = yield self.tx_helper.get_transport(default_config)

        factory = factory_class(smpp_transport)
        proto = factory.buildProtocol(('127.0.0.1', 0))
        self.add_cleanup(proto.connectionLost, reason=ConnectionDone)
        returnValue(proto)

    def assertCommand(self, pdu, cmd_id, sequence_number=None,
                      status=None, params={}):
        self.assertEqual(command_id(pdu), cmd_id)
        if sequence_number is not None:
            self.assertEqual(seq_no(pdu), sequence_number)
        if status is not None:
            self.assertEqual(command_status(pdu), status)

        pdu_params = {}
        if params:
            if 'body' not in pdu:
                raise Exception('Body does not have parameters.')

            mandatory_parameters = pdu['body']['mandatory_parameters']
            for key in params:
                if key in mandatory_parameters:
                    pdu_params[key] = mandatory_parameters[key]

            self.assertEqual(params, pdu_params)

    @inlineCallbacks
    def setup_bind(self, config={}, clear=True, factory_class=None):
        protocol = yield self.get_protocol(config, factory_class=factory_class)
        transport = yield connect_transport(protocol)
        yield bind_protocol(transport, protocol, clear=clear)
        returnValue((transport, protocol))

    def lookup_message_ids(self, protocol, seq_nums):
        message_stash = protocol.vumi_transport.message_stash
        lookup_func = message_stash.get_sequence_number_message_id
        return gatherResults([lookup_func(seq_num) for seq_num in seq_nums])

    @inlineCallbacks
    def test_on_connection_made(self):
        protocol = yield self.get_protocol()
        self.assertEqual(protocol.state, EsmeTransceiver.CLOSED_STATE)
        transport = yield connect_transport(
            protocol, system_id='system_id', password='******')
        self.assertEqual(protocol.state, EsmeTransceiver.OPEN_STATE)
        [bind_pdu] = yield wait_for_pdus(transport, 1)
        self.assertCommand(
            bind_pdu,
            'bind_transceiver',
            sequence_number=1,
            params={
                'system_id': 'system_id',
                'password': '******',
            })

    @inlineCallbacks
    def test_drop_link(self):
        protocol = yield self.get_protocol()
        transport = yield connect_transport(protocol)
        [bind_pdu] = yield wait_for_pdus(transport, 1)
        self.assertCommand(bind_pdu, 'bind_transceiver')
        self.assertFalse(protocol.is_bound())
        self.assertEqual(protocol.state, EsmeTransceiver.OPEN_STATE)
        self.assertFalse(transport.disconnecting)
        self.clock.advance(protocol.config.smpp_bind_timeout + 1)
        [unbind_pdu] = yield wait_for_pdus(transport, 1)
        self.assertCommand(unbind_pdu, 'unbind')
        unbind_resp_pdu = UnbindResp(sequence_number=seq_no(unbind_pdu))
        yield protocol.on_pdu(unpack_pdu(unbind_resp_pdu.get_bin()))
        self.assertTrue(transport.disconnecting)

    @inlineCallbacks
    def test_on_smpp_bind(self):
        protocol = yield self.get_protocol()
        transport = yield connect_transport(protocol)
        yield bind_protocol(transport, protocol)
        self.assertEqual(protocol.state, EsmeTransceiver.BOUND_STATE_TRX)
        self.assertTrue(protocol.is_bound())
        self.assertTrue(protocol.enquire_link_call.running)

    @inlineCallbacks
    def test_handle_unbind(self):
        transport, protocol = yield self.setup_bind()
        protocol.dataReceived(Unbind(sequence_number=0).get_bin())
        [pdu] = yield wait_for_pdus(transport, 1)
        self.assertCommand(pdu, 'unbind_resp',
                           sequence_number=0, status='ESME_ROK')

    @inlineCallbacks
    def test_on_submit_sm_resp(self):
        calls = []
        self.patch(EsmeTransceiver, 'on_submit_sm_resp',
                   lambda p, *a: calls.append(a))
        transport, protocol = yield self.setup_bind()
        pdu = SubmitSMResp(sequence_number=0, message_id='foo')
        protocol.dataReceived(pdu.get_bin())
        self.assertEqual(calls, [(0, 'foo', 'ESME_ROK')])

    @inlineCallbacks
    def test_deliver_sm(self):
        calls = []
        self.patch(EsmeTransceiver, 'handle_deliver_sm',
                   lambda p, pdu: succeed(calls.append(pdu)))
        transport, protocol = yield self.setup_bind()
        pdu = DeliverSM(
            sequence_number=0, message_id='foo', short_message='bar')
        protocol.dataReceived(pdu.get_bin())
        [deliver_sm] = calls
        self.assertCommand(deliver_sm, 'deliver_sm', sequence_number=0)

    @inlineCallbacks
    def test_deliver_sm_fail(self):
        transport, protocol = yield self.setup_bind()
        pdu = DeliverSM(
            sequence_number=0, message_id='foo', data_coding=4,
            short_message='string with unknown data coding')
        protocol.dataReceived(pdu.get_bin())
        [deliver_sm_resp] = yield wait_for_pdus(transport, 1)
        self.assertCommand(
            deliver_sm_resp, 'deliver_sm_resp', sequence_number=0,
            status='ESME_RDELIVERYFAILURE')

    @inlineCallbacks
    def test_deliver_sm_fail_with_custom_error(self):
        transport, protocol = yield self.setup_bind(config={
            "deliver_sm_decoding_error": "ESME_RSYSERR"
        })
        pdu = DeliverSM(
            sequence_number=0, message_id='foo', data_coding=4,
            short_message='string with unknown data coding')
        protocol.dataReceived(pdu.get_bin())
        [deliver_sm_resp] = yield wait_for_pdus(transport, 1)
        self.assertCommand(
            deliver_sm_resp, 'deliver_sm_resp', sequence_number=0,
            status='ESME_RSYSERR')

    @inlineCallbacks
    def test_on_enquire_link(self):
        transport, protocol = yield self.setup_bind()
        pdu = EnquireLink(sequence_number=0)
        protocol.dataReceived(pdu.get_bin())
        [enquire_link_resp] = yield wait_for_pdus(transport, 1)
        self.assertCommand(
            enquire_link_resp, 'enquire_link_resp', sequence_number=0,
            status='ESME_ROK')

    @inlineCallbacks
    def test_on_enquire_link_resp(self):
        calls = []
        self.patch(EsmeTransceiver, 'handle_enquire_link_resp',
                   lambda p, pdu: calls.append(pdu))
        transport, protocol = yield self.setup_bind()
        [pdu] = calls
        # bind_transceiver is sequence_number 1
        self.assertEqual(seq_no(pdu), 2)
        self.assertEqual(command_id(pdu), 'enquire_link_resp')

    @inlineCallbacks
    def test_enquire_link_no_response(self):
        transport, protocol = yield self.setup_bind(clear=False)
        protocol.clock.advance(protocol.idle_timeout)
        [unbind_pdu] = yield wait_for_pdus(transport, 1)
        self.assertCommand(unbind_pdu, 'unbind')
        self.clock.advance(protocol.unbind_timeout)
        self.assertTrue(transport.disconnecting)

    @inlineCallbacks
    def test_enquire_link_looping(self):
        transport, protocol = yield self.setup_bind(clear=False)
        enquire_link_resp = EnquireLinkResp(1)

        protocol.clock.advance(protocol.idle_timeout - 1)
        protocol.dataReceived(enquire_link_resp.get_bin())

        protocol.clock.advance(protocol.idle_timeout - 1)
        self.assertFalse(transport.disconnecting)
        protocol.clock.advance(1)

        [unbind_pdu] = yield wait_for_pdus(transport, 1)
        self.assertCommand(unbind_pdu, 'unbind')
        unbind_resp_pdu = UnbindResp(sequence_number=seq_no(unbind_pdu))
        yield protocol.on_pdu(unpack_pdu(unbind_resp_pdu.get_bin()))
        self.assertTrue(transport.disconnecting)

    @inlineCallbacks
    def test_submit_sm(self):
        transport, protocol = yield self.setup_bind()
        seq_nums = yield protocol.submit_sm(
            'abc123', 'dest_addr', short_message='foo')
        [submit_sm] = yield wait_for_pdus(transport, 1)
        self.assertCommand(submit_sm, 'submit_sm', params={
            'short_message': 'foo',
        })
        stored_ids = yield self.lookup_message_ids(protocol, seq_nums)
        self.assertEqual(['abc123'], stored_ids)

    @inlineCallbacks
    def test_submit_sm_configured_parameters(self):
        transport, protocol = yield self.setup_bind({
            'service_type': 'stype',
            'source_addr_ton': 2,
            'source_addr_npi': 2,
            'dest_addr_ton': 2,
            'dest_addr_npi': 2,
            'registered_delivery': 0,
        })
        seq_nums = yield protocol.submit_sm(
            'abc123', 'dest_addr', short_message='foo')
        [submit_sm] = yield wait_for_pdus(transport, 1)
        self.assertCommand(submit_sm, 'submit_sm', params={
            'short_message': 'foo',
            'service_type': 'stype',
            'source_addr_ton': 'national',  # replaced by unpack_pdu()
            'source_addr_npi': 2,
            'dest_addr_ton': 'national',  # replaced by unpack_pdu()
            'dest_addr_npi': 2,
            'registered_delivery': 0,
        })
        stored_ids = yield self.lookup_message_ids(protocol, seq_nums)
        self.assertEqual(['abc123'], stored_ids)

    @inlineCallbacks
    def test_submit_sm_long(self):
        transport, protocol = yield self.setup_bind()
        long_message = 'This is a long message.' * 20
        seq_nums = yield protocol.submit_sm_long(
            'abc123', 'dest_addr', long_message)
        [submit_sm] = yield wait_for_pdus(transport, 1)
        pdu_opts = unpacked_pdu_opts(submit_sm)

        self.assertEqual('submit_sm', submit_sm['header']['command_id'])
        self.assertEqual(
            None, submit_sm['body']['mandatory_parameters']['short_message'])
        self.assertEqual(''.join('%02x' % ord(c) for c in long_message),
                         pdu_opts['message_payload'])
        stored_ids = yield self.lookup_message_ids(protocol, seq_nums)
        self.assertEqual(['abc123'], stored_ids)

    @inlineCallbacks
    def test_submit_sm_multipart_udh(self):
        transport, protocol = yield self.setup_bind(config={
            'send_multipart_udh': True,
        })
        long_message = 'This is a long message.' * 20
        seq_numbers = yield protocol.submit_csm_udh(
            'abc123', 'dest_addr', short_message=long_message)
        pdus = yield wait_for_pdus(transport, 4)
        self.assertEqual(len(seq_numbers), 4)

        msg_parts = []
        msg_refs = []

        for i, sm in enumerate(pdus):
            mandatory_parameters = sm['body']['mandatory_parameters']
            self.assertEqual('submit_sm', sm['header']['command_id'])
            msg = mandatory_parameters['short_message']

            udh_hlen, udh_tag, udh_len, udh_ref, udh_tot, udh_seq = [
                ord(octet) for octet in msg[:6]]
            self.assertEqual(5, udh_hlen)
            self.assertEqual(0, udh_tag)
            self.assertEqual(3, udh_len)
            msg_refs.append(udh_ref)
            self.assertEqual(4, udh_tot)
            self.assertEqual(i + 1, udh_seq)
            self.assertTrue(len(msg) <= 136)
            msg_parts.append(msg[6:])
            self.assertEqual(0x40, mandatory_parameters['esm_class'])

        self.assertEqual(long_message, ''.join(msg_parts))
        self.assertEqual(1, len(set(msg_refs)))

        stored_ids = yield self.lookup_message_ids(protocol, seq_numbers)
        self.assertEqual(['abc123'] * len(seq_numbers), stored_ids)

    @inlineCallbacks
    def test_udh_ref_num_limit(self):
        transport, protocol = yield self.setup_bind(config={
            'send_multipart_udh': True,
        })

        # forward until we go past 0xFF
        yield protocol.sequence_generator.advance(0xFF)

        long_message = 'This is a long message.' * 20
        seq_numbers = yield protocol.submit_csm_udh(
            'abc123', 'dest_addr', short_message=long_message)
        pdus = yield wait_for_pdus(transport, 4)

        self.assertEqual(len(seq_numbers), 4)
        self.assertTrue(all([sn > 0xFF for sn in seq_numbers]))

        msg_refs = []

        for pdu in pdus:
            msg = short_message(pdu)
            _, _, _, udh_ref, _, _ = [ord(octet) for octet in msg[:6]]
            msg_refs.append(udh_ref)

        self.assertEqual(1, len(set(msg_refs)))
        self.assertTrue(all([msg_ref < 0xFF for msg_ref in msg_refs]))

    @inlineCallbacks
    def test_submit_sm_multipart_sar(self):
        transport, protocol = yield self.setup_bind(config={
            'send_multipart_sar': True,
        })
        long_message = 'This is a long message.' * 20
        seq_nums = yield protocol.submit_csm_sar(
            'abc123', 'dest_addr', short_message=long_message)
        pdus = yield wait_for_pdus(transport, 4)
        # seq no 1 == bind_transceiver, 2 == enquire_link, 3 == sar_msg_ref_num
        self.assertEqual([4, 5, 6, 7], seq_nums)
        msg_parts = []
        msg_refs = []

        for i, sm in enumerate(pdus):
            pdu_opts = unpacked_pdu_opts(sm)
            mandatory_parameters = sm['body']['mandatory_parameters']

            self.assertEqual('submit_sm', sm['header']['command_id'])
            msg_parts.append(mandatory_parameters['short_message'])
            self.assertTrue(len(mandatory_parameters['short_message']) <= 130)
            msg_refs.append(pdu_opts['sar_msg_ref_num'])
            self.assertEqual(i + 1, pdu_opts['sar_segment_seqnum'])
            self.assertEqual(4, pdu_opts['sar_total_segments'])

        self.assertEqual(long_message, ''.join(msg_parts))
        self.assertEqual([3, 3, 3, 3], msg_refs)

        stored_ids = yield self.lookup_message_ids(protocol, seq_nums)
        self.assertEqual(['abc123'] * len(seq_nums), stored_ids)

    @inlineCallbacks
    def test_sar_ref_num_limit(self):
        transport, protocol = yield self.setup_bind(config={
            'send_multipart_udh': True,
        })

        # forward until we go past 0xFFFF
        yield protocol.sequence_generator.advance(0xFFFF)

        long_message = 'This is a long message.' * 20
        seq_numbers = yield protocol.submit_csm_udh(
            'abc123', 'dest_addr', short_message=long_message)
        pdus = yield wait_for_pdus(transport, 4)

        self.assertEqual(len(seq_numbers), 4)
        self.assertTrue(all([sn > 0xFF for sn in seq_numbers]))

        msg_refs = []

        for pdu in pdus:
            msg = short_message(pdu)
            _, _, _, udh_ref, _, _ = [ord(octet) for octet in msg[:6]]
            msg_refs.append(udh_ref)

        self.assertEqual(1, len(set(msg_refs)))
        self.assertTrue(all([msg_ref < 0xFFFF for msg_ref in msg_refs]))

    @inlineCallbacks
    def test_query_sm(self):
        transport, protocol = yield self.setup_bind()
        yield protocol.query_sm('foo', source_addr='bar')
        [query_sm] = yield wait_for_pdus(transport, 1)
        self.assertCommand(query_sm, 'query_sm', params={
            'message_id': 'foo',
            'source_addr': 'bar',
        })

    @inlineCallbacks
    def test_unbind(self):
        calls = []
        self.patch(EsmeTransceiver, 'handle_unbind_resp',
                   lambda p, pdu: calls.append(pdu))
        transport, protocol = yield self.setup_bind()
        yield protocol.unbind()
        [unbind_pdu] = yield wait_for_pdus(transport, 1)
        protocol.dataReceived(UnbindResp(seq_no(unbind_pdu)).get_bin())
        [unbind_resp_pdu] = calls
        self.assertEqual(seq_no(unbind_resp_pdu), seq_no(unbind_pdu))

    @inlineCallbacks
    def test_bind_transmitter(self):
        transport, protocol = yield self.setup_bind(
            factory_class=EsmeTransmitterFactory)
        self.assertTrue(protocol.is_bound())
        self.assertEqual(protocol.state, protocol.BOUND_STATE_TX)

    @inlineCallbacks
    def test_bind_receiver(self):
        transport, protocol = yield self.setup_bind(
            factory_class=EsmeReceiverFactory)
        self.assertTrue(protocol.is_bound())
        self.assertEqual(protocol.state, protocol.BOUND_STATE_RX)

    @inlineCallbacks
    def test_partial_pdu_data_received(self):
        calls = []
        self.patch(EsmeTransceiver, 'handle_deliver_sm',
                   lambda p, pdu: calls.append(pdu))
        transport, protocol = yield self.setup_bind()
        deliver_sm = DeliverSM(sequence_number=1, short_message='foo')
        pdu = deliver_sm.get_bin()
        half = len(pdu) / 2
        pdu_part1, pdu_part2 = pdu[:half], pdu[half:]
        protocol.dataReceived(pdu_part1)
        self.assertEqual([], calls)
        protocol.dataReceived(pdu_part2)
        [handled_pdu] = calls
        self.assertEqual(command_id(handled_pdu), 'deliver_sm')
        self.assertEqual(seq_no(handled_pdu), 1)
        self.assertEqual(short_message(handled_pdu), 'foo')

    @inlineCallbacks
    def test_unsupported_command_id(self):
        calls = []
        self.patch(EsmeTransceiver, 'on_unsupported_command_id',
                   lambda p, pdu: calls.append(pdu))
        invalid_pdu = {
            'header': {
                'command_id': 'foo',
            }
        }
        transport, protocol = yield self.setup_bind()
        protocol.on_pdu(invalid_pdu)
        self.assertEqual(calls, [invalid_pdu])

    @inlineCallbacks
    def test_csm_split_message(self):
        protocol = yield self.get_protocol()

        def split(msg):
            return protocol.csm_split_message(msg.encode('utf-8'))

        # these are fine because they're in the 7-bit character set
        self.assertEqual(1, len(split(u'&' * 140)))
        self.assertEqual(1, len(split(u'&' * 160)))
        # ± is not in the 7-bit character set so it should utf-8 encode it
        # which bumps it over the 140 bytes
        self.assertEqual(2, len(split(u'±' + u'1' * 139)))
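
setUp in this example patches EsmeTransceiver's class-level clock attribute, so every protocol instance schedules its bind timeout and enquire_link loop against the test Clock. A minimal sketch of that idiom, using an invented Beeper class rather than vumi's transceiver:

from twisted.internet import reactor
from twisted.internet.task import Clock

class Beeper(object):
    # Hypothetical class; only the clock-patching idiom matches the test above.
    clock = reactor                      # real reactor by default

    def __init__(self):
        self.beeps = 0
        self.clock.callLater(30, self._beep)

    def _beep(self):
        self.beeps += 1

clock = Clock()
Beeper.clock = clock                     # what TestCase.patch() does, minus the cleanup
b = Beeper()
clock.advance(30)
assert b.beeps == 1
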
Code example #44
class GAIEndpointTestCase(TestCase):
    """
    Test cases for L{GAIEndpoint}.
    """

    def makeEndpoint(self, host="abcd.example.com", port=4321):
        gaie = GAIEndpoint(self.clock, host, port)
        gaie.subEndpoint = self.subEndpoint
        gaie.deferToThread = self.deferToSomething
        return gaie


    def subEndpoint(self, reactor, host, port, contextFactory):
        ftcpe = FakeTCPEndpoint(reactor, host, port, contextFactory)
        self.fakeRealEndpoints.append(ftcpe)
        return ftcpe


    def deferToSomething(self, func, *a, **k):
        """
        Test replacement for L{deferToThread}, which can only call
        L{getaddrinfo}.
        """
        d = Deferred()
        if func is not getaddrinfo:
            self.fail("Only getaddrinfo should be invoked in a thread.")
        self.inThreads.append((d, func, a, k))
        return d


    def gaiResult(self, family, socktype, proto, canonname, sockaddr):
        """
        A call to L{getaddrinfo} has succeeded; invoke the L{Deferred} waiting
        on it.
        """
        d, _ignore_f, _ignore_a, _ignore_k = self.inThreads.pop(0)
        d.callback([(family, socktype, proto, canonname, sockaddr)])


    def setUp(self):
        """
        Set up!
        """
        self.inThreads = []
        self.clock = Clock()
        self.fakeRealEndpoints = []
        self.makeEndpoint()


    def test_simpleSuccess(self):
        """
        If C{getaddrinfo} gives one result, L{GAIEndpoint.connect} connects to it.
        """
        gaiendpoint = self.makeEndpoint()
        protos = []
        f = Factory()
        f.protocol = Protocol
        gaiendpoint.connect(f).addCallback(protos.append)
        WHO_CARES = 0
        WHAT_EVER = ""
        self.gaiResult(AF_INET, SOCK_STREAM, WHO_CARES, WHAT_EVER,
                       ("1.2.3.4", 4321))
        self.clock.advance(1.0)
        attempt = self.fakeRealEndpoints[0]._attempt
        attempt.callback(self.fakeRealEndpoints[0]._factory.buildProtocol(None))
        self.assertEqual(len(protos), 1)
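
The deferToSomething replacement above is the interesting trick: getaddrinfo never actually runs in a thread; the test hands out Deferreds and fires them whenever it chooses. A stripped-down sketch of the same technique, with hypothetical Resolver and fake_defer names:

from twisted.internet.defer import Deferred

class Resolver(object):
    # Invented for illustration; not GAIEndpoint.
    def __init__(self, defer_to_thread):
        self._defer = defer_to_thread

    def resolve(self, host):
        # Production code would hand getaddrinfo to a real deferToThread here.
        return self._defer(host)

pending = []
def fake_defer(host):
    d = Deferred()
    pending.append((host, d))
    return d

results = []
Resolver(fake_defer).resolve("example.com").addCallback(results.append)
assert results == []                        # nothing "resolved" yet
pending[0][1].callback("93.184.216.34")     # the test decides when the lookup finishes
assert results == ["93.184.216.34"]
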
Code example #45
File: test_protocol.py Project: ienliven/flocker
class AgentClientTests(TestCase):
    """
    Tests for ``AgentAMP``.
    """
    def setUp(self):
        super(AgentClientTests, self).setUp()
        self.agent = FakeAgent()
        self.reactor = Clock()
        self.client = AgentAMP(self.reactor, self.agent)
        self.client.makeConnection(StringTransportWithAbort())
        # The server needs to send commands to the client, so it acts as
        # an AMP client in that regard. Due to https://tm.tl/7761 we need
        # to access the passed in locator directly.
        self.server = LoopbackAMPClient(self.client.locator)

    def test_connection_stays_open_on_activity(self):
        """
        The AMP connection remains open when communication is received at
        any time up to the timeout limit.
        """
        advance_some(self.reactor)
        self.server.callRemote(NoOp)
        self.reactor.advance(PING_INTERVAL.seconds * 1.9)
        # This NoOp will reset the timeout.
        self.server.callRemote(NoOp)
        self.reactor.advance(PING_INTERVAL.seconds * 1.9)
        self.assertEqual(self.client.transport.aborted, False)

    def test_connection_closed_on_no_activity(self):
        """
        If no communication has been received for long enough that we expire
        cluster state, the silent connection is forcefully closed.
        """
        advance_some(self.reactor)
        self.server.callRemote(NoOp)
        self.reactor.advance(PING_INTERVAL.seconds * 2)
        self.assertEqual(self.client.transport.aborted, True)

    def test_initially_not_connected(self):
        """
        The agent does not get told a connection was made or lost before it's
        actually happened.
        """
        self.agent = FakeAgent()
        self.reactor = Clock()
        self.client = AgentAMP(self.reactor, self.agent)
        self.assertEqual(self.agent,
                         FakeAgent(is_connected=False, is_disconnected=False))

    def test_connection_made(self):
        """
        Connection made events are passed on to the agent.
        """
        self.assertEqual(self.agent,
                         FakeAgent(is_connected=True, client=self.client))

    def test_connection_lost(self):
        """
        Connection lost events are passed on to the agent.
        """
        self.client.connectionLost(Failure(ConnectionLost()))
        self.assertEqual(self.agent,
                         FakeAgent(is_connected=True, is_disconnected=True))

    def test_too_long_configuration(self):
        """
        AMP protocol can transmit configurations with 800 applications.
        """
        actual = DeploymentState(nodes=[])
        configuration = huge_deployment()
        d = self.server.callRemote(ClusterStatusCommand,
                                   configuration=configuration,
                                   state=actual,
                                   eliot_context=TEST_ACTION)

        self.successResultOf(d)
        self.assertEqual(configuration, self.agent.desired)

    def test_too_long_state(self):
        """
        AMP protocol can transmit states with 800 applications.
        """
        state = huge_state()
        d = self.server.callRemote(
            ClusterStatusCommand,
            configuration=Deployment(),
            state=state,
            eliot_context=TEST_ACTION,
        )
        self.successResultOf(d)
        self.assertEqual(state, self.agent.actual)

    def test_cluster_updated(self):
        """
        ``ClusterStatusCommand`` sent to the ``AgentClient`` result in agent
        having cluster state updated.
        """
        actual = DeploymentState(nodes=[])
        d = self.server.callRemote(ClusterStatusCommand,
                                   configuration=TEST_DEPLOYMENT,
                                   state=actual,
                                   eliot_context=TEST_ACTION)

        self.successResultOf(d)
        self.assertEqual(
            self.agent,
            FakeAgent(is_connected=True,
                      client=self.client,
                      desired=TEST_DEPLOYMENT,
                      actual=actual))
Code example #46
File: test_protocol.py Project: westerncapelabs/vumi
class TestEsmeProtocol(VumiTestCase):

    @inlineCallbacks
    def setUp(self):
        self.clock = Clock()
        self.persistence_helper = self.add_helper(PersistenceHelper())
        self.redis = yield self.persistence_helper.get_redis_manager()
        self.fake_smsc = FakeSMSC(auto_accept=False)

    def get_protocol(self, config={}, bind_type='TRX', accept_connection=True):
        cfg = {
            'transport_name': 'sphex_transport',
            'twisted_endpoint': 'tcp:host=127.0.0.1:port=0',
            'system_id': 'system_id',
            'password': '******',
            'smpp_bind_timeout': 30,
        }
        cfg.update(config)
        dummy_service = DummySmppService(self.clock, self.redis, cfg)

        factory = EsmeProtocolFactory(dummy_service, bind_type)
        proto_d = self.fake_smsc.endpoint.connect(factory)
        if accept_connection:
            self.fake_smsc.accept_connection()
        return proto_d

    def assertCommand(self, pdu, cmd_id, sequence_number=None,
                      status=None, params={}):
        self.assertEqual(command_id(pdu), cmd_id)
        if sequence_number is not None:
            self.assertEqual(seq_no(pdu), sequence_number)
        if status is not None:
            self.assertEqual(command_status(pdu), status)

        pdu_params = {}
        if params:
            if 'body' not in pdu:
                raise Exception('Body does not have parameters.')

            mandatory_parameters = pdu['body']['mandatory_parameters']
            for key in params:
                if key in mandatory_parameters:
                    pdu_params[key] = mandatory_parameters[key]

            self.assertEqual(params, pdu_params)

    def lookup_message_ids(self, protocol, seq_nums):
        message_stash = protocol.service.message_stash
        lookup_func = message_stash.get_sequence_number_message_id
        return gatherResults([lookup_func(seq_num) for seq_num in seq_nums])

    @inlineCallbacks
    def test_on_connection_made(self):
        connect_d = self.get_protocol(accept_connection=False)
        protocol = yield self.fake_smsc.await_connecting()
        self.assertEqual(protocol.state, EsmeProtocol.CLOSED_STATE)
        self.fake_smsc.accept_connection()
        protocol = yield connect_d  # Same protocol.
        self.assertEqual(protocol.state, EsmeProtocol.OPEN_STATE)

        bind_pdu = yield self.fake_smsc.await_pdu()
        self.assertCommand(
            bind_pdu,
            'bind_transceiver',
            sequence_number=1,
            params={
                'system_id': 'system_id',
                'password': '******',
            })

    @inlineCallbacks
    def test_drop_link(self):
        protocol = yield self.get_protocol()
        bind_pdu = yield self.fake_smsc.await_pdu()
        self.assertCommand(bind_pdu, 'bind_transceiver')
        self.assertFalse(protocol.is_bound())
        self.assertEqual(protocol.state, EsmeProtocol.OPEN_STATE)
        self.clock.advance(protocol.config.smpp_bind_timeout + 1)
        unbind_pdu = yield self.fake_smsc.await_pdu()
        self.assertCommand(unbind_pdu, 'unbind')
        yield self.fake_smsc.send_pdu(UnbindResp(seq_no(unbind_pdu)))
        yield self.fake_smsc.await_disconnect()

    @inlineCallbacks
    def test_on_smpp_bind(self):
        protocol = yield self.get_protocol()
        yield self.fake_smsc.bind()
        self.assertEqual(protocol.state, EsmeProtocol.BOUND_STATE_TRX)
        self.assertTrue(protocol.is_bound())
        self.assertTrue(protocol.enquire_link_call.running)

    @inlineCallbacks
    def test_handle_unbind(self):
        protocol = yield self.get_protocol()
        yield self.fake_smsc.bind()
        self.assertEqual(protocol.state, EsmeProtocol.BOUND_STATE_TRX)
        self.fake_smsc.send_pdu(Unbind(0))
        pdu = yield self.fake_smsc.await_pdu()
        self.assertCommand(
            pdu, 'unbind_resp', sequence_number=0, status='ESME_ROK')
        # We don't change state here.
        self.assertEqual(protocol.state, EsmeProtocol.BOUND_STATE_TRX)

    @inlineCallbacks
    def test_on_submit_sm_resp(self):
        protocol = yield self.get_protocol()
        yield self.fake_smsc.bind()
        calls = []
        protocol.on_submit_sm_resp = lambda *a: calls.append(a)
        yield self.fake_smsc.send_pdu(SubmitSMResp(0, message_id='foo'))
        self.assertEqual(calls, [(0, 'foo', 'ESME_ROK')])

    @inlineCallbacks
    def test_deliver_sm(self):
        calls = []
        protocol = yield self.get_protocol()
        protocol.handle_deliver_sm = lambda pdu: succeed(calls.append(pdu))
        yield self.fake_smsc.bind()
        yield self.fake_smsc.send_pdu(
            DeliverSM(0, message_id='foo', short_message='bar'))
        [deliver_sm] = calls
        self.assertCommand(deliver_sm, 'deliver_sm', sequence_number=0)

    @inlineCallbacks
    def test_deliver_sm_fail(self):
        yield self.get_protocol()
        yield self.fake_smsc.bind()
        yield self.fake_smsc.send_pdu(DeliverSM(
            sequence_number=0, message_id='foo', data_coding=4,
            short_message='string with unknown data coding'))
        deliver_sm_resp = yield self.fake_smsc.await_pdu()
        self.assertCommand(
            deliver_sm_resp, 'deliver_sm_resp', sequence_number=0,
            status='ESME_RDELIVERYFAILURE')

    @inlineCallbacks
    def test_deliver_sm_fail_with_custom_error(self):
        yield self.get_protocol({
            "deliver_sm_decoding_error": "ESME_RSYSERR"
        })
        yield self.fake_smsc.bind()
        yield self.fake_smsc.send_pdu(DeliverSM(
            sequence_number=0, message_id='foo', data_coding=4,
            short_message='string with unknown data coding'))
        deliver_sm_resp = yield self.fake_smsc.await_pdu()
        self.assertCommand(
            deliver_sm_resp, 'deliver_sm_resp', sequence_number=0,
            status='ESME_RSYSERR')

    @inlineCallbacks
    def test_on_enquire_link(self):
        protocol = yield self.get_protocol()
        yield self.fake_smsc.bind()
        pdu = EnquireLink(0)
        protocol.dataReceived(pdu.get_bin())
        enquire_link_resp = yield self.fake_smsc.await_pdu()
        self.assertCommand(
            enquire_link_resp, 'enquire_link_resp', sequence_number=0,
            status='ESME_ROK')

    @inlineCallbacks
    def test_on_enquire_link_resp(self):
        protocol = yield self.get_protocol()
        calls = []
        protocol.handle_enquire_link_resp = calls.append
        yield self.fake_smsc.bind()
        [pdu] = calls
        # bind_transceiver is sequence_number 1
        self.assertEqual(seq_no(pdu), 2)
        self.assertEqual(command_id(pdu), 'enquire_link_resp')

    @inlineCallbacks
    def test_enquire_link_no_response(self):
        self.fake_smsc.auto_unbind = False
        protocol = yield self.get_protocol()
        yield self.fake_smsc.bind()
        self.assertEqual(self.fake_smsc.connected, True)
        self.clock.advance(protocol.idle_timeout)
        [enquire_link_pdu, unbind_pdu] = yield self.fake_smsc.await_pdus(2)
        self.assertCommand(enquire_link_pdu, 'enquire_link')
        self.assertCommand(unbind_pdu, 'unbind')
        self.assertEqual(self.fake_smsc.connected, True)
        self.clock.advance(protocol.unbind_timeout)
        yield self.fake_smsc.await_disconnect()

    @inlineCallbacks
    def test_enquire_link_looping(self):
        self.fake_smsc.auto_unbind = False
        protocol = yield self.get_protocol()
        yield self.fake_smsc.bind()
        self.assertEqual(self.fake_smsc.connected, True)

        # Respond to a few enquire_link cycles.
        for i in range(5):
            self.clock.advance(protocol.idle_timeout - 1)
            pdu = yield self.fake_smsc.await_pdu()
            self.assertCommand(pdu, 'enquire_link')
            yield self.fake_smsc.respond_to_enquire_link(pdu)

        # Fail to respond, so we disconnect.
        self.clock.advance(protocol.idle_timeout - 1)
        pdu = yield self.fake_smsc.await_pdu()
        self.assertCommand(pdu, 'enquire_link')
        self.clock.advance(1)
        unbind_pdu = yield self.fake_smsc.await_pdu()
        self.assertCommand(unbind_pdu, 'unbind')
        yield self.fake_smsc.send_pdu(
            UnbindResp(seq_no(unbind_pdu)))
        yield self.fake_smsc.await_disconnect()

    @inlineCallbacks
    def test_submit_sm(self):
        protocol = yield self.get_protocol()
        yield self.fake_smsc.bind()
        seq_nums = yield protocol.submit_sm(
            'abc123', 'dest_addr', short_message='foo')
        submit_sm = yield self.fake_smsc.await_pdu()
        self.assertCommand(submit_sm, 'submit_sm', params={
            'short_message': 'foo',
        })
        stored_ids = yield self.lookup_message_ids(protocol, seq_nums)
        self.assertEqual(['abc123'], stored_ids)

    @inlineCallbacks
    def test_submit_sm_configured_parameters(self):
        protocol = yield self.get_protocol({
            'service_type': 'stype',
            'source_addr_ton': 2,
            'source_addr_npi': 2,
            'dest_addr_ton': 2,
            'dest_addr_npi': 2,
            'registered_delivery': 0,
        })
        yield self.fake_smsc.bind()
        seq_nums = yield protocol.submit_sm(
            'abc123', 'dest_addr', short_message='foo')
        submit_sm = yield self.fake_smsc.await_pdu()
        self.assertCommand(submit_sm, 'submit_sm', params={
            'short_message': 'foo',
            'service_type': 'stype',
            'source_addr_ton': 'national',  # replaced by unpack_pdu()
            'source_addr_npi': 2,
            'dest_addr_ton': 'national',  # replaced by unpack_pdu()
            'dest_addr_npi': 2,
            'registered_delivery': 0,
        })
        stored_ids = yield self.lookup_message_ids(protocol, seq_nums)
        self.assertEqual(['abc123'], stored_ids)

    @inlineCallbacks
    def test_query_sm(self):
        protocol = yield self.get_protocol()
        yield self.fake_smsc.bind()
        yield protocol.query_sm('foo', source_addr='bar')
        query_sm = yield self.fake_smsc.await_pdu()
        self.assertCommand(query_sm, 'query_sm', params={
            'message_id': 'foo',
            'source_addr': 'bar',
        })

    @inlineCallbacks
    def test_unbind(self):
        protocol = yield self.get_protocol()
        calls = []
        protocol.handle_unbind_resp = calls.append
        yield self.fake_smsc.bind()
        yield protocol.unbind()
        unbind_pdu = yield self.fake_smsc.await_pdu()
        protocol.dataReceived(UnbindResp(seq_no(unbind_pdu)).get_bin())
        [unbind_resp_pdu] = calls
        self.assertEqual(seq_no(unbind_resp_pdu), seq_no(unbind_pdu))

    @inlineCallbacks
    def test_bind_transmitter(self):
        protocol = yield self.get_protocol(bind_type='TX')
        yield self.fake_smsc.bind()
        self.assertTrue(protocol.is_bound())
        self.assertEqual(protocol.state, protocol.BOUND_STATE_TX)

    @inlineCallbacks
    def test_bind_receiver(self):
        protocol = yield self.get_protocol(bind_type='RX')
        yield self.fake_smsc.bind()
        self.assertTrue(protocol.is_bound())
        self.assertEqual(protocol.state, protocol.BOUND_STATE_RX)

    @inlineCallbacks
    def test_partial_pdu_data_received(self):
        protocol = yield self.get_protocol()
        calls = []
        protocol.handle_deliver_sm = calls.append
        yield self.fake_smsc.bind()
        deliver_sm = DeliverSM(1, short_message='foo')
        pdu = deliver_sm.get_bin()
        half = len(pdu) / 2
        pdu_part1, pdu_part2 = pdu[:half], pdu[half:]
        yield self.fake_smsc.send_bytes(pdu_part1)
        self.assertEqual([], calls)
        yield self.fake_smsc.send_bytes(pdu_part2)
        [handled_pdu] = calls
        self.assertEqual(command_id(handled_pdu), 'deliver_sm')
        self.assertEqual(seq_no(handled_pdu), 1)
        self.assertEqual(short_message(handled_pdu), 'foo')

    @inlineCallbacks
    def test_unsupported_command_id(self):
        protocol = yield self.get_protocol()
        calls = []
        protocol.on_unsupported_command_id = calls.append
        invalid_pdu = {
            'header': {
                'command_id': 'foo',
            }
        }
        protocol.on_pdu(invalid_pdu)
        self.assertEqual(calls, [invalid_pdu])
Code example #47
File: test_persistence.py Project: maskofG/flocker
class LeasesTests(AsyncTestCase):
    """
    Tests for ``LeaseService`` and ``update_leases``.
    """
    def setUp(self):
        super(LeasesTests, self).setUp()
        self.clock = Clock()
        self.persistence_service = ConfigurationPersistenceService(
            self.clock, FilePath(self.mktemp()))
        self.persistence_service.startService()
        self.addCleanup(self.persistence_service.stopService)

    def test_update_leases_saves_changed_leases(self):
        """
        ``update_leases`` only changes the leases stored in the configuration.
        """
        node_id = uuid4()
        dataset_id = uuid4()

        original_leases = Leases().acquire(
            datetime.fromtimestamp(0, UTC), uuid4(), node_id)

        def update(leases):
            return leases.acquire(
                datetime.fromtimestamp(1000, UTC), dataset_id, node_id)

        d = self.persistence_service.save(
            LATEST_TEST_DEPLOYMENT.set(leases=original_leases))
        d.addCallback(
            lambda _: update_leases(update, self.persistence_service))

        def updated(_):
            self.assertEqual(
                self.persistence_service.get(),
                LATEST_TEST_DEPLOYMENT.set(leases=update(original_leases)))
        d.addCallback(updated)
        return d

    def test_update_leases_result(self):
        """
        ``update_leases`` returns a ``Deferred`` firing with the updated
        ``Leases`` instance.
        """
        node_id = uuid4()
        dataset_id = uuid4()
        original_leases = Leases()

        def update(leases):
            return leases.acquire(
                datetime.fromtimestamp(1000, UTC), dataset_id, node_id)
        d = update_leases(update, self.persistence_service)

        def updated(updated_leases):
            self.assertEqual(updated_leases, update(original_leases))
        d.addCallback(updated)
        return d

    def test_expired_lease_removed(self):
        """
        A lease that has expired is removed from the persisted
        configuration.
        """
        timestep = 100
        node_id = uuid4()
        ids = uuid4(), uuid4()
        # First dataset lease expires at timestep:
        now = self.clock.seconds()
        leases = Leases().acquire(
            datetime.fromtimestamp(now, UTC), ids[0], node_id, timestep)
        # Second dataset lease expires at timestep * 2:
        leases = leases.acquire(
            datetime.fromtimestamp(now, UTC), ids[1], node_id, timestep * 2)
        new_config = Deployment(leases=leases)
        d = self.persistence_service.save(new_config)

        def saved(_):
            self.clock.advance(timestep - 1)  # 99
            before_first_expire = self.persistence_service.get().leases
            self.clock.advance(2)  # 101
            after_first_expire = self.persistence_service.get().leases
            self.clock.advance(timestep - 2)  # 199
            before_second_expire = self.persistence_service.get().leases
            self.clock.advance(2)  # 201
            after_second_expire = self.persistence_service.get().leases

            self.assertTupleEqual(
                (before_first_expire, after_first_expire,
                 before_second_expire, after_second_expire),
                (leases, leases.remove(ids[0]), leases.remove(ids[0]),
                 leases.remove(ids[0]).remove(ids[1])))
        d.addCallback(saved)
        return d

    @capture_logging(None)
    def test_expire_lease_logging(self, logger):
        """
        An expired lease is logged.
        """
        node_id = uuid4()
        dataset_id = uuid4()
        leases = Leases().acquire(
            datetime.fromtimestamp(self.clock.seconds(), UTC),
            dataset_id, node_id, 1)

        d = self.persistence_service.save(Deployment(leases=leases))

        def saved(_):
            logger.reset()
            self.clock.advance(1000)
            assertHasMessage(self, logger, _LOG_EXPIRE, {
                u"dataset_id": dataset_id, u"node_id": node_id})
        d.addCallback(saved)
        return d
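
LeasesTests drives lease expiry entirely with Clock.advance() around the configured lifetimes. A generic sketch of that expiry pattern, with an invented Expiring container standing in for flocker's Leases and persistence service:

from twisted.internet.task import Clock

class Expiring(object):
    # Hypothetical container; the real leases live in the persisted configuration.
    def __init__(self, reactor):
        self._reactor = reactor
        self._items = {}

    def acquire(self, key, lifetime):
        self._items[key] = self._reactor.callLater(lifetime, self._items.pop, key)

    def held(self):
        return set(self._items)

clock = Clock()
leases = Expiring(clock)
leases.acquire('dataset-1', 100)
leases.acquire('dataset-2', 200)
clock.advance(99)
assert leases.held() == {'dataset-1', 'dataset-2'}
clock.advance(2)                 # just past the first lifetime
assert leases.held() == {'dataset-2'}
clock.advance(100)               # and past the second
assert leases.held() == set()
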