Code Example #1
File: test_loop.py Project: aminembarki/flocker
    def test_convergence_done_start_new_iteration(self):
        """
        After a short delay, an FSM completing the changes from one convergence
        iteration starts another iteration.
        """
        local_state = NodeState(hostname=b'192.0.2.123')
        local_state2 = NodeState(hostname=b'192.0.2.123')
        configuration = Deployment(nodes=frozenset([to_node(local_state)]))
        state = DeploymentState(nodes=[local_state])
        action = ControllableAction(result=succeed(None))
        # Because the second action's result is an unfired Deferred, the second
        # iteration will never finish; applying its changes waits for this
        # Deferred to fire.
        action2 = ControllableAction(result=Deferred())
        deployer = ControllableDeployer(
            [succeed(local_state), succeed(local_state2)],
            [action, action2])
        client = self.successful_amp_client([local_state, local_state2])
        reactor = Clock()
        loop = build_convergence_loop_fsm(reactor, deployer)
        loop.receive(_ClientStatusUpdate(
            client=client, configuration=configuration, state=state))
        reactor.advance(1.0)
        # Calculating actions happened, the result was run... and then we did
        # the whole thing again:
        self.assertEqual((deployer.calculate_inputs, client.calls),
                         ([(local_state, configuration, state),
                           (local_state2, configuration, state)],
                          [(NodeStateCommand, dict(node_state=local_state)),
                           (NodeStateCommand, dict(node_state=local_state2))]))
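
All of the examples on this page share one testing pattern: instead of the real reactor, the code under test is handed a twisted.internet.task.Clock, whose time only moves when the test calls advance(). A minimal, self-contained sketch of that pattern (independent of any of the projects listed here):

from twisted.internet.task import Clock

clock = Clock()
fired = []

# Schedule work exactly as production code would do via the reactor.
clock.callLater(2.0, fired.append, "done")

assert fired == []        # nothing happens until time is advanced
clock.advance(1.9)
assert fired == []        # still before the scheduled time
clock.advance(0.1)
assert fired == ["done"]  # the delayed call ran deterministically
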
Code Example #2
File: test_protocol.py Project: deepakhajare/maas
class SuccessfulAsyncDispatch(unittest.TestCase):

    def setUp(self):
        self.clock = Clock()
        self.tmp_dir_path = tempfile.mkdtemp()
        with FilePath(self.tmp_dir_path).child('nonempty').open('w') as fd:
            fd.write('Something uninteresting')
        self.backend = FilesystemAsyncBackend(self.tmp_dir_path, self.clock)
        self.tftp = TFTP(self.backend, self.clock)

    def test_get_reader_defers(self):
        rrq_datagram = RRQDatagram('nonempty', 'NetASCiI', {})
        rrq_addr = ('127.0.0.1', 1069)
        rrq_mode = "octet"
        d = self.tftp._startSession(rrq_datagram, rrq_addr, rrq_mode)
        self.assertFalse(d.called)
        self.clock.advance(1)
        self.assertTrue(d.called)
        self.assertTrue(IReader.providedBy(d.result.backend))

    def test_get_writer_defers(self):
        wrq_datagram = WRQDatagram('foobar', 'NetASCiI', {})
        wrq_addr = ('127.0.0.1', 1069)
        wrq_mode = "octet"
        d = self.tftp._startSession(wrq_datagram, wrq_addr, wrq_mode)
        self.assertFalse(d.called)
        self.clock.advance(1)
        self.assertTrue(d.called)
        self.assertTrue(IWriter.providedBy(d.result.backend))
Code Example #3
File: test_session.py Project: isaacm/mimic
    def test_impersonation(self):
        """
        SessionStore.session_for_impersonation will return a session that can
        be retrieved by token_id but not username.
        """
        clock = Clock()
        sessions = SessionStore(clock)
        A_LITTLE = 1234
        clock.advance(A_LITTLE)
        A_LOT = 65432
        a = sessions.session_for_impersonation("pretender", A_LOT)
        a_prime = sessions.session_for_impersonation("pretender", A_LOT)
        self.assertIdentical(a, a_prime)
        b = sessions.session_for_token(a.token)
        self.assertEqual(
            a.expires, datetime.utcfromtimestamp(A_LITTLE + A_LOT))
        self.assertIdentical(a, b)
        c = sessions.session_for_username_password("pretender",
                                                   "not a password")
        self.assertNotIdentical(a, c)
        self.assertEqual(a.username, c.username)
        self.assertEqual(a.tenant_id, c.tenant_id)

        # Right now all data_for_api cares about is hashability; this may need
        # to change if it comes to rely upon its argument actually being an API
        # mock.
        same_api = 'not_an_api'

        username_data = c.data_for_api(same_api, list)
        token_data = b.data_for_api(same_api, list)
        impersonation_data = a.data_for_api(same_api, list)

        self.assertIs(username_data, impersonation_data)
        self.assertIs(token_data, impersonation_data)
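
A detail the impersonation test above relies on is that a fresh Clock starts at time zero, so seconds() after advance() maps directly onto the epoch offsets used with datetime.utcfromtimestamp. A small sketch of that relationship (the numbers simply mirror A_LITTLE and A_LOT above):

from datetime import datetime
from twisted.internet.task import Clock

clock = Clock()
assert clock.seconds() == 0          # a new Clock starts at time 0

clock.advance(1234)
assert clock.seconds() == 1234

# Code under test can turn "now + lifetime" into an absolute expiry time:
expires = datetime.utcfromtimestamp(clock.seconds() + 65432)
assert expires == datetime.utcfromtimestamp(1234 + 65432)
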
Code Example #4
    def test_ignoreAlreadyAccepting(self):
        """
        If the client sees an event change a second time before
        responding to an invitation found on it during the first
        change notification, the second change notification does not
        generate another accept attempt.
        """
        clock = Clock()
        randomDelay = 7
        vevent = Component.fromString(INVITED_EVENT)
        attendees = tuple(vevent.mainComponent().properties('ATTENDEE'))
        userNumber = int(attendees[1].parameterValue('CN').split(None, 1)[1])
        calendarURL = '/some/calendar/'
        calendar = Calendar(
            caldavxml.calendar, set(('VEVENT',)), u'calendar', calendarURL, None)
        client = StubClient(userNumber, self.mktemp())
        client._calendars[calendarURL] = calendar
        event = Event(client.serializeLocation(), calendarURL + u'1234.ics', None, vevent)
        client._events[event.url] = event
        accepter = Accepter(clock, self.sim, client, userNumber)
        accepter.random = Deterministic()

        def _gauss(mu, sigma):
            return randomDelay
        accepter.random.gauss = _gauss
        accepter.eventChanged(event.url)
        accepter.eventChanged(event.url)
        clock.advance(randomDelay)
Code Example #5
File: test_resource.py Project: reaperhulk/mimic
    def test_tick(self):
        """
        ``/mimic/v1.1/tick`` (handled by :func:`MimicRoot.advance_time`)
        advances the clock associated with the service.
        """
        clock = Clock()

        def do():
            do.done = True

        do.done = False
        clock.callLater(3.5, do)
        core = MimicCore(clock, [])
        root = MimicRoot(core, clock).app.resource()
        self.assertEqual(do.done, False)
        jreq = json_request(
            self, root, "POST", "/mimic/v1.1/tick", body={"amount": 3.6}
        )
        [response, json_content] = self.successResultOf(jreq)
        self.assertEqual(response.code, 200)
        expected = {
            'advanced': 3.6,
            'now': '1970-01-01T00:00:03.600000Z',
        }
        self.assertEqual(json_content, expected)
        self.assertEqual(do.done, True)
Code Example #6
File: test_loop.py Project: punalpatel/flocker
    def test_convergence_sent_state_fail_resends(self):
        """
        If sending state to the control node fails, the next iteration will send
        state even if the state hasn't changed.
        """
        local_state = NodeState(hostname=u'192.0.2.123')
        configuration = Deployment(nodes=[to_node(local_state)])
        state = DeploymentState(nodes=[local_state])
        deployer = ControllableDeployer(
            local_state.hostname,
            [succeed(local_state), succeed(local_state.copy())],
            [no_action(), no_action()])
        client = self.make_amp_client(
            [local_state, local_state.copy()], succeed=False
        )
        reactor = Clock()
        loop = build_convergence_loop_fsm(reactor, deployer)
        loop.receive(_ClientStatusUpdate(
            client=client, configuration=configuration, state=state))
        reactor.advance(1.0)

        # Calculating actions happened, the result was run... and then we did
        # the whole thing again:
        self.assertTupleEqual(
            (deployer.calculate_inputs, client.calls),
            (
                # Check that the loop has run twice
                [(local_state, configuration, state),
                 (local_state, configuration, state)],
                # And that state was re-sent even though it remained unchanged
                [(NodeStateCommand, dict(state_changes=(local_state,))),
                 (NodeStateCommand, dict(state_changes=(local_state,)))],
            )
        )
Code Example #7
File: test_loop.py Project: punalpatel/flocker
    def test_convergence_error_start_new_iteration(self, logger):
        """
        Even if the convergence fails, a new iteration is started anyway.
        """
        local_state = NodeState(hostname=u'192.0.2.123')
        configuration = Deployment(nodes=frozenset([to_node(local_state)]))
        state = DeploymentState(nodes=[local_state])
        action = ControllableAction(result=fail(RuntimeError()))
        # The first discovery succeeds, leading to the failing action; the
        # second discovery will just wait for its Deferred to fire. Thus we
        # expect to finish the test in the discovery state.
        deployer = ControllableDeployer(
            local_state.hostname,
            [succeed(local_state), Deferred()],
            [action])
        client = self.make_amp_client([local_state])
        reactor = Clock()
        loop = build_convergence_loop_fsm(reactor, deployer)
        self.patch(loop, "logger", logger)
        loop.receive(_ClientStatusUpdate(
            client=client, configuration=configuration, state=state))
        reactor.advance(1.0)
        # Calculating actions happened, the result was run and caused an
        # error... but we started the loop again and are thus in the discovery
        # state, which we can tell because all faked local states have been
        # consumed:
        self.assertEqual(len(deployer.local_states), 0)
Code Example #8
    def test_stopService_while_retrying(self):
        s, e, f = self.make_reconnector()
        clock = Clock()
        r = s._delayedRetry = clock.callLater(1.0, lambda: None)
        yield s.stopService()
        self.assertTrue(r.cancelled)
        self.assertIdentical(s._delayedRetry, None)
Code Example #9
File: test_loop.py Project: punalpatel/flocker
    def test_convergence_done_unchanged_notify(self):
        """
        An FSM doing convergence that discovers state unchanged from the last
        state acknowledged by the control service does not re-send that state.
        """
        local_state = NodeState(hostname=u'192.0.2.123')
        configuration = Deployment(nodes=[to_node(local_state)])
        state = DeploymentState(nodes=[local_state])
        deployer = ControllableDeployer(
            local_state.hostname,
            [succeed(local_state), succeed(local_state.copy())],
            [no_action(), no_action()]
        )
        client = self.make_amp_client([local_state])
        reactor = Clock()
        loop = build_convergence_loop_fsm(reactor, deployer)
        loop.receive(_ClientStatusUpdate(
            client=client, configuration=configuration, state=state))
        reactor.advance(1.0)

        # Calculating actions happened, the result was run... and then we did
        # the whole thing again:
        self.assertEqual(
            (deployer.calculate_inputs, client.calls),
            (
                # Check that the loop has run twice
                [(local_state, configuration, state),
                 (local_state, configuration, state)],
                # But that state was only sent once.
                [(NodeStateCommand, dict(state_changes=(local_state,)))],
            )
        )
Code Example #10
    def test_sendHeartbeat(self):

        xmppConfig = Config(PListConfigProvider(DEFAULT_CONFIG))
        xmppConfig.Notifications["Services"]["XMPPNotifier"]["Enabled"] = True
        xmppConfig.ServerHostName = "server.example.com"
        xmppConfig.HTTPPort = 80

        clock = Clock()
        xmlStream = StubXmlStream()
        settings = { "ServiceAddress" : "pubsub.example.com", "JID" : "jid",
            "Password" : "password", "KeepAliveSeconds" : 5,
            "NodeConfiguration" : { "pubsub#deliver_payloads" : "1" },
            "HeartbeatMinutes" : 30 }
        notifier = XMPPNotifier(settings, reactor=clock, heartbeat=True,
            roster=False, configOverride=xmppConfig)
        factory = XMPPNotificationFactory(notifier, settings, reactor=clock,
            keepAlive=False)
        factory.connected(xmlStream)
        factory.authenticated(xmlStream)

        self.assertEquals(len(xmlStream.elements), 1)
        heartbeat = xmlStream.elements[0]
        self.assertEquals(heartbeat.name, "iq")

        clock.advance(1800)

        self.assertEquals(len(xmlStream.elements), 2)
        heartbeat = xmlStream.elements[1]
        self.assertEquals(heartbeat.name, "iq")

        factory.disconnected(xmlStream)
        clock.advance(1800)
        self.assertEquals(len(xmlStream.elements), 2)
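
The heartbeat and presence tests (Examples #10 and #21) pass the Clock in as the reactor so that whatever periodic scheduling the notifier does internally is driven by advance(). A common way such periodic work is written in Twisted is with LoopingCall, which accepts a substitute clock for exactly this kind of test; a minimal sketch, not taken from the project above and assuming only the standard Twisted API:

from twisted.internet.task import Clock, LoopingCall

clock = Clock()
beats = []

lc = LoopingCall(beats.append, "heartbeat")
lc.clock = clock            # use the fake clock instead of the global reactor
lc.start(1800, now=True)    # fire immediately, then every 30 minutes

assert len(beats) == 1      # the immediate first beat
clock.advance(1800)
assert len(beats) == 2      # one more after 30 simulated minutes

lc.stop()
clock.advance(1800)
assert len(beats) == 2      # stopped: no further beats
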
Code Example #11
    def test_limited_exceptions(self):
        """
        By default, ``retry_failure`` retries on any exception. However, if
        it's given an iterable of expected exception types (exactly as one
        might pass to ``Failure.check``), then it will only retry if one of
        *those* exceptions is raised.
        """
        steps = [0.1, 0.2]

        result = object()
        type_error = Failure(TypeError("bad type"))

        results = [
            Failure(ValueError("bad value")),
            type_error,
            succeed(result),
        ]

        def function():
            return results.pop(0)

        clock = Clock()

        d = retry_failure(clock, function, expected=[ValueError], steps=steps)
        self.assertNoResult(d)

        clock.advance(0.1)
        self.assertEqual(self.failureResultOf(d), type_error)
Code Example #12
    def test_iterates(self, logger):
        """
        If the predicate returns something falsey followed by something truthy,
        then ``loop_until`` returns it immediately.
        """
        result = object()
        results = [None, result]

        def predicate():
            return results.pop(0)
        clock = Clock()

        d = loop_until(clock, predicate)

        self.assertNoResult(d)

        clock.advance(0.1)
        self.assertEqual(
            self.successResultOf(d),
            result)

        action = LoggedAction.of_type(logger.messages, LOOP_UNTIL_ACTION)[0]
        assertContainsFields(self, action.start_message, {
            'predicate': predicate,
        })
        assertContainsFields(self, action.end_message, {
            'result': result,
        })
        self.assertTrue(action.succeeded)
        message = LoggedMessage.of_type(
            logger.messages, LOOP_UNTIL_ITERATION_MESSAGE)[0]
        self.assertEqual(action.children, [message])
        assertContainsFields(self, message.message, {
            'result': None,
        })
Code Example #13
File: test_loop.py Project: uedzen/flocker
    def test_convergence_done_delays_new_iteration_ack(self):
        """
        A state update isn't sent if the control node hasn't acknowledged the
        last state update.
        """
        self.local_state = local_state = NodeState(hostname=u'192.0.2.123')
        self.configuration = configuration = Deployment()
        self.cluster_state = received_state = DeploymentState(nodes=[])
        self.action = action = ControllableAction(result=succeed(None))
        deployer = ControllableDeployer(
            local_state.hostname, [succeed(local_state)], [action]
        )
        client = self.make_amp_client([local_state])
        reactor = Clock()
        loop = build_convergence_loop_fsm(reactor, deployer)
        loop.receive(_ClientStatusUpdate(
            # We don't want to receive the acknowledgment of the
            # state update.
            client=DelayedAMPClient(client),
            configuration=configuration,
            state=received_state))

        # Wait for the delay in the convergence loop to pass.  This won't do
        # anything, since we are also waiting for state to be acknowledged.
        reactor.advance(1.0)

        # Only one status update was sent.
        self.assertListEqual(
            client.calls,
            [(NodeStateCommand, dict(state_changes=(local_state,)))],
        )
Code Example #14
    def test_too_many_iterations(self):
        """
        If ``retry_failure`` fails more times than there are steps provided, it
        errors back with the last failure.
        """
        steps = [0.1]

        result = object()
        failure = Failure(ValueError("really bad value"))

        results = [
            Failure(ValueError("bad value")),
            failure,
            succeed(result),
        ]

        def function():
            return results.pop(0)

        clock = Clock()

        d = retry_failure(clock, function, steps=steps)
        self.assertNoResult(d)

        clock.advance(0.1)
        self.assertEqual(self.failureResultOf(d), failure)
Code Example #15
File: test_task.py Project: vitaminmoo/unnaturalcode
    def test_taskStarted_deferred_doesnt_delay_polling(self):
        # If taskStarted returns a deferred, we don't wait for it to fire
        # before polling again.
        class DeferredStartingConsumer(NoopTaskConsumer):
            def taskStarted(self, task):
                started.append(task)
                return Deferred()

        interval = self.factory.getUniqueInteger()
        clock = Clock()
        produced = []
        started = []

        def producer():
            value = self.factory.getUniqueInteger()
            produced.append(value)
            return value

        task_source = self.makeTaskSource(task_producer=producer, interval=interval, clock=clock)
        consumer = DeferredStartingConsumer()
        task_source.start(consumer)
        # The call to start polls once and taskStarted is called.
        self.assertEqual((1, 1), (len(produced), len(started)))
        # Even though taskStarted returned a deferred which has not yet fired,
        # we poll again after 'interval' seconds.
        clock.advance(interval)
        self.assertEqual((2, 2), (len(produced), len(started)))
Code Example #16
File: test_task.py Project: vitaminmoo/unnaturalcode
    def test_taskProductionFailed_deferred_doesnt_delay_polling(self):
        # If taskProductionFailed returns a deferred, we don't wait for it to
        # fire before polling again.
        class DeferredFailingConsumer(NoopTaskConsumer):
            def taskProductionFailed(self, reason):
                failures.append(reason)
                return Deferred()

        interval = self.factory.getUniqueInteger()
        clock = Clock()
        produced = []
        failures = []

        def producer():
            exc = RuntimeError()
            produced.append(exc)
            raise exc

        task_source = self.makeTaskSource(task_producer=producer, interval=interval, clock=clock)
        consumer = DeferredFailingConsumer()
        task_source.start(consumer)
        # The call to start polls once and taskProductionFailed is called.
        self.assertEqual((1, 1), (len(produced), len(failures)))
        # Even though taskProductionFailed returned a deferred which has not
        # yet fired, we poll again after 'interval' seconds.
        clock.advance(interval)
        self.assertEqual((2, 2), (len(produced), len(failures)))
Code Example #17
File: test_task.py Project: vitaminmoo/unnaturalcode
    def test_only_one_producer_call_at_once(self):
        # If the task producer returns a Deferred, it will not be called again
        # until that deferred has fired, even if takes longer than the
        # interval we're polling at.
        tasks_called = []
        produced_deferreds = []

        def producer():
            deferred = Deferred()
            produced_deferreds.append(deferred)
            return deferred

        clock = Clock()
        interval = self.factory.getUniqueInteger()
        task_source = self.makeTaskSource(task_producer=producer, interval=interval, clock=clock)
        task_source.start(AppendingTaskConsumer(tasks_called))
        # The call to start calls producer.  It returns a deferred which has
        # not been fired.
        self.assertEqual(len(produced_deferreds), 1)
        # If 'interval' seconds passes and the deferred has still not fired
        # the producer is not called again.
        clock.advance(interval)
        self.assertEqual(len(produced_deferreds), 1)
        # If the task-getting deferred is fired and more time passes, we poll
        # again.
        produced_deferreds[0].callback(None)
        clock.advance(interval)
        self.assertEqual(len(produced_deferreds), 2)
Code Example #18
    def test_get_probe_timeout(self):
        """
        CreateContainer probe times-out if get_probe runs too long.
        """
        clock = Clock()

        node_id = uuid4()
        node = Node(uuid=node_id, public_address=IPAddress('10.0.0.1'))
        control_service = FakeFlockerClient([node], node_id)

        cluster = BenchmarkCluster(
            IPAddress('10.0.0.1'),
            lambda reactor: control_service,
            {},
            None,
        )
        operation = CreateContainer(clock, cluster)
        d = operation.get_probe()

        clock.advance(DEFAULT_TIMEOUT.total_seconds())

        # No control_service.synchronize_state() call, so cluster state
        # never shows container is created.

        # The Deferred fails if container not created within 10 minutes.
        self.failureResultOf(d)
Code Example #19
        def test_connect_no_auth_method(self, fake_sleep):
            endpoint = Mock()

            directlyProvides(endpoint, IStreamClientEndpoint)
            component = Component(
                transports={
                    "type": "websocket",
                    "url": "ws://127.0.0.1/ws",
                    "endpoint": endpoint,
                },
                is_fatal=lambda e: True,
            )

            def connect(factory, **kw):
                proto = factory.buildProtocol('boom')
                proto.makeConnection(Mock())

                from autobahn.websocket.protocol import WebSocketProtocol
                from base64 import b64encode
                from hashlib import sha1
                key = proto.websocket_key + WebSocketProtocol._WS_MAGIC
                proto.data = (
                    b"HTTP/1.1 101 Switching Protocols\x0d\x0a"
                    b"Upgrade: websocket\x0d\x0a"
                    b"Connection: upgrade\x0d\x0a"
                    b"Sec-Websocket-Protocol: wamp.2.json\x0d\x0a"
                    b"Sec-Websocket-Accept: " + b64encode(sha1(key).digest()) + b"\x0d\x0a\x0d\x0a"
                )
                proto.processHandshake()

                from autobahn.wamp import role
                subrole = role.RoleSubscriberFeatures()

                msg = Hello(u"realm", roles=dict(subscriber=subrole), authmethods=[u"anonymous"])
                serializer = JsonSerializer()
                data, is_binary = serializer.serialize(msg)
                proto.onMessage(data, is_binary)

                msg = Abort(reason=u"wamp.error.no_auth_method")
                proto.onMessage(*serializer.serialize(msg))
                proto.onClose(False, 100, u"wamp.error.no_auth_method")

                return succeed(proto)
            endpoint.connect = connect

            # XXX it would actually be nicer if we *could* support
            # passing a reactor in here, but the _batched_timer =
            # make_batched_timer() stuff (slash txaio in general)
            # makes this "hard".
            reactor = Clock()
            with replace_loop(reactor):
                with self.assertRaises(RuntimeError) as ctx:
                    d = component.start(reactor=reactor)
                    # make sure we fire all our time-outs
                    reactor.advance(3600)
                    yield d
            self.assertIn(
                "Exhausted all transport",
                str(ctx.exception)
            )
Code Example #20
File: test_xml_over_tcp.py Project: AndrewCvekl/vumi
    def test_timeout(self):
        request_body = (
            "<ENQRequest>"
            "<requestId>0</requestId>"
            "<enqCmd>ENQUIRELINK</enqCmd>"
            "</ENQRequest>")
        expected_request_packet = utils.mk_packet('0', request_body)

        clock = Clock()
        self.client.clock = clock
        self.client.enquire_link_interval = 120
        self.client.timeout_period = 20
        self.client.authenticated = True
        self.client.start_periodic_enquire_link()

        # wait for the first enquire link request
        received_request_packet = yield self.server.wait_for_data()
        self.assertEqual(expected_request_packet, received_request_packet)

        # advance to just before the timeout should occur
        clock.advance(19.9)
        self.assertFalse(self.client.disconnected)

        # advance to just after the timeout should occur
        clock.advance(0.1)
        self.assertTrue(self.client.disconnected)
        self.assert_in_log(
            'msg',
            "No enquire link response received after 20 seconds, "
            "disconnecting")
Code Example #21
    def test_sendPresence(self):
        clock = Clock()
        xmlStream = StubXmlStream()
        settings = { "ServiceAddress" : "pubsub.example.com", "JID" : "jid",
            "NodeConfiguration" : { "pubsub#deliver_payloads" : "1" },
            "Password" : "password", "KeepAliveSeconds" : 5 }
        notifier = XMPPNotifier(settings, reactor=clock, heartbeat=False)
        factory = XMPPNotificationFactory(notifier, settings, reactor=clock)
        factory.connected(xmlStream)
        factory.authenticated(xmlStream)

        self.assertEquals(len(xmlStream.elements), 2)
        presence = xmlStream.elements[0]
        self.assertEquals(presence.name, "presence")
        iq = xmlStream.elements[1]
        self.assertEquals(iq.name, "iq")

        clock.advance(5)

        self.assertEquals(len(xmlStream.elements), 3)
        presence = xmlStream.elements[2]
        self.assertEquals(presence.name, "presence")

        factory.disconnected(xmlStream)
        clock.advance(5)
        self.assertEquals(len(xmlStream.elements), 3)
Code Example #22
File: collector.py Project: inkhey/mmc
    def test04_get_only_valid_requests(self):
        """
        - create a lot of requests marked as 'expired'
        - wait some time
        - create another lot of requests marked as 'valid'
        -> check if only 'valid' requests present
        """

        clock = Clock()

        sessions = Sessions(False, 10, clock)
        collector = Collector(sessions)

        dl = []
        for i in xrange(10):
            d = collector.queue_and_process("192.168.45.12", "expired")
            dl.append(d)

        clock.advance(15)

        for i in xrange(10):
            d = collector.queue_and_process("192.168.45.12", "valid")
            dl.append(d)

        dfl = DeferredList(dl)
        @dfl.addCallback
        def get_result(ignored):

            for i in xrange(10):
                uid, ip, request = collector.get()
                self.assertEqual(request, "valid")
Code Example #23
    def assert_mutate_function_retries_until_timeout(
            self, mutate_callable, expected_args, timeout=60):
        """
        Assert that some CLB function that mutates the CLB will retry on
        pending update until the function times out.

        :param mutate_callable: a callable which takes a clb argument and
            a clock argument - this callable should call the CLB's mutate
            function with the required arguments and return the function's
            return value.  For example:
            ``lambda clb, clk: clb.update_node(..., clock=clk)``
        :param expected_args: What are the expected treq arguments?  This
            should be an array of
            [method, url, (expected args, expected kwargs)]
        :param int timeout: When does your function time out retrying?
        """
        clock = Clock()
        clb = self.get_clb(*(expected_args + pending_update_response))

        d = mutate_callable(clb, clock)
        self.assertNoResult(d)

        for _ in range((timeout - 1) / 3):
            clock.pump([3])
            self.assertNoResult(d)

        clock.pump([3])
        self.failureResultOf(d, TimedOutError)
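
Examples #23 through #26 use Clock.pump() rather than advance(): pump() takes an iterable of intervals and advances the clock by each one in turn, which is convenient for stepping through several retry or sampling periods in a single call. A minimal sketch of the difference:

from twisted.internet.task import Clock

clock = Clock()
ticks = []
for delay in (3, 6, 9):
    clock.callLater(delay, ticks.append, delay)

clock.pump([3, 3, 3])   # equivalent to three advance(3) calls
assert ticks == [3, 6, 9]
assert clock.seconds() == 9
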
Code Example #24
    def assert_mutate_function_retries_until_success(
            self, mutate_callable, expected_args, success_response,
            expected_result):
        """
        Assert that some CLB function that mutates the CLB will retry on
        pending update until the function succeeds.

        :param mutate_callable: a callable which takes a clb argument and
            a clock argument - this callable should call the CLB's mutate
            function with the required arguments and return the function's
            return value.  For example:
            ``lambda clb, clk: clb.update_node(..., clock=clk)``
        :param expected_args: What are the expected treq arguments?  This
            should be an array of
            [method, url, (expected args, expected kwargs)]
        :param success_response: a tuple of (Response, string response body)
            which should be the successful response back from the API
        :param expected_result: What is the expected successful result of the
            function that is called by ``mutate_callable``
        """
        clock = Clock()
        clb = self.get_clb(*(expected_args + pending_update_response))

        d = mutate_callable(clb, clock)

        self.assertNoResult(d)
        clock.pump([3])
        self.assertNoResult(d)

        clb.treq = get_fake_treq(
            *([self] + expected_args + [success_response]))

        clock.pump([3])
        self.assertEqual(self.successResultOf(d), expected_result)
Code Example #25
    def test_read_request_load_succeeds(self):
        """
        ReadRequestLoadScenario starts and stops without collapsing.
        """
        c = Clock()

        node1 = Node(uuid=uuid4(), public_address=IPAddress('10.0.0.1'))
        node2 = Node(uuid=uuid4(), public_address=IPAddress('10.0.0.2'))
        cluster = BenchmarkCluster(
            node1.public_address,
            lambda reactor: FakeFlockerClient([node1, node2]),
            {node1.public_address, node2.public_address},
            default_volume_size=DEFAULT_VOLUME_SIZE
        )

        sample_size = 5
        s = ReadRequestLoadScenario(c, cluster, sample_size=sample_size)

        d = s.start()

        # Request rate samples are recorded every second and we need to
        # collect enough samples to establish the rate which is defined
        # by `sample_size`. Therefore, advance the clock by
        # `sample_size` seconds to obtain enough samples.
        c.pump(repeat(1, sample_size))
        s.maintained().addBoth(lambda x: self.fail())
        d.addCallback(lambda ignored: s.stop())
        self.successResultOf(d)
Code Example #26
    def test_scenario_throws_exception_when_rate_drops(self):
        """
        ReadRequestLoadScenario raises RequestRateTooLow if rate
        drops below the requested rate.

        Establish the requested rate by having the FakeFlockerClient
        respond to all requests, then lower the rate by dropping
        alternate requests. This should result in RequestRateTooLow
        being raised.
        """
        c = Clock()

        cluster = self.make_cluster(RequestDroppingFakeFlockerClient)
        sample_size = 5
        s = ReadRequestLoadScenario(c, cluster, sample_size=sample_size)

        s.start()

        # Advance the clock by `sample_size` seconds to establish the
        # requested rate.
        c.pump(repeat(1, sample_size))

        cluster.get_control_service(c).drop_requests = True

        # Advance the clock by 2 seconds so that a request is dropped
        # and a new rate which is below the target can be established.
        c.advance(2)

        failure = self.failureResultOf(s.maintained())
        self.assertIsInstance(failure.value, RequestRateTooLow)
Code Example #27
File: test_furnace.py Project: dkkline/bravo
    def test_timer_mega_drift(self):
        # Patch the clock.
        clock = Clock()
        self.tile.burning.clock = clock

        # we have more wood than we need and we can process 2 blocks
        # but we have space only for one
        self.tile.inventory.fuel[0] = Slot(blocks['sapling'].slot, 0, 10)
        self.tile.inventory.crafting[0] = Slot(blocks['sand'].slot, 0, 2)
        self.tile.inventory.crafted[0] = Slot(blocks['glass'].slot, 0, 63)
        self.tile.changed(self.factory, coords)

        # Pump the clock. Burn time is 20s.
        clock.advance(20)

        self.assertEqual(self.factory.world.chunk.states[0],
                         blocks["burning-furnace"].slot) # it was started...
        self.assertEqual(self.factory.world.chunk.states[1],
                         blocks["furnace"].slot) # ...and stopped at the end
        self.assertEqual(self.tile.inventory.fuel[0], (blocks['sapling'].slot, 0, 8))
        self.assertEqual(self.tile.inventory.crafting[0], (blocks['sand'].slot, 0, 1))
        self.assertEqual(self.tile.inventory.crafted[0], (blocks['glass'].slot, 0, 64))
        headers = [header[0] for header, params in self.protocol.write_packet_calls]
        # 2 updates for the fuel slot (2 saplings burned)
        # 1 update for the crafting slot (1 sand block melted)
        # 1 update for the crafted slot (1 glass block crafted)
        self.assertEqual(headers.count('window-slot'), 4)
Code Example #28
File: test_furnace.py Project: dkkline/bravo
    def test_glass_from_sand_on_wood_multiple(self):
        """
        Crafting two glass, from two sand, using ten saplings, should take
        20s and only use four saplings.
        """

        # Patch the clock.
        clock = Clock()
        self.tile.burning.clock = clock

        self.tile.inventory.fuel[0] = Slot(blocks['sapling'].slot, 0, 10)
        self.tile.inventory.crafting[0] = Slot(blocks['sand'].slot, 0, 2)
        self.tile.changed(self.factory, coords)

        # Pump the clock. Burn time is 20s.
        clock.pump([0.5] * 40)

        self.assertEqual(self.factory.world.chunk.states[0],
                         blocks["burning-furnace"].slot) # it was started...
        self.assertEqual(self.factory.world.chunk.states[1],
                         blocks["furnace"].slot) # ...and stopped at the end
        # 2 sands take 20s to smelt, only 4 saplings needed
        self.assertEqual(self.tile.inventory.fuel[0], (blocks['sapling'].slot, 0, 6))
        self.assertEqual(self.tile.inventory.crafting[0], None)
        self.assertEqual(self.tile.inventory.crafted[0], (blocks['glass'].slot, 0, 2))
Code Example #29
    def test_changeEventAttendeePreconditionFailed(self):
        """
        If the attempt to accept an invitation fails because of an
        unmet precondition (412), the event is re-retrieved and the
        PUT is re-issued with the new data.
        """
        clock = Clock()
        userNumber = 2
        client = StubClient(userNumber, self.mktemp())
        randomDelay = 3

        calendarURL = '/some/calendar/'
        calendar = Calendar(
            caldavxml.calendar, set(('VEVENT',)), u'calendar', calendarURL, None)
        client._calendars[calendarURL] = calendar

        vevent = Component.fromString(INVITED_EVENT)
        event = Event(client.serializeLocation(), calendarURL + u'1234.ics', None, vevent)
        client._setEvent(event.url, event)

        accepter = Accepter(clock, self.sim, client, userNumber)
        accepter.setParameters(acceptDelayDistribution=Deterministic(randomDelay))

        client.rescheduled.add(event.url)

        accepter.eventChanged(event.url)
        clock.advance(randomDelay)
Code Example #30
    def test_inboxReplyFailedDelete(self):
        """
        When an inbox item that contains a reply is seen by the client, it
        deletes it immediately.  If the delete fails, the appropriate response
        code is returned.
        """
        userNumber = 1
        clock = Clock()
        inboxURL = '/some/inbox/'
        vevent = Component.fromString(INBOX_REPLY)
        inbox = Calendar(
            caldavxml.schedule_inbox, set(), u'the inbox', inboxURL, None)
        client = StubClient(userNumber, self.mktemp())
        client._calendars[inboxURL] = inbox

        inboxEvent = Event(client.serializeLocation(), inboxURL + u'4321.ics', None, vevent)
        client._setEvent(inboxEvent.url, inboxEvent)
        client._failDeleteWithObject(inboxEvent.url, IncorrectResponseCode(
            NO_CONTENT,
            Response(
                ('HTTP', 1, 1), PRECONDITION_FAILED,
                'Precondition Failed', None, None))
        )
        accepter = Accepter(clock, self.sim, client, userNumber)
        accepter.eventChanged(inboxEvent.url)
        clock.advance(3)
        self.assertNotIn(inboxEvent.url, client._events)
        self.assertNotIn('4321.ics', inbox.events)
Code Example #31
File: test_util.py Project: ling-1/GETAiqiyiDanmu
    def __init__(self):
        Clock.__init__(self)
        self.udpPorts = {}
Code Example #32
    def setUp(self):
        super(TestSlaveConnectionTimeouts, self).setUp()
        self.slave_helper = self.useFixture(SlaveTestHelpers())
        self.clock = Clock()
Code Example #33
File: test_aprs.py Project: thejeshgn/shinysdr
    def setUp(self):
        self.clock = Clock()
        self.clock.advance(_dummy_receive_time)
        self.store = TelemetryStore(time_source=self.clock)
Code Example #34
File: proto_helpers.py Project: seaeast/twisted
    def __init__(self):
        MemoryReactor.__init__(self)
        Clock.__init__(self)
Code Example #35
class GAIEndpointTestCase(TestCase):
    """
    Test cases for L{GAIEndpoint}.
    """

    def makeEndpoint(self, host="abcd.example.com", port=4321):
        gaie = GAIEndpoint(self.clock, host, port)
        gaie.subEndpoint = self.subEndpoint
        gaie.deferToThread = self.deferToSomething
        return gaie


    def subEndpoint(self, reactor, host, port, contextFactory):
        ftcpe = FakeTCPEndpoint(reactor, host, port, contextFactory)
        self.fakeRealEndpoints.append(ftcpe)
        return ftcpe


    def deferToSomething(self, func, *a, **k):
        """
        Test replacement for L{deferToThread}, which can only call
        L{getaddrinfo}.
        """
        d = Deferred()
        if func is not getaddrinfo:
            self.fail("Only getaddrinfo should be invoked in a thread.")
        self.inThreads.append((d, func, a, k))
        return d


    def gaiResult(self, family, socktype, proto, canonname, sockaddr):
        """
        A call to L{getaddrinfo} has succeeded; invoke the L{Deferred} waiting
        on it.
        """
        d, _ignore_f, _ignore_a, _ignore_k = self.inThreads.pop(0)
        d.callback([(family, socktype, proto, canonname, sockaddr)])


    def setUp(self):
        """
        Set up!
        """
        self.inThreads = []
        self.clock = Clock()
        self.fakeRealEndpoints = []
        self.makeEndpoint()


    def test_simpleSuccess(self):
        """
        If C{getaddrinfo} gives one result, L{GAIEndpoint.connect} connects to it.
        """
        gaiendpoint = self.makeEndpoint()
        protos = []
        f = Factory()
        f.protocol = Protocol
        gaiendpoint.connect(f).addCallback(protos.append)
        WHO_CARES = 0
        WHAT_EVER = ""
        self.gaiResult(AF_INET, SOCK_STREAM, WHO_CARES, WHAT_EVER,
                       ("1.2.3.4", 4321))
        self.clock.advance(1.0)
        attempt = self.fakeRealEndpoints[0]._attempt
        attempt.callback(self.fakeRealEndpoints[0]._factory.buildProtocol(None))
        self.assertEqual(len(protos), 1)
Code Example #36
File: test_persistence.py Project: maskofG/flocker
class LeasesTests(AsyncTestCase):
    """
    Tests for ``LeaseService`` and ``update_leases``.
    """
    def setUp(self):
        super(LeasesTests, self).setUp()
        self.clock = Clock()
        self.persistence_service = ConfigurationPersistenceService(
            self.clock, FilePath(self.mktemp()))
        self.persistence_service.startService()
        self.addCleanup(self.persistence_service.stopService)

    def test_update_leases_saves_changed_leases(self):
        """
        ``update_leases`` only changes the leases stored in the configuration.
        """
        node_id = uuid4()
        dataset_id = uuid4()

        original_leases = Leases().acquire(
            datetime.fromtimestamp(0, UTC), uuid4(), node_id)

        def update(leases):
            return leases.acquire(
                datetime.fromtimestamp(1000, UTC), dataset_id, node_id)

        d = self.persistence_service.save(
            LATEST_TEST_DEPLOYMENT.set(leases=original_leases))
        d.addCallback(
            lambda _: update_leases(update, self.persistence_service))

        def updated(_):
            self.assertEqual(
                self.persistence_service.get(),
                LATEST_TEST_DEPLOYMENT.set(leases=update(original_leases)))
        d.addCallback(updated)
        return d

    def test_update_leases_result(self):
        """
        ``update_leases`` returns a ``Deferred`` firing with the updated
        ``Leases`` instance.
        """
        node_id = uuid4()
        dataset_id = uuid4()
        original_leases = Leases()

        def update(leases):
            return leases.acquire(
                datetime.fromtimestamp(1000, UTC), dataset_id, node_id)
        d = update_leases(update, self.persistence_service)

        def updated(updated_leases):
            self.assertEqual(updated_leases, update(original_leases))
        d.addCallback(updated)
        return d

    def test_expired_lease_removed(self):
        """
        A lease that has expired is removed from the persisted
        configuration.
        """
        timestep = 100
        node_id = uuid4()
        ids = uuid4(), uuid4()
        # First dataset lease expires at timestep:
        now = self.clock.seconds()
        leases = Leases().acquire(
            datetime.fromtimestamp(now, UTC), ids[0], node_id, timestep)
        # Second dataset lease expires at timestep * 2:
        leases = leases.acquire(
            datetime.fromtimestamp(now, UTC), ids[1], node_id, timestep * 2)
        new_config = Deployment(leases=leases)
        d = self.persistence_service.save(new_config)

        def saved(_):
            self.clock.advance(timestep - 1)  # 99
            before_first_expire = self.persistence_service.get().leases
            self.clock.advance(2)  # 101
            after_first_expire = self.persistence_service.get().leases
            self.clock.advance(timestep - 2)  # 199
            before_second_expire = self.persistence_service.get().leases
            self.clock.advance(2)  # 201
            after_second_expire = self.persistence_service.get().leases

            self.assertTupleEqual(
                (before_first_expire, after_first_expire,
                 before_second_expire, after_second_expire),
                (leases, leases.remove(ids[0]), leases.remove(ids[0]),
                 leases.remove(ids[0]).remove(ids[1])))
        d.addCallback(saved)
        return d

    @capture_logging(None)
    def test_expire_lease_logging(self, logger):
        """
        An expired lease is logged.
        """
        node_id = uuid4()
        dataset_id = uuid4()
        leases = Leases().acquire(
            datetime.fromtimestamp(self.clock.seconds(), UTC),
            dataset_id, node_id, 1)

        d = self.persistence_service.save(Deployment(leases=leases))

        def saved(_):
            logger.reset()
            self.clock.advance(1000)
            assertHasMessage(self, logger, _LOG_EXPIRE, {
                u"dataset_id": dataset_id, u"node_id": node_id})
        d.addCallback(saved)
        return d
Code Example #37
class TestComponentManagerProperStart(unittest.TestCase):
    def setUp(self):
        self.reactor = Clock()
        mocks.mock_conf_settings(self)
        self.component_manager = ComponentManager(
            skip_components=[
                DATABASE_COMPONENT, DHT_COMPONENT, HASH_ANNOUNCER_COMPONENT,
                STREAM_IDENTIFIER_COMPONENT, PEER_PROTOCOL_SERVER_COMPONENT,
                REFLECTOR_COMPONENT, UPNP_COMPONENT, HEADERS_COMPONENT,
                PAYMENT_RATE_COMPONENT, RATE_LIMITER_COMPONENT,
                EXCHANGE_RATE_MANAGER_COMPONENT
            ],
            reactor=self.reactor,
            wallet=mocks.FakeDelayedWallet,
            file_manager=mocks.FakeDelayedFileManager,
            blob_manager=mocks.FakeDelayedBlobManager)

    def tearDown(self):
        pass

    def test_proper_starting_of_components(self):
        self.component_manager.setup()
        self.assertTrue(self.component_manager.get_component('wallet').running)
        self.assertFalse(
            self.component_manager.get_component('blob_manager').running)
        self.assertFalse(
            self.component_manager.get_component('file_manager').running)

        self.reactor.advance(1)
        self.assertTrue(self.component_manager.get_component('wallet').running)
        self.assertTrue(
            self.component_manager.get_component('blob_manager').running)
        self.assertFalse(
            self.component_manager.get_component('file_manager').running)

        self.reactor.advance(1)
        self.assertTrue(self.component_manager.get_component('wallet').running)
        self.assertTrue(
            self.component_manager.get_component('blob_manager').running)
        self.assertTrue(
            self.component_manager.get_component('file_manager').running)

    def test_proper_stopping_of_components(self):
        self.component_manager.setup()
        self.reactor.advance(1)
        self.reactor.advance(1)
        self.component_manager.stop()
        self.assertFalse(
            self.component_manager.get_component('file_manager').running)
        self.assertTrue(
            self.component_manager.get_component('blob_manager').running)
        self.assertTrue(self.component_manager.get_component('wallet').running)

        self.reactor.advance(1)
        self.assertFalse(
            self.component_manager.get_component('file_manager').running)
        self.assertFalse(
            self.component_manager.get_component('blob_manager').running)
        self.assertTrue(self.component_manager.get_component('wallet').running)

        self.reactor.advance(1)
        self.assertFalse(
            self.component_manager.get_component('file_manager').running)
        self.assertFalse(
            self.component_manager.get_component('blob_manager').running)
        self.assertFalse(
            self.component_manager.get_component('wallet').running)
Code Example #38
File: test_ipc.py Project: schevalier/flocker
    def setUp(self):
        self.pool = FilesystemStoragePool(FilePath(self.mktemp()))
        self.service = VolumeService(
            FilePath(self.mktemp()), self.pool, reactor=Clock())
        self.service.startService()
        self.volume = self.successResultOf(self.service.create(MY_VOLUME))
Code Example #39
File: test_protocol.py Project: westerncapelabs/vumi
    def setUp(self):
        self.clock = Clock()
        self.persistence_helper = self.add_helper(PersistenceHelper())
        self.redis = yield self.persistence_helper.get_redis_manager()
        self.fake_smsc = FakeSMSC(auto_accept=False)
Code Example #40
File: test_setup.py Project: arnimarj/crochet
    def __init__(self):
        Clock.__init__(self)
        self.started = threading.Event()
        self.stopping = False
        self.events = []
Code Example #41
File: test_protocol.py Project: westerncapelabs/vumi
class TestEsmeProtocol(VumiTestCase):

    @inlineCallbacks
    def setUp(self):
        self.clock = Clock()
        self.persistence_helper = self.add_helper(PersistenceHelper())
        self.redis = yield self.persistence_helper.get_redis_manager()
        self.fake_smsc = FakeSMSC(auto_accept=False)

    def get_protocol(self, config={}, bind_type='TRX', accept_connection=True):
        cfg = {
            'transport_name': 'sphex_transport',
            'twisted_endpoint': 'tcp:host=127.0.0.1:port=0',
            'system_id': 'system_id',
            'password': '******',
            'smpp_bind_timeout': 30,
        }
        cfg.update(config)
        dummy_service = DummySmppService(self.clock, self.redis, cfg)

        factory = EsmeProtocolFactory(dummy_service, bind_type)
        proto_d = self.fake_smsc.endpoint.connect(factory)
        if accept_connection:
            self.fake_smsc.accept_connection()
        return proto_d

    def assertCommand(self, pdu, cmd_id, sequence_number=None,
                      status=None, params={}):
        self.assertEqual(command_id(pdu), cmd_id)
        if sequence_number is not None:
            self.assertEqual(seq_no(pdu), sequence_number)
        if status is not None:
            self.assertEqual(command_status(pdu), status)

        pdu_params = {}
        if params:
            if 'body' not in pdu:
                raise Exception('Body does not have parameters.')

            mandatory_parameters = pdu['body']['mandatory_parameters']
            for key in params:
                if key in mandatory_parameters:
                    pdu_params[key] = mandatory_parameters[key]

            self.assertEqual(params, pdu_params)

    def lookup_message_ids(self, protocol, seq_nums):
        message_stash = protocol.service.message_stash
        lookup_func = message_stash.get_sequence_number_message_id
        return gatherResults([lookup_func(seq_num) for seq_num in seq_nums])

    @inlineCallbacks
    def test_on_connection_made(self):
        connect_d = self.get_protocol(accept_connection=False)
        protocol = yield self.fake_smsc.await_connecting()
        self.assertEqual(protocol.state, EsmeProtocol.CLOSED_STATE)
        self.fake_smsc.accept_connection()
        protocol = yield connect_d  # Same protocol.
        self.assertEqual(protocol.state, EsmeProtocol.OPEN_STATE)

        bind_pdu = yield self.fake_smsc.await_pdu()
        self.assertCommand(
            bind_pdu,
            'bind_transceiver',
            sequence_number=1,
            params={
                'system_id': 'system_id',
                'password': '******',
            })

    @inlineCallbacks
    def test_drop_link(self):
        protocol = yield self.get_protocol()
        bind_pdu = yield self.fake_smsc.await_pdu()
        self.assertCommand(bind_pdu, 'bind_transceiver')
        self.assertFalse(protocol.is_bound())
        self.assertEqual(protocol.state, EsmeProtocol.OPEN_STATE)
        self.clock.advance(protocol.config.smpp_bind_timeout + 1)
        unbind_pdu = yield self.fake_smsc.await_pdu()
        self.assertCommand(unbind_pdu, 'unbind')
        yield self.fake_smsc.send_pdu(UnbindResp(seq_no(unbind_pdu)))
        yield self.fake_smsc.await_disconnect()

    @inlineCallbacks
    def test_on_smpp_bind(self):
        protocol = yield self.get_protocol()
        yield self.fake_smsc.bind()
        self.assertEqual(protocol.state, EsmeProtocol.BOUND_STATE_TRX)
        self.assertTrue(protocol.is_bound())
        self.assertTrue(protocol.enquire_link_call.running)

    @inlineCallbacks
    def test_handle_unbind(self):
        protocol = yield self.get_protocol()
        yield self.fake_smsc.bind()
        self.assertEqual(protocol.state, EsmeProtocol.BOUND_STATE_TRX)
        self.fake_smsc.send_pdu(Unbind(0))
        pdu = yield self.fake_smsc.await_pdu()
        self.assertCommand(
            pdu, 'unbind_resp', sequence_number=0, status='ESME_ROK')
        # We don't change state here.
        self.assertEqual(protocol.state, EsmeProtocol.BOUND_STATE_TRX)

    @inlineCallbacks
    def test_on_submit_sm_resp(self):
        protocol = yield self.get_protocol()
        yield self.fake_smsc.bind()
        calls = []
        protocol.on_submit_sm_resp = lambda *a: calls.append(a)
        yield self.fake_smsc.send_pdu(SubmitSMResp(0, message_id='foo'))
        self.assertEqual(calls, [(0, 'foo', 'ESME_ROK')])

    @inlineCallbacks
    def test_deliver_sm(self):
        calls = []
        protocol = yield self.get_protocol()
        protocol.handle_deliver_sm = lambda pdu: succeed(calls.append(pdu))
        yield self.fake_smsc.bind()
        yield self.fake_smsc.send_pdu(
            DeliverSM(0, message_id='foo', short_message='bar'))
        [deliver_sm] = calls
        self.assertCommand(deliver_sm, 'deliver_sm', sequence_number=0)

    @inlineCallbacks
    def test_deliver_sm_fail(self):
        yield self.get_protocol()
        yield self.fake_smsc.bind()
        yield self.fake_smsc.send_pdu(DeliverSM(
            sequence_number=0, message_id='foo', data_coding=4,
            short_message='string with unknown data coding'))
        deliver_sm_resp = yield self.fake_smsc.await_pdu()
        self.assertCommand(
            deliver_sm_resp, 'deliver_sm_resp', sequence_number=0,
            status='ESME_RDELIVERYFAILURE')

    @inlineCallbacks
    def test_deliver_sm_fail_with_custom_error(self):
        yield self.get_protocol({
            "deliver_sm_decoding_error": "ESME_RSYSERR"
        })
        yield self.fake_smsc.bind()
        yield self.fake_smsc.send_pdu(DeliverSM(
            sequence_number=0, message_id='foo', data_coding=4,
            short_message='string with unknown data coding'))
        deliver_sm_resp = yield self.fake_smsc.await_pdu()
        self.assertCommand(
            deliver_sm_resp, 'deliver_sm_resp', sequence_number=0,
            status='ESME_RSYSERR')

    @inlineCallbacks
    def test_on_enquire_link(self):
        protocol = yield self.get_protocol()
        yield self.fake_smsc.bind()
        pdu = EnquireLink(0)
        protocol.dataReceived(pdu.get_bin())
        enquire_link_resp = yield self.fake_smsc.await_pdu()
        self.assertCommand(
            enquire_link_resp, 'enquire_link_resp', sequence_number=0,
            status='ESME_ROK')

    @inlineCallbacks
    def test_on_enquire_link_resp(self):
        protocol = yield self.get_protocol()
        calls = []
        protocol.handle_enquire_link_resp = calls.append
        yield self.fake_smsc.bind()
        [pdu] = calls
        # bind_transceiver is sequence_number 1
        self.assertEqual(seq_no(pdu), 2)
        self.assertEqual(command_id(pdu), 'enquire_link_resp')

    @inlineCallbacks
    def test_enquire_link_no_response(self):
        self.fake_smsc.auto_unbind = False
        protocol = yield self.get_protocol()
        yield self.fake_smsc.bind()
        self.assertEqual(self.fake_smsc.connected, True)
        self.clock.advance(protocol.idle_timeout)
        [enquire_link_pdu, unbind_pdu] = yield self.fake_smsc.await_pdus(2)
        self.assertCommand(enquire_link_pdu, 'enquire_link')
        self.assertCommand(unbind_pdu, 'unbind')
        self.assertEqual(self.fake_smsc.connected, True)
        self.clock.advance(protocol.unbind_timeout)
        yield self.fake_smsc.await_disconnect()

    @inlineCallbacks
    def test_enquire_link_looping(self):
        self.fake_smsc.auto_unbind = False
        protocol = yield self.get_protocol()
        yield self.fake_smsc.bind()
        self.assertEqual(self.fake_smsc.connected, True)

        # Respond to a few enquire_link cycles.
        for i in range(5):
            self.clock.advance(protocol.idle_timeout - 1)
            pdu = yield self.fake_smsc.await_pdu()
            self.assertCommand(pdu, 'enquire_link')
            yield self.fake_smsc.respond_to_enquire_link(pdu)

        # Fail to respond, so we disconnect.
        self.clock.advance(protocol.idle_timeout - 1)
        pdu = yield self.fake_smsc.await_pdu()
        self.assertCommand(pdu, 'enquire_link')
        self.clock.advance(1)
        unbind_pdu = yield self.fake_smsc.await_pdu()
        self.assertCommand(unbind_pdu, 'unbind')
        yield self.fake_smsc.send_pdu(
            UnbindResp(seq_no(unbind_pdu)))
        yield self.fake_smsc.await_disconnect()

    @inlineCallbacks
    def test_submit_sm(self):
        protocol = yield self.get_protocol()
        yield self.fake_smsc.bind()
        seq_nums = yield protocol.submit_sm(
            'abc123', 'dest_addr', short_message='foo')
        submit_sm = yield self.fake_smsc.await_pdu()
        self.assertCommand(submit_sm, 'submit_sm', params={
            'short_message': 'foo',
        })
        stored_ids = yield self.lookup_message_ids(protocol, seq_nums)
        self.assertEqual(['abc123'], stored_ids)

    @inlineCallbacks
    def test_submit_sm_configured_parameters(self):
        protocol = yield self.get_protocol({
            'service_type': 'stype',
            'source_addr_ton': 2,
            'source_addr_npi': 2,
            'dest_addr_ton': 2,
            'dest_addr_npi': 2,
            'registered_delivery': 0,
        })
        yield self.fake_smsc.bind()
        seq_nums = yield protocol.submit_sm(
            'abc123', 'dest_addr', short_message='foo')
        submit_sm = yield self.fake_smsc.await_pdu()
        self.assertCommand(submit_sm, 'submit_sm', params={
            'short_message': 'foo',
            'service_type': 'stype',
            'source_addr_ton': 'national',  # replaced by unpack_pdu()
            'source_addr_npi': 2,
            'dest_addr_ton': 'national',  # replaced by unpack_pdu()
            'dest_addr_npi': 2,
            'registered_delivery': 0,
        })
        stored_ids = yield self.lookup_message_ids(protocol, seq_nums)
        self.assertEqual(['abc123'], stored_ids)

    @inlineCallbacks
    def test_query_sm(self):
        protocol = yield self.get_protocol()
        yield self.fake_smsc.bind()
        yield protocol.query_sm('foo', source_addr='bar')
        query_sm = yield self.fake_smsc.await_pdu()
        self.assertCommand(query_sm, 'query_sm', params={
            'message_id': 'foo',
            'source_addr': 'bar',
        })

    @inlineCallbacks
    def test_unbind(self):
        protocol = yield self.get_protocol()
        calls = []
        protocol.handle_unbind_resp = calls.append
        yield self.fake_smsc.bind()
        yield protocol.unbind()
        unbind_pdu = yield self.fake_smsc.await_pdu()
        protocol.dataReceived(UnbindResp(seq_no(unbind_pdu)).get_bin())
        [unbind_resp_pdu] = calls
        self.assertEqual(seq_no(unbind_resp_pdu), seq_no(unbind_pdu))

    @inlineCallbacks
    def test_bind_transmitter(self):
        protocol = yield self.get_protocol(bind_type='TX')
        yield self.fake_smsc.bind()
        self.assertTrue(protocol.is_bound())
        self.assertEqual(protocol.state, protocol.BOUND_STATE_TX)

    @inlineCallbacks
    def test_bind_receiver(self):
        protocol = yield self.get_protocol(bind_type='RX')
        yield self.fake_smsc.bind()
        self.assertTrue(protocol.is_bound())
        self.assertEqual(protocol.state, protocol.BOUND_STATE_RX)

    @inlineCallbacks
    def test_partial_pdu_data_received(self):
        protocol = yield self.get_protocol()
        calls = []
        protocol.handle_deliver_sm = calls.append
        yield self.fake_smsc.bind()
        deliver_sm = DeliverSM(1, short_message='foo')
        pdu = deliver_sm.get_bin()
        half = len(pdu) / 2
        pdu_part1, pdu_part2 = pdu[:half], pdu[half:]
        yield self.fake_smsc.send_bytes(pdu_part1)
        self.assertEqual([], calls)
        yield self.fake_smsc.send_bytes(pdu_part2)
        [handled_pdu] = calls
        self.assertEqual(command_id(handled_pdu), 'deliver_sm')
        self.assertEqual(seq_no(handled_pdu), 1)
        self.assertEqual(short_message(handled_pdu), 'foo')

    @inlineCallbacks
    def test_unsupported_command_id(self):
        protocol = yield self.get_protocol()
        calls = []
        protocol.on_unsupported_command_id = calls.append
        invalid_pdu = {
            'header': {
                'command_id': 'foo',
            }
        }
        protocol.on_pdu(invalid_pdu)
        self.assertEqual(calls, [invalid_pdu])
Code Example #42
File: reactor.py Project: zanssa/buildbot
    def __init__(self):
        Clock.__init__(self)
        CoreReactor.__init__(self)
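
Examples #34 and #42 combine Clock with a fake reactor base class (MemoryReactor, or buildbot's CoreReactor here) so that a single object can stand in for the reactor for both scheduling and connection APIs. Twisted itself ships such a combination as twisted.test.proto_helpers.MemoryReactorClock; a small usage sketch, assuming only the standard Twisted APIs:

from twisted.internet.protocol import ClientFactory
from twisted.test.proto_helpers import MemoryReactorClock

reactor = MemoryReactorClock()

# Clock half: callLater only fires when advance() is called.
done = []
reactor.callLater(5, done.append, True)
reactor.advance(5)
assert done == [True]

# MemoryReactor half: connection attempts are recorded, not actually made.
reactor.connectTCP("example.com", 80, ClientFactory())
host, port = reactor.tcpClients[0][:2]
assert (host, port) == ("example.com", 80)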