Example #1
 def testResolveToAPromise(self):
     p1,r1 = makePromise()
     p2,r2 = makePromise()
     def _check(res):
         self.failUnlessEqual(res, 1)
     p1._then(_check)
     r1(p2)
     def _continue(res):
         r2(1)
     flushEventualQueue().addCallback(_continue)
     return when(p1)
Example #2
 def testResolveToAPromise(self):
     p1,r1 = makePromise()
     p2,r2 = makePromise()
     def _check(res):
         self.assertEqual(res, 1)
     p1._then(_check)
     r1(p2)
     def _continue(res):
         r2(1)
     flushEventualQueue().addCallback(_continue)
     return when(p1)
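Examples #1 and #2 are the same test written twice, first with the older failUnless*/failIf spellings of trial's assertions and then with the newer assert* spellings; several of the later examples repeat that pairing. What the pair exercises is chained resolution: resolving p1 to another promise p2 defers p1 until p2 itself resolves, and the resolver for p2 is only fired after flushEventualQueue() has drained the eventual-send queue. A minimal annotated sketch of the same flow, assuming makePromise/when come from foolscap.promise and flushEventualQueue from foolscap.eventual (the excerpts above do not show their imports):

    # Annotated sketch of the chained-resolution tests above; the import
    # locations are an assumption, since the original snippets omit them.
    from twisted.trial import unittest
    from foolscap.promise import makePromise, when
    from foolscap.eventual import flushEventualQueue

    class ChainedResolution(unittest.TestCase):
        def test_chain(self):
            p1, r1 = makePromise()
            p2, r2 = makePromise()
            def _check(res):
                self.assertEqual(res, 1)
            p1._then(_check)       # runs once p1 is fully resolved
            r1(p2)                 # p1 now follows p2; nothing fires yet
            # resolve p2 only after the queue has drained, so the extra hop
            # introduced by r1(p2) is actually exercised
            flushEventualQueue().addCallback(lambda _: r2(1))
            return when(p1)        # keeps the test open until p1 resolves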
Example #3
 def testResolveToABrokenPromise(self):
     p1,r1 = makePromise()
     p2,r2 = makePromise()
     r1(p2)
     def _continue(res):
         r2(Failure(KaboomError("foom")))
     flushEventualQueue().addCallback(_continue)
     def _check2(res):
         self.failUnless(isinstance(res, Failure))
         self.failUnless(res.check(KaboomError))
     d = when(p1)
     d.addBoth(_check2)
     return d
Example #4
 def testResolveToABrokenPromise(self):
     p1,r1 = makePromise()
     p2,r2 = makePromise()
     r1(p2)
     def _continue(res):
         r2(Failure(KaboomError("foom")))
     flushEventualQueue().addCallback(_continue)
     def _check2(res):
         self.assertTrue(isinstance(res, Failure))
         self.assertTrue(res.check(KaboomError))
     d = when(p1)
     d.addBoth(_check2)
     return d
Example #5
 def testNotifyOnDisconnect_already(self):
     # make sure notifyOnDisconnect works even if the reference was already
     # broken
     rr, target = self.setupTarget(HelperTarget())
     self.lost = 0
     rr.tracker.broker.transport.loseConnection(Failure(CONNECTION_LOST))
     d = flushEventualQueue()
     d.addCallback(lambda res: rr.notifyOnDisconnect(self.disconnected))
     d.addCallback(lambda res: flushEventualQueue())
     def _check(res):
         self.failUnless(self.lost, "disconnect handler not run")
         self.failUnlessEqual(self.lost_args, ((),{}))
     d.addCallback(_check)
     return d
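The comment at the top of Example #5 is the point of the test: registering a disconnect handler on a reference whose connection is already gone must still invoke the handler. The flush-register-flush structure suggests the notification is delivered through the eventual-send queue rather than synchronously, so the test drains the queue once before registering (letting the disconnect propagate) and once afterwards (letting the late notification run) before asserting. A toy stand-in for that "late subscriber still gets called" behaviour, using only the queue primitive and not foolscap's actual RemoteReference code:

    # Toy illustration only: a subscriber added after the event is still
    # notified, but asynchronously via the eventual-send queue.
    from foolscap.eventual import eventually

    class ToyDisconnectNotifier(object):
        def __init__(self):
            self.already_lost = False
            self.observers = []

        def connectionLost(self):
            self.already_lost = True
            for cb in self.observers:
                eventually(cb)

        def notifyOnDisconnect(self, cb):
            self.observers.append(cb)
            if self.already_lost:
                eventually(cb)   # already broken: notify anyway, later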
Example #6
    def test_wait_for_brokers(self):
        """
        The L{Deferred} returned by L{Tub.stopService} fires only after the
        L{Broker} connections belonging to the L{Tub} have disconnected.
        """
        tub = Tub()
        tub.startService()

        another_tub = Tub()
        another_tub.startService()

        brokers = list(tub.brokerClass(None) for i in range(3))
        for n, b in enumerate(brokers):
            b.makeConnection(StringTransport())
            ref = SturdyRef(encode_furl(another_tub.tubID, [], str(n)))
            tub.brokerAttached(ref, b, isClient=(n % 2) == 1)

        stopping = tub.stopService()
        d = flushEventualQueue()

        def event(ignored):
            self.assertNoResult(stopping)
            for b in brokers:
                b.connectionLost(failure.Failure(Exception("Connection lost")))
            return flushEventualQueue()

        d.addCallback(event)

        def connectionsLost(ignored):
            self.successResultOf(stopping)

        d.addCallback(connectionsLost)

        return d
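Examples #6 and #7 are the same test (the second is a lightly reformatted copy), and both rely on a pump-and-check idiom: drain the eventual-send queue, assert that the Deferred from stopService() has not fired, deliver connectionLost() to every broker, drain again, then assert that it has fired. The idiom in isolation, assuming only trial's assertNoResult/successResultOf helpers and the queue primitives already used above:

    # Pump-and-check reduced to the queue primitives: drain, assert "not yet",
    # trigger the event, drain again, assert "fired".
    from twisted.trial import unittest
    from twisted.internet import defer
    from foolscap.eventual import eventually, flushEventualQueue

    class PumpAndCheck(unittest.TestCase):
        def test_fires_only_after_event(self):
            waiting = defer.Deferred()
            d = flushEventualQueue()

            def not_yet(ignored):
                self.assertNoResult(waiting)           # nothing has fired it
                eventually(waiting.callback, "done")   # schedule the event
                return flushEventualQueue()
            d.addCallback(not_yet)

            def fired(ignored):
                self.assertEqual(self.successResultOf(waiting), "done")
            d.addCallback(fired)
            return d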
Example #7
    def test_wait_for_brokers(self):
        """
        The L{Deferred} returned by L{Tub.stopService} fires only after the
        L{Broker} connections belonging to the L{Tub} have disconnected.
        """
        tub = Tub()
        tub.startService()

        another_tub = Tub()
        another_tub.startService()

        brokers = list(tub.brokerClass(None) for i in range(3))
        for n, b in enumerate(brokers):
            b.makeConnection(StringTransport())
            ref = SturdyRef(encode_furl(another_tub.tubID, [], str(n)))
            tub.brokerAttached(ref, b, isClient=(n % 2)==1)

        stopping = tub.stopService()
        d = flushEventualQueue()

        def event(ignored):
            self.assertNoResult(stopping)
            for b in brokers:
                b.connectionLost(failure.Failure(Exception("Connection lost")))
            return flushEventualQueue()
        d.addCallback(event)

        def connectionsLost(ignored):
            self.successResultOf(stopping)
        d.addCallback(connectionsLost)

        return d
Example #8
    def test_handler_launch(self):
        reactor = object()
        tor = mock.Mock()
        txtorcon = mock.Mock()
        handler = object()
        tor.control_endpoint_maker = mock.Mock(return_value=handler)
        tor.add_context = mock.Mock(return_value=EmptyContext())
        with mock_tor(tor):
            with mock_txtorcon(txtorcon):
                p = tor_provider.create(reactor, FakeConfig(launch=True))
        h = p.get_tor_handler()
        self.assertIs(h, handler)
        tor.control_endpoint_maker.assert_called_with(p._make_control_endpoint,
                                                      takes_status=True)

        # make sure Tor is launched just once, the first time an endpoint is
        # requested, and never again. The clientFromString() function is
        # called once each time.

        ep_desc = object()
        launch_tor = mock.Mock(return_value=defer.succeed((ep_desc, None)))
        ep = object()
        cfs = mock.Mock(return_value=ep)
        with mock.patch("allmydata.util.tor_provider._launch_tor", launch_tor):
            with mock.patch("allmydata.util.tor_provider.clientFromString",
                            cfs):
                d = p._make_control_endpoint(reactor,
                                             update_status=lambda status: None)
                yield flushEventualQueue()
                self.assertIs(self.successResultOf(d), ep)
                launch_tor.assert_called_with(
                    reactor, None, os.path.join("basedir", "private"),
                    txtorcon)
                cfs.assert_called_with(reactor, ep_desc)

        launch_tor2 = mock.Mock(return_value=defer.succeed((ep_desc, None)))
        cfs2 = mock.Mock(return_value=ep)
        with mock.patch("allmydata.util.tor_provider._launch_tor",
                        launch_tor2):
            with mock.patch("allmydata.util.tor_provider.clientFromString",
                            cfs2):
                d2 = p._make_control_endpoint(
                    reactor, update_status=lambda status: None)
                yield flushEventualQueue()
                self.assertIs(self.successResultOf(d2), ep)
                self.assertEqual(launch_tor2.mock_calls, [])
                cfs2.assert_called_with(reactor, ep_desc)
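Example #8 uses yield flushEventualQueue() inside the test body, so the method is presumably decorated with defer.inlineCallbacks (the decorator sits outside the excerpt; the later copy of this test has the same shape). The decoration pattern in minimal form:

    # Minimal inlineCallbacks-style use of flushEventualQueue; the decorator is
    # an assumption about what the excerpt above omits.
    from twisted.trial import unittest
    from twisted.internet import defer
    from foolscap.eventual import eventually, flushEventualQueue

    class InlineCallbacksStyle(unittest.TestCase):
        @defer.inlineCallbacks
        def test_flush_with_yield(self):
            results = []
            eventually(results.append, 1)
            self.assertEqual(results, [])   # nothing is delivered synchronously
            yield flushEventualQueue()      # drain the eventual-send queue
            self.assertEqual(results, [1])  # the queued call has now run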
Example #9
 def testNear(self):
     t = Target()
     sendOnly(t).one(1)
     self.failIf(t.calls)
     def _check(res):
         self.failUnlessEqual(t.calls, [("one", 1)])
     d = flushEventualQueue()
     d.addCallback(_check)
     return d
Example #10
 def testNear(self):
     t = Target()
     sendOnly(t).one(1)
     self.assertFalse(t.calls)
     def _check(res):
         self.assertEqual(t.calls, [("one", 1)])
     d = flushEventualQueue()
     d.addCallback(_check)
     return d
Example #11
 def test_5_overdue_immutable(self):
     # restrict the ShareFinder to only allow 5 outstanding requests, and
     # arrange for the first 5 servers to hang. Then trigger the OVERDUE
     # timers (simulating 10 seconds passed), at which point the
     # ShareFinder should send additional queries and finish the download
     # quickly. If we didn't have OVERDUE timers, this test would fail by
     # timing out.
     done = []
     d = self._set_up(False, "test_5_overdue_immutable")
     def _reduce_max_outstanding_requests_and_download(ign):
         self._hang_shares(range(5))
         n = self.c0.create_node_from_uri(self.uri)
         n._cnode._maybe_create_download_node()
         self._sf = n._cnode._node._sharefinder
         self._sf.max_outstanding_requests = 5
         self._sf.OVERDUE_TIMEOUT = 1000.0
         d2 = download_to_data(n)
         # start download, but don't wait for it to complete yet
         def _done(res):
             done.append(res) # we will poll for this later
         d2.addBoth(_done)
     d.addCallback(_reduce_max_outstanding_requests_and_download)
     from foolscap.eventual import fireEventually, flushEventualQueue
     # wait here a while
     d.addCallback(lambda res: fireEventually(res))
     d.addCallback(lambda res: flushEventualQueue())
     d.addCallback(lambda ign: self.failIf(done))
     def _check_waiting(ign):
         # all the share requests should now be stuck waiting
         self.failUnlessEqual(len(self._sf.pending_requests), 5)
         # but none should be marked as OVERDUE until the timers expire
         self.failUnlessEqual(len(self._sf.overdue_requests), 0)
     d.addCallback(_check_waiting)
     def _mark_overdue(ign):
         # declare four requests overdue, allowing new requests to take
         # their place, and leaving one stuck. The finder will keep
         # sending requests until there are 5 non-overdue ones
         # outstanding, at which point we'll have 4 OVERDUE, 1
         # stuck-but-not-overdue, and 4 live requests. All 4 live requests
         # will retire before the download is complete and the ShareFinder
         # is shut off. That will leave 4 OVERDUE and 1
         # stuck-but-not-overdue, for a total of 5 requests in
         # _sf.pending_requests
         for t in self._sf.overdue_timers.values()[:4]:
             t.reset(-1.0)
         # the timers ought to fire before the eventual-send does
         return fireEventually()
     d.addCallback(_mark_overdue)
     def _we_are_done():
         return bool(done)
     d.addCallback(lambda ign: self.poll(_we_are_done))
     def _check_done(ign):
         self.failUnlessEqual(done, [immutable_plaintext])
         self.failUnlessEqual(len(self._sf.pending_requests), 5)
         self.failUnlessEqual(len(self._sf.overdue_requests), 4)
     d.addCallback(_check_done)
     return d
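Example #11 ends with a poll loop: the download's Deferred is captured in the done list instead of being returned, and self.poll(_we_are_done) (a helper from the surrounding test class, not shown in this excerpt) waits until that list is populated. A hypothetical, self-contained helper with the same shape, to make the idiom concrete:

    # Hypothetical stand-in for the self.poll() helper used above; the real one
    # comes from the test's base class and is not part of this excerpt.
    from twisted.internet import reactor, task

    def poll(check, interval=0.01):
        """Return a Deferred that fires once check() returns a true value."""
        if check():
            return task.deferLater(reactor, 0, lambda: None)
        d = task.deferLater(reactor, interval, lambda: None)
        d.addCallback(lambda _: poll(check, interval))
        return d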
Example #12
    def test_handler_launch(self):
        reactor = object()
        tor = mock.Mock()
        txtorcon = mock.Mock()
        handler = object()
        tor.control_endpoint_maker = mock.Mock(return_value=handler)
        tor.add_context = mock.Mock(return_value=EmptyContext())
        with mock_tor(tor):
            with mock_txtorcon(txtorcon):
                p = tor_provider.create(reactor, FakeConfig(launch=True))
        h = p.get_tor_handler()
        self.assertIs(h, handler)
        tor.control_endpoint_maker.assert_called_with(p._make_control_endpoint,
                                                      takes_status=True)

        # make sure Tor is launched just once, the first time an endpoint is
        # requested, and never again. The clientFromString() function is
        # called once each time.

        ep_desc = object()
        launch_tor = mock.Mock(return_value=defer.succeed((ep_desc,None)))
        ep = object()
        cfs = mock.Mock(return_value=ep)
        with mock.patch("allmydata.util.tor_provider._launch_tor", launch_tor):
            with mock.patch("allmydata.util.tor_provider.clientFromString", cfs):
                d = p._make_control_endpoint(reactor,
                                             update_status=lambda status: None)
                yield flushEventualQueue()
                self.assertIs(self.successResultOf(d), ep)
                launch_tor.assert_called_with(reactor, None,
                                              os.path.join("basedir", "private"),
                                              txtorcon)
                cfs.assert_called_with(reactor, ep_desc)

        launch_tor2 = mock.Mock(return_value=defer.succeed((ep_desc,None)))
        cfs2 = mock.Mock(return_value=ep)
        with mock.patch("allmydata.util.tor_provider._launch_tor", launch_tor2):
            with mock.patch("allmydata.util.tor_provider.clientFromString", cfs2):
                d2 = p._make_control_endpoint(reactor,
                                              update_status=lambda status: None)
                yield flushEventualQueue()
                self.assertIs(self.successResultOf(d2), ep)
                self.assertEqual(launch_tor2.mock_calls, [])
                cfs2.assert_called_with(reactor, ep_desc)
Example #13
 def testResolveAfter(self):
     t = Target()
     p,r = makePromise()
     sendOnly(p).one(1)
     r(t)
     d = flushEventualQueue()
     def _check(res):
         self.assertEqual(t.calls, [("one", 1)])
     d.addCallback(_check)
     return d
Example #14
 def testResolveAfter(self):
     t = Target()
     p,r = makePromise()
     sendOnly(p).one(1)
     r(t)
     d = flushEventualQueue()
     def _check(res):
         self.failUnlessEqual(t.calls, [("one", 1)])
     d.addCallback(_check)
     return d
Example #15
    def testFlush(self):
        results = []
        eventually(results.append, 1)
        eventually(results.append, 2)
        d = flushEventualQueue()

        def _check(res):
            self.failUnlessEqual(results, [1, 2])

        d.addCallback(_check)
        return d
Example #16
    def testResolveBefore(self):
        t = Target()
        p, r = makePromise()
        r(t)
        sendOnly(p).one(1)
        d = flushEventualQueue()

        def _check(res):
            self.failUnlessEqual(t.calls, [("one", 1)])

        d.addCallback(_check)
        return d
Example #17
 def testNotifyOnDisconnect_args(self):
     rr, target = self.setupTarget(HelperTarget())
     self.lost = 0
     rr.notifyOnDisconnect(self.disconnected, "arg", foo="kwarg")
     rr.tracker.broker.transport.loseConnection(Failure(CONNECTION_LOST))
     d = flushEventualQueue()
     def _check(res):
         self.failUnless(self.lost)
         self.failUnlessEqual(self.lost_args, (("arg",),
                                               {"foo": "kwarg"}))
     d.addCallback(_check)
     return d
Example #18
    def testFire(self):
        results = []
        fireEventually(1).addCallback(results.append)
        fireEventually(2).addCallback(results.append)
        self.failIf(results)

        def _check(res):
            self.failUnlessEqual(results, [1, 2])

        d = flushEventualQueue()
        d.addCallback(_check)
        return d
Example #19
 def testNotifyOnDisconnect_unregister(self):
     rr, target = self.setupTarget(HelperTarget())
     self.lost = 0
     m = rr.notifyOnDisconnect(self.disconnected)
     rr.dontNotifyOnDisconnect(m)
     # dontNotifyOnDisconnect is supposed to be tolerant of duplicate
     # unregisters, because otherwise it is hard to avoid race conditions.
     # Validate that we can unregister something multiple times.
     rr.dontNotifyOnDisconnect(m)
     rr.tracker.broker.transport.loseConnection(Failure(CONNECTION_LOST))
     d = flushEventualQueue()
     d.addCallback(lambda res: self.failIf(self.lost))
     return d
Example #20
 def testNotifyOnDisconnect(self):
     rr, target = self.setupTarget(HelperTarget())
     self.lost = 0
     self.failUnlessEqual(rr.isConnected(), True)
     rr.notifyOnDisconnect(self.disconnected)
     rr.tracker.broker.transport.loseConnection(Failure(CONNECTION_LOST))
     d = flushEventualQueue()
     def _check(res):
         self.failUnlessEqual(rr.isConnected(), False)
         self.failUnless(self.lost)
         self.failUnlessEqual(self.lost_args, ((),{}))
         # it should be safe to unregister now, even though the callback
         # has already fired, since dontNotifyOnDisconnect is tolerant
         rr.dontNotifyOnDisconnect(self.disconnected)
     d.addCallback(_check)
     return d
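The two preceding notifyOnDisconnect examples both depend on dontNotifyOnDisconnect() tolerating the removal of a handler that is no longer registered, either because it was already unregistered or because it has already fired. A toy sketch of that tolerant-removal shape (not foolscap's actual implementation):

    # Toy sketch of an idempotent unregister: removing something twice, or
    # after it has fired, is a no-op rather than an error.
    class ToyWatcher(object):
        def __init__(self):
            self._observers = []

        def notifyOnDisconnect(self, cb, *args, **kwargs):
            marker = (cb, args, kwargs)
            self._observers.append(marker)
            return marker                     # handle for later removal

        def dontNotifyOnDisconnect(self, marker):
            # tolerate duplicate or late removal: discard instead of raising
            if marker in self._observers:
                self._observers.remove(marker)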
Example #21
    def testCalled(self):
        """
        The object passed to L{Broker._notifyOnConnectionLost} is called when the
        L{Broker} is notified that its connection has been lost.
        """
        transport = NullTransport()
        protocol = broker.Broker(None)
        protocol.makeConnection(transport)
        disconnected = []
        protocol._notifyOnConnectionLost(lambda: disconnected.append(1))
        protocol._notifyOnConnectionLost(lambda: disconnected.append(2))
        protocol.connectionLost(failure.Failure(Exception("Connection lost")))

        d = flushEventualQueue()
        def flushed(ignored):
            self.assertEqual([1, 2], disconnected)
        d.addCallback(flushed)
        return d
Example #22
    def test_control_endpoint(self):
        basedir = self.mktemp()
        os.mkdir(basedir)
        fn = os.path.join(basedir, "keyfile")
        with open(fn, "w") as f:
            f.write("private key")
        reactor = object()
        cfg = FakeConfig(basedir=basedir,
                         onion=True,
                         **{
                             "control.port": "ep_desc",
                             "onion.local_port": 123,
                             "onion.external_port": 456,
                             "onion.private_key_file": "keyfile",
                         })

        txtorcon = mock.Mock()
        with mock_txtorcon(txtorcon):
            p = tor_provider.create(reactor, cfg)
        tor_state = mock.Mock()
        tor_state.protocol = object()
        txtorcon.build_tor_connection = mock.Mock(return_value=tor_state)
        ehs = mock.Mock()
        ehs.add_to_tor = mock.Mock(return_value=defer.succeed(None))
        ehs.remove_from_tor = mock.Mock(return_value=defer.succeed(None))
        txtorcon.EphemeralHiddenService = mock.Mock(return_value=ehs)
        tcep = object()
        cfs = mock.Mock(return_value=tcep)
        with mock.patch("allmydata.util.tor_provider.clientFromString", cfs):
            d = p.startService()
            yield flushEventualQueue()
        self.successResultOf(d)
        self.assertIs(p._onion_ehs, ehs)
        self.assertIs(p._onion_tor_control_proto, tor_state.protocol)
        cfs.assert_called_with(reactor, "ep_desc")
        txtorcon.build_tor_connection.assert_called_with(tcep)
        txtorcon.EphemeralHiddenService.assert_called_with(
            "456 127.0.0.1:123", "private key")
        ehs.add_to_tor.assert_called_with(tor_state.protocol)

        yield p.stopService()
        ehs.remove_from_tor.assert_called_with(tor_state.protocol)
Example #23
    def testCalled(self):
        """
        The object passed to L{Broker._notifyOnConnectionLost} is called when the
        L{Broker} is notified that its connection has been lost.
        """
        transport = NullTransport()
        protocol = broker.Broker(None)
        protocol.makeConnection(transport)
        disconnected = []
        protocol._notifyOnConnectionLost(lambda: disconnected.append(1))
        protocol._notifyOnConnectionLost(lambda: disconnected.append(2))
        protocol.connectionLost(failure.Failure(Exception("Connection lost")))

        d = flushEventualQueue()

        def flushed(ignored):
            self.assertEqual([1, 2], disconnected)

        d.addCallback(flushed)
        return d
Example #24
    def test_control_endpoint(self):
        basedir = self.mktemp()
        os.mkdir(basedir)
        fn = os.path.join(basedir, "keyfile")
        with open(fn, "w") as f:
            f.write("private key")
        reactor = object()
        cfg = FakeConfig(basedir=basedir, onion=True,
                         **{"control.port": "ep_desc",
                            "onion.local_port": 123,
                            "onion.external_port": 456,
                            "onion.private_key_file": "keyfile",
                            })

        txtorcon = mock.Mock()
        with mock_txtorcon(txtorcon):
            p = tor_provider.create(reactor, cfg)
        tor_state = mock.Mock()
        tor_state.protocol = object()
        txtorcon.build_tor_connection = mock.Mock(return_value=tor_state)
        ehs = mock.Mock()
        ehs.add_to_tor = mock.Mock(return_value=defer.succeed(None))
        ehs.remove_from_tor = mock.Mock(return_value=defer.succeed(None))
        txtorcon.EphemeralHiddenService = mock.Mock(return_value=ehs)
        tcep = object()
        cfs = mock.Mock(return_value=tcep)
        with mock.patch("allmydata.util.tor_provider.clientFromString", cfs):
            d = p.startService()
            yield flushEventualQueue()
        self.successResultOf(d)
        self.assertIs(p._onion_ehs, ehs)
        self.assertIs(p._onion_tor_control_proto, tor_state.protocol)
        cfs.assert_called_with(reactor, "ep_desc")
        txtorcon.build_tor_connection.assert_called_with(tcep)
        txtorcon.EphemeralHiddenService.assert_called_with("456 127.0.0.1:123",
                                                           "private key")
        ehs.add_to_tor.assert_called_with(tor_state.protocol)

        yield p.stopService()
        ehs.remove_from_tor.assert_called_with(tor_state.protocol)
Example #25
    def test_launch(self):
        basedir = self.mktemp()
        os.mkdir(basedir)
        fn = os.path.join(basedir, "keyfile")
        with open(fn, "w") as f:
            f.write("private key")
        reactor = object()
        cfg = FakeConfig(onion=True, launch=True,
                         **{"onion.local_port": 123,
                            "onion.external_port": 456,
                            "onion.private_key_file": "keyfile",
                            })

        txtorcon = mock.Mock()
        with mock_txtorcon(txtorcon):
            p = tor_provider.Provider(basedir, cfg, reactor)
        tor_state = mock.Mock()
        tor_state.protocol = object()
        ehs = mock.Mock()
        ehs.add_to_tor = mock.Mock(return_value=defer.succeed(None))
        ehs.remove_from_tor = mock.Mock(return_value=defer.succeed(None))
        txtorcon.EphemeralHiddenService = mock.Mock(return_value=ehs)
        launch_tor = mock.Mock(return_value=defer.succeed((None,tor_state.protocol)))
        with mock.patch("allmydata.util.tor_provider._launch_tor",
                        launch_tor):
            d = p.startService()
            yield flushEventualQueue()
        self.successResultOf(d)
        self.assertIs(p._onion_ehs, ehs)
        self.assertIs(p._onion_tor_control_proto, tor_state.protocol)
        launch_tor.assert_called_with(reactor, None,
                                      os.path.join(basedir, "private"), txtorcon)
        txtorcon.EphemeralHiddenService.assert_called_with("456 127.0.0.1:123",
                                                           "private key")
        ehs.add_to_tor.assert_called_with(tor_state.protocol)

        yield p.stopService()
        ehs.remove_from_tor.assert_called_with(tor_state.protocol)
Example #26
 def tearDown(self):
     return eventual.flushEventualQueue()
Example #27
 def tearDown(self):
     return flushEventualQueue()
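The two tearDown snippets above return the Deferred from flushEventualQueue(), so trial waits for the eventual-send queue to drain before the test is considered finished; anything still sitting on the queue would otherwise risk leaking into the next test or tripping trial's reactor-cleanliness checks. A sketch of combining that flush with other per-test cleanup (the self.tub attribute is hypothetical, standing in for whatever the test created in setUp):

    # Sketch only: drain the queue as the last step of tearDown, after any
    # other asynchronous cleanup. self.tub is a hypothetical attribute.
    from twisted.trial import unittest
    from twisted.internet import defer
    from foolscap.eventual import flushEventualQueue

    class CleanShutdownExample(unittest.TestCase):
        @defer.inlineCallbacks
        def tearDown(self):
            tub = getattr(self, "tub", None)
            if tub is not None:
                yield tub.stopService()    # stop any service the test started
            yield flushEventualQueue()     # then drain the eventual-send queue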
Example #28
 def event(ignored):
     self.assertNoResult(stopping)
     for b in brokers:
         b.connectionLost(failure.Failure(Exception("Connection lost")))
     return flushEventualQueue()