Example #1
0
 def _publish_stub_client(ignored):
     furl = self._stub_client_furl
     self.publish(
         "stub_client", {
             "anonymous-storage-FURL": furl,
             "permutation-seed-base32": get_tubid_string(furl),
         })
Example #2
0
 def _publish_stub_client(ignored):
     furl = self._stub_client_furl
     self.publish("stub_client",
                  { "anonymous-storage-FURL": furl,
                    "permutation-seed-base32": get_tubid_string(furl),
                    })
Example #3
0
def make_ann(furl):
    ann = { "anonymous-storage-FURL": furl,
            "permutation-seed-base32": get_tubid_string(furl) }
    return ann
Example #4
0
def make_ann(furl):
    ann = {
        "anonymous-storage-FURL": furl,
        "permutation-seed-base32": get_tubid_string(furl)
    }
    return ann
Example #5
0
    def do_system_test(self, server_version):
        self.create_tub()
        if server_version == V1:
            introducer = old.IntroducerService_v1()
        else:
            introducer = IntroducerService()
        introducer.setServiceParent(self.parent)
        iff = os.path.join(self.basedir, "introducer.furl")
        tub = self.central_tub
        ifurl = self.central_tub.registerReference(introducer, furlFile=iff)
        self.introducer_furl = ifurl

        # we have 5 clients who publish themselves as storage servers, and a
        # sixth which does not. All 6 clients subscribe to hear about
        # storage. When the connections are fully established, all six nodes
        # should have 5 connections each.
        NUM_STORAGE = 5
        NUM_CLIENTS = 6

        clients = []
        tubs = {}
        received_announcements = {}
        subscribing_clients = []
        publishing_clients = []
        printable_serverids = {}
        self.the_introducer = introducer
        privkeys = {}
        expected_announcements = [0 for c in range(NUM_CLIENTS)]

        for i in range(NUM_CLIENTS):
            tub = Tub()
            #tub.setOption("logLocalFailures", True)
            #tub.setOption("logRemoteFailures", True)
            tub.setOption("expose-remote-exception-types", False)
            tub.setServiceParent(self.parent)
            l = tub.listenOn("tcp:0")
            portnum = l.getPortnum()
            tub.setLocation("localhost:%d" % portnum)

            log.msg("creating client %d: %s" % (i, tub.getShortTubID()))
            if i == 0:
                c = old.IntroducerClient_v1(tub, self.introducer_furl,
                                            NICKNAME % str(i),
                                            "version", "oldest")
            else:
                c = IntroducerClient(tub, self.introducer_furl,
                                     NICKNAME % str(i),
                                     "version", "oldest",
                                     {"component": "component-v1"})
            received_announcements[c] = {}
            def got(key_s_or_tubid, ann, announcements, i):
                if i == 0:
                    index = get_tubid_string_from_ann(ann)
                else:
                    index = key_s_or_tubid or get_tubid_string_from_ann(ann)
                announcements[index] = ann
            c.subscribe_to("storage", got, received_announcements[c], i)
            subscribing_clients.append(c)
            expected_announcements[i] += 1 # all expect a 'storage' announcement

            node_furl = tub.registerReference(Referenceable())
            if i < NUM_STORAGE:
                if i == 0:
                    c.publish(node_furl, "storage", "ri_name")
                    printable_serverids[i] = get_tubid_string(node_furl)
                elif i == 1:
                    # sign the announcement
                    privkey_s, pubkey_s = keyutil.make_keypair()
                    privkey, _ignored = keyutil.parse_privkey(privkey_s)
                    privkeys[c] = privkey
                    c.publish("storage", make_ann(node_furl), privkey)
                    if server_version == V1:
                        printable_serverids[i] = get_tubid_string(node_furl)
                    else:
                        assert pubkey_s.startswith("pub-")
                        printable_serverids[i] = pubkey_s[len("pub-"):]
                else:
                    c.publish("storage", make_ann(node_furl))
                    printable_serverids[i] = get_tubid_string(node_furl)
                publishing_clients.append(c)
            else:
                # the last one does not publish anything
                pass

            if i == 0:
                # users of the V1 client were required to publish a
                # 'stub_client' record (somewhat after they published the
                # 'storage' record), so the introducer could see their
                # version. Match that behavior.
                c.publish(node_furl, "stub_client", "stub_ri_name")

            if i == 2:
                # also publish something that nobody cares about
                boring_furl = tub.registerReference(Referenceable())
                c.publish("boring", make_ann(boring_furl))

            c.setServiceParent(self.parent)
            clients.append(c)
            tubs[c] = tub


        def _wait_for_connected(ign):
            def _connected():
                for c in clients:
                    if not c.connected_to_introducer():
                        return False
                return True
            return self.poll(_connected)

        # we watch the clients to determine when the system has settled down.
        # Then we can look inside the server to assert things about its
        # state.

        def _wait_for_expected_announcements(ign):
            def _got_expected_announcements():
                for i,c in enumerate(subscribing_clients):
                    if len(received_announcements[c]) < expected_announcements[i]:
                        return False
                return True
            return self.poll(_got_expected_announcements)

        # before shutting down any Tub, we'd like to know that there are no
        # messages outstanding

        def _wait_until_idle(ign):
            def _idle():
                for c in subscribing_clients + publishing_clients:
                    if c._debug_outstanding:
                        return False
                if self.the_introducer._debug_outstanding:
                    return False
                return True
            return self.poll(_idle)

        d = defer.succeed(None)
        d.addCallback(_wait_for_connected)
        d.addCallback(_wait_for_expected_announcements)
        d.addCallback(_wait_until_idle)

        def _check1(res):
            log.msg("doing _check1")
            dc = self.the_introducer._debug_counts
            if server_version == V1:
                # each storage server publishes a record, and (after its
                # 'subscribe' has been ACKed) also publishes a "stub_client".
                # The non-storage client (which subscribes) also publishes a
                # stub_client. There is also one "boring" service. The number
                # of messages is higher, because the stub_clients aren't
                # published until after we get the 'subscribe' ack (since we
                # don't realize that we're dealing with a v1 server [which
                # needs stub_clients] until then), and the act of publishing
                # the stub_client causes us to re-send all previous
                # announcements.
                self.failUnlessEqual(dc["inbound_message"] - dc["inbound_duplicate"],
                                     NUM_STORAGE + NUM_CLIENTS + 1)
            else:
                # each storage server publishes a record. There is also one
                # "stub_client" and one "boring"
                self.failUnlessEqual(dc["inbound_message"], NUM_STORAGE+2)
                self.failUnlessEqual(dc["inbound_duplicate"], 0)
            self.failUnlessEqual(dc["inbound_update"], 0)
            self.failUnlessEqual(dc["inbound_subscribe"], NUM_CLIENTS)
            # the number of outbound messages is tricky: I think it depends
            # upon a race between the publish and the subscribe messages.
            self.failUnless(dc["outbound_message"] > 0)
            # each client subscribes to "storage", and each server publishes
            self.failUnlessEqual(dc["outbound_announcements"],
                                 NUM_STORAGE*NUM_CLIENTS)

            for c in subscribing_clients:
                cdc = c._debug_counts
                self.failUnless(cdc["inbound_message"])
                self.failUnlessEqual(cdc["inbound_announcement"],
                                     NUM_STORAGE)
                self.failUnlessEqual(cdc["wrong_service"], 0)
                self.failUnlessEqual(cdc["duplicate_announcement"], 0)
                self.failUnlessEqual(cdc["update"], 0)
                self.failUnlessEqual(cdc["new_announcement"],
                                     NUM_STORAGE)
                anns = received_announcements[c]
                self.failUnlessEqual(len(anns), NUM_STORAGE)

                nodeid0 = tubs[clients[0]].tubID
                ann = anns[nodeid0]
                nick = ann["nickname"]
                self.failUnlessEqual(type(nick), unicode)
                self.failUnlessEqual(nick, NICKNAME % "0")
            if server_version == V1:
                for c in publishing_clients:
                    cdc = c._debug_counts
                    expected = 1 # storage
                    if c is clients[2]:
                        expected += 1 # boring
                    if c is not clients[0]:
                        # the v2 client tries to call publish_v2, which fails
                        # because the server is v1. It then re-sends
                        # everything it has so far, plus a stub_client record
                        expected = 2*expected + 1
                    if c is clients[0]:
                        # we always tell the v1 client to send stub_client
                        expected += 1
                    self.failUnlessEqual(cdc["outbound_message"], expected)
            else:
                for c in publishing_clients:
                    cdc = c._debug_counts
                    expected = 1
                    if c in [clients[0], # stub_client
                             clients[2], # boring
                             ]:
                        expected = 2
                    self.failUnlessEqual(cdc["outbound_message"], expected)
            # now check the web status, make sure it renders without error
            ir = introweb.IntroducerRoot(self.parent)
            self.parent.nodeid = "NODEID"
            text = ir.renderSynchronously().decode("utf-8")
            self.failUnlessIn(NICKNAME % "0", text) # the v1 client
            self.failUnlessIn(NICKNAME % "1", text) # a v2 client
            for i in range(NUM_STORAGE):
                self.failUnlessIn(printable_serverids[i], text,
                                  (i,printable_serverids[i],text))
                # make sure there isn't a double-base32ed string too
                self.failIfIn(idlib.nodeid_b2a(printable_serverids[i]), text,
                              (i,printable_serverids[i],text))
            log.msg("_check1 done")
        d.addCallback(_check1)

        # force an introducer reconnect, by shutting down the Tub it's using
        # and starting a new Tub (with the old introducer). Everybody should
        # reconnect and republish, but the introducer should ignore the
        # republishes as duplicates. However, because the server doesn't know
        # what each client does and does not know, it will send them a copy
        # of the current announcement table anyway.

        d.addCallback(lambda _ign: log.msg("shutting down introducer's Tub"))
        d.addCallback(lambda _ign: self.central_tub.disownServiceParent())

        def _wait_for_introducer_loss(ign):
            def _introducer_lost():
                for c in clients:
                    if c.connected_to_introducer():
                        return False
                return True
            return self.poll(_introducer_lost)
        d.addCallback(_wait_for_introducer_loss)

        def _restart_introducer_tub(_ign):
            log.msg("restarting introducer's Tub")
            # reset counters
            for i in range(NUM_CLIENTS):
                c = subscribing_clients[i]
                for k in c._debug_counts:
                    c._debug_counts[k] = 0
            for k in self.the_introducer._debug_counts:
                self.the_introducer._debug_counts[k] = 0
            expected_announcements[i] += 1 # new 'storage' for everyone
            self.create_tub(self.central_portnum)
            newfurl = self.central_tub.registerReference(self.the_introducer,
                                                         furlFile=iff)
            assert newfurl == self.introducer_furl
        d.addCallback(_restart_introducer_tub)

        d.addCallback(_wait_for_connected)
        d.addCallback(_wait_for_expected_announcements)
        d.addCallback(_wait_until_idle)
        d.addCallback(lambda _ign: log.msg(" reconnected"))

        # TODO: publish something while the introducer is offline, then
        # confirm it gets delivered when the connection is reestablished
        def _check2(res):
            log.msg("doing _check2")
            # assert that the introducer sent out new messages, one per
            # subscriber
            dc = self.the_introducer._debug_counts
            self.failUnlessEqual(dc["outbound_announcements"],
                                 NUM_STORAGE*NUM_CLIENTS)
            self.failUnless(dc["outbound_message"] > 0)
            self.failUnlessEqual(dc["inbound_subscribe"], NUM_CLIENTS)
            for c in subscribing_clients:
                cdc = c._debug_counts
                self.failUnlessEqual(cdc["inbound_message"], 1)
                self.failUnlessEqual(cdc["inbound_announcement"], NUM_STORAGE)
                self.failUnlessEqual(cdc["new_announcement"], 0)
                self.failUnlessEqual(cdc["wrong_service"], 0)
                self.failUnlessEqual(cdc["duplicate_announcement"], NUM_STORAGE)
        d.addCallback(_check2)

        # Then force an introducer restart, by shutting down the Tub,
        # destroying the old introducer, and starting a new Tub+Introducer.
        # Everybody should reconnect and republish, and the (new) introducer
        # will distribute the new announcements, but the clients should
        # ignore the republishes as duplicates.

        d.addCallback(lambda _ign: log.msg("shutting down introducer"))
        d.addCallback(lambda _ign: self.central_tub.disownServiceParent())
        d.addCallback(_wait_for_introducer_loss)
        d.addCallback(lambda _ign: log.msg("introducer lost"))

        def _restart_introducer(_ign):
            log.msg("restarting introducer")
            self.create_tub(self.central_portnum)
            # reset counters
            for i in range(NUM_CLIENTS):
                c = subscribing_clients[i]
                for k in c._debug_counts:
                    c._debug_counts[k] = 0
            expected_announcements[i] += 1 # new 'storage' for everyone
            if server_version == V1:
                introducer = old.IntroducerService_v1()
            else:
                introducer = IntroducerService()
            self.the_introducer = introducer
            newfurl = self.central_tub.registerReference(self.the_introducer,
                                                         furlFile=iff)
            assert newfurl == self.introducer_furl
        d.addCallback(_restart_introducer)

        d.addCallback(_wait_for_connected)
        d.addCallback(_wait_for_expected_announcements)
        d.addCallback(_wait_until_idle)

        def _check3(res):
            log.msg("doing _check3")
            dc = self.the_introducer._debug_counts
            self.failUnlessEqual(dc["outbound_announcements"],
                                 NUM_STORAGE*NUM_CLIENTS)
            self.failUnless(dc["outbound_message"] > 0)
            self.failUnlessEqual(dc["inbound_subscribe"], NUM_CLIENTS)
            for c in subscribing_clients:
                cdc = c._debug_counts
                self.failUnless(cdc["inbound_message"] > 0)
                self.failUnlessEqual(cdc["inbound_announcement"], NUM_STORAGE)
                self.failUnlessEqual(cdc["new_announcement"], 0)
                self.failUnlessEqual(cdc["wrong_service"], 0)
                self.failUnlessEqual(cdc["duplicate_announcement"], NUM_STORAGE)

        d.addCallback(_check3)
        return d
Example #6
0
    def do_system_test(self, server_version):
        self.create_tub()
        if server_version == V1:
            introducer = old.IntroducerService_v1()
        else:
            introducer = IntroducerService()
        introducer.setServiceParent(self.parent)
        iff = os.path.join(self.basedir, "introducer.furl")
        tub = self.central_tub
        ifurl = self.central_tub.registerReference(introducer, furlFile=iff)
        self.introducer_furl = ifurl

        # we have 5 clients who publish themselves as storage servers, and a
        # sixth which does not. All 6 clients subscribe to hear about
        # storage. When the connections are fully established, all six nodes
        # should have 5 connections each.
        NUM_STORAGE = 5
        NUM_CLIENTS = 6

        clients = []
        tubs = {}
        received_announcements = {}
        subscribing_clients = []
        publishing_clients = []
        printable_serverids = {}
        self.the_introducer = introducer
        privkeys = {}
        expected_announcements = [0 for c in range(NUM_CLIENTS)]

        for i in range(NUM_CLIENTS):
            tub = Tub()
            #tub.setOption("logLocalFailures", True)
            #tub.setOption("logRemoteFailures", True)
            tub.setOption("expose-remote-exception-types", False)
            tub.setServiceParent(self.parent)
            l = tub.listenOn("tcp:0")
            portnum = l.getPortnum()
            tub.setLocation("localhost:%d" % portnum)

            log.msg("creating client %d: %s" % (i, tub.getShortTubID()))
            if i == 0:
                c = old.IntroducerClient_v1(tub, self.introducer_furl,
                                            NICKNAME % str(i), "version",
                                            "oldest")
            else:
                c = IntroducerClient(tub, self.introducer_furl,
                                     NICKNAME % str(i), "version", "oldest",
                                     {"component": "component-v1"})
            received_announcements[c] = {}

            def got(key_s_or_tubid, ann, announcements, i):
                if i == 0:
                    index = get_tubid_string_from_ann(ann)
                else:
                    index = key_s_or_tubid or get_tubid_string_from_ann(ann)
                announcements[index] = ann

            c.subscribe_to("storage", got, received_announcements[c], i)
            subscribing_clients.append(c)
            expected_announcements[
                i] += 1  # all expect a 'storage' announcement

            node_furl = tub.registerReference(Referenceable())
            if i < NUM_STORAGE:
                if i == 0:
                    c.publish(node_furl, "storage", "ri_name")
                    printable_serverids[i] = get_tubid_string(node_furl)
                elif i == 1:
                    # sign the announcement
                    privkey_s, pubkey_s = keyutil.make_keypair()
                    privkey, _ignored = keyutil.parse_privkey(privkey_s)
                    privkeys[c] = privkey
                    c.publish("storage", make_ann(node_furl), privkey)
                    if server_version == V1:
                        printable_serverids[i] = get_tubid_string(node_furl)
                    else:
                        assert pubkey_s.startswith("pub-")
                        printable_serverids[i] = pubkey_s[len("pub-"):]
                else:
                    c.publish("storage", make_ann(node_furl))
                    printable_serverids[i] = get_tubid_string(node_furl)
                publishing_clients.append(c)
            else:
                # the last one does not publish anything
                pass

            if i == 0:
                # users of the V1 client were required to publish a
                # 'stub_client' record (somewhat after they published the
                # 'storage' record), so the introducer could see their
                # version. Match that behavior.
                c.publish(node_furl, "stub_client", "stub_ri_name")

            if i == 2:
                # also publish something that nobody cares about
                boring_furl = tub.registerReference(Referenceable())
                c.publish("boring", make_ann(boring_furl))

            c.setServiceParent(self.parent)
            clients.append(c)
            tubs[c] = tub

        def _wait_for_connected(ign):
            def _connected():
                for c in clients:
                    if not c.connected_to_introducer():
                        return False
                return True

            return self.poll(_connected)

        # we watch the clients to determine when the system has settled down.
        # Then we can look inside the server to assert things about its
        # state.

        def _wait_for_expected_announcements(ign):
            def _got_expected_announcements():
                for i, c in enumerate(subscribing_clients):
                    if len(received_announcements[c]
                           ) < expected_announcements[i]:
                        return False
                return True

            return self.poll(_got_expected_announcements)

        # before shutting down any Tub, we'd like to know that there are no
        # messages outstanding

        def _wait_until_idle(ign):
            def _idle():
                for c in subscribing_clients + publishing_clients:
                    if c._debug_outstanding:
                        return False
                if self.the_introducer._debug_outstanding:
                    return False
                return True

            return self.poll(_idle)

        d = defer.succeed(None)
        d.addCallback(_wait_for_connected)
        d.addCallback(_wait_for_expected_announcements)
        d.addCallback(_wait_until_idle)

        def _check1(res):
            log.msg("doing _check1")
            dc = self.the_introducer._debug_counts
            if server_version == V1:
                # each storage server publishes a record, and (after its
                # 'subscribe' has been ACKed) also publishes a "stub_client".
                # The non-storage client (which subscribes) also publishes a
                # stub_client. There is also one "boring" service. The number
                # of messages is higher, because the stub_clients aren't
                # published until after we get the 'subscribe' ack (since we
                # don't realize that we're dealing with a v1 server [which
                # needs stub_clients] until then), and the act of publishing
                # the stub_client causes us to re-send all previous
                # announcements.
                self.failUnlessEqual(
                    dc["inbound_message"] - dc["inbound_duplicate"],
                    NUM_STORAGE + NUM_CLIENTS + 1)
            else:
                # each storage server publishes a record. There is also one
                # "stub_client" and one "boring"
                self.failUnlessEqual(dc["inbound_message"], NUM_STORAGE + 2)
                self.failUnlessEqual(dc["inbound_duplicate"], 0)
            self.failUnlessEqual(dc["inbound_update"], 0)
            self.failUnlessEqual(dc["inbound_subscribe"], NUM_CLIENTS)
            # the number of outbound messages is tricky: I think it depends
            # upon a race between the publish and the subscribe messages.
            self.failUnless(dc["outbound_message"] > 0)
            # each client subscribes to "storage", and each server publishes
            self.failUnlessEqual(dc["outbound_announcements"],
                                 NUM_STORAGE * NUM_CLIENTS)

            for c in subscribing_clients:
                cdc = c._debug_counts
                self.failUnless(cdc["inbound_message"])
                self.failUnlessEqual(cdc["inbound_announcement"], NUM_STORAGE)
                self.failUnlessEqual(cdc["wrong_service"], 0)
                self.failUnlessEqual(cdc["duplicate_announcement"], 0)
                self.failUnlessEqual(cdc["update"], 0)
                self.failUnlessEqual(cdc["new_announcement"], NUM_STORAGE)
                anns = received_announcements[c]
                self.failUnlessEqual(len(anns), NUM_STORAGE)

                nodeid0 = tubs[clients[0]].tubID
                ann = anns[nodeid0]
                nick = ann["nickname"]
                self.failUnlessEqual(type(nick), unicode)
                self.failUnlessEqual(nick, NICKNAME % "0")
            if server_version == V1:
                for c in publishing_clients:
                    cdc = c._debug_counts
                    expected = 1  # storage
                    if c is clients[2]:
                        expected += 1  # boring
                    if c is not clients[0]:
                        # the v2 client tries to call publish_v2, which fails
                        # because the server is v1. It then re-sends
                        # everything it has so far, plus a stub_client record
                        expected = 2 * expected + 1
                    if c is clients[0]:
                        # we always tell the v1 client to send stub_client
                        expected += 1
                    self.failUnlessEqual(cdc["outbound_message"], expected)
            else:
                for c in publishing_clients:
                    cdc = c._debug_counts
                    expected = 1
                    if c in [
                            clients[0],  # stub_client
                            clients[2],  # boring
                    ]:
                        expected = 2
                    self.failUnlessEqual(cdc["outbound_message"], expected)
            # now check the web status, make sure it renders without error
            ir = introweb.IntroducerRoot(self.parent)
            self.parent.nodeid = "NODEID"
            text = ir.renderSynchronously().decode("utf-8")
            self.failUnlessIn(NICKNAME % "0", text)  # the v1 client
            self.failUnlessIn(NICKNAME % "1", text)  # a v2 client
            for i in range(NUM_STORAGE):
                self.failUnlessIn(printable_serverids[i], text,
                                  (i, printable_serverids[i], text))
                # make sure there isn't a double-base32ed string too
                self.failIfIn(idlib.nodeid_b2a(printable_serverids[i]), text,
                              (i, printable_serverids[i], text))
            log.msg("_check1 done")

        d.addCallback(_check1)

        # force an introducer reconnect, by shutting down the Tub it's using
        # and starting a new Tub (with the old introducer). Everybody should
        # reconnect and republish, but the introducer should ignore the
        # republishes as duplicates. However, because the server doesn't know
        # what each client does and does not know, it will send them a copy
        # of the current announcement table anyway.

        d.addCallback(lambda _ign: log.msg("shutting down introducer's Tub"))
        d.addCallback(lambda _ign: self.central_tub.disownServiceParent())

        def _wait_for_introducer_loss(ign):
            def _introducer_lost():
                for c in clients:
                    if c.connected_to_introducer():
                        return False
                return True

            return self.poll(_introducer_lost)

        d.addCallback(_wait_for_introducer_loss)

        def _restart_introducer_tub(_ign):
            log.msg("restarting introducer's Tub")
            # reset counters
            for i in range(NUM_CLIENTS):
                c = subscribing_clients[i]
                for k in c._debug_counts:
                    c._debug_counts[k] = 0
            for k in self.the_introducer._debug_counts:
                self.the_introducer._debug_counts[k] = 0
            expected_announcements[i] += 1  # new 'storage' for everyone
            self.create_tub(self.central_portnum)
            newfurl = self.central_tub.registerReference(self.the_introducer,
                                                         furlFile=iff)
            assert newfurl == self.introducer_furl

        d.addCallback(_restart_introducer_tub)

        d.addCallback(_wait_for_connected)
        d.addCallback(_wait_for_expected_announcements)
        d.addCallback(_wait_until_idle)
        d.addCallback(lambda _ign: log.msg(" reconnected"))

        # TODO: publish something while the introducer is offline, then
        # confirm it gets delivered when the connection is reestablished
        def _check2(res):
            log.msg("doing _check2")
            # assert that the introducer sent out new messages, one per
            # subscriber
            dc = self.the_introducer._debug_counts
            self.failUnlessEqual(dc["outbound_announcements"],
                                 NUM_STORAGE * NUM_CLIENTS)
            self.failUnless(dc["outbound_message"] > 0)
            self.failUnlessEqual(dc["inbound_subscribe"], NUM_CLIENTS)
            for c in subscribing_clients:
                cdc = c._debug_counts
                self.failUnlessEqual(cdc["inbound_message"], 1)
                self.failUnlessEqual(cdc["inbound_announcement"], NUM_STORAGE)
                self.failUnlessEqual(cdc["new_announcement"], 0)
                self.failUnlessEqual(cdc["wrong_service"], 0)
                self.failUnlessEqual(cdc["duplicate_announcement"],
                                     NUM_STORAGE)

        d.addCallback(_check2)

        # Then force an introducer restart, by shutting down the Tub,
        # destroying the old introducer, and starting a new Tub+Introducer.
        # Everybody should reconnect and republish, and the (new) introducer
        # will distribute the new announcements, but the clients should
        # ignore the republishes as duplicates.

        d.addCallback(lambda _ign: log.msg("shutting down introducer"))
        d.addCallback(lambda _ign: self.central_tub.disownServiceParent())
        d.addCallback(_wait_for_introducer_loss)
        d.addCallback(lambda _ign: log.msg("introducer lost"))

        def _restart_introducer(_ign):
            log.msg("restarting introducer")
            self.create_tub(self.central_portnum)
            # reset counters
            for i in range(NUM_CLIENTS):
                c = subscribing_clients[i]
                for k in c._debug_counts:
                    c._debug_counts[k] = 0
            expected_announcements[i] += 1  # new 'storage' for everyone
            if server_version == V1:
                introducer = old.IntroducerService_v1()
            else:
                introducer = IntroducerService()
            self.the_introducer = introducer
            newfurl = self.central_tub.registerReference(self.the_introducer,
                                                         furlFile=iff)
            assert newfurl == self.introducer_furl

        d.addCallback(_restart_introducer)

        d.addCallback(_wait_for_connected)
        d.addCallback(_wait_for_expected_announcements)
        d.addCallback(_wait_until_idle)

        def _check3(res):
            log.msg("doing _check3")
            dc = self.the_introducer._debug_counts
            self.failUnlessEqual(dc["outbound_announcements"],
                                 NUM_STORAGE * NUM_CLIENTS)
            self.failUnless(dc["outbound_message"] > 0)
            self.failUnlessEqual(dc["inbound_subscribe"], NUM_CLIENTS)
            for c in subscribing_clients:
                cdc = c._debug_counts
                self.failUnless(cdc["inbound_message"] > 0)
                self.failUnlessEqual(cdc["inbound_announcement"], NUM_STORAGE)
                self.failUnlessEqual(cdc["new_announcement"], 0)
                self.failUnlessEqual(cdc["wrong_service"], 0)
                self.failUnlessEqual(cdc["duplicate_announcement"],
                                     NUM_STORAGE)

        d.addCallback(_check3)
        return d