Example #1
    def do_system_test(self):
        self.create_tub()
        introducer = IntroducerService()
        introducer.setServiceParent(self.parent)
        iff = os.path.join(self.basedir, "introducer.furl")
        tub = self.central_tub
        ifurl = self.central_tub.registerReference(introducer, furlFile=iff)
        self.introducer_furl = ifurl

        # we have 5 clients who publish themselves as storage servers, and a
        # sixth which does not. All 6 clients subscribe to hear about
        # storage. When the connections are fully established, all six nodes
        # should have 5 connections each.
        NUM_STORAGE = 5
        NUM_CLIENTS = 6

        clients = []
        tubs = {}
        received_announcements = {}
        subscribing_clients = []
        publishing_clients = []
        printable_serverids = {}
        self.the_introducer = introducer
        privkeys = {}
        pubkeys = {}
        expected_announcements = [0 for c in range(NUM_CLIENTS)]

        for i in range(NUM_CLIENTS):
            tub = Tub()
            #tub.setOption("logLocalFailures", True)
            #tub.setOption("logRemoteFailures", True)
            tub.setOption("expose-remote-exception-types", False)
            tub.setServiceParent(self.parent)
            portnum = iputil.allocate_tcp_port()
            tub.listenOn("tcp:%d" % portnum)
            tub.setLocation("localhost:%d" % portnum)

            log.msg("creating client %d: %s" % (i, tub.getShortTubID()))
            c = IntroducerClient(tub, self.introducer_furl,
                                 NICKNAME % str(i),
                                 "version", "oldest",
                                 {"component": "component-v1"}, fakeseq,
                                 FilePath(self.mktemp()))
            received_announcements[c] = {}
            def got(key_s_or_tubid, ann, announcements):
                index = key_s_or_tubid or get_tubid_string_from_ann(ann)
                announcements[index] = ann
            c.subscribe_to("storage", got, received_announcements[c])
            subscribing_clients.append(c)
            expected_announcements[i] += 1 # all expect a 'storage' announcement

            node_furl = tub.registerReference(Referenceable())
            privkey_s, pubkey_s = keyutil.make_keypair()
            privkey, _ignored = keyutil.parse_privkey(privkey_s)
            privkeys[i] = privkey
            pubkeys[i] = pubkey_s

            if i < NUM_STORAGE:
                # sign all announcements
                c.publish("storage", make_ann(node_furl), privkey)
                assert pubkey_s.startswith("pub-")
                printable_serverids[i] = pubkey_s[len("pub-"):]
                publishing_clients.append(c)
            else:
                # the last one does not publish anything
                pass

            if i == 2:
                # also publish something that nobody cares about
                boring_furl = tub.registerReference(Referenceable())
                c.publish("boring", make_ann(boring_furl), privkey)

            c.setServiceParent(self.parent)
            clients.append(c)
            tubs[c] = tub


        def _wait_for_connected(ign):
            def _connected():
                for c in clients:
                    if not c.connected_to_introducer():
                        return False
                return True
            return self.poll(_connected)

        # we watch the clients to determine when the system has settled down.
        # Then we can look inside the server to assert things about its
        # state.

        def _wait_for_expected_announcements(ign):
            def _got_expected_announcements():
                for i,c in enumerate(subscribing_clients):
                    if len(received_announcements[c]) < expected_announcements[i]:
                        return False
                return True
            return self.poll(_got_expected_announcements)

        # before shutting down any Tub, we'd like to know that there are no
        # messages outstanding

        def _wait_until_idle(ign):
            def _idle():
                for c in subscribing_clients + publishing_clients:
                    if c._debug_outstanding:
                        return False
                if self.the_introducer._debug_outstanding:
                    return False
                return True
            return self.poll(_idle)

        d = defer.succeed(None)
        d.addCallback(_wait_for_connected)
        d.addCallback(_wait_for_expected_announcements)
        d.addCallback(_wait_until_idle)

        def _check1(res):
            log.msg("doing _check1")
            dc = self.the_introducer._debug_counts
            # each storage server publishes a record. There is also one
            # "boring"
            self.failUnlessEqual(dc["inbound_message"], NUM_STORAGE+1)
            self.failUnlessEqual(dc["inbound_duplicate"], 0)
            self.failUnlessEqual(dc["inbound_update"], 0)
            self.failUnlessEqual(dc["inbound_subscribe"], NUM_CLIENTS)
            # the number of outbound messages is tricky; I think it depends
            # upon a race between the publish and the subscribe messages.
            self.failUnless(dc["outbound_message"] > 0)
            # each client subscribes to "storage", and each server publishes
            self.failUnlessEqual(dc["outbound_announcements"],
                                 NUM_STORAGE*NUM_CLIENTS)

            for c in subscribing_clients:
                cdc = c._debug_counts
                self.failUnless(cdc["inbound_message"])
                self.failUnlessEqual(cdc["inbound_announcement"],
                                     NUM_STORAGE)
                self.failUnlessEqual(cdc["wrong_service"], 0)
                self.failUnlessEqual(cdc["duplicate_announcement"], 0)
                self.failUnlessEqual(cdc["update"], 0)
                self.failUnlessEqual(cdc["new_announcement"],
                                     NUM_STORAGE)
                anns = received_announcements[c]
                self.failUnlessEqual(len(anns), NUM_STORAGE)

                serverid0 = printable_serverids[0]
                ann = anns[serverid0]
                nick = ann["nickname"]
                self.failUnlessEqual(type(nick), unicode)
                self.failUnlessEqual(nick, NICKNAME % "0")
            for c in publishing_clients:
                cdc = c._debug_counts
                expected = 1
                if c in [clients[2], # boring
                         ]:
                    expected = 2
                self.failUnlessEqual(cdc["outbound_message"], expected)
            # now check the web status, make sure it renders without error
            ir = introweb.IntroducerRoot(self.parent)
            self.parent.nodeid = "NODEID"
            text = ir.renderSynchronously().decode("utf-8")
            self.failUnlessIn(NICKNAME % "0", text) # a v2 client
            self.failUnlessIn(NICKNAME % "1", text) # another v2 client
            for i in range(NUM_STORAGE):
                self.failUnlessIn(printable_serverids[i], text,
                                  (i,printable_serverids[i],text))
                # make sure there isn't a double-base32ed string too
                self.failIfIn(idlib.nodeid_b2a(printable_serverids[i]), text,
                              (i,printable_serverids[i],text))
            log.msg("_check1 done")
        d.addCallback(_check1)

        # force an introducer reconnect, by shutting down the Tub it's using
        # and starting a new Tub (with the old introducer). Everybody should
        # reconnect and republish, but the introducer should ignore the
        # republishes as duplicates. However, because the server doesn't know
        # what each client does and does not know, it will send them a copy
        # of the current announcement table anyway.

        d.addCallback(lambda _ign: log.msg("shutting down introducer's Tub"))
        d.addCallback(lambda _ign: self.central_tub.disownServiceParent())

        def _wait_for_introducer_loss(ign):
            def _introducer_lost():
                for c in clients:
                    if c.connected_to_introducer():
                        return False
                return True
            return self.poll(_introducer_lost)
        d.addCallback(_wait_for_introducer_loss)

        def _restart_introducer_tub(_ign):
            log.msg("restarting introducer's Tub")
            # reset counters
            for i in range(NUM_CLIENTS):
                c = subscribing_clients[i]
                for k in c._debug_counts:
                    c._debug_counts[k] = 0
            for k in self.the_introducer._debug_counts:
                self.the_introducer._debug_counts[k] = 0
            expected_announcements[i] += 1 # new 'storage' for everyone
            self.create_tub(self.central_portnum)
            newfurl = self.central_tub.registerReference(self.the_introducer,
                                                         furlFile=iff)
            assert newfurl == self.introducer_furl
        d.addCallback(_restart_introducer_tub)

        d.addCallback(_wait_for_connected)
        d.addCallback(_wait_for_expected_announcements)
        d.addCallback(_wait_until_idle)
        d.addCallback(lambda _ign: log.msg(" reconnected"))

        # TODO: publish something while the introducer is offline, then
        # confirm it gets delivered when the connection is reestablished
        def _check2(res):
            log.msg("doing _check2")
            # assert that the introducer sent out new messages, one per
            # subscriber
            dc = self.the_introducer._debug_counts
            self.failUnlessEqual(dc["outbound_announcements"],
                                 NUM_STORAGE*NUM_CLIENTS)
            self.failUnless(dc["outbound_message"] > 0)
            self.failUnlessEqual(dc["inbound_subscribe"], NUM_CLIENTS)
            for c in subscribing_clients:
                cdc = c._debug_counts
                self.failUnlessEqual(cdc["inbound_message"], 1)
                self.failUnlessEqual(cdc["inbound_announcement"], NUM_STORAGE)
                self.failUnlessEqual(cdc["new_announcement"], 0)
                self.failUnlessEqual(cdc["wrong_service"], 0)
                self.failUnlessEqual(cdc["duplicate_announcement"], NUM_STORAGE)
        d.addCallback(_check2)

        # Then force an introducer restart, by shutting down the Tub,
        # destroying the old introducer, and starting a new Tub+Introducer.
        # Everybody should reconnect and republish, and the (new) introducer
        # will distribute the new announcements, but the clients should
        # ignore the republishes as duplicates.

        d.addCallback(lambda _ign: log.msg("shutting down introducer"))
        d.addCallback(lambda _ign: self.central_tub.disownServiceParent())
        d.addCallback(_wait_for_introducer_loss)
        d.addCallback(lambda _ign: log.msg("introducer lost"))

        def _restart_introducer(_ign):
            log.msg("restarting introducer")
            self.create_tub(self.central_portnum)
            # reset counters
            for i in range(NUM_CLIENTS):
                c = subscribing_clients[i]
                for k in c._debug_counts:
                    c._debug_counts[k] = 0
            expected_announcements[i] += 1 # new 'storage' for everyone
            introducer = IntroducerService()
            self.the_introducer = introducer
            newfurl = self.central_tub.registerReference(self.the_introducer,
                                                         furlFile=iff)
            assert newfurl == self.introducer_furl
        d.addCallback(_restart_introducer)

        d.addCallback(_wait_for_connected)
        d.addCallback(_wait_for_expected_announcements)
        d.addCallback(_wait_until_idle)

        def _check3(res):
            log.msg("doing _check3")
            dc = self.the_introducer._debug_counts
            self.failUnlessEqual(dc["outbound_announcements"],
                                 NUM_STORAGE*NUM_CLIENTS)
            self.failUnless(dc["outbound_message"] > 0)
            self.failUnlessEqual(dc["inbound_subscribe"], NUM_CLIENTS)
            for c in subscribing_clients:
                cdc = c._debug_counts
                self.failUnless(cdc["inbound_message"] > 0)
                self.failUnlessEqual(cdc["inbound_announcement"], NUM_STORAGE)
                self.failUnlessEqual(cdc["new_announcement"], 0)
                self.failUnlessEqual(cdc["wrong_service"], 0)
                self.failUnlessEqual(cdc["duplicate_announcement"], NUM_STORAGE)

        d.addCallback(_check3)
        return d
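
The deferred chain above leans entirely on self.poll(...), which these test classes get from a PollMixin-style helper: it returns a Deferred that fires once the supplied predicate returns True. Below is a minimal sketch of such a helper, built only on Twisted's reactor and task.deferLater; it is an illustrative assumption, not the exact helper the test class uses.

from twisted.internet import defer, reactor, task

def poll(check_f, pollinterval=0.01):
    # Sketch of a PollMixin-style poll(): fire the returned Deferred once
    # check_f() returns a true value, re-evaluating it every `pollinterval`
    # seconds. Any exception raised by check_f() errbacks the Deferred.
    d = defer.Deferred()
    def _check():
        try:
            if check_f():
                d.callback(None)
            else:
                task.deferLater(reactor, pollinterval, _check)
        except Exception as e:
            d.errback(e)
    _check()
    return d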
Example #2
    def do_system_test(self, create_introducer):
        self.create_tub()
        introducer = create_introducer()
        introducer.setServiceParent(self.parent)
        iff = os.path.join(self.basedir, "introducer.furl")
        tub = self.central_tub
        ifurl = self.central_tub.registerReference(introducer, furlFile=iff)
        self.introducer_furl = ifurl

        NUMCLIENTS = 5
        # we have 5 clients who publish themselves, and an extra one which
        # does not. When the connections are fully established, all six nodes
        # should have 5 connections each.

        clients = []
        tubs = {}
        received_announcements = {}
        NUM_SERVERS = NUMCLIENTS
        subscribing_clients = []
        publishing_clients = []

        for i in range(NUMCLIENTS+1):
            tub = Tub()
            #tub.setOption("logLocalFailures", True)
            #tub.setOption("logRemoteFailures", True)
            tub.setOption("expose-remote-exception-types", False)
            tub.setServiceParent(self.parent)
            l = tub.listenOn("tcp:0")
            portnum = l.getPortnum()
            tub.setLocation("localhost:%d" % portnum)

            log.msg("creating client %d: %s" % (i, tub.getShortTubID()))
            c = IntroducerClient(tub, self.introducer_furl, u"nickname-%d" % i,
                                 "version", "oldest")
            received_announcements[c] = {}
            def got(serverid, ann_d, announcements):
                announcements[serverid] = ann_d
            c.subscribe_to("storage", got, received_announcements[c])
            subscribing_clients.append(c)

            if i < NUMCLIENTS:
                node_furl = tub.registerReference(Referenceable())
                c.publish(node_furl, "storage", "ri_name")
                publishing_clients.append(c)
            # the last one does not publish anything

            c.setServiceParent(self.parent)
            clients.append(c)
            tubs[c] = tub

        def _wait_for_all_connections():
            for c in subscribing_clients:
                if len(received_announcements[c]) < NUM_SERVERS:
                    return False
            return True
        d = self.poll(_wait_for_all_connections)

        def _check1(res):
            log.msg("doing _check1")
            dc = introducer._debug_counts
            self.failUnlessEqual(dc["inbound_message"], NUM_SERVERS)
            self.failUnlessEqual(dc["inbound_duplicate"], 0)
            self.failUnlessEqual(dc["inbound_update"], 0)
            self.failUnless(dc["outbound_message"])

            for c in clients:
                self.failUnless(c.connected_to_introducer())
            for c in subscribing_clients:
                cdc = c._debug_counts
                self.failUnless(cdc["inbound_message"])
                self.failUnlessEqual(cdc["inbound_announcement"],
                                     NUM_SERVERS)
                self.failUnlessEqual(cdc["wrong_service"], 0)
                self.failUnlessEqual(cdc["duplicate_announcement"], 0)
                self.failUnlessEqual(cdc["update"], 0)
                self.failUnlessEqual(cdc["new_announcement"],
                                     NUM_SERVERS)
                anns = received_announcements[c]
                self.failUnlessEqual(len(anns), NUM_SERVERS)

                nodeid0 = b32decode(tubs[clients[0]].tubID.upper())
                ann_d = anns[nodeid0]
                nick = ann_d["nickname"]
                self.failUnlessEqual(type(nick), unicode)
                self.failUnlessEqual(nick, u"nickname-0")
            for c in publishing_clients:
                cdc = c._debug_counts
                self.failUnlessEqual(cdc["outbound_message"], 1)
        d.addCallback(_check1)

        # force an introducer reconnect, by shutting down the Tub it's using
        # and starting a new Tub (with the old introducer). Everybody should
        # reconnect and republish, but the introducer should ignore the
        # republishes as duplicates. However, because the server doesn't know
        # what each client does and does not know, it will send them a copy
        # of the current announcement table anyway.

        d.addCallback(lambda _ign: log.msg("shutting down introducer's Tub"))
        d.addCallback(lambda _ign: self.central_tub.disownServiceParent())

        def _wait_for_introducer_loss():
            for c in clients:
                if c.connected_to_introducer():
                    return False
            return True
        d.addCallback(lambda res: self.poll(_wait_for_introducer_loss))

        def _restart_introducer_tub(_ign):
            log.msg("restarting introducer's Tub")

            dc = introducer._debug_counts
            self.expected_count = dc["inbound_message"] + NUM_SERVERS
            self.expected_subscribe_count = dc["inbound_subscribe"] + NUMCLIENTS+1
            introducer._debug0 = dc["outbound_message"]
            for c in subscribing_clients:
                cdc = c._debug_counts
                c._debug0 = cdc["inbound_message"]

            self.create_tub(self.central_portnum)
            newfurl = self.central_tub.registerReference(introducer,
                                                         furlFile=iff)
            assert newfurl == self.introducer_furl
        d.addCallback(_restart_introducer_tub)

        def _wait_for_introducer_reconnect():
            # wait until:
            #  all clients are connected
            #  the introducer has received publish messages from all of them
            #  the introducer has received subscribe messages from all of them
            #  the introducer has sent (duplicate) announcements to all of them
            #  all clients have received (duplicate) announcements
            dc = introducer._debug_counts
            for c in clients:
                if not c.connected_to_introducer():
                    return False
            if dc["inbound_message"] < self.expected_count:
                return False
            if dc["inbound_subscribe"] < self.expected_subscribe_count:
                return False
            for c in subscribing_clients:
                cdc = c._debug_counts
                if cdc["inbound_message"] < c._debug0+1:
                    return False
            return True
        d.addCallback(lambda res: self.poll(_wait_for_introducer_reconnect))

        def _check2(res):
            log.msg("doing _check2")
            # assert that the introducer sent out new messages, one per
            # subscriber
            dc = introducer._debug_counts
            self.failUnlessEqual(dc["inbound_message"], 2*NUM_SERVERS)
            self.failUnlessEqual(dc["inbound_duplicate"], NUM_SERVERS)
            self.failUnlessEqual(dc["inbound_update"], 0)
            self.failUnlessEqual(dc["outbound_message"],
                                 introducer._debug0 + len(subscribing_clients))
            for c in clients:
                self.failUnless(c.connected_to_introducer())
            for c in subscribing_clients:
                cdc = c._debug_counts
                self.failUnlessEqual(cdc["duplicate_announcement"], NUM_SERVERS)
        d.addCallback(_check2)

        # Then force an introducer restart, by shutting down the Tub,
        # destroying the old introducer, and starting a new Tub+Introducer.
        # Everybody should reconnect and republish, and the (new) introducer
        # will distribute the new announcements, but the clients should
        # ignore the republishes as duplicates.

        d.addCallback(lambda _ign: log.msg("shutting down introducer"))
        d.addCallback(lambda _ign: self.central_tub.disownServiceParent())
        d.addCallback(lambda res: self.poll(_wait_for_introducer_loss))

        def _restart_introducer(_ign):
            log.msg("restarting introducer")
            self.create_tub(self.central_portnum)

            for c in subscribing_clients:
                # record some counters for later comparison. Stash the values
                # on the client itself, because I'm lazy.
                cdc = c._debug_counts
                c._debug1 = cdc["inbound_announcement"]
                c._debug2 = cdc["inbound_message"]
                c._debug3 = cdc["new_announcement"]
            newintroducer = create_introducer()
            self.expected_message_count = NUM_SERVERS
            self.expected_announcement_count = NUM_SERVERS*len(subscribing_clients)
            self.expected_subscribe_count = len(subscribing_clients)
            newfurl = self.central_tub.registerReference(newintroducer,
                                                         furlFile=iff)
            assert newfurl == self.introducer_furl
        d.addCallback(_restart_introducer)
        def _wait_for_introducer_reconnect2():
            # wait until:
            #  all clients are connected
            #  the introducer has received publish messages from all of them
            #  the introducer has received subscribe messages from all of them
            #  the introducer has sent announcements for everybody to everybody
            #  all clients have received all the (duplicate) announcements
            # at that point, the system should be quiescent
            dc = introducer._debug_counts
            for c in clients:
                if not c.connected_to_introducer():
                    return False
            if dc["inbound_message"] < self.expected_message_count:
                return False
            if dc["outbound_announcements"] < self.expected_announcement_count:
                return False
            if dc["inbound_subscribe"] < self.expected_subscribe_count:
                return False
            for c in subscribing_clients:
                cdc = c._debug_counts
                if cdc["inbound_announcement"] < c._debug1+NUM_SERVERS:
                    return False
            return True
        d.addCallback(lambda res: self.poll(_wait_for_introducer_reconnect2))

        def _check3(res):
            log.msg("doing _check3")
            for c in clients:
                self.failUnless(c.connected_to_introducer())
            for c in subscribing_clients:
                cdc = c._debug_counts
                self.failUnless(cdc["inbound_announcement"] > c._debug1)
                self.failUnless(cdc["inbound_message"] > c._debug2)
                # there should have been no new announcements
                self.failUnlessEqual(cdc["new_announcement"], c._debug3)
                # and the right number of duplicate ones. There were
                # NUM_SERVERS from the servertub restart, and there should be
                # another NUM_SERVERS now
                self.failUnlessEqual(cdc["duplicate_announcement"],
                                     2*NUM_SERVERS)

        d.addCallback(_check3)
        return d
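
The counter arithmetic asserted in _check2 and _check3 above follows directly from the topology: each of the NUM_SERVERS publishers republishes after every introducer (or Tub) restart, and the introducer then resends its full announcement table to every subscriber. A back-of-the-envelope sketch of those expectations (plain Python, not part of the test):

NUM_SERVERS = 5
NUM_SUBSCRIBERS = NUM_SERVERS + 1   # five publishers plus one listen-only client

# After the introducer's Tub restart every publisher republishes, so the
# introducer has now seen each record twice; the second copy is a duplicate.
inbound_after_tub_restart = 2 * NUM_SERVERS      # _check2: inbound_message
inbound_duplicates = NUM_SERVERS                 # _check2: inbound_duplicate

# Each restart also makes the introducer resend the whole table to each of
# the NUM_SUBSCRIBERS subscribers, so every subscriber accumulates
# NUM_SERVERS duplicate announcements per restart.
duplicates_after_two_restarts = 2 * NUM_SERVERS  # _check3: duplicate_announcement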
Example #3
    def do_system_test(self):
        self.create_tub()
        introducer = IntroducerService()
        introducer.setServiceParent(self.parent)
        iff = os.path.join(self.basedir, "introducer.furl")
        tub = self.central_tub
        ifurl = self.central_tub.registerReference(introducer, furlFile=iff)
        self.introducer_furl = ifurl

        # we have 5 clients who publish themselves as storage servers, and a
        # sixth which does not. All 6 clients subscribe to hear about
        # storage. When the connections are fully established, all six nodes
        # should have 5 connections each.
        NUM_STORAGE = 5
        NUM_CLIENTS = 6

        clients = []
        tubs = {}
        received_announcements = {}
        subscribing_clients = []
        publishing_clients = []
        printable_serverids = {}
        self.the_introducer = introducer
        privkeys = {}
        pubkeys = {}
        expected_announcements = [0 for c in range(NUM_CLIENTS)]

        for i in range(NUM_CLIENTS):
            tub = Tub()
            #tub.setOption("logLocalFailures", True)
            #tub.setOption("logRemoteFailures", True)
            tub.setOption("expose-remote-exception-types", False)
            tub.setServiceParent(self.parent)
            listenOnUnused(tub)
            log.msg("creating client %d: %s" % (i, tub.getShortTubID()))
            c = IntroducerClient(tub, self.introducer_furl, NICKNAME % str(i),
                                 "version", "oldest",
                                 {"component": "component-v1"}, fakeseq,
                                 FilePath(self.mktemp()))
            received_announcements[c] = {}

            def got(key_s_or_tubid, ann, announcements):
                index = key_s_or_tubid or get_tubid_string_from_ann(ann)
                announcements[index] = ann

            c.subscribe_to("storage", got, received_announcements[c])
            subscribing_clients.append(c)
            expected_announcements[i] += 1  # all expect a 'storage' announcement

            node_furl = tub.registerReference(Referenceable())
            privkey_s, pubkey_s = keyutil.make_keypair()
            privkey, _ignored = keyutil.parse_privkey(privkey_s)
            privkeys[i] = privkey
            pubkeys[i] = pubkey_s

            if i < NUM_STORAGE:
                # sign all announcements
                c.publish("storage", make_ann(node_furl), privkey)
                assert pubkey_s.startswith("pub-")
                printable_serverids[i] = pubkey_s[len("pub-"):]
                publishing_clients.append(c)
            else:
                # the last one does not publish anything
                pass

            if i == 2:
                # also publish something that nobody cares about
                boring_furl = tub.registerReference(Referenceable())
                c.publish("boring", make_ann(boring_furl), privkey)

            c.setServiceParent(self.parent)
            clients.append(c)
            tubs[c] = tub

        def _wait_for_connected(ign):
            def _connected():
                for c in clients:
                    if not c.connected_to_introducer():
                        return False
                return True

            return self.poll(_connected)

        # we watch the clients to determine when the system has settled down.
        # Then we can look inside the server to assert things about its
        # state.

        def _wait_for_expected_announcements(ign):
            def _got_expected_announcements():
                for i, c in enumerate(subscribing_clients):
                    if len(received_announcements[c]) < expected_announcements[i]:
                        return False
                return True

            return self.poll(_got_expected_announcements)

        # before shutting down any Tub, we'd like to know that there are no
        # messages outstanding

        def _wait_until_idle(ign):
            def _idle():
                for c in subscribing_clients + publishing_clients:
                    if c._debug_outstanding:
                        return False
                if self.the_introducer._debug_outstanding:
                    return False
                return True

            return self.poll(_idle)

        d = defer.succeed(None)
        d.addCallback(_wait_for_connected)
        d.addCallback(_wait_for_expected_announcements)
        d.addCallback(_wait_until_idle)

        def _check1(res):
            log.msg("doing _check1")
            dc = self.the_introducer._debug_counts
            # each storage server publishes a record. There is also one
            # "boring"
            self.failUnlessEqual(dc["inbound_message"], NUM_STORAGE + 1)
            self.failUnlessEqual(dc["inbound_duplicate"], 0)
            self.failUnlessEqual(dc["inbound_update"], 0)
            self.failUnlessEqual(dc["inbound_subscribe"], NUM_CLIENTS)
            # the number of outbound messages is tricky; I think it depends
            # upon a race between the publish and the subscribe messages.
            self.failUnless(dc["outbound_message"] > 0)
            # each client subscribes to "storage", and each server publishes
            self.failUnlessEqual(dc["outbound_announcements"],
                                 NUM_STORAGE * NUM_CLIENTS)

            for c in subscribing_clients:
                cdc = c._debug_counts
                self.failUnless(cdc["inbound_message"])
                self.failUnlessEqual(cdc["inbound_announcement"], NUM_STORAGE)
                self.failUnlessEqual(cdc["wrong_service"], 0)
                self.failUnlessEqual(cdc["duplicate_announcement"], 0)
                self.failUnlessEqual(cdc["update"], 0)
                self.failUnlessEqual(cdc["new_announcement"], NUM_STORAGE)
                anns = received_announcements[c]
                self.failUnlessEqual(len(anns), NUM_STORAGE)

                serverid0 = printable_serverids[0]
                ann = anns[serverid0]
                nick = ann["nickname"]
                self.failUnlessEqual(type(nick), unicode)
                self.failUnlessEqual(nick, NICKNAME % "0")
            for c in publishing_clients:
                cdc = c._debug_counts
                expected = 1
                if c in [clients[2]]:  # boring
                    expected = 2
                self.failUnlessEqual(cdc["outbound_message"], expected)
            # now check the web status, make sure it renders without error
            ir = introweb.IntroducerRoot(self.parent)
            self.parent.nodeid = "NODEID"
            text = ir.renderSynchronously().decode("utf-8")
            self.failUnlessIn(NICKNAME % "0", text)  # a v2 client
            self.failUnlessIn(NICKNAME % "1", text)  # another v2 client
            for i in range(NUM_STORAGE):
                self.failUnlessIn(printable_serverids[i], text,
                                  (i, printable_serverids[i], text))
                # make sure there isn't a double-base32ed string too
                self.failIfIn(idlib.nodeid_b2a(printable_serverids[i]), text,
                              (i, printable_serverids[i], text))
            log.msg("_check1 done")

        d.addCallback(_check1)

        # force an introducer reconnect, by shutting down the Tub it's using
        # and starting a new Tub (with the old introducer). Everybody should
        # reconnect and republish, but the introducer should ignore the
        # republishes as duplicates. However, because the server doesn't know
        # what each client does and does not know, it will send them a copy
        # of the current announcement table anyway.

        d.addCallback(lambda _ign: log.msg("shutting down introducer's Tub"))
        d.addCallback(lambda _ign: self.central_tub.disownServiceParent())

        def _wait_for_introducer_loss(ign):
            def _introducer_lost():
                for c in clients:
                    if c.connected_to_introducer():
                        return False
                return True

            return self.poll(_introducer_lost)

        d.addCallback(_wait_for_introducer_loss)

        def _restart_introducer_tub(_ign):
            log.msg("restarting introducer's Tub")
            # reset counters
            for i in range(NUM_CLIENTS):
                c = subscribing_clients[i]
                for k in c._debug_counts:
                    c._debug_counts[k] = 0
            for k in self.the_introducer._debug_counts:
                self.the_introducer._debug_counts[k] = 0
            expected_announcements[i] += 1  # new 'storage' for everyone
            self.create_tub(self.central_portnum)
            newfurl = self.central_tub.registerReference(self.the_introducer,
                                                         furlFile=iff)
            assert newfurl == self.introducer_furl

        d.addCallback(_restart_introducer_tub)

        d.addCallback(_wait_for_connected)
        d.addCallback(_wait_for_expected_announcements)
        d.addCallback(_wait_until_idle)
        d.addCallback(lambda _ign: log.msg(" reconnected"))

        # TODO: publish something while the introducer is offline, then
        # confirm it gets delivered when the connection is reestablished
        def _check2(res):
            log.msg("doing _check2")
            # assert that the introducer sent out new messages, one per
            # subscriber
            dc = self.the_introducer._debug_counts
            self.failUnlessEqual(dc["outbound_announcements"],
                                 NUM_STORAGE * NUM_CLIENTS)
            self.failUnless(dc["outbound_message"] > 0)
            self.failUnlessEqual(dc["inbound_subscribe"], NUM_CLIENTS)
            for c in subscribing_clients:
                cdc = c._debug_counts
                self.failUnlessEqual(cdc["inbound_message"], 1)
                self.failUnlessEqual(cdc["inbound_announcement"], NUM_STORAGE)
                self.failUnlessEqual(cdc["new_announcement"], 0)
                self.failUnlessEqual(cdc["wrong_service"], 0)
                self.failUnlessEqual(cdc["duplicate_announcement"],
                                     NUM_STORAGE)

        d.addCallback(_check2)

        # Then force an introducer restart, by shutting down the Tub,
        # destroying the old introducer, and starting a new Tub+Introducer.
        # Everybody should reconnect and republish, and the (new) introducer
        # will distribute the new announcements, but the clients should
        # ignore the republishes as duplicates.

        d.addCallback(lambda _ign: log.msg("shutting down introducer"))
        d.addCallback(lambda _ign: self.central_tub.disownServiceParent())
        d.addCallback(_wait_for_introducer_loss)
        d.addCallback(lambda _ign: log.msg("introducer lost"))

        def _restart_introducer(_ign):
            log.msg("restarting introducer")
            self.create_tub(self.central_portnum)
            # reset counters
            for i in range(NUM_CLIENTS):
                c = subscribing_clients[i]
                for k in c._debug_counts:
                    c._debug_counts[k] = 0
            expected_announcements[i] += 1  # new 'storage' for everyone
            introducer = IntroducerService()
            self.the_introducer = introducer
            newfurl = self.central_tub.registerReference(self.the_introducer,
                                                         furlFile=iff)
            assert newfurl == self.introducer_furl

        d.addCallback(_restart_introducer)

        d.addCallback(_wait_for_connected)
        d.addCallback(_wait_for_expected_announcements)
        d.addCallback(_wait_until_idle)

        def _check3(res):
            log.msg("doing _check3")
            dc = self.the_introducer._debug_counts
            self.failUnlessEqual(dc["outbound_announcements"],
                                 NUM_STORAGE * NUM_CLIENTS)
            self.failUnless(dc["outbound_message"] > 0)
            self.failUnlessEqual(dc["inbound_subscribe"], NUM_CLIENTS)
            for c in subscribing_clients:
                cdc = c._debug_counts
                self.failUnless(cdc["inbound_message"] > 0)
                self.failUnlessEqual(cdc["inbound_announcement"], NUM_STORAGE)
                self.failUnlessEqual(cdc["new_announcement"], 0)
                self.failUnlessEqual(cdc["wrong_service"], 0)
                self.failUnlessEqual(cdc["duplicate_announcement"],
                                     NUM_STORAGE)

        d.addCallback(_check3)
        return d
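
Examples #1 and #3 rely on a few helpers defined elsewhere in the test module (NICKNAME, fakeseq, make_ann, get_tubid_string_from_ann) whose definitions are not shown here. The sketch below is only a plausible reconstruction of the shapes those helpers need for the code above to run; every detail in it is an assumption, not the original definitions.

# Illustrative reconstructions -- the real test module defines its own versions.
NICKNAME = u"nickname-%s"   # assumed: a unicode format string taking one value

def fakeseq():
    # assumed: the client wants a (sequence-number, nonce) pair per announcement
    return 1, "nonce"

def make_ann(furl):
    # assumed: an announcement is a dict that at least carries the node's FURL
    return {"anonymous-storage-FURL": furl}

def get_tubid_string_from_ann(ann):
    # assumed: when an announcement is unsigned, fall back to the tubid
    # embedded in its FURL (pb://<tubid>@<hints>/<swissnum>)
    furl = ann["anonymous-storage-FURL"]
    return furl[len("pb://"):].split("@")[0]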
Example #4
    def do_system_test(self, create_introducer):
        self.create_tub()
        introducer = create_introducer()
        introducer.setServiceParent(self.parent)
        iff = os.path.join(self.basedir, "introducer.furl")
        tub = self.central_tub
        ifurl = self.central_tub.registerReference(introducer, furlFile=iff)
        self.introducer_furl = ifurl

        NUMCLIENTS = 5
        # we have 5 clients who publish themselves, and an extra one which
        # does not. When the connections are fully established, all six nodes
        # should have 5 connections each.

        clients = []
        tubs = {}
        received_announcements = {}
        NUM_SERVERS = NUMCLIENTS
        subscribing_clients = []
        publishing_clients = []

        for i in range(NUMCLIENTS + 1):
            tub = Tub()
            #tub.setOption("logLocalFailures", True)
            #tub.setOption("logRemoteFailures", True)
            tub.setOption("expose-remote-exception-types", False)
            tub.setServiceParent(self.parent)
            l = tub.listenOn("tcp:0")
            portnum = l.getPortnum()
            tub.setLocation("localhost:%d" % portnum)

            log.msg("creating client %d: %s" % (i, tub.getShortTubID()))
            c = IntroducerClient(tub, self.introducer_furl, u"nickname-%d" % i,
                                 "version", "oldest")
            received_announcements[c] = {}

            def got(serverid, ann_d, announcements):
                announcements[serverid] = ann_d

            c.subscribe_to("storage", got, received_announcements[c])
            subscribing_clients.append(c)

            if i < NUMCLIENTS:
                node_furl = tub.registerReference(Referenceable())
                c.publish(node_furl, "storage", "ri_name")
                publishing_clients.append(c)
            # the last one does not publish anything

            c.setServiceParent(self.parent)
            clients.append(c)
            tubs[c] = tub

        def _wait_for_all_connections():
            for c in subscribing_clients:
                if len(received_announcements[c]) < NUM_SERVERS:
                    return False
            return True

        d = self.poll(_wait_for_all_connections)

        def _check1(res):
            log.msg("doing _check1")
            dc = introducer._debug_counts
            self.failUnlessEqual(dc["inbound_message"], NUM_SERVERS)
            self.failUnlessEqual(dc["inbound_duplicate"], 0)
            self.failUnlessEqual(dc["inbound_update"], 0)
            self.failUnless(dc["outbound_message"])

            for c in clients:
                self.failUnless(c.connected_to_introducer())
            for c in subscribing_clients:
                cdc = c._debug_counts
                self.failUnless(cdc["inbound_message"])
                self.failUnlessEqual(cdc["inbound_announcement"], NUM_SERVERS)
                self.failUnlessEqual(cdc["wrong_service"], 0)
                self.failUnlessEqual(cdc["duplicate_announcement"], 0)
                self.failUnlessEqual(cdc["update"], 0)
                self.failUnlessEqual(cdc["new_announcement"], NUM_SERVERS)
                anns = received_announcements[c]
                self.failUnlessEqual(len(anns), NUM_SERVERS)

                nodeid0 = b32decode(tubs[clients[0]].tubID.upper())
                ann_d = anns[nodeid0]
                nick = ann_d["nickname"]
                self.failUnlessEqual(type(nick), unicode)
                self.failUnlessEqual(nick, u"nickname-0")
            for c in publishing_clients:
                cdc = c._debug_counts
                self.failUnlessEqual(cdc["outbound_message"], 1)

        d.addCallback(_check1)

        # force an introducer reconnect, by shutting down the Tub it's using
        # and starting a new Tub (with the old introducer). Everybody should
        # reconnect and republish, but the introducer should ignore the
        # republishes as duplicates. However, because the server doesn't know
        # what each client does and does not know, it will send them a copy
        # of the current announcement table anyway.

        d.addCallback(lambda _ign: log.msg("shutting down introducer's Tub"))
        d.addCallback(lambda _ign: self.central_tub.disownServiceParent())

        def _wait_for_introducer_loss():
            for c in clients:
                if c.connected_to_introducer():
                    return False
            return True

        d.addCallback(lambda res: self.poll(_wait_for_introducer_loss))

        def _restart_introducer_tub(_ign):
            log.msg("restarting introducer's Tub")

            dc = introducer._debug_counts
            self.expected_count = dc["inbound_message"] + NUM_SERVERS
            self.expected_subscribe_count = (dc["inbound_subscribe"]
                                             + NUMCLIENTS + 1)
            introducer._debug0 = dc["outbound_message"]
            for c in subscribing_clients:
                cdc = c._debug_counts
                c._debug0 = cdc["inbound_message"]

            self.create_tub(self.central_portnum)
            newfurl = self.central_tub.registerReference(introducer,
                                                         furlFile=iff)
            assert newfurl == self.introducer_furl

        d.addCallback(_restart_introducer_tub)

        def _wait_for_introducer_reconnect():
            # wait until:
            #  all clients are connected
            #  the introducer has received publish messages from all of them
            #  the introducer has received subscribe messages from all of them
            #  the introducer has sent (duplicate) announcements to all of them
            #  all clients have received (duplicate) announcements
            dc = introducer._debug_counts
            for c in clients:
                if not c.connected_to_introducer():
                    return False
            if dc["inbound_message"] < self.expected_count:
                return False
            if dc["inbound_subscribe"] < self.expected_subscribe_count:
                return False
            for c in subscribing_clients:
                cdc = c._debug_counts
                if cdc["inbound_message"] < c._debug0 + 1:
                    return False
            return True

        d.addCallback(lambda res: self.poll(_wait_for_introducer_reconnect))

        def _check2(res):
            log.msg("doing _check2")
            # assert that the introducer sent out new messages, one per
            # subscriber
            dc = introducer._debug_counts
            self.failUnlessEqual(dc["inbound_message"], 2 * NUM_SERVERS)
            self.failUnlessEqual(dc["inbound_duplicate"], NUM_SERVERS)
            self.failUnlessEqual(dc["inbound_update"], 0)
            self.failUnlessEqual(dc["outbound_message"],
                                 introducer._debug0 + len(subscribing_clients))
            for c in clients:
                self.failUnless(c.connected_to_introducer())
            for c in subscribing_clients:
                cdc = c._debug_counts
                self.failUnlessEqual(cdc["duplicate_announcement"],
                                     NUM_SERVERS)

        d.addCallback(_check2)

        # Then force an introducer restart, by shutting down the Tub,
        # destroying the old introducer, and starting a new Tub+Introducer.
        # Everybody should reconnect and republish, and the (new) introducer
        # will distribute the new announcements, but the clients should
        # ignore the republishes as duplicates.

        d.addCallback(lambda _ign: log.msg("shutting down introducer"))
        d.addCallback(lambda _ign: self.central_tub.disownServiceParent())
        d.addCallback(lambda res: self.poll(_wait_for_introducer_loss))

        def _restart_introducer(_ign):
            log.msg("restarting introducer")
            self.create_tub(self.central_portnum)

            for c in subscribing_clients:
                # record some counters for later comparison. Stash the values
                # on the client itself, because I'm lazy.
                cdc = c._debug_counts
                c._debug1 = cdc["inbound_announcement"]
                c._debug2 = cdc["inbound_message"]
                c._debug3 = cdc["new_announcement"]
            newintroducer = create_introducer()
            self.expected_message_count = NUM_SERVERS
            self.expected_announcement_count = (NUM_SERVERS
                                                * len(subscribing_clients))
            self.expected_subscribe_count = len(subscribing_clients)
            newfurl = self.central_tub.registerReference(newintroducer,
                                                         furlFile=iff)
            assert newfurl == self.introducer_furl

        d.addCallback(_restart_introducer)

        def _wait_for_introducer_reconnect2():
            # wait until:
            #  all clients are connected
            #  the introducer has received publish messages from all of them
            #  the introducer has received subscribe messages from all of them
            #  the introducer has sent announcements for everybody to everybody
            #  all clients have received all the (duplicate) announcements
            # at that point, the system should be quiescent
            dc = introducer._debug_counts
            for c in clients:
                if not c.connected_to_introducer():
                    return False
            if dc["inbound_message"] < self.expected_message_count:
                return False
            if dc["outbound_announcements"] < self.expected_announcement_count:
                return False
            if dc["inbound_subscribe"] < self.expected_subscribe_count:
                return False
            for c in subscribing_clients:
                cdc = c._debug_counts
                if cdc["inbound_announcement"] < c._debug1 + NUM_SERVERS:
                    return False
            return True

        d.addCallback(lambda res: self.poll(_wait_for_introducer_reconnect2))

        def _check3(res):
            log.msg("doing _check3")
            for c in clients:
                self.failUnless(c.connected_to_introducer())
            for c in subscribing_clients:
                cdc = c._debug_counts
                self.failUnless(cdc["inbound_announcement"] > c._debug1)
                self.failUnless(cdc["inbound_message"] > c._debug2)
                # there should have been no new announcements
                self.failUnlessEqual(cdc["new_announcement"], c._debug3)
                # and the right number of duplicate ones. There were
                # NUM_SERVERS from the servertub restart, and there should be
                # another NUM_SERVERS now
                self.failUnlessEqual(cdc["duplicate_announcement"],
                                     2 * NUM_SERVERS)

        d.addCallback(_check3)
        return d
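
Examples #2 and #4 take a create_introducer factory, so the real test methods are presumably thin wrappers that choose which introducer implementation to exercise. A hedged sketch of such a driver follows; the method name, basedir value, and the choice of passing the IntroducerService class as the factory are assumptions for illustration only.

    def test_system(self):
        # assumed driver: set up a scratch directory, then run the shared
        # scenario against the current IntroducerService implementation
        self.basedir = "introducer/SystemTest/system"
        os.makedirs(self.basedir)
        return self.do_system_test(IntroducerService)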