def test_failure(self):
    """
    When the introducer speaks a protocol version that is too new, the
    client records an InsufficientVersionError instead of connecting.
    """
    self.basedir = "introducer/NonV1Server/failure"
    os.makedirs(self.basedir)
    self.create_tub()
    # the introducer server only speaks a version we do not understand
    server = TooNewServer()
    server.setServiceParent(self.parent)
    self.introducer_furl = self.central_tub.registerReference(server)

    tub = Tub()
    tub.setOption("expose-remote-exception-types", False)
    tub.setServiceParent(self.parent)
    listener = tub.listenOn("tcp:0")
    tub.setLocation("localhost:%d" % listener.getPortnum())

    client = IntroducerClient(tub, self.introducer_furl,
                              u"nickname-client", "version", "oldest")
    announcements = {}
    def got(serverid, ann_d):
        announcements[serverid] = ann_d
    client.subscribe_to("storage", got)
    client.setServiceParent(self.parent)

    # wait for the client to either connect or notice the bad version
    def _got_bad():
        return bool(client._introducer_error) or bool(client._publisher)
    d = self.poll(_got_bad)
    def _done(res):
        self.failUnless(client._introducer_error)
        self.failUnless(
            client._introducer_error.check(InsufficientVersionError))
    d.addCallback(_done)
    return d
def test_failure(self):
    """
    A client connecting to a too-new introducer stores an
    InsufficientVersionError rather than completing the handshake.
    """
    self.basedir = "introducer/NonV1Server/failure"
    os.makedirs(self.basedir)
    self.create_tub()
    # the introducer server only speaks a too-new protocol version
    server = TooNewServer()
    server.setServiceParent(self.parent)
    self.introducer_furl = self.central_tub.registerReference(server)

    tub = Tub()
    tub.setOption("expose-remote-exception-types", False)
    tub.setServiceParent(self.parent)
    listenOnUnused(tub)

    client = IntroducerClient(tub, self.introducer_furl,
                              u"nickname-client", "version", "oldest",
                              fakeseq, FilePath(self.mktemp()))
    announcements = {}
    def got(key_s, ann):
        announcements[key_s] = ann
    client.subscribe_to("storage", got)
    client.setServiceParent(self.parent)

    # wait for the client to connect and notice the bad version
    def _got_bad():
        return bool(client._introducer_error) or bool(client._publisher)
    d = self.poll(_got_bad)
    def _done(res):
        self.failUnless(client._introducer_error)
        self.failUnless(
            client._introducer_error.check(InsufficientVersionError),
            client._introducer_error)
    d.addCallback(_done)
    return d
def test_duplicate_receive_v1(self):
    """
    A v1 announcement repeated verbatim must not re-notify subscribers,
    but a changed announcement for the same FURL must.
    """
    ic = IntroducerClient(None, "introducer.furl", u"my_nickname",
                          "my_version", "oldest_version", {})
    announcements = []
    ic.subscribe_to("storage",
                    lambda key_s, ann: announcements.append(ann))

    furl1 = "pb://[email protected]:36106/gydnpigj2ja2qr2srq4ikjwnl7xfgbra"
    ann1 = (furl1, "storage", "RIStorage", "nick1", "ver23", "ver0")
    ann1b = (furl1, "storage", "RIStorage", "nick1", "ver24", "ver0")
    ca = WrapV2ClientInV1Interface(ic)

    def _check_counts(inbound, new, update, dup):
        # shared assertion helper for the client's debug counters
        dc = ic._debug_counts
        self.failUnlessEqual(dc["inbound_announcement"], inbound)
        self.failUnlessEqual(dc["new_announcement"], new)
        self.failUnlessEqual(dc["update"], update)
        self.failUnlessEqual(dc["duplicate_announcement"], dup)

    ca.remote_announce([ann1])
    d = fireEventually()
    def _then(ign):
        self.failUnlessEqual(len(announcements), 1)
        self.failUnlessEqual(announcements[0]["nickname"], u"nick1")
        self.failUnlessEqual(announcements[0]["my-version"], "ver23")
        _check_counts(1, 1, 0, 0)
        # now send a duplicate announcement: this should not notify clients
        ca.remote_announce([ann1])
        return fireEventually()
    d.addCallback(_then)
    def _then2(ign):
        self.failUnlessEqual(len(announcements), 1)
        _check_counts(2, 1, 0, 1)
        # and a replacement announcement: same FURL, new other stuff.
        # Clients should be notified.
        ca.remote_announce([ann1b])
        return fireEventually()
    d.addCallback(_then2)
    def _then3(ign):
        self.failUnlessEqual(len(announcements), 2)
        _check_counts(3, 1, 1, 1)
        # test that the other stuff changed
        self.failUnlessEqual(announcements[-1]["nickname"], u"nick1")
        self.failUnlessEqual(announcements[-1]["my-version"], "ver24")
    d.addCallback(_then3)
    return d
def test_unsigned_announcement(self):
    """
    An incorrectly signed announcement is not delivered to subscribers.
    """
    private_key, public_key = ed25519.create_signing_keypair()
    public_key_str = ed25519.string_from_verifying_key(public_key)
    ic = IntroducerClient(
        Tub(),
        "pb://",
        u"fake_nick",
        "0.0.0",
        "1.2.3",
        (0, u"i am a nonce"),
        FilePath(self.mktemp()),
    )
    received = {}
    ic.subscribe_to("good-stuff", partial(setitem, received))

    # First deliver a correctly signed message to prove the test
    # machinery itself works.
    ann = {"service-name": "good-stuff", "payload": "hello"}
    ann_t = sign_to_foolscap(ann, private_key)
    ic.got_announcements([ann_t])
    self.assertEqual(
        {public_key_str[len("pub-"):]: ann},
        received,
    )
    received.clear()

    # Now deliver one whose signature no longer matches and observe
    # that it never reaches the subscriber.
    ann = {"service-name": "good-stuff", "payload": "bad stuff"}
    (msg, sig, key) = sign_to_foolscap(ann, private_key)
    # Drop a base32 word from the middle of the key to invalidate the
    # signature.
    sig_a = bytearray(sig)
    del sig_a[20:22]
    sig = bytes(sig_a)
    ann_t = (msg, sig, key)
    ic.got_announcements([ann_t])
    # Nothing should have been recorded: the invalid signature must
    # cause the announcement to be dropped.
    self.assertEqual({}, received)
def test_duplicate_receive_v1(self):
    """
    Re-sending an identical v1 announcement must be counted as a
    duplicate and suppressed; an updated announcement for the same FURL
    must reach subscribers.
    """
    ic = IntroducerClient(None, "introducer.furl", u"my_nickname",
                          "my_version", "oldest_version", {})
    announcements = []
    ic.subscribe_to("storage",
                    lambda key_s, ann: announcements.append(ann))

    furl1 = "pb://[email protected]:36106/gydnpigj2ja2qr2srq4ikjwnl7xfgbra"
    ann1 = (furl1, "storage", "RIStorage", "nick1", "ver23", "ver0")
    ann1b = (furl1, "storage", "RIStorage", "nick1", "ver24", "ver0")
    ca = WrapV2ClientInV1Interface(ic)

    ca.remote_announce([ann1])
    d = fireEventually()
    def _after_initial(ign):
        self.failUnlessEqual(len(announcements), 1)
        self.failUnlessEqual(announcements[0]["nickname"], u"nick1")
        self.failUnlessEqual(announcements[0]["my-version"], "ver23")
        self.failUnlessEqual(ic._debug_counts["inbound_announcement"], 1)
        self.failUnlessEqual(ic._debug_counts["new_announcement"], 1)
        self.failUnlessEqual(ic._debug_counts["update"], 0)
        self.failUnlessEqual(ic._debug_counts["duplicate_announcement"], 0)
        # now send a duplicate announcement: this should not notify clients
        ca.remote_announce([ann1])
        return fireEventually()
    d.addCallback(_after_initial)
    def _after_duplicate(ign):
        self.failUnlessEqual(len(announcements), 1)
        self.failUnlessEqual(ic._debug_counts["inbound_announcement"], 2)
        self.failUnlessEqual(ic._debug_counts["new_announcement"], 1)
        self.failUnlessEqual(ic._debug_counts["update"], 0)
        self.failUnlessEqual(ic._debug_counts["duplicate_announcement"], 1)
        # and a replacement announcement: same FURL, new other stuff.
        # Clients should be notified.
        ca.remote_announce([ann1b])
        return fireEventually()
    d.addCallback(_after_duplicate)
    def _after_replacement(ign):
        self.failUnlessEqual(len(announcements), 2)
        self.failUnlessEqual(ic._debug_counts["inbound_announcement"], 3)
        self.failUnlessEqual(ic._debug_counts["new_announcement"], 1)
        self.failUnlessEqual(ic._debug_counts["update"], 1)
        self.failUnlessEqual(ic._debug_counts["duplicate_announcement"], 1)
        # test that the other stuff changed
        self.failUnlessEqual(announcements[-1]["nickname"], u"nick1")
        self.failUnlessEqual(announcements[-1]["my-version"], "ver24")
    d.addCallback(_after_replacement)
    return d
def test_id_collision(self):
    """
    A tubid-keyed (v1) announcement whose tubid happens to equal an
    existing pubkey-based serverid must not replace that entry.
    """
    ic = IntroducerClient(None, "introducer.furl", u"my_nickname",
                          "my_version", "oldest_version", {})
    announcements = []
    ic.subscribe_to("storage",
                    lambda key_s, ann: announcements.append(ann))

    sk_s, vk_s = keyutil.make_keypair()
    sk, _ignored = keyutil.parse_privkey(sk_s)
    keyid = keyutil.remove_prefix(vk_s, "pub-v0-")
    furl1 = "pb://[email protected]:123/short" # base32("short")
    # craft a tubid that collides with the pubkey-based serverid
    furl2 = "pb://%[email protected]:36106/swissnum" % keyid

    ann_t = ic.create_announcement("storage", make_ann(furl1), sk)
    ic.remote_announce_v2([ann_t])
    d = fireEventually()
    def _then(ign):
        # first (signed) announcement has been processed
        self.failUnlessEqual(len(announcements), 1)
        self.failUnlessEqual(announcements[0]["anonymous-storage-FURL"],
                             furl1)
        # now submit a second one, with a tubid that happens to look just
        # like the pubkey-based serverid we just processed. They should
        # not overlap.
        ann2 = (furl2, "storage", "RIStorage", "nick1", "ver23", "ver0")
        ca = WrapV2ClientInV1Interface(ic)
        ca.remote_announce([ann2])
        return fireEventually()
    d.addCallback(_then)
    def _then2(ign):
        # if they overlapped, the second announcement would be ignored
        self.failUnlessEqual(len(announcements), 2)
        self.failUnlessEqual(announcements[1]["anonymous-storage-FURL"],
                             furl2)
    d.addCallback(_then2)
    return d
def test_failure(self):
    """
    Connecting to an introducer that is newer than we understand must
    surface an InsufficientVersionError on the client.
    """
    self.basedir = "introducer/NonV1Server/failure"
    os.makedirs(self.basedir)
    self.create_tub()
    too_new = TooNewServer()
    too_new.setServiceParent(self.parent)
    self.introducer_furl = self.central_tub.registerReference(too_new)

    tub = Tub()
    tub.setOption("expose-remote-exception-types", False)
    tub.setServiceParent(self.parent)
    port = tub.listenOn("tcp:0")
    portnum = port.getPortnum()
    tub.setLocation("localhost:%d" % portnum)

    c = IntroducerClient(tub, self.introducer_furl,
                         u"nickname-client", "version", "oldest")
    announcements = {}
    def got(serverid, ann_d):
        announcements[serverid] = ann_d
    c.subscribe_to("storage", got)
    c.setServiceParent(self.parent)

    # poll until the client either connects or records the version error
    def _got_bad():
        return bool(c._introducer_error) or bool(c._publisher)
    d = self.poll(_got_bad)
    def _done(res):
        self.failUnless(c._introducer_error)
        self.failUnless(c._introducer_error.check(InsufficientVersionError))
    d.addCallback(_done)
    return d
def test_id_collision(self):
    """
    Replacement case where a tubid equals a keyid: one announcement
    should not replace the other.
    """
    ic = IntroducerClient(None, "introducer.furl", u"my_nickname",
                          "my_version", "oldest_version", {})
    announcements = []
    def _collect(key_s, ann):
        announcements.append(ann)
    ic.subscribe_to("storage", _collect)

    sk_s, vk_s = keyutil.make_keypair()
    sk, _ignored = keyutil.parse_privkey(sk_s)
    keyid = keyutil.remove_prefix(vk_s, "pub-v0-")
    furl1 = "pb://[email protected]:123/short" # base32("short")
    furl2 = "pb://%[email protected]:36106/swissnum" % keyid

    # deliver a signed (pubkey-indexed) announcement first
    ann_t = ic.create_announcement("storage", make_ann(furl1), sk)
    ic.remote_announce_v2([ann_t])
    d = fireEventually()
    def _then(ign):
        # first announcement has been processed
        self.failUnlessEqual(len(announcements), 1)
        self.failUnlessEqual(announcements[0]["anonymous-storage-FURL"],
                             furl1)
        # now submit a second one, with a tubid that happens to look just
        # like the pubkey-based serverid we just processed. They should
        # not overlap.
        ann2 = (furl2, "storage", "RIStorage", "nick1", "ver23", "ver0")
        ca = WrapV2ClientInV1Interface(ic)
        ca.remote_announce([ann2])
        return fireEventually()
    d.addCallback(_then)
    def _then2(ign):
        # if they overlapped, the second announcement would be ignored
        self.failUnlessEqual(len(announcements), 2)
        self.failUnlessEqual(announcements[1]["anonymous-storage-FURL"],
                             furl2)
    d.addCallback(_then2)
    return d
def do_system_test(self):
    """
    End-to-end introducer test: spin up one IntroducerService plus six
    clients, then exercise publish/subscribe, a Tub-only restart, and a
    full introducer restart, asserting debug counters at each stage.
    Returns a Deferred that fires when all checks have run.
    """
    self.create_tub()
    introducer = IntroducerService()
    introducer.setServiceParent(self.parent)
    iff = os.path.join(self.basedir, "introducer.furl")
    tub = self.central_tub
    ifurl = self.central_tub.registerReference(introducer, furlFile=iff)
    self.introducer_furl = ifurl

    # we have 5 clients who publish themselves as storage servers, and a
    # sixth which does not. All 6 clients subscribe to hear about
    # storage. When the connections are fully established, all six nodes
    # should have 5 connections each.
    NUM_STORAGE = 5
    NUM_CLIENTS = 6
    clients = []
    tubs = {}
    received_announcements = {}
    subscribing_clients = []
    publishing_clients = []
    printable_serverids = {}
    self.the_introducer = introducer
    privkeys = {}
    pubkeys = {}
    expected_announcements = [0 for c in range(NUM_CLIENTS)]
    for i in range(NUM_CLIENTS):
        tub = Tub()
        #tub.setOption("logLocalFailures", True)
        #tub.setOption("logRemoteFailures", True)
        tub.setOption("expose-remote-exception-types", False)
        tub.setServiceParent(self.parent)
        portnum = iputil.allocate_tcp_port()
        tub.listenOn("tcp:%d" % portnum)
        tub.setLocation("localhost:%d" % portnum)
        log.msg("creating client %d: %s" % (i, tub.getShortTubID()))
        c = IntroducerClient(tub, self.introducer_furl,
                             NICKNAME % str(i), "version", "oldest",
                             {"component": "component-v1"}, fakeseq,
                             FilePath(self.mktemp()))
        received_announcements[c] = {}
        # index received announcements by pubkey if present, else tubid
        def got(key_s_or_tubid, ann, announcements):
            index = key_s_or_tubid or get_tubid_string_from_ann(ann)
            announcements[index] = ann
        c.subscribe_to("storage", got, received_announcements[c])
        subscribing_clients.append(c)
        expected_announcements[i] += 1 # all expect a 'storage' announcement

        node_furl = tub.registerReference(Referenceable())
        privkey_s, pubkey_s = keyutil.make_keypair()
        privkey, _ignored = keyutil.parse_privkey(privkey_s)
        privkeys[i] = privkey
        pubkeys[i] = pubkey_s

        if i < NUM_STORAGE:
            # sign all announcements
            c.publish("storage", make_ann(node_furl), privkey)
            assert pubkey_s.startswith("pub-")
            printable_serverids[i] = pubkey_s[len("pub-"):]
            publishing_clients.append(c)
        else:
            # the last one does not publish anything
            pass

        if i == 2:
            # also publish something that nobody cares about
            boring_furl = tub.registerReference(Referenceable())
            c.publish("boring", make_ann(boring_furl), privkey)

        c.setServiceParent(self.parent)
        clients.append(c)
        tubs[c] = tub

    def _wait_for_connected(ign):
        def _connected():
            for c in clients:
                if not c.connected_to_introducer():
                    return False
            return True
        return self.poll(_connected)

    # we watch the clients to determine when the system has settled down.
    # Then we can look inside the server to assert things about its
    # state.

    def _wait_for_expected_announcements(ign):
        def _got_expected_announcements():
            for i,c in enumerate(subscribing_clients):
                if len(received_announcements[c]) < expected_announcements[i]:
                    return False
            return True
        return self.poll(_got_expected_announcements)

    # before shutting down any Tub, we'd like to know that there are no
    # messages outstanding

    def _wait_until_idle(ign):
        def _idle():
            for c in subscribing_clients + publishing_clients:
                if c._debug_outstanding:
                    return False
            if self.the_introducer._debug_outstanding:
                return False
            return True
        return self.poll(_idle)

    d = defer.succeed(None)
    d.addCallback(_wait_for_connected)
    d.addCallback(_wait_for_expected_announcements)
    d.addCallback(_wait_until_idle)

    def _check1(res):
        log.msg("doing _check1")
        dc = self.the_introducer._debug_counts
        # each storage server publishes a record. There is also one
        # "boring"
        self.failUnlessEqual(dc["inbound_message"], NUM_STORAGE+1)
        self.failUnlessEqual(dc["inbound_duplicate"], 0)
        self.failUnlessEqual(dc["inbound_update"], 0)
        self.failUnlessEqual(dc["inbound_subscribe"], NUM_CLIENTS)
        # the number of outbound messages is tricky.. I think it depends
        # upon a race between the publish and the subscribe messages.
        self.failUnless(dc["outbound_message"] > 0)
        # each client subscribes to "storage", and each server publishes
        self.failUnlessEqual(dc["outbound_announcements"],
                             NUM_STORAGE*NUM_CLIENTS)

        for c in subscribing_clients:
            cdc = c._debug_counts
            self.failUnless(cdc["inbound_message"])
            self.failUnlessEqual(cdc["inbound_announcement"], NUM_STORAGE)
            self.failUnlessEqual(cdc["wrong_service"], 0)
            self.failUnlessEqual(cdc["duplicate_announcement"], 0)
            self.failUnlessEqual(cdc["update"], 0)
            self.failUnlessEqual(cdc["new_announcement"], NUM_STORAGE)
            anns = received_announcements[c]
            self.failUnlessEqual(len(anns), NUM_STORAGE)

            serverid0 = printable_serverids[0]
            ann = anns[serverid0]
            nick = ann["nickname"]
            self.failUnlessEqual(type(nick), unicode)
            self.failUnlessEqual(nick, NICKNAME % "0")
        for c in publishing_clients:
            cdc = c._debug_counts
            expected = 1
            if c in [clients[2], # boring
                     ]:
                expected = 2
            self.failUnlessEqual(cdc["outbound_message"], expected)
        # now check the web status, make sure it renders without error
        ir = introweb.IntroducerRoot(self.parent)
        self.parent.nodeid = "NODEID"
        text = ir.renderSynchronously().decode("utf-8")
        self.failUnlessIn(NICKNAME % "0", text) # a v2 client
        self.failUnlessIn(NICKNAME % "1", text) # another v2 client
        for i in range(NUM_STORAGE):
            self.failUnlessIn(printable_serverids[i], text,
                              (i,printable_serverids[i],text))
            # make sure there isn't a double-base32ed string too
            self.failIfIn(idlib.nodeid_b2a(printable_serverids[i]), text,
                          (i,printable_serverids[i],text))
        log.msg("_check1 done")
    d.addCallback(_check1)

    # force an introducer reconnect, by shutting down the Tub it's using
    # and starting a new Tub (with the old introducer). Everybody should
    # reconnect and republish, but the introducer should ignore the
    # republishes as duplicates. However, because the server doesn't know
    # what each client does and does not know, it will send them a copy
    # of the current announcement table anyway.
    d.addCallback(lambda _ign: log.msg("shutting down introducer's Tub"))
    d.addCallback(lambda _ign: self.central_tub.disownServiceParent())

    def _wait_for_introducer_loss(ign):
        def _introducer_lost():
            for c in clients:
                if c.connected_to_introducer():
                    return False
            return True
        return self.poll(_introducer_lost)
    d.addCallback(_wait_for_introducer_loss)

    def _restart_introducer_tub(_ign):
        log.msg("restarting introducer's Tub")
        # reset counters so the post-restart checks start from zero
        for i in range(NUM_CLIENTS):
            c = subscribing_clients[i]
            for k in c._debug_counts:
                c._debug_counts[k] = 0
            for k in self.the_introducer._debug_counts:
                self.the_introducer._debug_counts[k] = 0
            expected_announcements[i] += 1 # new 'storage' for everyone
        self.create_tub(self.central_portnum)
        newfurl = self.central_tub.registerReference(self.the_introducer,
                                                     furlFile=iff)
        # the FURL must survive the Tub restart (same furlFile, same port)
        assert newfurl == self.introducer_furl
    d.addCallback(_restart_introducer_tub)

    d.addCallback(_wait_for_connected)
    d.addCallback(_wait_for_expected_announcements)
    d.addCallback(_wait_until_idle)
    d.addCallback(lambda _ign: log.msg(" reconnected"))

    # TODO: publish something while the introducer is offline, then
    # confirm it gets delivered when the connection is reestablished
    def _check2(res):
        log.msg("doing _check2")
        # assert that the introducer sent out new messages, one per
        # subscriber
        dc = self.the_introducer._debug_counts
        self.failUnlessEqual(dc["outbound_announcements"],
                             NUM_STORAGE*NUM_CLIENTS)
        self.failUnless(dc["outbound_message"] > 0)
        self.failUnlessEqual(dc["inbound_subscribe"], NUM_CLIENTS)
        for c in subscribing_clients:
            cdc = c._debug_counts
            self.failUnlessEqual(cdc["inbound_message"], 1)
            self.failUnlessEqual(cdc["inbound_announcement"], NUM_STORAGE)
            self.failUnlessEqual(cdc["new_announcement"], 0)
            self.failUnlessEqual(cdc["wrong_service"], 0)
            self.failUnlessEqual(cdc["duplicate_announcement"], NUM_STORAGE)
    d.addCallback(_check2)

    # Then force an introducer restart, by shutting down the Tub,
    # destroying the old introducer, and starting a new Tub+Introducer.
    # Everybody should reconnect and republish, and the (new) introducer
    # will distribute the new announcements, but the clients should
    # ignore the republishes as duplicates.
    d.addCallback(lambda _ign: log.msg("shutting down introducer"))
    d.addCallback(lambda _ign: self.central_tub.disownServiceParent())
    d.addCallback(_wait_for_introducer_loss)
    d.addCallback(lambda _ign: log.msg("introducer lost"))

    def _restart_introducer(_ign):
        log.msg("restarting introducer")
        self.create_tub(self.central_portnum)
        # reset counters
        for i in range(NUM_CLIENTS):
            c = subscribing_clients[i]
            for k in c._debug_counts:
                c._debug_counts[k] = 0
            expected_announcements[i] += 1 # new 'storage' for everyone
        introducer = IntroducerService()
        self.the_introducer = introducer
        newfurl = self.central_tub.registerReference(self.the_introducer,
                                                     furlFile=iff)
        assert newfurl == self.introducer_furl
    d.addCallback(_restart_introducer)

    d.addCallback(_wait_for_connected)
    d.addCallback(_wait_for_expected_announcements)
    d.addCallback(_wait_until_idle)

    def _check3(res):
        log.msg("doing _check3")
        dc = self.the_introducer._debug_counts
        self.failUnlessEqual(dc["outbound_announcements"],
                             NUM_STORAGE*NUM_CLIENTS)
        self.failUnless(dc["outbound_message"] > 0)
        self.failUnlessEqual(dc["inbound_subscribe"], NUM_CLIENTS)
        for c in subscribing_clients:
            cdc = c._debug_counts
            self.failUnless(cdc["inbound_message"] > 0)
            self.failUnlessEqual(cdc["inbound_announcement"], NUM_STORAGE)
            self.failUnlessEqual(cdc["new_announcement"], 0)
            self.failUnlessEqual(cdc["wrong_service"], 0)
            self.failUnlessEqual(cdc["duplicate_announcement"], NUM_STORAGE)
    d.addCallback(_check3)
    return d
def test_client_cache(self):
    """
    The client persists announcements to its YAML cache: a replacement
    (same key, higher seqnum) overwrites the cached entry, a different
    key adds a new entry, and a fresh client can reload the cache.
    Generator-based test; driven by the enclosing test framework.
    """
    basedir = "introducer/ClientSeqnums/test_client_cache_1"
    fileutil.make_dirs(basedir)
    cache_filepath = FilePath(os.path.join(basedir, "private",
                                           "introducer_default_cache.yaml"))

    # if storage is enabled, the Client will publish its storage server
    # during startup (although the announcement will wait in a queue
    # until the introducer connection is established). To avoid getting
    # confused by this, disable storage.
    f = open(os.path.join(basedir, "tahoe.cfg"), "w")
    f.write("[client]\n")
    f.write("introducer.furl = nope\n")
    f.write("[storage]\n")
    f.write("enabled = false\n")
    f.close()

    c = TahoeClient(basedir)
    ic = c.introducer_clients[0]
    sk_s, vk_s = keyutil.make_keypair()
    sk, _ignored = keyutil.parse_privkey(sk_s)
    pub1 = keyutil.remove_prefix(vk_s, "pub-")
    furl1 = "pb://[email protected]:123/short" # base32("short")
    ann_t = make_ann_t(ic, furl1, sk, 1)

    ic.got_announcements([ann_t])
    yield flushEventualQueue()

    # check the cache for the announcement
    announcements = self._load_cache(cache_filepath)
    self.failUnlessEqual(len(announcements), 1)
    self.failUnlessEqual(announcements[0]['key_s'], pub1)
    ann = announcements[0]["ann"]
    self.failUnlessEqual(ann["anonymous-storage-FURL"], furl1)
    self.failUnlessEqual(ann["seqnum"], 1)

    # a new announcement that replaces the first should replace the
    # cached entry, not duplicate it
    furl2 = furl1 + "er"
    ann_t2 = make_ann_t(ic, furl2, sk, 2)
    ic.got_announcements([ann_t2])
    yield flushEventualQueue()
    announcements = self._load_cache(cache_filepath)
    self.failUnlessEqual(len(announcements), 1)
    self.failUnlessEqual(announcements[0]['key_s'], pub1)
    ann = announcements[0]["ann"]
    self.failUnlessEqual(ann["anonymous-storage-FURL"], furl2)
    self.failUnlessEqual(ann["seqnum"], 2)

    # but a third announcement with a different key should add to the
    # cache
    sk_s2, vk_s2 = keyutil.make_keypair()
    sk2, _ignored = keyutil.parse_privkey(sk_s2)
    pub2 = keyutil.remove_prefix(vk_s2, "pub-")
    furl3 = "pb://[email protected]:456/short"
    ann_t3 = make_ann_t(ic, furl3, sk2, 1)
    ic.got_announcements([ann_t3])
    yield flushEventualQueue()

    announcements = self._load_cache(cache_filepath)
    self.failUnlessEqual(len(announcements), 2)
    self.failUnlessEqual(set([pub1, pub2]),
                         set([a["key_s"] for a in announcements]))
    self.failUnlessEqual(set([furl2, furl3]),
                         set([a["ann"]["anonymous-storage-FURL"]
                              for a in announcements]))

    # test loading
    yield flushEventualQueue()
    ic2 = IntroducerClient(None, "introducer.furl", u"my_nickname",
                           "my_version", "oldest_version", {}, fakeseq,
                           ic._cache_filepath)
    announcements = {}
    def got(key_s, ann):
        announcements[key_s] = ann
    ic2.subscribe_to("storage", got)
    ic2._load_announcements() # normally happens when connection fails
    yield flushEventualQueue()

    self.failUnless(pub1 in announcements)
    self.failUnlessEqual(announcements[pub1]["anonymous-storage-FURL"],
                         furl2)
    self.failUnlessEqual(announcements[pub2]["anonymous-storage-FURL"],
                         furl3)

    # a fresh TahoeClient should populate its storage broker from the
    # same cache file
    c2 = TahoeClient(basedir)
    c2.introducer_clients[0]._load_announcements()
    yield flushEventualQueue()
    self.assertEqual(c2.storage_broker.get_all_serverids(),
                     frozenset([pub1, pub2]))
def test_client_cache(self):
    """
    Announcements are cached to disk: same-key updates replace the cached
    entry, different keys accumulate, and both a bare IntroducerClient
    and a fresh client created via create_client() can reload the cache.
    Generator-based test; driven by the enclosing test framework.
    """
    basedir = "introducer/ClientSeqnums/test_client_cache_1"
    fileutil.make_dirs(basedir)
    cache_filepath = FilePath(
        os.path.join(basedir, "private", "introducer_default_cache.yaml"))

    # if storage is enabled, the Client will publish its storage server
    # during startup (although the announcement will wait in a queue
    # until the introducer connection is established). To avoid getting
    # confused by this, disable storage.
    f = open(os.path.join(basedir, "tahoe.cfg"), "w")
    f.write("[client]\n")
    f.write("introducer.furl = nope\n")
    f.write("[storage]\n")
    f.write("enabled = false\n")
    f.close()

    c = create_client(basedir)
    ic = c.introducer_clients[0]
    sk_s, vk_s = keyutil.make_keypair()
    sk, _ignored = keyutil.parse_privkey(sk_s)
    pub1 = keyutil.remove_prefix(vk_s, "pub-")
    furl1 = "pb://[email protected]:123/short" # base32("short")
    ann_t = make_ann_t(ic, furl1, sk, 1)

    ic.got_announcements([ann_t])
    yield flushEventualQueue()

    # check the cache for the announcement
    announcements = self._load_cache(cache_filepath)
    self.failUnlessEqual(len(announcements), 1)
    self.failUnlessEqual(announcements[0]['key_s'], pub1)
    ann = announcements[0]["ann"]
    self.failUnlessEqual(ann["anonymous-storage-FURL"], furl1)
    self.failUnlessEqual(ann["seqnum"], 1)

    # a new announcement that replaces the first should replace the
    # cached entry, not duplicate it
    furl2 = furl1 + "er"
    ann_t2 = make_ann_t(ic, furl2, sk, 2)
    ic.got_announcements([ann_t2])
    yield flushEventualQueue()
    announcements = self._load_cache(cache_filepath)
    self.failUnlessEqual(len(announcements), 1)
    self.failUnlessEqual(announcements[0]['key_s'], pub1)
    ann = announcements[0]["ann"]
    self.failUnlessEqual(ann["anonymous-storage-FURL"], furl2)
    self.failUnlessEqual(ann["seqnum"], 2)

    # but a third announcement with a different key should add to the
    # cache
    sk_s2, vk_s2 = keyutil.make_keypair()
    sk2, _ignored = keyutil.parse_privkey(sk_s2)
    pub2 = keyutil.remove_prefix(vk_s2, "pub-")
    furl3 = "pb://[email protected]:456/short"
    ann_t3 = make_ann_t(ic, furl3, sk2, 1)
    ic.got_announcements([ann_t3])
    yield flushEventualQueue()

    announcements = self._load_cache(cache_filepath)
    self.failUnlessEqual(len(announcements), 2)
    self.failUnlessEqual(set([pub1, pub2]),
                         set([a["key_s"] for a in announcements]))
    self.failUnlessEqual(
        set([furl2, furl3]),
        set([a["ann"]["anonymous-storage-FURL"]
             for a in announcements]))

    # test loading
    yield flushEventualQueue()
    ic2 = IntroducerClient(None, "introducer.furl", u"my_nickname",
                           "my_version", "oldest_version", {}, fakeseq,
                           ic._cache_filepath)
    announcements = {}
    def got(key_s, ann):
        announcements[key_s] = ann
    ic2.subscribe_to("storage", got)
    ic2._load_announcements() # normally happens when connection fails
    yield flushEventualQueue()

    self.failUnless(pub1 in announcements)
    self.failUnlessEqual(announcements[pub1]["anonymous-storage-FURL"],
                         furl2)
    self.failUnlessEqual(announcements[pub2]["anonymous-storage-FURL"],
                         furl3)

    # a fresh client should populate its storage broker from the cache
    c2 = create_client(basedir)
    c2.introducer_clients[0]._load_announcements()
    yield flushEventualQueue()
    self.assertEqual(c2.storage_broker.get_all_serverids(),
                     frozenset([pub1, pub2]))
def test_duplicate_receive_v2(self):
    """
    V2 announcement replacement rules: exact duplicates, older seqnums,
    and seqnum-less announcements must not re-fire subscribers; genuinely
    newer announcements (higher seqnum, same or different FURL) must.
    A late subscriber receives only the latest announcement as backlog.
    """
    ic1 = IntroducerClient(None,
                           "introducer.furl", u"my_nickname",
                           "ver23", "oldest_version", {}, fakeseq,
                           FilePath(self.mktemp()))
    # we use a second client just to create a different-looking
    # announcement
    ic2 = IntroducerClient(None,
                           "introducer.furl", u"my_nickname",
                           "ver24","oldest_version",{}, fakeseq,
                           FilePath(self.mktemp()))
    announcements = []
    def _received(key_s, ann):
        announcements.append( (key_s, ann) )
    ic1.subscribe_to("storage", _received)
    furl1 = "pb://[email protected]:36106/gydnp"
    furl1a = "pb://[email protected]:7777/gydnp"
    furl2 = "pb://[email protected]:36106/ttwwoo"

    privkey_s, pubkey_vs = keyutil.make_keypair()
    privkey, _ignored = keyutil.parse_privkey(privkey_s)
    pubkey_s = keyutil.remove_prefix(pubkey_vs, "pub-")

    # ann1: ic1, furl1
    # ann1a: ic1, furl1a (same SturdyRef, different connection hints)
    # ann1b: ic2, furl1
    # ann2: ic2, furl2

    self.ann1 = make_ann_t(ic1, furl1, privkey, seqnum=10)
    self.ann1old = make_ann_t(ic1, furl1, privkey, seqnum=9)
    self.ann1noseqnum = make_ann_t(ic1, furl1, privkey, seqnum=None)
    self.ann1b = make_ann_t(ic2, furl1, privkey, seqnum=11)
    self.ann1a = make_ann_t(ic1, furl1a, privkey, seqnum=12)
    self.ann2 = make_ann_t(ic2, furl2, privkey, seqnum=13)

    ic1.remote_announce_v2([self.ann1]) # queues eventual-send
    d = fireEventually()
    def _then1(ign):
        self.failUnlessEqual(len(announcements), 1)
        key_s,ann = announcements[0]
        self.failUnlessEqual(key_s, pubkey_s)
        self.failUnlessEqual(ann["anonymous-storage-FURL"], furl1)
        self.failUnlessEqual(ann["my-version"], "ver23")
    d.addCallback(_then1)

    # now send a duplicate announcement. This should not fire the
    # subscriber
    d.addCallback(lambda ign: ic1.remote_announce_v2([self.ann1]))
    d.addCallback(fireEventually)
    def _then2(ign):
        self.failUnlessEqual(len(announcements), 1)
    d.addCallback(_then2)

    # an older announcement shouldn't fire the subscriber either
    d.addCallback(lambda ign: ic1.remote_announce_v2([self.ann1old]))
    d.addCallback(fireEventually)
    def _then2a(ign):
        self.failUnlessEqual(len(announcements), 1)
    d.addCallback(_then2a)

    # announcement with no seqnum cannot replace one with-seqnum
    d.addCallback(lambda ign: ic1.remote_announce_v2([self.ann1noseqnum]))
    d.addCallback(fireEventually)
    def _then2b(ign):
        self.failUnlessEqual(len(announcements), 1)
    d.addCallback(_then2b)

    # and a replacement announcement: same FURL, new other stuff. The
    # subscriber *should* be fired.
    d.addCallback(lambda ign: ic1.remote_announce_v2([self.ann1b]))
    d.addCallback(fireEventually)
    def _then3(ign):
        self.failUnlessEqual(len(announcements), 2)
        key_s,ann = announcements[-1]
        self.failUnlessEqual(key_s, pubkey_s)
        self.failUnlessEqual(ann["anonymous-storage-FURL"], furl1)
        self.failUnlessEqual(ann["my-version"], "ver24")
    d.addCallback(_then3)

    # and a replacement announcement with a different FURL (it uses
    # different connection hints)
    d.addCallback(lambda ign: ic1.remote_announce_v2([self.ann1a]))
    d.addCallback(fireEventually)
    def _then4(ign):
        self.failUnlessEqual(len(announcements), 3)
        key_s,ann = announcements[-1]
        self.failUnlessEqual(key_s, pubkey_s)
        self.failUnlessEqual(ann["anonymous-storage-FURL"], furl1a)
        self.failUnlessEqual(ann["my-version"], "ver23")
    d.addCallback(_then4)

    # now add a new subscription, which should be called with the
    # backlog. The introducer only records one announcement per index, so
    # the backlog will only have the latest message.
    announcements2 = []
    def _received2(key_s, ann):
        announcements2.append( (key_s, ann) )
    d.addCallback(lambda ign: ic1.subscribe_to("storage", _received2))
    d.addCallback(fireEventually)
    def _then5(ign):
        self.failUnlessEqual(len(announcements2), 1)
        key_s,ann = announcements2[-1]
        self.failUnlessEqual(key_s, pubkey_s)
        self.failUnlessEqual(ann["anonymous-storage-FURL"], furl1a)
        self.failUnlessEqual(ann["my-version"], "ver23")
    d.addCallback(_then5)
    return d
def do_system_test(self):
    """Full-system exercise of one IntroducerService plus six clients.

    Five clients publish signed 'storage' announcements (one also
    publishes a 'boring' service), a sixth only subscribes.  After the
    system settles, the introducer's Tub is restarted (same introducer
    object), then the introducer itself is replaced; each phase asserts
    the expected publish/subscribe/duplicate counters on both sides.
    Returns a Deferred that fires when all checks pass.
    """
    self.create_tub()
    introducer = IntroducerService()
    introducer.setServiceParent(self.parent)
    iff = os.path.join(self.basedir, "introducer.furl")
    tub = self.central_tub
    ifurl = self.central_tub.registerReference(introducer, furlFile=iff)
    self.introducer_furl = ifurl

    # we have 5 clients who publish themselves as storage servers, and a
    # sixth which does not. All 6 clients subscribe to hear about
    # storage. When the connections are fully established, all six nodes
    # should have 5 connections each.
    NUM_STORAGE = 5
    NUM_CLIENTS = 6
    clients = []
    tubs = {}
    received_announcements = {}
    subscribing_clients = []
    publishing_clients = []
    printable_serverids = {}
    self.the_introducer = introducer
    privkeys = {}
    pubkeys = {}
    expected_announcements = [0 for c in range(NUM_CLIENTS)]

    for i in range(NUM_CLIENTS):
        tub = Tub()
        #tub.setOption("logLocalFailures", True)
        #tub.setOption("logRemoteFailures", True)
        tub.setOption("expose-remote-exception-types", False)
        tub.setServiceParent(self.parent)
        listenOnUnused(tub)
        log.msg("creating client %d: %s" % (i, tub.getShortTubID()))
        c = IntroducerClient(tub, self.introducer_furl,
                             NICKNAME % str(i), "version", "oldest",
                             {"component": "component-v1"}, fakeseq,
                             FilePath(self.mktemp()))
        received_announcements[c] = {}
        # subscriber callback: index announcements by pubkey (or tubid
        # for unsigned ones) into this client's private dict
        def got(key_s_or_tubid, ann, announcements):
            index = key_s_or_tubid or get_tubid_string_from_ann(ann)
            announcements[index] = ann
        c.subscribe_to("storage", got, received_announcements[c])
        subscribing_clients.append(c)
        expected_announcements[i] += 1 # all expect a 'storage' announcement

        node_furl = tub.registerReference(Referenceable())
        privkey_s, pubkey_s = keyutil.make_keypair()
        privkey, _ignored = keyutil.parse_privkey(privkey_s)
        privkeys[i] = privkey
        pubkeys[i] = pubkey_s

        if i < NUM_STORAGE:
            # sign all announcements
            c.publish("storage", make_ann(node_furl), privkey)
            assert pubkey_s.startswith("pub-")
            printable_serverids[i] = pubkey_s[len("pub-"):]
            publishing_clients.append(c)
        else:
            # the last one does not publish anything
            pass

        if i == 2:
            # also publish something that nobody cares about
            boring_furl = tub.registerReference(Referenceable())
            c.publish("boring", make_ann(boring_furl), privkey)

        c.setServiceParent(self.parent)
        clients.append(c)
        tubs[c] = tub

    def _wait_for_connected(ign):
        def _connected():
            for c in clients:
                if not c.connected_to_introducer():
                    return False
            return True
        return self.poll(_connected)

    # we watch the clients to determine when the system has settled down.
    # Then we can look inside the server to assert things about its
    # state.

    def _wait_for_expected_announcements(ign):
        def _got_expected_announcements():
            for i, c in enumerate(subscribing_clients):
                if len(received_announcements[c]) < expected_announcements[i]:
                    return False
            return True
        return self.poll(_got_expected_announcements)

    # before shutting down any Tub, we'd like to know that there are no
    # messages outstanding

    def _wait_until_idle(ign):
        def _idle():
            for c in subscribing_clients + publishing_clients:
                if c._debug_outstanding:
                    return False
            if self.the_introducer._debug_outstanding:
                return False
            return True
        return self.poll(_idle)

    d = defer.succeed(None)
    d.addCallback(_wait_for_connected)
    d.addCallback(_wait_for_expected_announcements)
    d.addCallback(_wait_until_idle)

    def _check1(res):
        log.msg("doing _check1")
        dc = self.the_introducer._debug_counts
        # each storage server publishes a record. There is also one
        # "boring"
        self.failUnlessEqual(dc["inbound_message"], NUM_STORAGE + 1)
        self.failUnlessEqual(dc["inbound_duplicate"], 0)
        self.failUnlessEqual(dc["inbound_update"], 0)
        self.failUnlessEqual(dc["inbound_subscribe"], NUM_CLIENTS)
        # the number of outbound messages is tricky.. I think it depends
        # upon a race between the publish and the subscribe messages.
        self.failUnless(dc["outbound_message"] > 0)
        # each client subscribes to "storage", and each server publishes
        self.failUnlessEqual(dc["outbound_announcements"],
                             NUM_STORAGE * NUM_CLIENTS)

        for c in subscribing_clients:
            cdc = c._debug_counts
            self.failUnless(cdc["inbound_message"])
            self.failUnlessEqual(cdc["inbound_announcement"], NUM_STORAGE)
            self.failUnlessEqual(cdc["wrong_service"], 0)
            self.failUnlessEqual(cdc["duplicate_announcement"], 0)
            self.failUnlessEqual(cdc["update"], 0)
            self.failUnlessEqual(cdc["new_announcement"], NUM_STORAGE)
            anns = received_announcements[c]
            self.failUnlessEqual(len(anns), NUM_STORAGE)

            serverid0 = printable_serverids[0]
            ann = anns[serverid0]
            nick = ann["nickname"]
            self.failUnlessEqual(type(nick), unicode)
            self.failUnlessEqual(nick, NICKNAME % "0")
        for c in publishing_clients:
            cdc = c._debug_counts
            expected = 1
            if c in [clients[2], # boring
                     ]:
                expected = 2
            self.failUnlessEqual(cdc["outbound_message"], expected)
        # now check the web status, make sure it renders without error
        ir = introweb.IntroducerRoot(self.parent)
        self.parent.nodeid = "NODEID"
        text = ir.renderSynchronously().decode("utf-8")
        self.failUnlessIn(NICKNAME % "0", text) # a v2 client
        self.failUnlessIn(NICKNAME % "1", text) # another v2 client
        for i in range(NUM_STORAGE):
            self.failUnlessIn(printable_serverids[i], text,
                              (i, printable_serverids[i], text))
            # make sure there isn't a double-base32ed string too
            self.failIfIn(idlib.nodeid_b2a(printable_serverids[i]), text,
                          (i, printable_serverids[i], text))
        log.msg("_check1 done")
    d.addCallback(_check1)

    # force an introducer reconnect, by shutting down the Tub it's using
    # and starting a new Tub (with the old introducer). Everybody should
    # reconnect and republish, but the introducer should ignore the
    # republishes as duplicates. However, because the server doesn't know
    # what each client does and does not know, it will send them a copy
    # of the current announcement table anyway.
    d.addCallback(lambda _ign: log.msg("shutting down introducer's Tub"))
    d.addCallback(lambda _ign: self.central_tub.disownServiceParent())

    def _wait_for_introducer_loss(ign):
        def _introducer_lost():
            for c in clients:
                if c.connected_to_introducer():
                    return False
            return True
        return self.poll(_introducer_lost)
    d.addCallback(_wait_for_introducer_loss)

    def _restart_introducer_tub(_ign):
        log.msg("restarting introducer's Tub")
        # reset counters
        for i in range(NUM_CLIENTS):
            c = subscribing_clients[i]
            for k in c._debug_counts:
                c._debug_counts[k] = 0
            for k in self.the_introducer._debug_counts:
                self.the_introducer._debug_counts[k] = 0
            expected_announcements[i] += 1 # new 'storage' for everyone
        self.create_tub(self.central_portnum)
        newfurl = self.central_tub.registerReference(self.the_introducer,
                                                     furlFile=iff)
        assert newfurl == self.introducer_furl
    d.addCallback(_restart_introducer_tub)

    d.addCallback(_wait_for_connected)
    d.addCallback(_wait_for_expected_announcements)
    d.addCallback(_wait_until_idle)
    d.addCallback(lambda _ign: log.msg(" reconnected"))

    # TODO: publish something while the introducer is offline, then
    # confirm it gets delivered when the connection is reestablished
    def _check2(res):
        log.msg("doing _check2")
        # assert that the introducer sent out new messages, one per
        # subscriber
        dc = self.the_introducer._debug_counts
        self.failUnlessEqual(dc["outbound_announcements"],
                             NUM_STORAGE * NUM_CLIENTS)
        self.failUnless(dc["outbound_message"] > 0)
        self.failUnlessEqual(dc["inbound_subscribe"], NUM_CLIENTS)
        for c in subscribing_clients:
            cdc = c._debug_counts
            self.failUnlessEqual(cdc["inbound_message"], 1)
            self.failUnlessEqual(cdc["inbound_announcement"], NUM_STORAGE)
            self.failUnlessEqual(cdc["new_announcement"], 0)
            self.failUnlessEqual(cdc["wrong_service"], 0)
            self.failUnlessEqual(cdc["duplicate_announcement"], NUM_STORAGE)
    d.addCallback(_check2)

    # Then force an introducer restart, by shutting down the Tub,
    # destroying the old introducer, and starting a new Tub+Introducer.
    # Everybody should reconnect and republish, and the (new) introducer
    # will distribute the new announcements, but the clients should
    # ignore the republishes as duplicates.
    d.addCallback(lambda _ign: log.msg("shutting down introducer"))
    d.addCallback(lambda _ign: self.central_tub.disownServiceParent())
    d.addCallback(_wait_for_introducer_loss)
    d.addCallback(lambda _ign: log.msg("introducer lost"))

    def _restart_introducer(_ign):
        log.msg("restarting introducer")
        self.create_tub(self.central_portnum)
        # reset counters
        for i in range(NUM_CLIENTS):
            c = subscribing_clients[i]
            for k in c._debug_counts:
                c._debug_counts[k] = 0
            expected_announcements[i] += 1 # new 'storage' for everyone
        introducer = IntroducerService()
        self.the_introducer = introducer
        newfurl = self.central_tub.registerReference(self.the_introducer,
                                                     furlFile=iff)
        assert newfurl == self.introducer_furl
    d.addCallback(_restart_introducer)

    d.addCallback(_wait_for_connected)
    d.addCallback(_wait_for_expected_announcements)
    d.addCallback(_wait_until_idle)

    def _check3(res):
        log.msg("doing _check3")
        dc = self.the_introducer._debug_counts
        self.failUnlessEqual(dc["outbound_announcements"],
                             NUM_STORAGE * NUM_CLIENTS)
        self.failUnless(dc["outbound_message"] > 0)
        self.failUnlessEqual(dc["inbound_subscribe"], NUM_CLIENTS)
        for c in subscribing_clients:
            cdc = c._debug_counts
            self.failUnless(cdc["inbound_message"] > 0)
            self.failUnlessEqual(cdc["inbound_announcement"], NUM_STORAGE)
            self.failUnlessEqual(cdc["new_announcement"], 0)
            self.failUnlessEqual(cdc["wrong_service"], 0)
            self.failUnlessEqual(cdc["duplicate_announcement"], NUM_STORAGE)
    d.addCallback(_check3)

    return d
def test_duplicate_receive_v2(self):
    """Signed (v2) announcements: duplicates, stale seqnums, and
    seqnum-less replacements must not re-fire the subscriber; newer
    seqnums (even for the same FURL) and changed connection hints must.
    A late subscriber receives only the latest retained announcement.
    Returns a Deferred chained through foolscap's eventual-send queue.
    """
    ic1 = IntroducerClient(None,
                           "introducer.furl", u"my_nickname",
                           "ver23", "oldest_version", {}, fakeseq,
                           FilePath(self.mktemp()))
    # we use a second client just to create a different-looking
    # announcement
    ic2 = IntroducerClient(None,
                           "introducer.furl", u"my_nickname",
                           "ver24", "oldest_version", {}, fakeseq,
                           FilePath(self.mktemp()))
    announcements = []
    def _received(key_s, ann):
        announcements.append((key_s, ann))
    ic1.subscribe_to("storage", _received)
    furl1 = "pb://[email protected]:36106/gydnp"
    furl1a = "pb://[email protected]:7777/gydnp"
    furl2 = "pb://[email protected]:36106/ttwwoo"

    privkey_s, pubkey_vs = keyutil.make_keypair()
    privkey, _ignored = keyutil.parse_privkey(privkey_s)
    pubkey_s = keyutil.remove_prefix(pubkey_vs, "pub-")

    # ann1: ic1, furl1
    # ann1a: ic1, furl1a (same SturdyRef, different connection hints)
    # ann1b: ic2, furl1
    # ann2: ic2, furl2

    self.ann1 = make_ann_t(ic1, furl1, privkey, seqnum=10)
    self.ann1old = make_ann_t(ic1, furl1, privkey, seqnum=9)
    self.ann1noseqnum = make_ann_t(ic1, furl1, privkey, seqnum=None)
    self.ann1b = make_ann_t(ic2, furl1, privkey, seqnum=11)
    self.ann1a = make_ann_t(ic1, furl1a, privkey, seqnum=12)
    self.ann2 = make_ann_t(ic2, furl2, privkey, seqnum=13)

    ic1.remote_announce_v2([self.ann1]) # queues eventual-send
    d = fireEventually()
    def _then1(ign):
        self.failUnlessEqual(len(announcements), 1)
        key_s, ann = announcements[0]
        self.failUnlessEqual(key_s, pubkey_s)
        self.failUnlessEqual(ann["anonymous-storage-FURL"], furl1)
        self.failUnlessEqual(ann["my-version"], "ver23")
    d.addCallback(_then1)

    # now send a duplicate announcement. This should not fire the
    # subscriber
    d.addCallback(lambda ign: ic1.remote_announce_v2([self.ann1]))
    d.addCallback(fireEventually)
    def _then2(ign):
        self.failUnlessEqual(len(announcements), 1)
    d.addCallback(_then2)

    # an older announcement shouldn't fire the subscriber either
    d.addCallback(lambda ign: ic1.remote_announce_v2([self.ann1old]))
    d.addCallback(fireEventually)
    def _then2a(ign):
        self.failUnlessEqual(len(announcements), 1)
    d.addCallback(_then2a)

    # announcement with no seqnum cannot replace one with-seqnum
    d.addCallback(lambda ign: ic1.remote_announce_v2([self.ann1noseqnum]))
    d.addCallback(fireEventually)
    def _then2b(ign):
        self.failUnlessEqual(len(announcements), 1)
    d.addCallback(_then2b)

    # and a replacement announcement: same FURL, new other stuff. The
    # subscriber *should* be fired.
    d.addCallback(lambda ign: ic1.remote_announce_v2([self.ann1b]))
    d.addCallback(fireEventually)
    def _then3(ign):
        self.failUnlessEqual(len(announcements), 2)
        key_s, ann = announcements[-1]
        self.failUnlessEqual(key_s, pubkey_s)
        self.failUnlessEqual(ann["anonymous-storage-FURL"], furl1)
        self.failUnlessEqual(ann["my-version"], "ver24")
    d.addCallback(_then3)

    # and a replacement announcement with a different FURL (it uses
    # different connection hints)
    d.addCallback(lambda ign: ic1.remote_announce_v2([self.ann1a]))
    d.addCallback(fireEventually)
    def _then4(ign):
        self.failUnlessEqual(len(announcements), 3)
        key_s, ann = announcements[-1]
        self.failUnlessEqual(key_s, pubkey_s)
        self.failUnlessEqual(ann["anonymous-storage-FURL"], furl1a)
        self.failUnlessEqual(ann["my-version"], "ver23")
    d.addCallback(_then4)

    # now add a new subscription, which should be called with the
    # backlog. The introducer only records one announcement per index, so
    # the backlog will only have the latest message.
    announcements2 = []
    def _received2(key_s, ann):
        announcements2.append((key_s, ann))
    d.addCallback(lambda ign: ic1.subscribe_to("storage", _received2))
    d.addCallback(fireEventually)
    def _then5(ign):
        self.failUnlessEqual(len(announcements2), 1)
        key_s, ann = announcements2[-1]
        self.failUnlessEqual(key_s, pubkey_s)
        self.failUnlessEqual(ann["anonymous-storage-FURL"], furl1a)
        self.failUnlessEqual(ann["my-version"], "ver23")
    d.addCallback(_then5)
    return d
def do_system_test(self, create_introducer):
    """Full-system exercise of the (v1-era) introducer protocol.

    Five clients publish unsigned 'storage' announcements, a sixth only
    subscribes.  After settling, the introducer's Tub is restarted (same
    introducer object), then the introducer itself is replaced via
    `create_introducer`; each phase asserts publish/subscribe/duplicate
    counters.  Returns a Deferred that fires when all checks pass.
    """
    self.create_tub()
    introducer = create_introducer()
    introducer.setServiceParent(self.parent)
    iff = os.path.join(self.basedir, "introducer.furl")
    tub = self.central_tub
    ifurl = self.central_tub.registerReference(introducer, furlFile=iff)
    self.introducer_furl = ifurl

    NUMCLIENTS = 5
    # we have 5 clients who publish themselves, and an extra one which
    # does not. When the connections are fully established, all six nodes
    # should have 5 connections each.
    clients = []
    tubs = {}
    received_announcements = {}
    NUM_SERVERS = NUMCLIENTS
    subscribing_clients = []
    publishing_clients = []

    for i in range(NUMCLIENTS+1):
        tub = Tub()
        #tub.setOption("logLocalFailures", True)
        #tub.setOption("logRemoteFailures", True)
        tub.setOption("expose-remote-exception-types", False)
        tub.setServiceParent(self.parent)
        l = tub.listenOn("tcp:0")
        portnum = l.getPortnum()
        tub.setLocation("localhost:%d" % portnum)

        log.msg("creating client %d: %s" % (i, tub.getShortTubID()))
        c = IntroducerClient(tub, self.introducer_furl,
                             u"nickname-%d" % i,
                             "version", "oldest")
        received_announcements[c] = {}
        # subscriber callback: index unsigned announcements by serverid
        def got(serverid, ann_d, announcements):
            announcements[serverid] = ann_d
        c.subscribe_to("storage", got, received_announcements[c])
        subscribing_clients.append(c)

        if i < NUMCLIENTS:
            node_furl = tub.registerReference(Referenceable())
            c.publish(node_furl, "storage", "ri_name")
            publishing_clients.append(c)
        # the last one does not publish anything

        c.setServiceParent(self.parent)
        clients.append(c)
        tubs[c] = tub

    def _wait_for_all_connections():
        for c in subscribing_clients:
            if len(received_announcements[c]) < NUM_SERVERS:
                return False
        return True
    d = self.poll(_wait_for_all_connections)

    def _check1(res):
        log.msg("doing _check1")
        dc = introducer._debug_counts
        self.failUnlessEqual(dc["inbound_message"], NUM_SERVERS)
        self.failUnlessEqual(dc["inbound_duplicate"], 0)
        self.failUnlessEqual(dc["inbound_update"], 0)
        self.failUnless(dc["outbound_message"])

        for c in clients:
            self.failUnless(c.connected_to_introducer())
        for c in subscribing_clients:
            cdc = c._debug_counts
            self.failUnless(cdc["inbound_message"])
            self.failUnlessEqual(cdc["inbound_announcement"], NUM_SERVERS)
            self.failUnlessEqual(cdc["wrong_service"], 0)
            self.failUnlessEqual(cdc["duplicate_announcement"], 0)
            self.failUnlessEqual(cdc["update"], 0)
            self.failUnlessEqual(cdc["new_announcement"], NUM_SERVERS)
            anns = received_announcements[c]
            self.failUnlessEqual(len(anns), NUM_SERVERS)

            # serverid of client 0 is its Tub's binary tubid
            nodeid0 = b32decode(tubs[clients[0]].tubID.upper())
            ann_d = anns[nodeid0]
            nick = ann_d["nickname"]
            self.failUnlessEqual(type(nick), unicode)
            self.failUnlessEqual(nick, u"nickname-0")
        for c in publishing_clients:
            cdc = c._debug_counts
            self.failUnlessEqual(cdc["outbound_message"], 1)
    d.addCallback(_check1)

    # force an introducer reconnect, by shutting down the Tub it's using
    # and starting a new Tub (with the old introducer). Everybody should
    # reconnect and republish, but the introducer should ignore the
    # republishes as duplicates. However, because the server doesn't know
    # what each client does and does not know, it will send them a copy
    # of the current announcement table anyway.
    d.addCallback(lambda _ign: log.msg("shutting down introducer's Tub"))
    d.addCallback(lambda _ign: self.central_tub.disownServiceParent())

    def _wait_for_introducer_loss():
        for c in clients:
            if c.connected_to_introducer():
                return False
        return True
    d.addCallback(lambda res: self.poll(_wait_for_introducer_loss))

    def _restart_introducer_tub(_ign):
        log.msg("restarting introducer's Tub")
        # record pre-restart counters so the reconnect poll below can
        # wait for each one to advance by the expected amount
        dc = introducer._debug_counts
        self.expected_count = dc["inbound_message"] + NUM_SERVERS
        self.expected_subscribe_count = dc["inbound_subscribe"] + NUMCLIENTS+1
        introducer._debug0 = dc["outbound_message"]
        for c in subscribing_clients:
            cdc = c._debug_counts
            c._debug0 = cdc["inbound_message"]
        self.create_tub(self.central_portnum)
        newfurl = self.central_tub.registerReference(introducer,
                                                     furlFile=iff)
        assert newfurl == self.introducer_furl
    d.addCallback(_restart_introducer_tub)

    def _wait_for_introducer_reconnect():
        # wait until:
        #  all clients are connected
        #  the introducer has received publish messages from all of them
        #  the introducer has received subscribe messages from all of them
        #  the introducer has sent (duplicate) announcements to all of them
        #  all clients have received (duplicate) announcements
        dc = introducer._debug_counts
        for c in clients:
            if not c.connected_to_introducer():
                return False
        if dc["inbound_message"] < self.expected_count:
            return False
        if dc["inbound_subscribe"] < self.expected_subscribe_count:
            return False
        for c in subscribing_clients:
            cdc = c._debug_counts
            if cdc["inbound_message"] < c._debug0+1:
                return False
        return True
    d.addCallback(lambda res: self.poll(_wait_for_introducer_reconnect))

    def _check2(res):
        log.msg("doing _check2")
        # assert that the introducer sent out new messages, one per
        # subscriber
        dc = introducer._debug_counts
        self.failUnlessEqual(dc["inbound_message"], 2*NUM_SERVERS)
        self.failUnlessEqual(dc["inbound_duplicate"], NUM_SERVERS)
        self.failUnlessEqual(dc["inbound_update"], 0)
        self.failUnlessEqual(dc["outbound_message"],
                             introducer._debug0 + len(subscribing_clients))
        for c in clients:
            self.failUnless(c.connected_to_introducer())
        for c in subscribing_clients:
            cdc = c._debug_counts
            self.failUnlessEqual(cdc["duplicate_announcement"], NUM_SERVERS)
    d.addCallback(_check2)

    # Then force an introducer restart, by shutting down the Tub,
    # destroying the old introducer, and starting a new Tub+Introducer.
    # Everybody should reconnect and republish, and the (new) introducer
    # will distribute the new announcements, but the clients should
    # ignore the republishes as duplicates.
    d.addCallback(lambda _ign: log.msg("shutting down introducer"))
    d.addCallback(lambda _ign: self.central_tub.disownServiceParent())
    d.addCallback(lambda res: self.poll(_wait_for_introducer_loss))

    def _restart_introducer(_ign):
        log.msg("restarting introducer")
        self.create_tub(self.central_portnum)
        for c in subscribing_clients:
            # record some counters for later comparison. Stash the values
            # on the client itself, because I'm lazy.
            cdc = c._debug_counts
            c._debug1 = cdc["inbound_announcement"]
            c._debug2 = cdc["inbound_message"]
            c._debug3 = cdc["new_announcement"]
        newintroducer = create_introducer()
        self.expected_message_count = NUM_SERVERS
        self.expected_announcement_count = NUM_SERVERS*len(subscribing_clients)
        self.expected_subscribe_count = len(subscribing_clients)
        newfurl = self.central_tub.registerReference(newintroducer,
                                                     furlFile=iff)
        assert newfurl == self.introducer_furl
    d.addCallback(_restart_introducer)

    def _wait_for_introducer_reconnect2():
        # wait until:
        #  all clients are connected
        #  the introducer has received publish messages from all of them
        #  the introducer has received subscribe messages from all of them
        #  the introducer has sent announcements for everybody to everybody
        #  all clients have received all the (duplicate) announcements
        # at that point, the system should be quiescent
        # NOTE(review): 'introducer' here still refers to the pre-restart
        # instance (the new one is 'newintroducer', local to
        # _restart_introducer), so these dc thresholds appear to be
        # satisfied by the old counters; the client-side counters below
        # do the real waiting — confirm this is intended.
        dc = introducer._debug_counts
        for c in clients:
            if not c.connected_to_introducer():
                return False
        if dc["inbound_message"] < self.expected_message_count:
            return False
        if dc["outbound_announcements"] < self.expected_announcement_count:
            return False
        if dc["inbound_subscribe"] < self.expected_subscribe_count:
            return False
        for c in subscribing_clients:
            cdc = c._debug_counts
            if cdc["inbound_announcement"] < c._debug1+NUM_SERVERS:
                return False
        return True
    d.addCallback(lambda res: self.poll(_wait_for_introducer_reconnect2))

    def _check3(res):
        log.msg("doing _check3")
        for c in clients:
            self.failUnless(c.connected_to_introducer())
        for c in subscribing_clients:
            cdc = c._debug_counts
            self.failUnless(cdc["inbound_announcement"] > c._debug1)
            self.failUnless(cdc["inbound_message"] > c._debug2)
            # there should have been no new announcements
            self.failUnlessEqual(cdc["new_announcement"], c._debug3)
            # and the right number of duplicate ones. There were
            # NUM_SERVERS from the servertub restart, and there should be
            # another NUM_SERVERS now
            self.failUnlessEqual(cdc["duplicate_announcement"],
                                 2*NUM_SERVERS)
    d.addCallback(_check3)

    return d
def do_system_test(self, create_introducer):
    """Full-system exercise of the (v1-era) introducer protocol.

    Five clients publish unsigned 'storage' announcements, a sixth only
    subscribes.  After settling, the introducer's Tub is restarted (same
    introducer object), then the introducer itself is replaced via
    `create_introducer`; each phase asserts publish/subscribe/duplicate
    counters.  Returns a Deferred that fires when all checks pass.
    """
    self.create_tub()
    introducer = create_introducer()
    introducer.setServiceParent(self.parent)
    iff = os.path.join(self.basedir, "introducer.furl")
    tub = self.central_tub
    ifurl = self.central_tub.registerReference(introducer, furlFile=iff)
    self.introducer_furl = ifurl

    NUMCLIENTS = 5
    # we have 5 clients who publish themselves, and an extra one which
    # does not. When the connections are fully established, all six nodes
    # should have 5 connections each.
    clients = []
    tubs = {}
    received_announcements = {}
    NUM_SERVERS = NUMCLIENTS
    subscribing_clients = []
    publishing_clients = []

    for i in range(NUMCLIENTS + 1):
        tub = Tub()
        #tub.setOption("logLocalFailures", True)
        #tub.setOption("logRemoteFailures", True)
        tub.setOption("expose-remote-exception-types", False)
        tub.setServiceParent(self.parent)
        l = tub.listenOn("tcp:0")
        portnum = l.getPortnum()
        tub.setLocation("localhost:%d" % portnum)

        log.msg("creating client %d: %s" % (i, tub.getShortTubID()))
        c = IntroducerClient(tub, self.introducer_furl,
                             u"nickname-%d" % i,
                             "version", "oldest")
        received_announcements[c] = {}
        # subscriber callback: index unsigned announcements by serverid
        def got(serverid, ann_d, announcements):
            announcements[serverid] = ann_d
        c.subscribe_to("storage", got, received_announcements[c])
        subscribing_clients.append(c)

        if i < NUMCLIENTS:
            node_furl = tub.registerReference(Referenceable())
            c.publish(node_furl, "storage", "ri_name")
            publishing_clients.append(c)
        # the last one does not publish anything

        c.setServiceParent(self.parent)
        clients.append(c)
        tubs[c] = tub

    def _wait_for_all_connections():
        for c in subscribing_clients:
            if len(received_announcements[c]) < NUM_SERVERS:
                return False
        return True
    d = self.poll(_wait_for_all_connections)

    def _check1(res):
        log.msg("doing _check1")
        dc = introducer._debug_counts
        self.failUnlessEqual(dc["inbound_message"], NUM_SERVERS)
        self.failUnlessEqual(dc["inbound_duplicate"], 0)
        self.failUnlessEqual(dc["inbound_update"], 0)
        self.failUnless(dc["outbound_message"])

        for c in clients:
            self.failUnless(c.connected_to_introducer())
        for c in subscribing_clients:
            cdc = c._debug_counts
            self.failUnless(cdc["inbound_message"])
            self.failUnlessEqual(cdc["inbound_announcement"], NUM_SERVERS)
            self.failUnlessEqual(cdc["wrong_service"], 0)
            self.failUnlessEqual(cdc["duplicate_announcement"], 0)
            self.failUnlessEqual(cdc["update"], 0)
            self.failUnlessEqual(cdc["new_announcement"], NUM_SERVERS)
            anns = received_announcements[c]
            self.failUnlessEqual(len(anns), NUM_SERVERS)

            # serverid of client 0 is its Tub's binary tubid
            nodeid0 = b32decode(tubs[clients[0]].tubID.upper())
            ann_d = anns[nodeid0]
            nick = ann_d["nickname"]
            self.failUnlessEqual(type(nick), unicode)
            self.failUnlessEqual(nick, u"nickname-0")
        for c in publishing_clients:
            cdc = c._debug_counts
            self.failUnlessEqual(cdc["outbound_message"], 1)
    d.addCallback(_check1)

    # force an introducer reconnect, by shutting down the Tub it's using
    # and starting a new Tub (with the old introducer). Everybody should
    # reconnect and republish, but the introducer should ignore the
    # republishes as duplicates. However, because the server doesn't know
    # what each client does and does not know, it will send them a copy
    # of the current announcement table anyway.
    d.addCallback(lambda _ign: log.msg("shutting down introducer's Tub"))
    d.addCallback(lambda _ign: self.central_tub.disownServiceParent())

    def _wait_for_introducer_loss():
        for c in clients:
            if c.connected_to_introducer():
                return False
        return True
    d.addCallback(lambda res: self.poll(_wait_for_introducer_loss))

    def _restart_introducer_tub(_ign):
        log.msg("restarting introducer's Tub")
        # record pre-restart counters so the reconnect poll below can
        # wait for each one to advance by the expected amount
        dc = introducer._debug_counts
        self.expected_count = dc["inbound_message"] + NUM_SERVERS
        self.expected_subscribe_count = dc["inbound_subscribe"] + NUMCLIENTS + 1
        introducer._debug0 = dc["outbound_message"]
        for c in subscribing_clients:
            cdc = c._debug_counts
            c._debug0 = cdc["inbound_message"]
        self.create_tub(self.central_portnum)
        newfurl = self.central_tub.registerReference(introducer,
                                                     furlFile=iff)
        assert newfurl == self.introducer_furl
    d.addCallback(_restart_introducer_tub)

    def _wait_for_introducer_reconnect():
        # wait until:
        #  all clients are connected
        #  the introducer has received publish messages from all of them
        #  the introducer has received subscribe messages from all of them
        #  the introducer has sent (duplicate) announcements to all of them
        #  all clients have received (duplicate) announcements
        dc = introducer._debug_counts
        for c in clients:
            if not c.connected_to_introducer():
                return False
        if dc["inbound_message"] < self.expected_count:
            return False
        if dc["inbound_subscribe"] < self.expected_subscribe_count:
            return False
        for c in subscribing_clients:
            cdc = c._debug_counts
            if cdc["inbound_message"] < c._debug0 + 1:
                return False
        return True
    d.addCallback(lambda res: self.poll(_wait_for_introducer_reconnect))

    def _check2(res):
        log.msg("doing _check2")
        # assert that the introducer sent out new messages, one per
        # subscriber
        dc = introducer._debug_counts
        self.failUnlessEqual(dc["inbound_message"], 2 * NUM_SERVERS)
        self.failUnlessEqual(dc["inbound_duplicate"], NUM_SERVERS)
        self.failUnlessEqual(dc["inbound_update"], 0)
        self.failUnlessEqual(dc["outbound_message"],
                             introducer._debug0 + len(subscribing_clients))
        for c in clients:
            self.failUnless(c.connected_to_introducer())
        for c in subscribing_clients:
            cdc = c._debug_counts
            self.failUnlessEqual(cdc["duplicate_announcement"], NUM_SERVERS)
    d.addCallback(_check2)

    # Then force an introducer restart, by shutting down the Tub,
    # destroying the old introducer, and starting a new Tub+Introducer.
    # Everybody should reconnect and republish, and the (new) introducer
    # will distribute the new announcements, but the clients should
    # ignore the republishes as duplicates.
    d.addCallback(lambda _ign: log.msg("shutting down introducer"))
    d.addCallback(lambda _ign: self.central_tub.disownServiceParent())
    d.addCallback(lambda res: self.poll(_wait_for_introducer_loss))

    def _restart_introducer(_ign):
        log.msg("restarting introducer")
        self.create_tub(self.central_portnum)
        for c in subscribing_clients:
            # record some counters for later comparison. Stash the values
            # on the client itself, because I'm lazy.
            cdc = c._debug_counts
            c._debug1 = cdc["inbound_announcement"]
            c._debug2 = cdc["inbound_message"]
            c._debug3 = cdc["new_announcement"]
        newintroducer = create_introducer()
        self.expected_message_count = NUM_SERVERS
        self.expected_announcement_count = NUM_SERVERS * len(subscribing_clients)
        self.expected_subscribe_count = len(subscribing_clients)
        newfurl = self.central_tub.registerReference(newintroducer,
                                                     furlFile=iff)
        assert newfurl == self.introducer_furl
    d.addCallback(_restart_introducer)

    def _wait_for_introducer_reconnect2():
        # wait until:
        #  all clients are connected
        #  the introducer has received publish messages from all of them
        #  the introducer has received subscribe messages from all of them
        #  the introducer has sent announcements for everybody to everybody
        #  all clients have received all the (duplicate) announcements
        # at that point, the system should be quiescent
        # NOTE(review): 'introducer' here still refers to the pre-restart
        # instance (the new one is 'newintroducer', local to
        # _restart_introducer), so these dc thresholds appear to be
        # satisfied by the old counters; the client-side counters below
        # do the real waiting — confirm this is intended.
        dc = introducer._debug_counts
        for c in clients:
            if not c.connected_to_introducer():
                return False
        if dc["inbound_message"] < self.expected_message_count:
            return False
        if dc["outbound_announcements"] < self.expected_announcement_count:
            return False
        if dc["inbound_subscribe"] < self.expected_subscribe_count:
            return False
        for c in subscribing_clients:
            cdc = c._debug_counts
            if cdc["inbound_announcement"] < c._debug1 + NUM_SERVERS:
                return False
        return True
    d.addCallback(lambda res: self.poll(_wait_for_introducer_reconnect2))

    def _check3(res):
        log.msg("doing _check3")
        for c in clients:
            self.failUnless(c.connected_to_introducer())
        for c in subscribing_clients:
            cdc = c._debug_counts
            self.failUnless(cdc["inbound_announcement"] > c._debug1)
            self.failUnless(cdc["inbound_message"] > c._debug2)
            # there should have been no new announcements
            self.failUnlessEqual(cdc["new_announcement"], c._debug3)
            # and the right number of duplicate ones. There were
            # NUM_SERVERS from the servertub restart, and there should be
            # another NUM_SERVERS now
            self.failUnlessEqual(cdc["duplicate_announcement"],
                                 2 * NUM_SERVERS)
    d.addCallback(_check3)

    return d
def test_client_cache(self):
    """
    Announcements received by an introducer client are written to that
    introducer client's cache file.

    Exercises: writing the first announcement, replacement by a newer
    seqnum from the same key, addition of an announcement under a second
    key, and reloading the cache into a fresh IntroducerClient and a
    fresh node Client.  (Generator body: driven via yields on Deferreds.)
    """
    basedir = FilePath("introducer/ClientSeqnums/test_client_cache_1")
    private = basedir.child("private")
    private.makedirs()
    write_introducer(basedir, "default", "nope")
    cache_filepath = basedir.descendant([
        "private",
        "introducer_default_cache.yaml",
    ])

    # if storage is enabled, the Client will publish its storage server
    # during startup (although the announcement will wait in a queue
    # until the introducer connection is established). To avoid getting
    # confused by this, disable storage.
    with basedir.child("tahoe.cfg").open("w") as f:
        f.write(b"[storage]\n")
        f.write(b"enabled = false\n")

    c = yield create_client(basedir.path)
    ic = c.introducer_clients[0]
    private_key, public_key = ed25519.create_signing_keypair()
    public_key_str = remove_prefix(ed25519.string_from_verifying_key(public_key), b"pub-")
    furl1 = b"pb://[email protected]:123/short" # base32("short")
    ann_t = make_ann_t(ic, furl1, private_key, 1)

    ic.got_announcements([ann_t])
    yield flushEventualQueue()

    # check the cache for the announcement
    announcements = self._load_cache(cache_filepath)
    self.failUnlessEqual(len(announcements), 1)
    self.failUnlessEqual(ensure_binary(announcements[0]['key_s']),
                         public_key_str)
    ann = announcements[0]["ann"]
    self.failUnlessEqual(ensure_binary(ann["anonymous-storage-FURL"]),
                         furl1)
    self.failUnlessEqual(ann["seqnum"], 1)

    # a new announcement that replaces the first should replace the
    # cached entry, not duplicate it
    furl2 = furl1 + b"er"
    ann_t2 = make_ann_t(ic, furl2, private_key, 2)
    ic.got_announcements([ann_t2])
    yield flushEventualQueue()
    announcements = self._load_cache(cache_filepath)
    self.failUnlessEqual(len(announcements), 1)
    self.failUnlessEqual(ensure_binary(announcements[0]['key_s']),
                         public_key_str)
    ann = announcements[0]["ann"]
    self.failUnlessEqual(ensure_binary(ann["anonymous-storage-FURL"]),
                         furl2)
    self.failUnlessEqual(ann["seqnum"], 2)

    # but a third announcement with a different key should add to the
    # cache
    private_key2, public_key2 = ed25519.create_signing_keypair()
    public_key_str2 = remove_prefix(ed25519.string_from_verifying_key(public_key2), b"pub-")
    furl3 = b"pb://[email protected]:456/short"
    ann_t3 = make_ann_t(ic, furl3, private_key2, 1)
    ic.got_announcements([ann_t3])
    yield flushEventualQueue()

    announcements = self._load_cache(cache_filepath)
    self.failUnlessEqual(len(announcements), 2)
    self.failUnlessEqual(set([public_key_str, public_key_str2]),
                         set([ensure_binary(a["key_s"])
                              for a in announcements]))
    self.failUnlessEqual(set([furl2, furl3]),
                         set([ensure_binary(a["ann"]["anonymous-storage-FURL"])
                              for a in announcements]))

    # test loading
    yield flushEventualQueue()
    ic2 = IntroducerClient(None, "introducer.furl", u"my_nickname",
                           "my_version", "oldest_version", fakeseq,
                           ic._cache_filepath)
    announcements = {}
    def got(key_s, ann):
        announcements[key_s] = ann
    ic2.subscribe_to("storage", got)
    ic2._load_announcements() # normally happens when connection fails
    yield flushEventualQueue()

    self.failUnless(public_key_str in announcements)
    self.failUnlessEqual(
        ensure_binary(announcements[public_key_str]["anonymous-storage-FURL"]),
        furl2)
    self.failUnlessEqual(
        ensure_binary(announcements[public_key_str2]["anonymous-storage-FURL"]),
        furl3)

    # a fresh Client node pointed at the same basedir should recover the
    # cached servers into its storage broker
    c2 = yield create_client(basedir.path)
    c2.introducer_clients[0]._load_announcements()
    yield flushEventualQueue()
    self.assertEqual(c2.storage_broker.get_all_serverids(),
                     frozenset([public_key_str, public_key_str2]))