Example #1
    def test_client_v2_signed(self):
        introducer = IntroducerService()
        tub = introducer_furl = None
        app_versions = {"whizzy": "fizzy"}
        client_v2 = IntroducerClient(tub, introducer_furl, u"nick-v2",
                                     "my_version", "oldest", app_versions,
                                     fakeseq, FilePath(self.mktemp()))
        furl1 = "pb://[email protected]:0/swissnum"

        private_key, public_key = ed25519.create_signing_keypair()
        public_key_str = remove_prefix(
            ed25519.string_from_verifying_key(public_key), "pub-")

        ann_t0 = make_ann_t(client_v2, furl1, private_key, 10)
        canary0 = Referenceable()
        introducer.remote_publish_v2(ann_t0, canary0)
        a = introducer.get_announcements()
        self.failUnlessEqual(len(a), 1)
        self.assertThat(a[0].canary, Is(canary0))
        self.failUnlessEqual(a[0].index, ("storage", public_key_str))
        self.failUnlessEqual(a[0].announcement["app-versions"], app_versions)
        self.failUnlessEqual(a[0].nickname, u"nick-v2")
        self.failUnlessEqual(a[0].service_name, "storage")
        self.failUnlessEqual(a[0].version, "my_version")
        self.failUnlessEqual(a[0].announcement["anonymous-storage-FURL"],
                             furl1)
Example #2
    def test_remove_prefix_entire_string(self):
        """
        removing a prefix which is the whole string yields the empty string
        """
        self.assertEquals(
            remove_prefix(b"foobar", b"foobar"),
            b"",
        )
Example #3
    def test_remove_prefix_zero(self):
        """
        removing a zero-length prefix does nothing
        """
        self.assertEquals(
            remove_prefix(b"foobar", b""),
            b"foobar",
        )
Example #4
def verifying_key_from_string(public_key_bytes):
    """
    Load a verifying key from a string of bytes (which includes the
    PUBLIC_KEY_PREFIX)

    :returns: a public_key
    """
    if not isinstance(public_key_bytes, six.binary_type):
        raise ValueError('public_key_bytes must be bytes')

    return Ed25519PublicKey.from_public_bytes(
        a2b(remove_prefix(public_key_bytes, PUBLIC_KEY_PREFIX)))
Example #5
def signing_keypair_from_string(private_key_bytes):
    """
    Load a signing keypair from a string of bytes (which includes the
    PRIVATE_KEY_PREFIX)

    :returns: a 2-tuple of (private_key, public_key)
    """

    if not isinstance(private_key_bytes, six.binary_type):
        raise ValueError('private_key_bytes must be bytes')

    private_key = Ed25519PrivateKey.from_private_bytes(
        a2b(remove_prefix(private_key_bytes, PRIVATE_KEY_PREFIX)))
    return private_key, private_key.public_key()
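A hedged usage sketch tying Examples 4 and 5 together: create a keypair, serialize both halves to their prefixed string form, and load them back. It assumes a string_from_signing_key counterpart exists alongside the helpers shown above (it does not appear in these examples), so treat that name as illustrative:

from allmydata.crypto import ed25519

# create a keypair and serialize both halves; the serialized strings carry
# the PRIVATE_KEY_PREFIX / PUBLIC_KEY_PREFIX that the loaders above strip
private_key, public_key = ed25519.create_signing_keypair()
priv_str = ed25519.string_from_signing_key(private_key)  # assumed counterpart helper
pub_str = ed25519.string_from_verifying_key(public_key)

# round-tripping through the loaders should give back equivalent keys
private_key2, public_key2 = ed25519.signing_keypair_from_string(priv_str)
public_key3 = ed25519.verifying_key_from_string(pub_str)
assert ed25519.string_from_verifying_key(public_key2) == pub_str
assert ed25519.string_from_verifying_key(public_key3) == pub_str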
Example #6
def unsign_from_foolscap(ann_t):
    (msg, sig_vs, claimed_key_vs) = ann_t
    if not sig_vs or not claimed_key_vs:
        raise UnknownKeyError("only signed announcements recognized")
    if not sig_vs.startswith(b"v0-"):
        raise UnknownKeyError("only v0- signatures recognized")
    if not claimed_key_vs.startswith(b"v0-"):
        raise UnknownKeyError("only v0- keys recognized")

    claimed_key = ed25519.verifying_key_from_string(b"pub-" + claimed_key_vs)
    sig_bytes = base32.a2b(remove_prefix(sig_vs, b"v0-"))
    ed25519.verify_signature(claimed_key, sig_bytes, msg)
    key_vs = claimed_key_vs
    ann = json.loads(msg.decode("utf-8"))
    return (ann, key_vs)
Example #7
def sign_to_foolscap(announcement, signing_key):
    """
    :param signing_key: a (private) signing key, as returned from
        e.g. :func:`allmydata.crypto.ed25519.signing_keypair_from_string`

    :returns: 3-tuple of (msg, sig, vk) where msg is a UTF8 JSON
        serialization of the `announcement` (bytes), sig is bytes (a
        signature of msg) and vk is the verifying key bytes
    """
    # return (bytes, sig-str, pubkey-str). A future HTTP-based serialization
    # will use JSON({msg:b64(JSON(msg).utf8), sig:v0-b64(sig),
    # pubkey:v0-b64(pubkey)}) .
    msg = json.dumps(announcement).encode("utf-8")
    sig = b"v0-" + base32.b2a(ed25519.sign_data(signing_key, msg))
    verifying_key_string = ed25519.string_from_verifying_key(
        ed25519.verifying_key_from_signing_key(signing_key))
    ann_t = (msg, sig, remove_prefix(verifying_key_string, b"pub-"))
    return ann_t
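Examples 6 and 7 are inverses of each other, so a round trip shows the full announcement wire format. A hedged sketch; the import path for the two foolscap helpers is not shown in these examples and is assumed here:

from allmydata.crypto import ed25519
# assumed import path for the two helpers shown above
from allmydata.introducer.common import sign_to_foolscap, unsign_from_foolscap

private_key, public_key = ed25519.create_signing_keypair()
announcement = {"anonymous-storage-FURL": "pb://example/swissnum", "seqnum": 1}

# sign: returns (JSON bytes, b"v0-"-prefixed signature, verifying key
# without its "pub-" prefix), exactly as built in Example 7
ann_t = sign_to_foolscap(announcement, private_key)

# verify and decode; a bad or missing signature raises UnknownKeyError
ann, key_vs = unsign_from_foolscap(ann_t)
assert ann == announcement
assert b"pub-" + key_vs == ed25519.string_from_verifying_key(public_key)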
Example #8
    def _init_permutation_seed(self, ss):
        seed = self.config.get_config_from_file("permutation-seed")
        if not seed:
            have_shares = ss.have_shares()
            if have_shares:
                # if the server has shares but not a recorded
                # permutation-seed, then it has been around since pre-#466
                # days, and the clients who uploaded those shares used our
                # TubID as a permutation-seed. We should keep using that same
                # seed to keep the shares in the same place in the permuted
                # ring, so those clients don't have to perform excessive
                # searches.
                seed = base32.b2a(self.nodeid)
            else:
                # otherwise, we're free to use the more natural seed of our
                # pubkey-based serverid
                vk_string = ed25519.string_from_verifying_key(self._node_public_key)
                vk_bytes = remove_prefix(vk_string, ed25519.PUBLIC_KEY_PREFIX)
                seed = base32.b2a(vk_bytes)
            self.config.write_config_file("permutation-seed", seed+b"\n", mode="wb")
        return seed.strip()
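The else-branch above derives the seed purely from the node's public key: strip the key prefix, then base32-encode what is left. A small standalone sketch of just that step (the import locations of remove_prefix and base32 are not shown in these examples, so they are assumptions):

from allmydata.crypto import ed25519
from allmydata.crypto.util import remove_prefix  # assumed import path
from allmydata.util import base32                # assumed import path

private_key, public_key = ed25519.create_signing_keypair()
vk_string = ed25519.string_from_verifying_key(public_key)

# strip the public-key prefix and base32-encode the remainder, mirroring
# the pubkey-based branch of _init_permutation_seed
vk_bytes = remove_prefix(vk_string, ed25519.PUBLIC_KEY_PREFIX)
seed = base32.b2a(vk_bytes)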
Example #9
    def test_client_cache(self):
        basedir = "introducer/ClientSeqnums/test_client_cache_1"
        fileutil.make_dirs(basedir)
        cache_filepath = FilePath(
            os.path.join(basedir, "private", "introducer_default_cache.yaml"))

        # if storage is enabled, the Client will publish its storage server
        # during startup (although the announcement will wait in a queue
        # until the introducer connection is established). To avoid getting
        # confused by this, disable storage.
        with open(os.path.join(basedir, "tahoe.cfg"), "w") as f:
            f.write("[client]\n")
            f.write("introducer.furl = nope\n")
            f.write("[storage]\n")
            f.write("enabled = false\n")

        c = yield create_client(basedir)
        ic = c.introducer_clients[0]
        private_key, public_key = ed25519.create_signing_keypair()
        public_key_str = remove_prefix(
            ed25519.string_from_verifying_key(public_key), "pub-")
        furl1 = "pb://[email protected]:123/short"  # base32("short")
        ann_t = make_ann_t(ic, furl1, private_key, 1)

        ic.got_announcements([ann_t])
        yield flushEventualQueue()

        # check the cache for the announcement
        announcements = self._load_cache(cache_filepath)
        self.failUnlessEqual(len(announcements), 1)
        self.failUnlessEqual(announcements[0]['key_s'], public_key_str)
        ann = announcements[0]["ann"]
        self.failUnlessEqual(ann["anonymous-storage-FURL"], furl1)
        self.failUnlessEqual(ann["seqnum"], 1)

        # a new announcement that replaces the first should replace the
        # cached entry, not duplicate it
        furl2 = furl1 + "er"
        ann_t2 = make_ann_t(ic, furl2, private_key, 2)
        ic.got_announcements([ann_t2])
        yield flushEventualQueue()
        announcements = self._load_cache(cache_filepath)
        self.failUnlessEqual(len(announcements), 1)
        self.failUnlessEqual(announcements[0]['key_s'], public_key_str)
        ann = announcements[0]["ann"]
        self.failUnlessEqual(ann["anonymous-storage-FURL"], furl2)
        self.failUnlessEqual(ann["seqnum"], 2)

        # but a third announcement with a different key should add to the
        # cache
        private_key2, public_key2 = ed25519.create_signing_keypair()
        public_key_str2 = remove_prefix(
            ed25519.string_from_verifying_key(public_key2), "pub-")
        furl3 = "pb://[email protected]:456/short"
        ann_t3 = make_ann_t(ic, furl3, private_key2, 1)
        ic.got_announcements([ann_t3])
        yield flushEventualQueue()

        announcements = self._load_cache(cache_filepath)
        self.failUnlessEqual(len(announcements), 2)
        self.failUnlessEqual(set([public_key_str, public_key_str2]),
                             set([a["key_s"] for a in announcements]))
        self.failUnlessEqual(
            set([furl2, furl3]),
            set([a["ann"]["anonymous-storage-FURL"] for a in announcements]))

        # test loading
        yield flushEventualQueue()
        ic2 = IntroducerClient(None, "introducer.furl", u"my_nickname",
                               "my_version", "oldest_version", {}, fakeseq,
                               ic._cache_filepath)
        announcements = {}

        def got(key_s, ann):
            announcements[key_s] = ann

        ic2.subscribe_to("storage", got)
        ic2._load_announcements()  # normally happens when connection fails
        yield flushEventualQueue()

        self.failUnless(public_key_str in announcements)
        self.failUnlessEqual(
            announcements[public_key_str]["anonymous-storage-FURL"], furl2)
        self.failUnlessEqual(
            announcements[public_key_str2]["anonymous-storage-FURL"], furl3)

        c2 = yield create_client(basedir)
        c2.introducer_clients[0]._load_announcements()
        yield flushEventualQueue()
        self.assertEqual(c2.storage_broker.get_all_serverids(),
                         frozenset([public_key_str, public_key_str2]))
Example #10
    def do_system_test(self):
        self.create_tub()
        introducer = IntroducerService()
        introducer.setServiceParent(self.parent)
        iff = os.path.join(self.basedir, "introducer.furl")
        tub = self.central_tub
        ifurl = self.central_tub.registerReference(introducer, furlFile=iff)
        self.introducer_furl = ifurl

        # we have 5 clients who publish themselves as storage servers, and a
        # sixth which does not. All 6 clients subscribe to hear about
        # storage. When the connections are fully established, all six nodes
        # should have 5 connections each.
        NUM_STORAGE = 5
        NUM_CLIENTS = 6

        clients = []
        tubs = {}
        received_announcements = {}
        subscribing_clients = []
        publishing_clients = []
        printable_serverids = {}
        self.the_introducer = introducer
        privkeys = {}
        pubkeys = {}
        expected_announcements = [0 for c in range(NUM_CLIENTS)]

        for i in range(NUM_CLIENTS):
            tub = Tub()
            #tub.setOption("logLocalFailures", True)
            #tub.setOption("logRemoteFailures", True)
            tub.setOption("expose-remote-exception-types", False)
            tub.setServiceParent(self.parent)
            listenOnUnused(tub)
            log.msg("creating client %d: %s" % (i, tub.getShortTubID()))
            c = IntroducerClient(tub, self.introducer_furl, NICKNAME % str(i),
                                 "version", "oldest",
                                 {"component": "component-v1"}, fakeseq,
                                 FilePath(self.mktemp()))
            received_announcements[c] = {}

            def got(key_s_or_tubid, ann, announcements):
                index = key_s_or_tubid or get_tubid_string_from_ann(ann)
                announcements[index] = ann

            c.subscribe_to("storage", got, received_announcements[c])
            subscribing_clients.append(c)
            expected_announcements[i] += 1  # all expect a 'storage' announcement

            node_furl = tub.registerReference(Referenceable())
            private_key, public_key = ed25519.create_signing_keypair()
            public_key_str = ed25519.string_from_verifying_key(public_key)
            privkeys[i] = private_key
            pubkeys[i] = public_key_str

            if i < NUM_STORAGE:
                # sign all announcements
                c.publish("storage", make_ann(node_furl), private_key)
                printable_serverids[i] = remove_prefix(public_key_str, b"pub-")
                publishing_clients.append(c)
            else:
                # the last one does not publish anything
                pass

            if i == 2:
                # also publish something that nobody cares about
                boring_furl = tub.registerReference(Referenceable())
                c.publish("boring", make_ann(boring_furl), private_key)

            c.setServiceParent(self.parent)
            clients.append(c)
            tubs[c] = tub

        def _wait_for_connected(ign):
            def _connected():
                for c in clients:
                    if not c.connected_to_introducer():
                        return False
                return True

            return self.poll(_connected)

        # we watch the clients to determine when the system has settled down.
        # Then we can look inside the server to assert things about its
        # state.

        def _wait_for_expected_announcements(ign):
            def _got_expected_announcements():
                for i, c in enumerate(subscribing_clients):
                    if len(received_announcements[c]) < expected_announcements[i]:
                        return False
                return True

            return self.poll(_got_expected_announcements)

        # before shutting down any Tub, we'd like to know that there are no
        # messages outstanding

        def _wait_until_idle(ign):
            def _idle():
                for c in subscribing_clients + publishing_clients:
                    if c._debug_outstanding:
                        return False
                if self.the_introducer._debug_outstanding:
                    return False
                return True

            return self.poll(_idle)

        d = defer.succeed(None)
        d.addCallback(_wait_for_connected)
        d.addCallback(_wait_for_expected_announcements)
        d.addCallback(_wait_until_idle)

        def _check1(res):
            log.msg("doing _check1")
            dc = self.the_introducer._debug_counts
            # each storage server publishes a record. There is also one
            # "boring"
            self.failUnlessEqual(dc["inbound_message"], NUM_STORAGE + 1)
            self.failUnlessEqual(dc["inbound_duplicate"], 0)
            self.failUnlessEqual(dc["inbound_update"], 0)
            self.failUnlessEqual(dc["inbound_subscribe"], NUM_CLIENTS)
            # the number of outbound messages is tricky.. I think it depends
            # upon a race between the publish and the subscribe messages.
            self.failUnless(dc["outbound_message"] > 0)
            # each client subscribes to "storage", and each server publishes
            self.failUnlessEqual(dc["outbound_announcements"],
                                 NUM_STORAGE * NUM_CLIENTS)

            for c in subscribing_clients:
                cdc = c._debug_counts
                self.failUnless(cdc["inbound_message"])
                self.failUnlessEqual(cdc["inbound_announcement"], NUM_STORAGE)
                self.failUnlessEqual(cdc["wrong_service"], 0)
                self.failUnlessEqual(cdc["duplicate_announcement"], 0)
                self.failUnlessEqual(cdc["update"], 0)
                self.failUnlessEqual(cdc["new_announcement"], NUM_STORAGE)
                anns = received_announcements[c]
                self.failUnlessEqual(len(anns), NUM_STORAGE)

                serverid0 = printable_serverids[0]
                ann = anns[serverid0]
                nick = ann["nickname"]
                self.failUnlessEqual(type(nick), unicode)
                self.failUnlessEqual(nick, NICKNAME % "0")
            for c in publishing_clients:
                cdc = c._debug_counts
                expected = 1
                if c in [
                        clients[2],  # boring
                ]:
                    expected = 2
                self.failUnlessEqual(cdc["outbound_message"], expected)
            # now check the web status, make sure it renders without error
            ir = introweb.IntroducerRoot(self.parent)
            self.parent.nodeid = "NODEID"
            text = ir.renderSynchronously().decode("utf-8")
            self.assertIn(NICKNAME % "0", text)  # a v2 client
            self.assertIn(NICKNAME % "1", text)  # another v2 client
            for i in range(NUM_STORAGE):
                self.assertIn(printable_serverids[i], text,
                              (i, printable_serverids[i], text))
                # make sure there isn't a double-base32ed string too
                self.assertNotIn(idlib.nodeid_b2a(printable_serverids[i]),
                                 text, (i, printable_serverids[i], text))
            log.msg("_check1 done")

        d.addCallback(_check1)

        # force an introducer reconnect, by shutting down the Tub it's using
        # and starting a new Tub (with the old introducer). Everybody should
        # reconnect and republish, but the introducer should ignore the
        # republishes as duplicates. However, because the server doesn't know
        # what each client does and does not know, it will send them a copy
        # of the current announcement table anyway.

        d.addCallback(lambda _ign: log.msg("shutting down introducer's Tub"))
        d.addCallback(lambda _ign: self.central_tub.disownServiceParent())

        def _wait_for_introducer_loss(ign):
            def _introducer_lost():
                for c in clients:
                    if c.connected_to_introducer():
                        return False
                return True

            return self.poll(_introducer_lost)

        d.addCallback(_wait_for_introducer_loss)

        def _restart_introducer_tub(_ign):
            log.msg("restarting introducer's Tub")
            # reset counters
            for i in range(NUM_CLIENTS):
                c = subscribing_clients[i]
                for k in c._debug_counts:
                    c._debug_counts[k] = 0
            for k in self.the_introducer._debug_counts:
                self.the_introducer._debug_counts[k] = 0
            expected_announcements[i] += 1  # new 'storage' for everyone
            self.create_tub(self.central_portnum)
            newfurl = self.central_tub.registerReference(self.the_introducer,
                                                         furlFile=iff)
            assert newfurl == self.introducer_furl

        d.addCallback(_restart_introducer_tub)

        d.addCallback(_wait_for_connected)
        d.addCallback(_wait_for_expected_announcements)
        d.addCallback(_wait_until_idle)
        d.addCallback(lambda _ign: log.msg(" reconnected"))

        # TODO: publish something while the introducer is offline, then
        # confirm it gets delivered when the connection is reestablished
        def _check2(res):
            log.msg("doing _check2")
            # assert that the introducer sent out new messages, one per
            # subscriber
            dc = self.the_introducer._debug_counts
            self.failUnlessEqual(dc["outbound_announcements"],
                                 NUM_STORAGE * NUM_CLIENTS)
            self.failUnless(dc["outbound_message"] > 0)
            self.failUnlessEqual(dc["inbound_subscribe"], NUM_CLIENTS)
            for c in subscribing_clients:
                cdc = c._debug_counts
                self.failUnlessEqual(cdc["inbound_message"], 1)
                self.failUnlessEqual(cdc["inbound_announcement"], NUM_STORAGE)
                self.failUnlessEqual(cdc["new_announcement"], 0)
                self.failUnlessEqual(cdc["wrong_service"], 0)
                self.failUnlessEqual(cdc["duplicate_announcement"],
                                     NUM_STORAGE)

        d.addCallback(_check2)

        # Then force an introducer restart, by shutting down the Tub,
        # destroying the old introducer, and starting a new Tub+Introducer.
        # Everybody should reconnect and republish, and the (new) introducer
        # will distribute the new announcements, but the clients should
        # ignore the republishes as duplicates.

        d.addCallback(lambda _ign: log.msg("shutting down introducer"))
        d.addCallback(lambda _ign: self.central_tub.disownServiceParent())
        d.addCallback(_wait_for_introducer_loss)
        d.addCallback(lambda _ign: log.msg("introducer lost"))

        def _restart_introducer(_ign):
            log.msg("restarting introducer")
            self.create_tub(self.central_portnum)
            # reset counters
            for i in range(NUM_CLIENTS):
                c = subscribing_clients[i]
                for k in c._debug_counts:
                    c._debug_counts[k] = 0
            expected_announcements[i] += 1  # new 'storage' for everyone
            introducer = IntroducerService()
            self.the_introducer = introducer
            newfurl = self.central_tub.registerReference(self.the_introducer,
                                                         furlFile=iff)
            assert newfurl == self.introducer_furl

        d.addCallback(_restart_introducer)

        d.addCallback(_wait_for_connected)
        d.addCallback(_wait_for_expected_announcements)
        d.addCallback(_wait_until_idle)

        def _check3(res):
            log.msg("doing _check3")
            dc = self.the_introducer._debug_counts
            self.failUnlessEqual(dc["outbound_announcements"],
                                 NUM_STORAGE * NUM_CLIENTS)
            self.failUnless(dc["outbound_message"] > 0)
            self.failUnlessEqual(dc["inbound_subscribe"], NUM_CLIENTS)
            for c in subscribing_clients:
                cdc = c._debug_counts
                self.failUnless(cdc["inbound_message"] > 0)
                self.failUnlessEqual(cdc["inbound_announcement"], NUM_STORAGE)
                self.failUnlessEqual(cdc["new_announcement"], 0)
                self.failUnlessEqual(cdc["wrong_service"], 0)
                self.failUnlessEqual(cdc["duplicate_announcement"],
                                     NUM_STORAGE)

        d.addCallback(_check3)
        return d
Example #11
    def test_duplicate_receive_v2(self):
        ic1 = IntroducerClient(None, "introducer.furl", u"my_nickname",
                               "ver23", "oldest_version", {}, fakeseq,
                               FilePath(self.mktemp()))
        # we use a second client just to create a different-looking
        # announcement
        ic2 = IntroducerClient(None, "introducer.furl", u"my_nickname",
                               "ver24", "oldest_version", {}, fakeseq,
                               FilePath(self.mktemp()))
        announcements = []

        def _received(key_s, ann):
            announcements.append((key_s, ann))

        ic1.subscribe_to("storage", _received)
        furl1 = "pb://[email protected]:36106/gydnp"
        furl1a = "pb://[email protected]:7777/gydnp"
        furl2 = "pb://[email protected]:36106/ttwwoo"

        private_key, public_key = ed25519.create_signing_keypair()
        public_key_str = ed25519.string_from_verifying_key(public_key)
        pubkey_s = remove_prefix(public_key_str, "pub-")

        # ann1: ic1, furl1
        # ann1a: ic1, furl1a (same SturdyRef, different connection hints)
        # ann1b: ic2, furl1
        # ann2: ic2, furl2

        self.ann1 = make_ann_t(ic1, furl1, private_key, seqnum=10)
        self.ann1old = make_ann_t(ic1, furl1, private_key, seqnum=9)
        self.ann1noseqnum = make_ann_t(ic1, furl1, private_key, seqnum=None)
        self.ann1b = make_ann_t(ic2, furl1, private_key, seqnum=11)
        self.ann1a = make_ann_t(ic1, furl1a, private_key, seqnum=12)
        self.ann2 = make_ann_t(ic2, furl2, private_key, seqnum=13)

        ic1.remote_announce_v2([self.ann1])  # queues eventual-send
        d = fireEventually()

        def _then1(ign):
            self.failUnlessEqual(len(announcements), 1)
            key_s, ann = announcements[0]
            self.failUnlessEqual(key_s, pubkey_s)
            self.failUnlessEqual(ann["anonymous-storage-FURL"], furl1)
            self.failUnlessEqual(ann["my-version"], "ver23")

        d.addCallback(_then1)

        # now send a duplicate announcement. This should not fire the
        # subscriber
        d.addCallback(lambda ign: ic1.remote_announce_v2([self.ann1]))
        d.addCallback(fireEventually)

        def _then2(ign):
            self.failUnlessEqual(len(announcements), 1)

        d.addCallback(_then2)

        # an older announcement shouldn't fire the subscriber either
        d.addCallback(lambda ign: ic1.remote_announce_v2([self.ann1old]))
        d.addCallback(fireEventually)

        def _then2a(ign):
            self.failUnlessEqual(len(announcements), 1)

        d.addCallback(_then2a)

        # an announcement with no seqnum cannot replace one that has a seqnum
        d.addCallback(lambda ign: ic1.remote_announce_v2([self.ann1noseqnum]))
        d.addCallback(fireEventually)

        def _then2b(ign):
            self.failUnlessEqual(len(announcements), 1)

        d.addCallback(_then2b)

        # and a replacement announcement: same FURL, new other stuff. The
        # subscriber *should* be fired.
        d.addCallback(lambda ign: ic1.remote_announce_v2([self.ann1b]))
        d.addCallback(fireEventually)

        def _then3(ign):
            self.failUnlessEqual(len(announcements), 2)
            key_s, ann = announcements[-1]
            self.failUnlessEqual(key_s, pubkey_s)
            self.failUnlessEqual(ann["anonymous-storage-FURL"], furl1)
            self.failUnlessEqual(ann["my-version"], "ver24")

        d.addCallback(_then3)

        # and a replacement announcement with a different FURL (it uses
        # different connection hints)
        d.addCallback(lambda ign: ic1.remote_announce_v2([self.ann1a]))
        d.addCallback(fireEventually)

        def _then4(ign):
            self.failUnlessEqual(len(announcements), 3)
            key_s, ann = announcements[-1]
            self.failUnlessEqual(key_s, pubkey_s)
            self.failUnlessEqual(ann["anonymous-storage-FURL"], furl1a)
            self.failUnlessEqual(ann["my-version"], "ver23")

        d.addCallback(_then4)

        # now add a new subscription, which should be called with the
        # backlog. The introducer only records one announcement per index, so
        # the backlog will only have the latest message.
        announcements2 = []

        def _received2(key_s, ann):
            announcements2.append((key_s, ann))

        d.addCallback(lambda ign: ic1.subscribe_to("storage", _received2))
        d.addCallback(fireEventually)

        def _then5(ign):
            self.failUnlessEqual(len(announcements2), 1)
            key_s, ann = announcements2[-1]
            self.failUnlessEqual(key_s, pubkey_s)
            self.failUnlessEqual(ann["anonymous-storage-FURL"], furl1a)
            self.failUnlessEqual(ann["my-version"], "ver23")

        d.addCallback(_then5)
        return d
Example #12
    def get_long_nodeid(self):
        # this matches what IServer.get_longname() says about us elsewhere
        vk_string = ed25519.string_from_verifying_key(self._node_public_key)
        return remove_prefix(vk_string, "pub-")
Example #13
    def test_remove_prefix_partial(self):
        """
        removing a prefix with only a partial match fails with an exception
        """
        with self.assertRaises(BadPrefixError):
            remove_prefix(b"foobar", b"fooz")
Example #14
    def test_remove_prefix_bad(self):
        """
        attempting to remove a prefix that doesn't exist fails with an exception
        """
        with self.assertRaises(BadPrefixError):
            remove_prefix(b"foobar", b"bar")
Example #15
    def test_remove_prefix_good(self):
        """
        remove a simple prefix properly
        """
        self.assertEquals(remove_prefix(b"foobar", b"foo"), b"bar")