Example #1
 def test_allocate_buckets_repeat(self):
     """
     ``IStorageServer.allocate_buckets()`` with the same storage index does not return
     work-in-progress buckets, but will add any newly added buckets.
     """
     storage_index, renew_secret, cancel_secret = (
         new_storage_index(),
         new_secret(),
         new_secret(),
     )
     (already_got, allocated) = yield self.storage_client.allocate_buckets(
         storage_index,
         renew_secret,
         cancel_secret,
         sharenums=set(range(4)),
         allocated_size=1024,
         canary=Referenceable(),
     )
     (already_got2,
      allocated2) = yield self.storage_client.allocate_buckets(
          storage_index,
          renew_secret,
          cancel_secret,
          set(range(5)),
          1024,
          Referenceable(),
      )
     self.assertEqual(already_got, already_got2)
     self.assertEqual(set(allocated2.keys()), {4})
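
A note on the IStorageServer test excerpts on this page: they use yield directly inside test methods, so in their original suites they are presumably generator-based tests driven by Twisted (e.g. via defer.inlineCallbacks). A minimal, self-contained sketch of that pattern, with an illustrative test case that is not taken from the examples:

    from twisted.internet import defer
    from twisted.trial import unittest

    class InlineCallbacksSketch(unittest.TestCase):
        @defer.inlineCallbacks
        def test_yield_waits_for_deferreds(self):
            # Each yield suspends the test until the Deferred fires, which is
            # why the storage examples can write "result = yield ...".
            result = yield defer.succeed(42)
            self.assertEqual(result, 42)
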
Example #2
    def test_tubid_check(self):
        t1 = Tub()  # gets a new key
        t1.setServiceParent(self.s)
        l = t1.listenOn("tcp:0:interface=127.0.0.1")
        t1.setLocation("127.0.0.1:%d" % l.getPortnum())
        port1 = "tcp:%d:interface=127.0.0.1" % l.getPortnum()
        r1 = Referenceable()
        ffn = "test_tub.FurlFile.test_tubid_check.furlfile"
        furl1 = t1.registerReference(r1, furlFile=ffn)
        d = defer.maybeDeferred(t1.disownServiceParent)

        self.failUnless(os.path.exists(ffn))
        self.failUnlessEqual(furl1, open(ffn, "r").read().strip())

        def _take2(res):
            t2 = Tub()  # gets a different key
            t2.setServiceParent(self.s)
            l = t2.listenOn(port1)
            t2.setLocation("127.0.0.1:%d" % l.getPortnum())
            r2 = Referenceable()
            self.failUnlessRaises(WrongTubIdError,
                                  t2.registerReference,
                                  r2,
                                  furlFile=ffn)
            return t2.disownServiceParent()

        d.addCallback(_take2)
        return d
Example #3
    def test_client_v2_signed(self):
        introducer = IntroducerService()
        tub = introducer_furl = None
        app_versions = {"whizzy": "fizzy"}
        client_v2 = IntroducerClient(tub, introducer_furl, u"nick-v2",
                                     "my_version", "oldest", app_versions,
                                     fakeseq, FilePath(self.mktemp()))
        furl1 = "pb://[email protected]:0/swissnum"

        private_key, public_key = ed25519.create_signing_keypair()
        public_key_str = remove_prefix(
            ed25519.string_from_verifying_key(public_key), "pub-")

        ann_t0 = make_ann_t(client_v2, furl1, private_key, 10)
        canary0 = Referenceable()
        introducer.remote_publish_v2(ann_t0, canary0)
        a = introducer.get_announcements()
        self.failUnlessEqual(len(a), 1)
        self.assertThat(a[0].canary, Is(canary0))
        self.failUnlessEqual(a[0].index, ("storage", public_key_str))
        self.failUnlessEqual(a[0].announcement["app-versions"], app_versions)
        self.failUnlessEqual(a[0].nickname, u"nick-v2")
        self.failUnlessEqual(a[0].service_name, "storage")
        self.failUnlessEqual(a[0].version, "my_version")
        self.failUnlessEqual(a[0].announcement["anonymous-storage-FURL"],
                             furl1)
Example #4
 def test_unreachable(self):
     t = Tub()
     t.setServiceParent(self.s)
     # we call neither .listenOn nor .setLocation
     self.assertEqual(t.locationHints, [])
     self.assertRaises(NoLocationError, t.registerReference,
                       Referenceable())
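
For contrast with the failure shown above, here is a minimal sketch of the Tub setup the other examples rely on: listen on a port, declare a location, then register a Referenceable to obtain a FURL. The foolscap.api import path is the library's public entry point; the interface and port strings are illustrative.

    from foolscap.api import Referenceable, Tub

    tub = Tub()
    tub.startService()
    listener = tub.listenOn("tcp:0:interface=127.0.0.1")  # any free port
    tub.setLocation("127.0.0.1:%d" % listener.getPortnum())
    furl = tub.registerReference(Referenceable())  # succeeds: a location is set
    print(furl)
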
Example #5
    def test_furlfile(self):
        cfn = "test_tub.FurlFile.test_furlfile.certfile"
        t1 = Tub(certFile=cfn)
        t1.setServiceParent(self.s)
        l = t1.listenOn("tcp:0:interface=127.0.0.1")
        t1.setLocation("127.0.0.1:%d" % l.getPortnum())
        port1 = "tcp:%d:interface=127.0.0.1" % l.getPortnum()
        r1 = Referenceable()
        ffn = "test_tub.FurlFile.test_furlfile.furlfile"
        furl1 = t1.registerReference(r1, furlFile=ffn)
        d = defer.maybeDeferred(t1.disownServiceParent)

        self.failUnless(os.path.exists(ffn))
        self.failUnlessEqual(furl1, open(ffn, "r").read().strip())

        def _take2(res):
            t2 = Tub(certFile=cfn)
            t2.setServiceParent(self.s)
            l = t2.listenOn(port1)
            t2.setLocation("127.0.0.1:%d" % l.getPortnum())
            r2 = Referenceable()
            furl2 = t2.registerReference(r2, furlFile=ffn)
            self.failUnlessEqual(furl1, furl2)
            return t2.disownServiceParent()

        d.addCallback(_take2)
        return d
Example #6
    def test_referenceable(self):
        t1 = Tub()
        t1.setServiceParent(self.s)
        l = t1.listenOn("tcp:0:interface=127.0.0.1")
        t1.setLocation("127.0.0.1:%d" % l.getPortnum())
        r1 = Referenceable()
        # the serialized blob can't keep the reference alive, so you must
        # arrange for that separately
        t1.registerReference(r1)
        t2 = Tub()
        t2.setServiceParent(self.s)
        obj = ("graph tangly", r1)
        d = t1.serialize(obj)
        del r1
        del obj

        def _done(data):
            self.failUnless("their-reference" in data)
            return data

        d.addCallback(_done)
        d.addCallback(lambda data: t2.unserialize(data))

        def _check(obj2):
            self.failUnlessEqual(obj2[0], "graph tangly")
            self.failUnless(isinstance(obj2[1], RemoteReference))

        d.addCallback(_check)
        return d
Example #7
    def test_matching_overlapping_writes(self):
        """
        When doing overlapping writes in immutable uploads, matching writes
        succeed.
        """
        storage_index, renew_secret, cancel_secret = (
            new_storage_index(),
            new_secret(),
            new_secret(),
        )
        (_, allocated) = yield self.storage_client.allocate_buckets(
            storage_index,
            renew_secret,
            cancel_secret,
            sharenums={0},
            allocated_size=25,
            canary=Referenceable(),
        )

        yield allocated[0].callRemote("write", 0, b"1" * 10)
        # Overlapping write that matches:
        yield allocated[0].callRemote("write", 5, b"1" * 20)
        yield allocated[0].callRemote("close")

        buckets = yield self.storage_client.get_buckets(storage_index)
        self.assertEqual(set(buckets.keys()), {0})

        self.assertEqual((yield buckets[0].callRemote("read", 0, 25)),
                         b"1" * 25)
Example #8
    def test_id_collision(self):
        # test replacement case where tubid equals a keyid (one should
        # not replace the other)
        i = IntroducerService()
        ic = IntroducerClient(None, "introducer.furl", u"my_nickname",
                              "my_version", "oldest_version", {})
        sk_s, vk_s = keyutil.make_keypair()
        sk, _ignored = keyutil.parse_privkey(sk_s)
        keyid = keyutil.remove_prefix(vk_s, "pub-v0-")
        furl1 = "pb://[email protected]:123/short"  # base32("short")
        ann_t = ic.create_announcement("storage", make_ann(furl1), sk)
        i.remote_publish_v2(ann_t, Referenceable())
        announcements = i.get_announcements()
        self.failUnlessEqual(len(announcements), 1)
        key1 = ("storage", "v0-" + keyid, None)
        self.failUnlessEqual(announcements[0].index, key1)
        ann1_out = announcements[0].announcement
        self.failUnlessEqual(ann1_out["anonymous-storage-FURL"], furl1)

        furl2 = "pb://%[email protected]:36106/swissnum" % keyid
        ann2 = (furl2, "storage", "RIStorage", "nick1", "ver23", "ver0")
        i.remote_publish(ann2)
        announcements = i.get_announcements()
        self.failUnlessEqual(len(announcements), 2)
        key2 = ("storage", None, keyid)
        wanted = [ad for ad in announcements if ad.index == key2]
        self.failUnlessEqual(len(wanted), 1)
        ann2_out = wanted[0].announcement
        self.failUnlessEqual(ann2_out["anonymous-storage-FURL"], furl2)
Example #9
 def setUp(self):
     TargetMixin.setUp(self)
     self.tubA, self.tubB = self.makeTubs(2)
     self.url_on_b = self.tubB.registerReference(Referenceable())
     self.lookups = []
     self.lookups2 = []
     self.names = {}
     self.names2 = {}
Example #10
 def _take2(res):
     t2 = Tub() # gets a different key
     t2.setServiceParent(self.s)
     t2.listenOn(port1)
     t2.setLocation("127.0.0.1:%d" % portnum)
     r2 = Referenceable()
     self.assertRaises(WrongTubIdError,
                       t2.registerReference, r2, furlFile=ffn)
     return t2.disownServiceParent()
Example #11
 def _take2(res):
     t2 = Tub(certFile=cfn)
     t2.setServiceParent(self.s)
     l = t2.listenOn(port1)
     t2.setLocation("127.0.0.1:%d" % l.getPortnum())
     r2 = Referenceable()
     furl2 = t2.registerReference(r2, furlFile=ffn)
     self.failUnlessEqual(furl1, furl2)
     return t2.disownServiceParent()
Example #12
    def run(self, options):
        basedir = options.basedir
        stdout = options.stdout
        stderr = options.stderr
        if os.path.exists(basedir):
            print >> stderr, "Refusing to touch pre-existing directory %s" % basedir
            return 1

        assert options["port"]
        assert options["location"]

        os.makedirs(basedir)
        os.makedirs(os.path.join(basedir, "services"))
        os.chmod(basedir, 0700)

        # Start the server and let it create the key. The base FURL will be
        # written to a file so that subsequent 'add' and 'list' can compute
        # FURLs without needing to run the Tub (which might already be
        # running).

        f = open(os.path.join(basedir, "port"), "w")
        f.write("%s\n" % options["port"])
        f.close()
        # we'll overwrite BASEDIR/port if necessary

        f = open(os.path.join(basedir, "location"), "w")
        f.write("%s\n" % options["location"])
        f.close()

        f = open(os.path.join(basedir, "umask"), "w")
        f.write("%04o\n" % options["umask"])
        f.close()

        save_service_data(basedir, {"version": 1, "services": {}})

        a = AppServer(basedir, stdout)
        tub = a.tub

        sample_furl = tub.registerReference(Referenceable())
        furl_prefix = sample_furl[:sample_furl.rfind("/") + 1]
        f = open(os.path.join(basedir, "furl_prefix"), "w")
        f.write(furl_prefix + "\n")
        f.close()

        f = open(os.path.join(basedir, "flappserver.tac"), "w")
        stashed_path = ""
        for p in sys.path:
            stashed_path += "  %r,\n" % p
        f.write(FLAPPSERVER_TACFILE % {'path': stashed_path})
        f.close()

        if not options["quiet"]:
            print >> stdout, "Foolscap Application Server created in %s" % basedir
            print >> stdout, "TubID %s, listening on port %s" % (
                tub.getTubID(), options["port"])
            print >> stdout, "Now launch the daemon with 'flappserver start %s'" % basedir
        return defer.succeed(0)
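
The furl_prefix file written above (and again in appserver_ready() in Example #23) lets later commands construct full FURLs without starting the Tub: as the slicing of sample_furl shows, a FURL is the prefix up to and including the final '/' plus a per-service swissnum. A small illustration, assuming a hypothetical basedir and swissnum:

    import os

    basedir = "flappserver-basedir"      # hypothetical; stands in for options.basedir
    with open(os.path.join(basedir, "furl_prefix")) as f:
        furl_prefix = f.read().strip()   # e.g. "pb://TUBID@HOST:PORT/"
    swissnum = "abc123"                  # hypothetical per-service swissnum
    full_furl = furl_prefix + swissnum   # complete FURL for that service
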
Example #13
    def abort_or_disconnect_half_way(self, abort_or_disconnect):
        """
        If we disconnect/abort in the middle of writing to a bucket, all data
        is wiped, and it's even possible to write different data to the bucket.

        (In the real world one shouldn't do that, but writing different data is
        a good way to test that the original data really was wiped.)

        ``abort_or_disconnect`` is a callback that takes a bucket and aborts the
        upload, or perhaps disconnects the whole connection.
        """
        storage_index, renew_secret, cancel_secret = (
            new_storage_index(),
            new_secret(),
            new_secret(),
        )
        (_, allocated) = yield self.storage_client.allocate_buckets(
            storage_index,
            renew_secret,
            cancel_secret,
            sharenums={0},
            allocated_size=1024,
            canary=Referenceable(),
        )

        # Bucket 0 gets some data written (but not all, otherwise HTTP would
        # implicitly finish the upload)
        yield allocated[0].callRemote("write", 0, b"1" * 1023)

        # Disconnect or abort, depending on the test:
        yield abort_or_disconnect(allocated[0])

        # Write different data with no complaint:
        (_, allocated) = yield self.storage_client.allocate_buckets(
            storage_index,
            renew_secret,
            cancel_secret,
            sharenums={0},
            allocated_size=1024,
            canary=Referenceable(),
        )
        yield allocated[0].callRemote("write", 0, b"2" * 1024)
Example #14
 def test_unhandled_objects(self):
     obj1 = [1, Referenceable()]
     d = self.shouldFail(Violation, "1",
                         "This object can only be serialized by a broker",
                         serialize, obj1)
     obj2 = [1, Foo()]
     d.addCallback(lambda ign: self.shouldFail(
         Violation, "2",
         "cannot serialize <foolscap.test.test_serialize.Foo instance",
         serialize, obj2))
     return d
Example #15
    def __init__(self, tub, introducer_furl, nickname, my_version,
                 oldest_supported, app_versions, sequencer):
        self._tub = tub
        self.introducer_furl = introducer_furl

        assert type(nickname) is unicode
        self._nickname = nickname
        self._my_version = my_version
        self._oldest_supported = oldest_supported
        self._app_versions = app_versions
        self._sequencer = sequencer

        self._my_subscriber_info = {
            "version": 0,
            "nickname": self._nickname,
            "app-versions": self._app_versions,
            "my-version": self._my_version,
            "oldest-supported": self._oldest_supported,
        }
        self._stub_client = None  # for_v1
        self._stub_client_furl = None

        self._outbound_announcements = {}  # not signed
        self._published_announcements = {}  # signed
        self._canary = Referenceable()

        self._publisher = None

        self._local_subscribers = []  # (servicename,cb,args,kwargs) tuples
        self._subscribed_service_names = set()
        self._subscriptions = set()  # requests we've actually sent

        # _inbound_announcements remembers one announcement per
        # (servicename,serverid) pair. Anything that arrives with the same
        # pair will displace the previous one. This stores tuples of
        # (unpacked announcement dictionary, verifyingkey, rxtime). The ann
        # dicts can be compared for equality to distinguish re-announcement
        # from updates. It also provides memory for clients who subscribe
        # after startup.
        self._inbound_announcements = {}

        self.encoding_parameters = None

        # hooks for unit tests
        self._debug_counts = {
            "inbound_message": 0,
            "inbound_announcement": 0,
            "wrong_service": 0,
            "duplicate_announcement": 0,
            "update": 0,
            "new_announcement": 0,
            "outbound_message": 0,
        }
        self._debug_outstanding = 0
Example #16
    def test_written_shares_are_allocated(self):
        """
        Shares that are fully written to show up as allocated in the result
        from ``IStorageServer.allocate_buckets()``.  Partially-written or
        empty shares don't.
        """
        storage_index, renew_secret, cancel_secret = (
            new_storage_index(),
            new_secret(),
            new_secret(),
        )
        (_, allocated) = yield self.storage_client.allocate_buckets(
            storage_index,
            renew_secret,
            cancel_secret,
            sharenums=set(range(5)),
            allocated_size=1024,
            canary=Referenceable(),
        )

        # Bucket 1 is fully written in one go.
        yield allocated[1].callRemote("write", 0, b"1" * 1024)
        yield allocated[1].callRemote("close")

        # Bucket 2 is fully written in two steps.
        yield allocated[2].callRemote("write", 0, b"1" * 512)
        yield allocated[2].callRemote("write", 512, b"2" * 512)
        yield allocated[2].callRemote("close")

        # Bucket 0 has partial write.
        yield allocated[0].callRemote("write", 0, b"1" * 512)

        (already_got, _) = yield self.storage_client.allocate_buckets(
            storage_index,
            renew_secret,
            cancel_secret,
            sharenums=set(range(5)),
            allocated_size=1024,
            canary=Referenceable(),
        )
        self.assertEqual(already_got, {1, 2})
Example #17
 def setUp(self):
     TargetMixin.setUp(self)
     self.tubA, self.tubB = [GoodEnoughTub(), GoodEnoughTub()]
     self.services = [self.tubA, self.tubB]
     self.tubA.startService()
     self.tubB.startService()
     l = self.tubB.listenOn("tcp:0:interface=127.0.0.1")
     self.tubB.setLocation("127.0.0.1:%d" % l.getPortnum())
     self.url_on_b = self.tubB.registerReference(Referenceable())
     self.lookups = []
     self.lookups2 = []
     self.names = {}
     self.names2 = {}
Example #18
    def __init__(self, tub, introducer_furl, nickname, my_version,
                 oldest_supported, sequencer, cache_filepath):
        self._tub = tub
        self.introducer_furl = introducer_furl

        assert isinstance(nickname, str)
        self._nickname = nickname
        self._my_version = my_version
        self._oldest_supported = oldest_supported
        self._sequencer = sequencer
        self._cache_filepath = cache_filepath

        self._my_subscriber_info = {
            b"version": 0,
            b"nickname": self._nickname,
            b"app-versions": [],
            b"my-version": self._my_version,
            b"oldest-supported": self._oldest_supported,
        }

        self._outbound_announcements = {}  # not signed
        self._published_announcements = {}  # signed
        self._canary = Referenceable()

        self._publisher = None
        self._since = None

        self._local_subscribers = {}  # {servicename: ObserverList}
        self._subscriptions = set()  # requests we've actually sent

        # _inbound_announcements remembers one announcement per
        # (servicename,serverid) pair. Anything that arrives with the same
        # pair will displace the previous one. This stores tuples of
        # (unpacked announcement dictionary, verifyingkey, rxtime). The ann
        # dicts can be compared for equality to distinguish re-announcement
        # from updates. It also provides memory for clients who subscribe
        # after startup.
        self._inbound_announcements = {}

        # hooks for unit tests
        self._debug_counts = {
            "inbound_message": 0,
            "inbound_announcement": 0,
            "wrong_service": 0,
            "duplicate_announcement": 0,
            "update": 0,
            "new_announcement": 0,
            "outbound_message": 0,
        }
        self._debug_outstanding = 0
Example #19
 def test_set_location_automatically(self):
     t = Tub()
     l = t.listenOn("tcp:0")
     t.setServiceParent(self.s)
     d = t.setLocationAutomatically()
     d.addCallback(lambda res: t.registerReference(Referenceable()))
     def _check(furl):
         sr = SturdyRef(furl)
         portnum = l.getPortnum()
         for lh in sr.locationHints:
             self.failUnlessEqual(lh[2], portnum, lh)
         self.failUnless(("tcp", "127.0.0.1", portnum) in sr.locationHints)
     d.addCallback(_check)
     return d
Example #20
 def test_allocate_buckets_new(self):
     """
     allocate_buckets() with a new storage index returns the matching
     shares.
     """
     (already_got, allocated) = yield self.storage_client.allocate_buckets(
         new_storage_index(),
         renew_secret=new_secret(),
         cancel_secret=new_secret(),
         sharenums=set(range(5)),
         allocated_size=1024,
         canary=Referenceable(),
     )
     self.assertEqual(already_got, set())
     self.assertEqual(set(allocated.keys()), set(range(5)))
Example #21
    def create_share(self):
        """Create a share, return the storage index."""
        storage_index = new_storage_index()
        renew_secret = new_secret()
        cancel_secret = new_secret()
        (_, allocated) = yield self.storage_client.allocate_buckets(
            storage_index,
            renew_secret=renew_secret,
            cancel_secret=cancel_secret,
            sharenums=set(range(1)),
            allocated_size=10,
            canary=Referenceable(),
        )

        yield allocated[0].callRemote("write", 0, b"0123456789")
        yield allocated[0].callRemote("close")
        returnValue((storage_index, renew_secret, cancel_secret))
Example #22
 def test_referenceables_die(self):
     # serialized data will not keep the referenceable alive
     t1 = Tub()
     t1.setServiceParent(self.s)
     l = t1.listenOn("tcp:0:interface=127.0.0.1")
     t1.setLocation("127.0.0.1:%d" % l.getPortnum())
     r1 = Referenceable()
     t2 = Tub()
     t2.setServiceParent(self.s)
     obj = ("graph tangly", r1)
     d = t1.serialize(obj)
     del r1
     del obj
     gc.collect()
     d.addCallback(lambda data: self.shouldFail(
         KeyError, "test_referenceables_die",
         "unable to find reference for name", t2.unserialize, data))
     return d
Example #23
    def appserver_ready(self, _ignored, options):
        basedir = options.basedir
        stdout = options.stdout
        quiet = options["quiet"]

        tub = self.server.tub
        # what port is it actually listening on?
        l0 = tub.getListeners()[0]

        port = options["port"]
        got_port = port
        pieces = port.split(":")
        if "0" in pieces:
            # If the --port argument didn't tightly specify the port to use,
            # write down the one we actually got, so we'll keep using the
            # same one later
            pieces[pieces.index("0")] = str(l0.getPortnum())
            if pieces[0] != "tcp":
                pieces = ["tcp"] + pieces
            got_port = ":".join(pieces)
            f = open(os.path.join(basedir, "port"), "w")
            f.write(got_port + "\n")
            f.close()

        tubid = tub.getTubID()

        sample_furl = tub.registerReference(Referenceable())
        furl_prefix = sample_furl[:sample_furl.rfind("/") + 1]
        f = open(os.path.join(basedir, "furl_prefix"), "w")
        f.write(furl_prefix + "\n")
        f.close()

        f = open(os.path.join(basedir, "flappserver.tac"), "w")
        stashed_path = ""
        for p in sys.path:
            stashed_path += "  %r,\n" % p
        f.write(FLAPPSERVER_TACFILE % {'path': stashed_path})
        f.close()

        if not quiet:
            print >> stdout, "Foolscap Application Server created in %s" % basedir
            print >> stdout, "TubID %s, listening on port %s" % (tubid,
                                                                 got_port)
            print >> stdout, "Now launch the daemon with 'flappserver start %s'" % basedir
Example #24
 def test_client_v2_unsigned(self):
     introducer = IntroducerService()
     tub = introducer_furl = None
     app_versions = {"whizzy": "fizzy"}
     client_v2 = IntroducerClient(tub, introducer_furl, u"nick-v2",
                                  "my_version", "oldest", app_versions)
     furl1 = "pb://[email protected]:0/swissnum"
     tubid = "62ubehyunnyhzs7r6vdonnm2hpi52w6y"
     ann_s0 = make_ann_t(client_v2, furl1, None, 10.0)
     canary0 = Referenceable()
     introducer.remote_publish_v2(ann_s0, canary0)
     a = introducer.get_announcements()
     self.failUnlessEqual(len(a), 1)
     self.failUnlessIdentical(a[0].canary, canary0)
     self.failUnlessEqual(a[0].index, ("storage", None, tubid))
     self.failUnlessEqual(a[0].announcement["app-versions"], app_versions)
     self.failUnlessEqual(a[0].nickname, u"nick-v2")
     self.failUnlessEqual(a[0].service_name, "storage")
     self.failUnlessEqual(a[0].version, "my_version")
     self.failUnlessEqual(a[0].announcement["anonymous-storage-FURL"],
                          furl1)
Example #25
    def test_set_location_automatically(self):
        t = GoodEnoughTub()
        l = t.listenOn("tcp:0")
        t.setServiceParent(self.s)
        d = t.setLocationAutomatically()
        d.addCallback(lambda res: t.registerReference(Referenceable()))

        def _check(furl):
            sr = SturdyRef(furl)
            portnum = l.getPortnum()
            if sr.encrypted:
                for lh in sr.locationHints:
                    self.failUnlessEqual(lh[2], portnum, lh)
                self.failUnless(("ipv4", "127.0.0.1",
                                 portnum) in sr.locationHints)
            else:
                # TODO: unauthenticated tubs need review, I think they
                # deserve to have tubids and multiple connection hints
                pass

        d.addCallback(_check)
        return d
Example #26
    def test_written_shares_are_readable(self):
        """
        Shares that are fully written to can be read.

        The result is not affected by the order in which writes
        happened, only by their offsets.
        """
        storage_index, renew_secret, cancel_secret = (
            new_storage_index(),
            new_secret(),
            new_secret(),
        )
        (_, allocated) = yield self.storage_client.allocate_buckets(
            storage_index,
            renew_secret,
            cancel_secret,
            sharenums=set(range(5)),
            allocated_size=1024,
            canary=Referenceable(),
        )

        # Bucket 1 is fully written in order
        yield allocated[1].callRemote("write", 0, b"1" * 512)
        yield allocated[1].callRemote("write", 512, b"2" * 512)
        yield allocated[1].callRemote("close")

        # Bucket 2 is fully written in reverse.
        yield allocated[2].callRemote("write", 512, b"4" * 512)
        yield allocated[2].callRemote("write", 0, b"3" * 512)
        yield allocated[2].callRemote("close")

        buckets = yield self.storage_client.get_buckets(storage_index)
        self.assertEqual(set(buckets.keys()), {1, 2})

        self.assertEqual((yield buckets[1].callRemote("read", 0, 1024)),
                         b"1" * 512 + b"2" * 512)
        self.assertEqual((yield buckets[2].callRemote("read", 0, 1024)),
                         b"3" * 512 + b"4" * 512)
Example #27
 def test_client_v2_signed(self):
     introducer = IntroducerService()
     tub = introducer_furl = None
     app_versions = {"whizzy": "fizzy"}
     client_v2 = IntroducerClient(tub, introducer_furl, u"nick-v2",
                                  "my_version", "oldest", app_versions,
                                  fakeseq, FilePath(self.mktemp()))
     furl1 = "pb://[email protected]:0/swissnum"
     sk_s, vk_s = keyutil.make_keypair()
     sk, _ignored = keyutil.parse_privkey(sk_s)
     pks = keyutil.remove_prefix(vk_s, "pub-")
     ann_t0 = make_ann_t(client_v2, furl1, sk, 10)
     canary0 = Referenceable()
     introducer.remote_publish_v2(ann_t0, canary0)
     a = introducer.get_announcements()
     self.failUnlessEqual(len(a), 1)
     self.failUnlessIdentical(a[0].canary, canary0)
     self.failUnlessEqual(a[0].index, ("storage", pks))
     self.failUnlessEqual(a[0].announcement["app-versions"], app_versions)
     self.failUnlessEqual(a[0].nickname, u"nick-v2")
     self.failUnlessEqual(a[0].service_name, "storage")
     self.failUnlessEqual(a[0].version, "my_version")
     self.failUnlessEqual(a[0].announcement["anonymous-storage-FURL"], furl1)
Example #28
    def test_non_matching_overlapping_writes(self):
        """
        When doing overlapping writes in immutable uploads, non-matching writes
        fail.
        """
        storage_index, renew_secret, cancel_secret = (
            new_storage_index(),
            new_secret(),
            new_secret(),
        )
        (_, allocated) = yield self.storage_client.allocate_buckets(
            storage_index,
            renew_secret,
            cancel_secret,
            sharenums={0},
            allocated_size=30,
            canary=Referenceable(),
        )

        yield allocated[0].callRemote("write", 0, b"1" * 25)
        # Overlapping write that doesn't match:
        with self.assertRaises(RemoteException):
            yield allocated[0].callRemote("write", 20, b"2" * 10)
Example #29
    def test_read_bucket_at_offset(self):
        """
        Given a read bucket returned from ``IStorageServer.get_buckets()``, it
        is possible to read at different offsets and lengths, with reads past
        the end resulting in empty bytes.
        """
        length = 256 * 17

        storage_index = new_storage_index()
        (_, allocated) = yield self.storage_client.allocate_buckets(
            storage_index,
            renew_secret=new_secret(),
            cancel_secret=new_secret(),
            sharenums=set(range(1)),
            allocated_size=length,
            canary=Referenceable(),
        )

        total_data = _randbytes(256 * 17)
        yield allocated[0].callRemote("write", 0, total_data)
        yield allocated[0].callRemote("close")

        buckets = yield self.storage_client.get_buckets(storage_index)
        bucket = buckets[0]
        for start, to_read in [
            (0, 250),  # fraction
            (0, length),  # whole thing
            (100, 1024),  # offset fraction
            (length + 1, 100),  # completely out of bounds
            (length - 100, 200),  # partially out of bounds
        ]:
            data = yield bucket.callRemote("read", start, to_read)
            self.assertEqual(
                data,
                total_data[start:start + to_read],
                "Didn't match for start {}, length {}".format(start, to_read),
            )
Example #30
    def test_get_buckets_skips_unfinished_buckets(self):
        """
        Buckets that are not fully written are not returned by
        ``IStorageServer.get_buckets()`` implementations.
        """
        storage_index = new_storage_index()
        (_, allocated) = yield self.storage_client.allocate_buckets(
            storage_index,
            renew_secret=new_secret(),
            cancel_secret=new_secret(),
            sharenums=set(range(5)),
            allocated_size=10,
            canary=Referenceable(),
        )

        # Bucket 1 is fully written
        yield allocated[1].callRemote("write", 0, b"1" * 10)
        yield allocated[1].callRemote("close")

        # Bucket 2 is partially written
        yield allocated[2].callRemote("write", 0, b"1" * 5)

        buckets = yield self.storage_client.get_buckets(storage_index)
        self.assertEqual(set(buckets.keys()), {1})
Example #31
File: app.py Project: UfSoft/afm
 def __init__(self):
     Referenceable.__init__(self)
     eventmanager.register_event_handler("CoreUrlGenerated",
                                         self._update_core_url)
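
Example #31 subclasses Referenceable; in foolscap the usual reason to do so is to expose remote_* methods, which peers then invoke through callRemote (the same mechanism the storage examples above use on their bucket references). A small sketch of that convention; the class and method names are made up:

    from foolscap.api import Referenceable

    class Echoer(Referenceable):
        # A method named remote_<name> becomes callable by remote peers as
        # rref.callRemote("<name>", ...).
        def remote_echo(self, message):
            return message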