def do_encode(self, max_segment_size, datalen, NUM_SHARES, NUM_SEGMENTS,
                  expected_block_hashes, expected_share_hashes):
        data = make_data(datalen)
        e = encode.Encoder()
        u = upload.Data(data, convergence="some convergence string")
        # force use of multiple segments
        u.max_segment_size = max_segment_size
        u.encoding_param_k = 25
        u.encoding_param_happy = 75
        u.encoding_param_n = 100
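        # k=25-of-n=100 erasure coding: any 25 of the 100 shares are enough
        # to reconstruct the file (a 4x expansion factor), and 'happy' is
        # the minimum number of shares that must be placed for the upload
        # to be considered a success.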
        eu = upload.EncryptAnUploadable(u)
        d = e.set_encrypted_uploadable(eu)

        all_shareholders = []
        def _ready(res):
            k, happy, n = e.get_param("share_counts")
            _assert(n == NUM_SHARES) # else we'll be completely confused
            numsegs = e.get_param("num_segments")
            _assert(numsegs == NUM_SEGMENTS, numsegs, NUM_SEGMENTS)
            segsize = e.get_param("segment_size")
            _assert( (NUM_SEGMENTS-1)*segsize < len(data) <= NUM_SEGMENTS*segsize,
                     NUM_SEGMENTS, segsize,
                     (NUM_SEGMENTS-1)*segsize, len(data), NUM_SEGMENTS*segsize)
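            # worked example (values assumed for illustration): datalen=101
            # with segsize=25 gives NUM_SEGMENTS=5, since 4*25=100 < 101 <= 5*25=125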

            shareholders = {}
            servermap = {}
            for shnum in range(NUM_SHARES):
                peer = FakeBucketReaderWriterProxy()
                shareholders[shnum] = peer
                servermap.setdefault(shnum, set()).add(peer.get_peerid())
                all_shareholders.append(peer)
            e.set_shareholders(shareholders, servermap)
            return e.start()
        d.addCallback(_ready)

        def _check(res):
            verifycap = res
            self.failUnless(isinstance(verifycap.uri_extension_hash, str))
            self.failUnlessEqual(len(verifycap.uri_extension_hash), 32)
            for peer in all_shareholders:
                self.failUnless(peer.closed)
                self.failUnlessEqual(len(peer.blocks), NUM_SEGMENTS)
                # each peer gets a full tree of block hashes. For 3 or 4
                # segments, that's 7 hashes. For 5 segments it's 15 hashes.
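                # in general that's the node count of a complete binary tree
                # over the next power of two of S segments: 2*next_pow2(S) - 1,
                # e.g. with next_pow2 = lambda S: 1 << (S-1).bit_length(),
                # 2*4-1 == 7 for S in (3, 4) and 2*8-1 == 15 for S == 5.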
                self.failUnlessEqual(len(peer.block_hashes),
                                     expected_block_hashes)
                for h in peer.block_hashes:
                    self.failUnlessEqual(len(h), 32)
                # each peer also gets its necessary chain of share hashes.
                # For 100 shares (rounded up to 128 leaves), that's 8 hashes
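                # (100 leaves round up to 128 == 2**7, a 7-level tree;
                # presumably the 8 entries are the share's own leaf hash
                # plus its 7 uncle hashes on the path to the root)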
                self.failUnlessEqual(len(peer.share_hashes),
                                     expected_share_hashes)
                for (hashnum, h) in peer.share_hashes:
                    self.failUnless(isinstance(hashnum, int))
                    self.failUnlessEqual(len(h), 32)
        d.addCallback(_check)

        return d
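
# A hedged usage sketch (parameter values are assumptions for illustration,
# not taken from this listing): 101 bytes at 25 bytes per segment spans 5
# segments, so each share should carry a 15-node block hash tree and an
# 8-entry share hash chain:
#
#   d = self.do_encode(max_segment_size=25, datalen=101, NUM_SHARES=100,
#                      NUM_SEGMENTS=5, expected_block_hashes=15,
#                      expected_share_hashes=8)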
Example #2
def find_share_for_target(target):
    target_s = base32.b2a(target)
    prefix = "The first share of this file will be placed on " + target_s + "\n"
    prefix += "This data is random: "
    attempts = 0
    while True:
        attempts += 1
        suffix = base32.b2a(os.urandom(10))
        if verbose: print " trying", suffix,
        data = prefix + suffix + "\n"
        assert len(data) > 55  # no LIT files
        # now, what storage index will this get?
        u = upload.Data(data, convergence)
        eu = upload.EncryptAnUploadable(u)
        d = eu.get_storage_index()  # this happens to run synchronously

        def _got_si(si, data=data):
            if verbose: print "SI", base32.b2a(si),
            peerlist = get_permuted_peers(si)
            if peerlist[0] == target:
                # great!
                if verbose: print "  yay!"
                fn = base32.b2a(target)
                if nodes[target]:
                    nickname = nodes[target].replace("/", "_")
                    fn += "-" + nickname
                fn += ".txt"
                fn = os.path.join("canaries", fn)
                with open(fn, "w") as f:
                    f.write(data)
                return True
            # nope, must try again
            if verbose: print "  boo"
            return False

        d.addCallback(_got_si)
        # get sneaky and look inside the Deferred for the synchronous result
        if d.result:
            return attempts
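
# A minimal sketch of the permuted-peer ordering this script depends on.
# get_permuted_peers is not defined in this listing; the helper below is
# an assumption (hashlib.sha1 and the all_peerids global are stand-ins).
# Tahoe-LAFS ranks servers by hashing the storage index together with each
# peerid, so every file sees its own ordering of the grid:
#
#   import hashlib
#   def get_permuted_peers(si):
#       return sorted(all_peerids,
#                     key=lambda peerid: hashlib.sha1(si + peerid).digest())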