Example #1
class DataHandle(unittest.TestCase):
    def setUp(self):
        self.test_data = "Test Data" * 50000
        self.uploadable = MutableData(self.test_data)

    def test_datahandle_read(self):
        chunk_size = 10
        for i in xrange(0, len(self.test_data), chunk_size):
            data = self.uploadable.read(chunk_size)
            data = "".join(data)
            start = i
            end = i + chunk_size
            self.failUnlessEqual(data, self.test_data[start:end])

    def test_datahandle_get_size(self):
        actual_size = len(self.test_data)
        size = self.uploadable.get_size()
        self.failUnlessEqual(size, actual_size)

    def test_datahandle_get_size_out_of_order(self):
        # We should be able to call get_size whenever we want without
        # disturbing the location of the seek pointer.
        chunk_size = 100
        data = self.uploadable.read(chunk_size)
        self.failUnlessEqual("".join(data), self.test_data[:chunk_size])

        # Now get the size.
        size = self.uploadable.get_size()
        self.failUnlessEqual(size, len(self.test_data))

        # Now get more data. We should be right where we left off.
        more_data = self.uploadable.read(chunk_size)
        start = chunk_size
        end = chunk_size * 2
        self.failUnlessEqual("".join(more_data), self.test_data[start:end])
Example #2
 def _created(n):
     d = defer.succeed(None)
     d.addCallback(lambda res: n.get_servermap(MODE_READ))
     d.addCallback(lambda smap: smap.dump(StringIO()))
     d.addCallback(
         lambda sio: self.failUnless("3-of-10" in sio.getvalue()))
     d.addCallback(lambda res: n.overwrite(MutableData(b"contents 1")))
     d.addCallback(lambda res: self.failUnlessIdentical(res, None))
     d.addCallback(lambda res: n.download_best_version())
     d.addCallback(lambda res: self.failUnlessEqual(res, b"contents 1"))
     d.addCallback(lambda res: n.get_size_of_best_version())
     d.addCallback(
         lambda size: self.failUnlessEqual(size, len(b"contents 1")))
     d.addCallback(lambda res: n.overwrite(MutableData(b"contents 2")))
     d.addCallback(lambda res: n.download_best_version())
     d.addCallback(lambda res: self.failUnlessEqual(res, b"contents 2"))
     d.addCallback(lambda res: n.get_servermap(MODE_WRITE))
     d.addCallback(
         lambda smap: n.upload(MutableData(b"contents 3"), smap))
     d.addCallback(lambda res: n.download_best_version())
     d.addCallback(lambda res: self.failUnlessEqual(res, b"contents 3"))
     d.addCallback(lambda res: n.get_servermap(MODE_ANYTHING))
     d.addCallback(lambda smap: n.download_version(
         smap, smap.best_recoverable_version()))
     d.addCallback(lambda res: self.failUnlessEqual(res, b"contents 3"))
     # test a file that is large enough to overcome the
     # mapupdate-to-retrieve data caching (i.e. make the shares larger
     # than the default readsize, which is 2000 bytes). A 15kB file
     # will have 5kB shares.
     d.addCallback(lambda res: n.overwrite(
         MutableData(b"large size file" * 1000)))
     d.addCallback(lambda res: n.download_best_version())
     d.addCallback(lambda res: self.failUnlessEqual(
         res, b"large size file" * 1000))
     return d
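The idiom above -- start from defer.succeed(None), then add one callback per step -- runs each step only after the previous one finishes, each callback receiving the prior result. A standalone illustration in plain Twisted, with no Tahoe objects:

from twisted.internet import defer

d = defer.succeed(None)               # an already-fired Deferred to anchor the chain
d.addCallback(lambda _: 1 + 1)        # each callback receives the previous result
d.addCallback(lambda two: two * 5)
d.addCallback(lambda ten: print("result:", ten))   # prints: result: 10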
Example #3
        def _created(n):
            d = defer.succeed(None)
            d.addCallback(lambda ignored: n.get_servermap(MODE_READ))

            def _then(servermap):
                dumped = servermap.dump(StringIO())
                self.assertThat(dumped.getvalue(), Contains("3-of-10"))

            d.addCallback(_then)
            # Now overwrite the contents with some new contents. We want
            # to make them big enough to force the file to be uploaded
            # in more than one segment.
            big_contents = b"contents1" * 100000  # about 900 KiB
            big_contents_uploadable = MutableData(big_contents)
            d.addCallback(lambda ignored: n.overwrite(big_contents_uploadable))
            d.addCallback(lambda ignored: n.download_best_version())
            d.addCallback(
                lambda data: self.assertThat(data, Equals(big_contents)))
            # Overwrite the contents again with some new contents. As
            # before, they need to be big enough to force multiple
            # segments, so that we make the downloader deal with
            # multiple segments.
            bigger_contents = b"contents2" * 1000000  # about 9MiB
            bigger_contents_uploadable = MutableData(bigger_contents)
            d.addCallback(
                lambda ignored: n.overwrite(bigger_contents_uploadable))
            d.addCallback(lambda ignored: n.download_best_version())
            d.addCallback(
                lambda data: self.assertThat(data, Equals(bigger_contents)))
            return d
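A quick sanity check that these payloads really do span several segments, assuming the 128 KiB max_segment_size that appears in Example #30:

import math

MAX_SEGMENT_SIZE = 128 * 1024         # assumption taken from Example #30

for payload in (b"contents1" * 100000, b"contents2" * 1000000):
    segments = math.ceil(len(payload) / MAX_SEGMENT_SIZE)
    print(len(payload), "bytes ->", segments, "segments")
# 900000 bytes -> 7 segments; 9000000 bytes -> 69 segments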
Example #4
 def _do_update(ignored):
     new_data = MutableData("foo bar baz" * 100000)
     new_small_data = MutableData("foo bar baz" * 10)
     d1 = self.mdmf_node.overwrite(new_data)
     d2 = self.sdmf_node.overwrite(new_small_data)
     dl = gatherResults([d1, d2])
     return dl
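gatherResults turns a list of Deferreds into a single Deferred that fires with the list of results, in input order, once both overwrites complete. A standalone illustration:

from twisted.internet.defer import gatherResults, succeed

dl = gatherResults([succeed("mdmf overwritten"), succeed("sdmf overwritten")])
dl.addCallback(print)                 # prints: ['mdmf overwritten', 'sdmf overwritten']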
Example #5
 def _created(n):
     d = defer.succeed(None)
     d.addCallback(lambda res: n.get_servermap(MODE_READ))
     d.addCallback(lambda smap: smap.dump(StringIO()))
     d.addCallback(
         lambda sio: self.assertTrue("3-of-10" in sio.getvalue()))
     d.addCallback(lambda res: n.overwrite(MutableData(b"contents 1")))
     d.addCallback(lambda res: self.assertThat(res, Is(None)))
     d.addCallback(lambda res: n.download_best_version())
     d.addCallback(
         lambda res: self.assertThat(res, Equals(b"contents 1")))
     d.addCallback(lambda res: n.overwrite(MutableData(b"contents 2")))
     d.addCallback(lambda res: n.download_best_version())
     d.addCallback(
         lambda res: self.assertThat(res, Equals(b"contents 2")))
     d.addCallback(lambda res: n.get_servermap(MODE_WRITE))
     d.addCallback(
         lambda smap: n.upload(MutableData(b"contents 3"), smap))
     d.addCallback(lambda res: n.download_best_version())
     d.addCallback(
         lambda res: self.assertThat(res, Equals(b"contents 3")))
     d.addCallback(lambda res: n.get_servermap(MODE_ANYTHING))
     d.addCallback(lambda smap: n.download_version(
         smap, smap.best_recoverable_version()))
     d.addCallback(
         lambda res: self.assertThat(res, Equals(b"contents 3")))
     return d
Example #6
    def _get_initial_contents(self, contents):
        if contents is None:
            return MutableData("")

        if isinstance(contents, str):
            return MutableData(contents)

        if IMutableUploadable.providedBy(contents):
            return contents

        assert callable(contents), "%s should be callable, not %s" % \
               (contents, type(contents))
        return contents(self)
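The same four-way dispatch, sketched standalone so it can be run directly (the names here are hypothetical, not Tahoe-LAFS APIs):

class Uploadable(object):             # stand-in for MutableData/IMutableUploadable
    def __init__(self, data):
        self.data = data

def get_initial_contents(contents, node=None):
    if contents is None:
        return Uploadable("")         # default to an empty file
    if isinstance(contents, str):
        return Uploadable(contents)   # wrap a raw string
    if isinstance(contents, Uploadable):
        return contents               # already an uploadable: pass through
    assert callable(contents)
    return contents(node)             # factory that receives the node itself

assert get_initial_contents(None).data == ""
assert get_initial_contents("hi").data == "hi"
assert get_initial_contents(lambda n: Uploadable("made")).data == "made"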
Example #7
 def test_toplevel_overwrite(self):
     new_data = MutableData("foo bar baz" * 100000)
     new_small_data = MutableData("foo bar baz" * 10)
     d = self.do_upload()
     d.addCallback(lambda ign: self.mdmf_node.overwrite(new_data))
     d.addCallback(lambda ignored: self.mdmf_node.download_best_version())
     d.addCallback(
         lambda data: self.failUnlessEqual(data, "foo bar baz" * 100000))
     d.addCallback(lambda ignored: self.sdmf_node.overwrite(new_small_data))
     d.addCallback(lambda ignored: self.sdmf_node.download_best_version())
     d.addCallback(
         lambda data: self.failUnlessEqual(data, "foo bar baz" * 10))
     return d
Example #8
    def _set_up(self, mutable, testdir, num_clients=1, num_servers=10):
        self.mutable = mutable
        if mutable:
            self.basedir = "hung_server/mutable_" + testdir
        else:
            self.basedir = "hung_server/immutable_" + testdir

        self.set_up_grid(num_clients=num_clients, num_servers=num_servers)

        self.c0 = self.g.clients[0]
        nm = self.c0.nodemaker
        self.servers = sorted([(s.get_serverid(), s.get_rref())
                               for s in nm.storage_broker.get_connected_servers()])
        # rotate the sorted list so the first five servers end up last
        self.servers = self.servers[5:] + self.servers[:5]

        if mutable:
            uploadable = MutableData(mutable_plaintext)
            d = nm.create_mutable_file(uploadable)
            def _uploaded_mutable(node):
                self.uri = node.get_uri()
                self.shares = self.find_uri_shares(self.uri)
            d.addCallback(_uploaded_mutable)
        else:
            data = upload.Data(immutable_plaintext, convergence="")
            d = self.c0.upload(data)
            def _uploaded_immutable(upload_res):
                self.uri = upload_res.get_uri()
                self.shares = self.find_uri_shares(self.uri)
            d.addCallback(_uploaded_immutable)
        return d
Example #9
 def publish_multiple(self, version=0):
     self.CONTENTS = [b"Contents 0",
                      b"Contents 1",
                      b"Contents 2",
                      b"Contents 3a",
                      b"Contents 3b"]
     self.uploadables = [MutableData(d) for d in self.CONTENTS]
     self._copied_shares = {}
     self._storage = FakeStorage()
     self._nodemaker = make_nodemaker(self._storage)
     d = self._nodemaker.create_mutable_file(self.uploadables[0], version=version) # seqnum=1
     def _created(node):
         self._fn = node
         # now create multiple versions of the same file, and accumulate
         # their shares, so we can mix and match them later.
         d = defer.succeed(None)
         d.addCallback(self._copy_shares, 0)
         d.addCallback(lambda res: node.overwrite(self.uploadables[1])) #s2
         d.addCallback(self._copy_shares, 1)
         d.addCallback(lambda res: node.overwrite(self.uploadables[2])) #s3
         d.addCallback(self._copy_shares, 2)
         d.addCallback(lambda res: node.overwrite(self.uploadables[3])) #s4a
         d.addCallback(self._copy_shares, 3)
         # now we replace all the shares with version s3, and upload a new
         # version to get s4b.
         rollback = dict([(i,2) for i in range(10)])
         d.addCallback(lambda res: self._set_versions(rollback))
         d.addCallback(lambda res: node.overwrite(self.uploadables[4])) #s4b
         d.addCallback(self._copy_shares, 4)
         # we leave the storage in state 4
         return d
     d.addCallback(_created)
     return d
Example #10
 def do_upload_sdmf(self):
     d = self.nm.create_mutable_file(MutableData(self.small_data))
     def _then(n):
         assert isinstance(n, MutableFileNode)
         self.sdmf_node = n
     d.addCallback(_then)
     return d
Example #11
        def _created(node):
            self.uri = node.get_uri()
            # also confirm that the cap has no extension fields
            pieces = self.uri.split(b":")
            self.failUnlessEqual(len(pieces), 4)

            return node.overwrite(MutableData(b"contents1" * 100000))
Example #12
    def _encode(self, k, n, data, version=SDMF_VERSION):
        # encode 'data' into a peerid->shares dict.

        fn = self._fn
        # disable the nodecache, since for these tests we explicitly need
        # multiple nodes pointing at the same file
        self._nodemaker._node_cache = DevNullDictionary()
        fn2 = self._nodemaker.create_from_cap(fn.get_uri())
        # then we copy over other fields that are normally fetched from the
        # existing shares
        fn2._pubkey = fn._pubkey
        fn2._privkey = fn._privkey
        fn2._encprivkey = fn._encprivkey
        # and set the encoding parameters to something completely different
        fn2._required_shares = k
        fn2._total_shares = n

        s = self._storage
        s._peers = {}  # clear existing storage
        p2 = Publish(fn2, self._storage_broker, None)
        uploadable = MutableData(data)
        d = p2.publish(uploadable)

        def _published(res):
            shares = s._peers
            s._peers = {}
            return shares

        d.addCallback(_published)
        return d
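The k and n arguments here are the erasure-coding parameters behind the "3-of-10" strings asserted elsewhere: any k of the n shares reconstruct the file, so each share carries roughly 1/k of the data plus overhead. That is the arithmetic behind Example #2's "15kB file will have 5kB shares" comment:

k = 3                                        # required shares out of n = 10
file_size = len(b"large size file" * 1000)   # 15000 bytes, as in Example #2
print(file_size // k)                        # 5000 -> roughly 5kB per share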
Example #13
 def test_retrieve_surprise(self):
     self.basedir = "mutable/Problems/test_retrieve_surprise"
     self.set_up_grid()
     nm = self.g.clients[0].nodemaker
     d = nm.create_mutable_file(MutableData(b"contents 1"*4000))
     def _created(n):
         d = defer.succeed(None)
         d.addCallback(lambda res: n.get_servermap(MODE_READ))
         def _got_smap1(smap):
             # stash the old state of the file
             self.old_map = smap
         d.addCallback(_got_smap1)
         # then modify the file, leaving the old map untouched
         d.addCallback(lambda res: log.msg("starting winning write"))
         d.addCallback(lambda res: n.overwrite(MutableData(b"contents 2")))
         # now attempt to retrieve the old version with the old servermap.
         # This will look like someone has changed the file since we
         # updated the servermap.
         d.addCallback(lambda res: log.msg("starting doomed read"))
         d.addCallback(lambda res:
                       self.shouldFail(NotEnoughSharesError,
                                       "test_retrieve_surprise",
                                       "ran out of servers: have 0 of 1",
                                       n.download_version,
                                       self.old_map,
                                       self.old_map.best_recoverable_version(),
                                       ))
         return d
     d.addCallback(_created)
     return d
Example #14
 def test_unexpected_shares(self):
     # upload the file, take a servermap, shut down one of the servers,
     # upload it again (causing shares to appear on a new server), then
     # upload using the old servermap. The last upload should fail with an
     # UncoordinatedWriteError, because of the shares that didn't appear
     # in the servermap.
     self.basedir = "mutable/Problems/test_unexpected_shares"
     self.set_up_grid()
     nm = self.g.clients[0].nodemaker
     d = nm.create_mutable_file(MutableData(b"contents 1"))
     def _created(n):
         d = defer.succeed(None)
         d.addCallback(lambda res: n.get_servermap(MODE_WRITE))
         def _got_smap1(smap):
             # stash the old state of the file
             self.old_map = smap
             # now shut down one of the servers
             peer0 = list(smap.make_sharemap()[0])[0].get_serverid()
             self.g.remove_server(peer0)
             # then modify the file, leaving the old map untouched
             log.msg("starting winning write")
             return n.overwrite(MutableData(b"contents 2"))
         d.addCallback(_got_smap1)
         # now attempt to modify the file with the old servermap. This
         # will look just like an uncoordinated write, in which every
         # single share got updated between our mapupdate and our publish
         d.addCallback(lambda res: log.msg("starting doomed write"))
         d.addCallback(lambda res:
                       self.shouldFail(UncoordinatedWriteError,
                                       "test_surprise", None,
                                       n.upload,
                                       MutableData(b"contents 2a"), self.old_map))
         return d
     d.addCallback(_created)
     return d
Example #15
 def OFF_test_retrieve_producer_sdmf(self):
     data = b"contents1" * 100000
     d = self.nodemaker.create_mutable_file(MutableData(data),
                                            version=SDMF_VERSION)
     d.addCallback(lambda node: node.get_best_mutable_version())
     d.addCallback(self._test_retrieve_producer, "SDMF", data)
     return d
Example #16
        def _created(node):
            self.uri = node.get_uri()
            # also confirm that the cap has no extension fields
            pieces = self.uri.split(b":")
            self.assertThat(pieces, HasLength(4))

            return node.overwrite(MutableData(b"contents1" * 100000))
Example #17
 def do_publish_surprise(self, version):
     self.basedir = "mutable/Problems/test_publish_surprise_%s" % version
     self.set_up_grid()
     nm = self.g.clients[0].nodemaker
     d = nm.create_mutable_file(MutableData(b"contents 1"),
                                 version=version)
     def _created(n):
         d = defer.succeed(None)
         d.addCallback(lambda res: n.get_servermap(MODE_WRITE))
         def _got_smap1(smap):
             # stash the old state of the file
             self.old_map = smap
         d.addCallback(_got_smap1)
         # then modify the file, leaving the old map untouched
         d.addCallback(lambda res: log.msg("starting winning write"))
         d.addCallback(lambda res: n.overwrite(MutableData(b"contents 2")))
         # now attempt to modify the file with the old servermap. This
         # will look just like an uncoordinated write, in which every
         # single share got updated between our mapupdate and our publish
         d.addCallback(lambda res: log.msg("starting doomed write"))
         d.addCallback(lambda res:
                       self.shouldFail(UncoordinatedWriteError,
                                       "test_publish_surprise", None,
                                       n.upload,
                                       MutableData(b"contents 2a"), self.old_map))
         return d
     d.addCallback(_created)
     return d
Example #18
    def test_bad_server_overlap(self):
        # like test_bad_server, but with no extra unused servers to fall back
        # upon. This means that we must re-use a server which we've already
        # used. If we don't remember the fact that we sent them one share
        # already, we'll mistakenly think we're experiencing an
        # UncoordinatedWriteError.

        # Break one server, then create the file: the initial publish should
        # complete with an alternate server. Breaking a second server should
        # not prevent an update from succeeding either.
        self.basedir = "mutable/Problems/test_bad_server_overlap"
        self.set_up_grid()
        nm = self.g.clients[0].nodemaker
        sb = nm.storage_broker

        peerids = [s.get_serverid() for s in sb.get_connected_servers()]
        self.g.break_server(peerids[0])

        d = nm.create_mutable_file(MutableData(b"contents 1"))
        def _created(n):
            d = n.download_best_version()
            d.addCallback(lambda res: self.failUnlessEqual(res, b"contents 1"))
            # now break one of the remaining servers
            def _break_second_server(res):
                self.g.break_server(peerids[1])
            d.addCallback(_break_second_server)
            d.addCallback(lambda res: n.overwrite(MutableData(b"contents 2")))
            # that ought to work too
            d.addCallback(lambda res: n.download_best_version())
            d.addCallback(lambda res: self.failUnlessEqual(res, b"contents 2"))
            return d
        d.addCallback(_created)
        return d
Example #19
 def _created(n):
     d = n.download_best_version()
     d.addCallback(lambda res: self.failUnlessEqual(res, b"contents 1"))
     upload2 = MutableData(b"contents 2")
     d.addCallback(lambda res: n.overwrite(upload2))
     d.addCallback(lambda res: n.download_best_version())
     d.addCallback(lambda res: self.failUnlessEqual(res, b"contents 2"))
     return d
Example #20
 def do_upload_mdmf(self):
     d = self.nm.create_mutable_file(MutableData(self.data),
                                     version=MDMF_VERSION)
     def _then(n):
         assert isinstance(n, MutableFileNode)
         self.mdmf_node = n
     d.addCallback(_then)
     return d
Example #21
 def _got_smap1(smap):
     # stash the old state of the file
     self.old_map = smap
     # now shut down one of the servers
     peer0 = list(smap.make_sharemap()[0])[0].get_serverid()
     self.g.remove_server(peer0)
     # then modify the file, leaving the old map untouched
     log.msg("starting winning write")
     return n.overwrite(MutableData(b"contents 2"))
Example #22
 def _run(ign):
     d = defer.succeed(None)
     d.addCallback(lambda ign: self.mdmf_node.get_best_mutable_version())
     d.addCallback(lambda mv: mv.update(MutableData((2 * new_segment) + b"replaced"),
                                        replace_offset))
     d.addCallback(lambda ignored: self.mdmf_node.download_best_version())
     d.addCallback(lambda results:
                   self.failUnlessEqual(results, new_data))
     return d
Example #23
 def _run(ign):
     d = defer.succeed(None)
     d.addCallback(lambda ign: self.sdmf_node.get_best_mutable_version())
     d.addCallback(lambda mv: mv.update(MutableData(b"appended"),
                                        len(self.small_data)))
     d.addCallback(lambda ign: self.sdmf_node.download_best_version())
     d.addCallback(lambda results:
                   self.failUnlessEqual(results, new_data))
     return d
Example #24
 def test_retrieve_producer_mdmf(self):
     # We should make sure that the retriever is able to pause and stop
     # correctly.
     data = b"contents1" * 100000
     d = self.nodemaker.create_mutable_file(MutableData(data),
                                            version=MDMF_VERSION)
     d.addCallback(lambda node: node.get_best_mutable_version())
     d.addCallback(self._test_retrieve_producer, "MDMF", data)
     return d
Example #25
 def do_upload_empty_sdmf(self):
     d = self.nm.create_mutable_file(MutableData(""))
     def _then(n):
         assert isinstance(n, MutableFileNode)
         self.sdmf_zero_length_node = n
         assert n._protocol_version == SDMF_VERSION
         return n
     d.addCallback(_then)
     return d
Example #26
        def _got_rootnode(n):
            # Add a few nodes.
            self._dircap = n.get_uri()
            nm = n._nodemaker
            # The uploaders may run at the same time, so we need two
            # MutableData instances or they'll fight over offsets &c and
            # break.
            mutable_data = MutableData(b"data" * 100000)
            mutable_data2 = MutableData(b"data" * 100000)
            # Add both kinds of mutable node.
            d1 = nm.create_mutable_file(mutable_data, version=MDMF_VERSION)
            d2 = nm.create_mutable_file(mutable_data2, version=SDMF_VERSION)
            # Add an immutable node. We do this through the directory,
            # with add_file.
            immutable_data = upload.Data(b"immutable data" * 100000,
                                         convergence=b"")
            d3 = n.add_file(u"immutable", immutable_data)
            ds = [d1, d2, d3]
            dl = defer.DeferredList(ds)

            def _made_files(args):
                (r1, r2, r3) = args
                self.failUnless(r1[0])
                self.failUnless(r2[0])
                self.failUnless(r3[0])

                # r1, r2, and r3 contain nodes.
                mdmf_node = r1[1]
                sdmf_node = r2[1]
                imm_node = r3[1]

                self._mdmf_uri = mdmf_node.get_uri()
                self._mdmf_readonly_uri = mdmf_node.get_readonly_uri()
                self._sdmf_uri = sdmf_node.get_uri()
                self._sdmf_readonly_uri = sdmf_node.get_readonly_uri()
                self._imm_uri = imm_node.get_uri()

                d1 = n.set_node(u"mdmf", mdmf_node)
                d2 = n.set_node(u"sdmf", sdmf_node)
                return defer.DeferredList([d1, d2])

            # We can now list the directory by listing self._dircap.
            dl.addCallback(_made_files)
            return dl
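_made_files relies on the shape of DeferredList results: a list of (success, result) two-tuples, one per input Deferred, which is why each r[0] is checked before r[1] is used. A standalone illustration:

from twisted.internet import defer

dl = defer.DeferredList([defer.succeed("node-a"), defer.succeed("node-b")])

def show(results):
    for ok, value in results:
        print(ok, value)              # True node-a / True node-b

dl.addCallback(show)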
Example #27
 def _created(n):
     d = n.download_best_version()
     d.addCallback(
         lambda data: self.assertThat(data, Equals(initial_contents)))
     uploadable2 = MutableData(initial_contents + b"foobarbaz")
     d.addCallback(lambda ignored: n.overwrite(uploadable2))
     d.addCallback(lambda ignored: n.download_best_version())
     d.addCallback(lambda data: self.assertThat(
         data, Equals(initial_contents + b"foobarbaz")))
     return d
Example #28
 def _created(n):
     d = n.download_best_version()
     d.addCallback(
         lambda res: self.assertThat(res, Equals(b"contents 1")))
     upload2 = MutableData(b"contents 2")
     d.addCallback(lambda res: n.overwrite(upload2))
     d.addCallback(lambda res: n.download_best_version())
     d.addCallback(
         lambda res: self.assertThat(res, Equals(b"contents 2")))
     return d
Example #29
 def test_create_with_too_large_contents(self):
     BIG = "a" * (self.OLD_MAX_SEGMENT_SIZE + 1)
     BIG_uploadable = MutableData(BIG)
     d = self.nodemaker.create_mutable_file(BIG_uploadable)
     def _created(n):
         other_BIG_uploadable = MutableData(BIG)
         d = n.overwrite(other_BIG_uploadable)
         return d
     d.addCallback(_created)
     return d
Example #30
def create_mutable_filenode(contents, mdmf=False, all_contents=None):
    # XXX: All of these arguments are kind of stupid.
    if mdmf:
        cap = make_mdmf_mutable_file_cap()
    else:
        cap = make_mutable_file_cap()

    encoding_params = {}
    encoding_params['k'] = 3
    encoding_params['max_segment_size'] = 128 * 1024

    filenode = FakeMutableFileNode(None, None, encoding_params, None,
                                   all_contents)
    filenode.init_from_cap(cap)
    if mdmf:
        filenode.create(MutableData(contents), version=MDMF_VERSION)
    else:
        filenode.create(MutableData(contents), version=SDMF_VERSION)
    return filenode
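A hedged usage sketch for the helper above, assuming FakeMutableFileNode and the cap helpers are importable from the same test-support module:

# Hypothetical usage -- names and availability depend on the test harness.
all_contents = {}
sdmf_node = create_mutable_filenode(b"small contents", mdmf=False,
                                    all_contents=all_contents)
mdmf_node = create_mutable_filenode(b"big contents" * 10000, mdmf=True,
                                    all_contents=all_contents)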