def test_literal_filenode(self):
    """
    Exercise the full read-only API surface of a LiteralFileNode built
    from a LIT (literal) URI: equality/hashing, cap accessors, size,
    and the several download entry points.
    """
    DATA = "I am a short file."
    u = uri.LiteralFileURI(data=DATA)
    fn1 = LiteralFileNode(u)
    fn2 = LiteralFileNode(u)
    # Two nodes built from the same cap compare equal; unrelated
    # objects do not.
    self.failUnlessEqual(fn1, fn2)
    self.failIfEqual(fn1, "I am not a filenode")
    self.failIfEqual(fn1, NotANode())
    # Cap accessors: a literal file is its own read cap.
    self.failUnlessEqual(fn1.get_uri(), u.to_string())
    self.failUnlessEqual(fn1.get_cap(), u)
    self.failUnlessEqual(fn1.get_readcap(), u)
    # Literal files are immutable, known, and directory-safe.
    self.failUnless(fn1.is_readonly())
    self.failIf(fn1.is_mutable())
    self.failIf(fn1.is_unknown())
    self.failUnless(fn1.is_allowed_in_immutable_directory())
    self.failUnlessEqual(fn1.get_write_uri(), None)
    self.failUnlessEqual(fn1.get_readonly_uri(), u.to_string())
    self.failUnlessEqual(fn1.get_size(), len(DATA))
    # Literal files have no storage index (the data lives in the cap).
    self.failUnlessEqual(fn1.get_storage_index(), None)
    # raise_error() must be a no-op on a healthy node.
    fn1.raise_error()
    fn2.raise_error()
    d = {}
    d[fn1] = 1 # exercise __hash__
    # Literal files cannot be verified or repaired, so both caps are None.
    v = fn1.get_verify_cap()
    self.failUnlessEqual(v, None)
    self.failUnlessEqual(fn1.get_repair_cap(), None)
    # Full download returns the original bytes.
    d = download_to_data(fn1)
    def _check(res):
        self.failUnlessEqual(res, DATA)
    d.addCallback(_check)
    # Partial download: offset=1, size=5 returns that exact slice.
    d.addCallback(lambda res: download_to_data(fn1, 1, 5))
    def _check_segment(res):
        self.failUnlessEqual(res, DATA[1:1+5])
    d.addCallback(_check_segment)
    # The best readable version of an immutable node is the node itself.
    d.addCallback(lambda ignored: fn1.get_best_readable_version())
    d.addCallback(lambda fn2: self.failUnlessEqual(fn1, fn2))
    d.addCallback(lambda ignored: fn1.get_size_of_best_version())
    d.addCallback(lambda size: self.failUnlessEqual(size, len(DATA)))
    # Both download entry points on the node yield the same plaintext.
    d.addCallback(lambda ignored: fn1.download_to_data())
    d.addCallback(lambda data: self.failUnlessEqual(data, DATA))
    d.addCallback(lambda ignored: fn1.download_best_version())
    d.addCallback(lambda data: self.failUnlessEqual(data, DATA))
    return d
def _start_download(self):
    """
    Create a fresh node for self.uri and begin downloading it,
    returning the download Deferred.  Mutable files go through
    download_best_version(); immutable ones through download_to_data().
    """
    node = self.c0.create_node_from_uri(self.uri)
    if self.mutable:
        return node.download_best_version()
    return download_to_data(node)
def _download_and_check_plaintext(self, ign=None):
    """
    Download self.filenode, verify the plaintext matches TEST_DATA, and
    fire the returned Deferred with the number of storage reads the
    download caused.
    """
    reads_before = self._count_reads()
    d = download_to_data(self.filenode)
    def _after_download(result):
        self.failUnlessEqual(result, TEST_DATA)
        # report how many reads the download itself performed
        return self._count_reads() - reads_before
    d.addCallback(_after_download)
    return d
def _read(self):
    """
    Fetch this node's contents and run them through _unpack_contents,
    returning a Deferred that fires with the unpacked result.
    """
    if not self._node.is_mutable():
        d = download_to_data(self._node)
    else:
        # mutable files must go through the IMutableFileNode API
        d = self._node.download_best_version()
    d.addCallback(self._unpack_contents)
    return d
def download_best_version(self, progress=None):
    """
    Return a Deferred that fires with this file's contents as a
    bytestring.  An immutable file has exactly one version, so the
    "best" version is simply the whole file.
    """
    return consumer.download_to_data(self, progress=progress)
def download_best_version(self):
    """
    Return a Deferred that fires with this file's contents as a
    bytestring.  Immutable files have only one version, so this is
    equivalent to downloading the entire file.
    """
    return consumer.download_to_data(self)
def do_test_size(self, size):
    """
    Upload `size` bytes of filler, download them back, and assert the
    round-trip preserved the data exactly.
    """
    self.basedir = self.mktemp()
    self.set_up_grid()
    self.c0 = self.g.clients[0]
    payload = "p" * size
    d = self.upload(payload)
    # addCallback hands the uploaded node straight to download_to_data
    d.addCallback(download_to_data)
    d.addCallback(lambda retrieved: self.failUnlessEqual(retrieved, payload))
    return d
def _attempt_to_download(unused=None):
    # The download of self.n is expected to fail with
    # NotEnoughSharesError; reaching the callback path is a test failure.
    d2 = download_to_data(self.n)
    def _should_not_succeed(res):
        self.fail("Should have gotten an error from attempt to download, not %r" % (res,))
    def _expected_error(f):
        self.failUnless(f.check(NotEnoughSharesError))
    d2.addCallbacks(_should_not_succeed, _expected_error)
    return d2
def _reduce_max_outstanding_requests_and_download(ign):
    # Hang the first five shares so their requests never complete.
    self._hang_shares(range(5))
    n = self.c0.create_node_from_uri(self.uri)
    # Reach into the node's private download machinery to get at its
    # ShareFinder before any download starts.
    # NOTE(review): depends on _cnode/_node/_sharefinder internals —
    # fragile against refactors of the download code.
    n._cnode._maybe_create_download_node()
    self._sf = n._cnode._node._sharefinder
    # Cap concurrent share-requests at 5 (matching the 5 hung shares)
    # and push the overdue timeout far out so it never fires during
    # the test.
    self._sf.max_outstanding_requests = 5
    self._sf.OVERDUE_TIMEOUT = 1000.0
    d2 = download_to_data(n)
    # start download, but don't wait for it to complete yet
    def _done(res):
        # `done` is a list from the enclosing scope (not visible here)
        done.append(res) # we will poll for this later
    d2.addBoth(_done)
def _then_download(unused=None):
    # Downloading self.n must fail with NotEnoughSharesError.
    d2 = download_to_data(self.n)
    def _on_unexpected_success(result):
        self.fail() # should have gotten an errback instead
        return result
    def _on_expected_failure(failure):
        failure.trap(NotEnoughSharesError)
        return None # success!
    d2.addCallbacks(_on_unexpected_success, _on_expected_failure)
    return d2
def _attempt_to_download(unused=None):
    # A successful download here is a bug: too few shares remain, so
    # we expect NotEnoughSharesError on the errback path.
    d2 = download_to_data(self.n)
    def _unexpected(res):
        self.fail(
            "Should have gotten an error from attempt to download, not %r" % (res, ))
    def _expected(f):
        self.failUnless(f.check(NotEnoughSharesError))
    d2.addCallbacks(_unexpected, _expected)
    return d2
def test_repair_from_deletion_of_1(self):
    """
    Repair replaces a share that got deleted.
    """
    self.basedir = "repairer/Repairer/repair_from_deletion_of_1"
    self.set_up_grid(num_clients=2)
    d = self.upload_and_stash()
    # Delete share #2, snapshot the read/allocate/write counters, then
    # run check-and-repair (without verification) on client 0's node.
    d.addCallback(
        lambda ignored: self.delete_shares_numbered(self.uri, [2]))
    d.addCallback(lambda ignored: self._stash_counts())
    d.addCallback(lambda ignored: self.c0_filenode.check_and_repair(
        Monitor(), verify=False))
    def _check_results(crr):
        self.failUnlessIsInstance(crr, check_results.CheckAndRepairResults)
        pre = crr.get_pre_repair_results()
        self.failUnlessIsInstance(pre, check_results.CheckResults)
        post = crr.get_post_repair_results()
        self.failUnlessIsInstance(post, check_results.CheckResults)
        # The repair should cost at most MAX_DELTA_READS reads and one
        # share's worth of allocates; delta_writes is not bounded here.
        delta_reads, delta_allocates, delta_writes = self._get_delta_counts(
        )
        self.failIfBigger(delta_reads, MAX_DELTA_READS)
        self.failIfBigger(delta_allocates, DELTA_WRITES_PER_SHARE)
        # Unhealthy before repair, healthy after.
        self.failIf(pre.is_healthy())
        self.failUnless(post.is_healthy())
        # Now we inspect the filesystem to make sure that it has 10
        # shares.
        shares = self.find_uri_shares(self.uri)
        self.failIf(len(shares) < 10)
    d.addCallback(_check_results)
    # A full verify after repair must report the file as healthy.
    d.addCallback(
        lambda ignored: self.c0_filenode.check(Monitor(), verify=True))
    d.addCallback(lambda vr: self.failUnless(vr.is_healthy()))
    # Now we delete seven of the other shares, then try to download the
    # file and assert that it succeeds at downloading and has the right
    # contents. This can't work unless it has already repaired the
    # previously-deleted share #2.
    d.addCallback(lambda ignored: self.delete_shares_numbered(
        self.uri, range(3, 10 + 1)))
    # Download through the second client to avoid any caching on c0.
    d.addCallback(lambda ignored: download_to_data(self.c1_filenode))
    d.addCallback(
        lambda newdata: self.failUnlessEqual(newdata, common.TEST_DATA))
    return d
def _reduce_max_outstanding_requests_and_download(ign):
    # we need to hang the first 5 servers, so we have to
    # figure out where the shares were placed.
    si = uri.from_string(self.uri).get_storage_index()
    placed = self.c0.storage_broker.get_servers_for_psi(si)
    self._hang([(s.get_serverid(), s) for s in placed[:5]])
    n = self.c0.create_node_from_uri(self.uri)
    # Reach into the node's private download machinery to grab its
    # ShareFinder before the download begins.
    # NOTE(review): relies on _cnode/_node/_sharefinder internals.
    n._cnode._maybe_create_download_node()
    self._sf = n._cnode._node._sharefinder
    # Limit concurrent requests to 5 (one per hung server) and set a
    # very long overdue timeout so it cannot fire during the test.
    self._sf.max_outstanding_requests = 5
    self._sf.OVERDUE_TIMEOUT = 1000.0
    d2 = download_to_data(n)
    # start download, but don't wait for it to complete yet
    def _done(res):
        # `done` is a list defined in the enclosing scope
        done.append(res) # we will poll for this later
    d2.addBoth(_done)
def test_repair_from_deletion_of_1(self):
    """
    Repair replaces a share that got deleted.
    """
    self.basedir = "repairer/Repairer/repair_from_deletion_of_1"
    self.set_up_grid(num_clients=2)
    d = self.upload_and_stash()
    # Remove share #2, record the current storage counters, then run a
    # non-verifying check-and-repair from client 0.
    d.addCallback(lambda ignored:
                  self.delete_shares_numbered(self.uri, [2]))
    d.addCallback(lambda ignored: self._stash_counts())
    d.addCallback(lambda ignored:
                  self.c0_filenode.check_and_repair(Monitor(),
                                                    verify=False))
    def _check_results(crr):
        self.failUnlessIsInstance(crr, check_results.CheckAndRepairResults)
        pre = crr.get_pre_repair_results()
        self.failUnlessIsInstance(pre, check_results.CheckResults)
        post = crr.get_post_repair_results()
        self.failUnlessIsInstance(post, check_results.CheckResults)
        # Repair cost bounds: limited reads, one share's worth of
        # allocates; delta_writes is unpacked but not asserted on.
        delta_reads, delta_allocates, delta_writes = self._get_delta_counts()
        self.failIfBigger(delta_reads, MAX_DELTA_READS)
        self.failIfBigger(delta_allocates, DELTA_WRITES_PER_SHARE)
        # The checker saw damage before repair and health afterwards.
        self.failIf(pre.is_healthy())
        self.failUnless(post.is_healthy())
        # Now we inspect the filesystem to make sure that it has 10
        # shares.
        shares = self.find_uri_shares(self.uri)
        self.failIf(len(shares) < 10)
    d.addCallback(_check_results)
    # Verify-mode check after repair must also report healthy.
    d.addCallback(lambda ignored:
                  self.c0_filenode.check(Monitor(), verify=True))
    d.addCallback(lambda vr: self.failUnless(vr.is_healthy()))
    # Now we delete seven of the other shares, then try to download the
    # file and assert that it succeeds at downloading and has the right
    # contents. This can't work unless it has already repaired the
    # previously-deleted share #2.
    d.addCallback(lambda ignored:
                  self.delete_shares_numbered(self.uri, range(3, 10+1)))
    # Use the second client so the download can't be served from c0 state.
    d.addCallback(lambda ignored: download_to_data(self.c1_filenode))
    d.addCallback(lambda newdata:
                  self.failUnlessEqual(newdata, common.TEST_DATA))
    return d
def download_to_data(self, progress=None):
    """
    Download this node's full contents, forwarding to the module-level
    download_to_data helper; returns its Deferred.
    """
    d = download_to_data(self, progress=progress)
    return d
def _wait_for_data(self, n):
    """
    Return a Deferred that fires with node `n`'s full contents,
    choosing the download path appropriate to its mutability.
    """
    if not self.mutable:
        return download_to_data(n)
    return n.download_best_version()
def _uploaded(res):
    # Build a fresh node from the upload result's URI and download it.
    return download_to_data(c0.create_node_from_uri(res.uri))
def _uploaded(res):
    # Re-create the node from the uploaded cap, then fetch its contents.
    cap = res.get_uri()
    node = c0.create_node_from_uri(cap)
    return download_to_data(node)