def _ucw_error_modifier(old_contents, servermap, first_time):
    # Simulate a single UncoordinatedWriteError on the first invocation
    # (tracked via the shared `calls` list), then behave as a normal
    # modifier that appends b"line3" to the contents.
    calls.append(1)
    if len(calls) <= 1:
        raise UncoordinatedWriteError("simulated")
    return old_contents + b"line3"
def _ucw_error_non_modifier(old_contents, servermap, first_time):
    # Raise a simulated UncoordinatedWriteError on the first call only
    # (tracked via the shared `calls` list); subsequent calls leave the
    # contents completely untouched.
    calls.append(1)
    if len(calls) > 1:
        return old_contents
    raise UncoordinatedWriteError("simulated")
def _got_results_one_share(self, shnum, peerid, got_prefix, got_hash_and_data):
    """
    Validate a single share fetched from a server.

    Checks that the share's prefix matches the one recorded in the
    servermap, then verifies the block hash tree and the share hash
    chain against the (already signature-checked) root hash.

    Raises UncoordinatedWriteError if the prefix changed since the
    servermap was read, and CorruptShareError if any hash check fails.
    """
    self.log("_got_results: got shnum #%d from peerid %s"
             % (shnum, idlib.shortnodeid_b2a(peerid)))
    (seqnum, root_hash, IV, segsize, datalength, k, N, prefix,
     offsets_tuple) = self.verinfo
    assert len(got_prefix) == len(prefix), (len(got_prefix), len(prefix))
    if got_prefix != prefix:
        msg = "someone wrote to the data since we read the servermap: prefix changed"
        raise UncoordinatedWriteError(msg)
    (share_hash_chain, block_hash_tree,
     share_data) = unpack_share_data(self.verinfo, got_hash_and_data)
    assert isinstance(share_data, str)
    # build the block hash tree. SDMF has only one leaf.
    leaves = [hashutil.block_hash(share_data)]
    t = hashtree.HashTree(leaves)
    if list(t) != block_hash_tree:
        raise CorruptShareError(peerid, shnum, "block hash tree failure")
    share_hash_leaf = t[0]
    t2 = hashtree.IncompleteHashTree(N)
    # root_hash was checked by the signature
    t2.set_hashes({0: root_hash})
    try:
        t2.set_hashes(hashes=share_hash_chain,
                      leaves={shnum: share_hash_leaf})
    # Use `except ... as e` (valid Python 2.6+ and required on Python 3)
    # instead of the Python-2-only `except ..., e` form.
    except (hashtree.BadHashError, hashtree.NotEnoughHashesError,
            IndexError) as e:
        msg = "corrupt hashes: %s" % (e,)
        raise CorruptShareError(peerid, shnum, msg)
def _done(self, res):
    """
    Finish the publish: record timings and final status exactly once,
    then fire the done deferred with the result (or a failure).
    """
    if not self._running:
        return
    self._running = False
    elapsed = time.time() - self._started
    self._status.timings["total"] = elapsed
    self._status.set_active(False)
    if isinstance(res, failure.Failure):
        self.log("Publish done, with failure", failure=res,
                 level=log.WEIRD, umid="nRsR9Q")
        self._status.set_status("Failed")
    elif self.surprised:
        self.log("Publish done, UncoordinatedWriteError", level=log.UNUSUAL)
        self._status.set_status("UncoordinatedWriteError")
        # deliver a failure
        res = failure.Failure(UncoordinatedWriteError())
        # TODO: recovery
    else:
        self.log("Publish done, success")
        self._status.set_status("Finished")
        self._status.set_progress(1.0)
    eventually(self.done_deferred.callback, res)
def test_repr(self):
    """repr() of our exception types should name the exception class."""
    nmde = NeedMoreDataError(100, 50, 100)
    # The original asserted this twice in a row; once is enough.
    self.assertTrue("NeedMoreDataError" in repr(nmde), msg=repr(nmde))
    ucwe = UncoordinatedWriteError()
    self.assertTrue("UncoordinatedWriteError" in repr(ucwe), msg=repr(ucwe))
def _try_to_validate_prefix(self, prefix, reader):
    """
    I check that the prefix returned by a candidate server for
    retrieval matches the prefix that the servermap knows about (and,
    hence, the prefix that was validated earlier). If it does, I
    return without incident, which means that I approve of the use of
    the candidate server for segment retrieval. If it doesn't, I
    raise UncoordinatedWriteError, which means that another server
    must be chosen.
    """
    # NOTE: the docstring previously claimed True/False return values;
    # the code has always returned None on success and raised on
    # mismatch, so the docstring now matches the actual contract.
    (seqnum, root_hash, IV, segsize, datalength, k, N, known_prefix,
     offsets_tuple) = self.verinfo
    if known_prefix != prefix:
        self.log("prefix from share %d doesn't match" % reader.shnum)
        raise UncoordinatedWriteError("Mismatched prefix -- this could "
                                      "indicate an uncoordinated write")
def _always_ucw_error_modifier(old_contents, servermap, first_time):
    # A modifier that simulates an uncoordinated write on every single
    # invocation, never succeeding.
    raise UncoordinatedWriteError("simulated")
def test_repr(self):
    """repr() of our exception types should name the exception class."""
    nmde = NeedMoreDataError(100, 50, 100)
    # assertTrue replaces the deprecated failUnless alias (removed from
    # unittest in Python 3.12); also matches the sibling test_repr.
    self.assertTrue("NeedMoreDataError" in repr(nmde), repr(nmde))
    ucwe = UncoordinatedWriteError()
    self.assertTrue("UncoordinatedWriteError" in repr(ucwe), repr(ucwe))