示例#1
0
    def test_deep_check_renderer(self):
        """Render DeepCheckResults as HTML and assert that the summary
        counts, section headings, and per-object problem listings all
        appear in the generated page."""
        status = check_results.DeepCheckResults("fake-root-si")
        # One fake object per (healthy, recoverable) combination, filed
        # under a path that names the combination.
        status.add_check(
            FakeCheckResults("<unhealthy/unrecoverable>", False, False),
            (u"fake", u"unhealthy", u"unrecoverable"))
        status.add_check(FakeCheckResults("<healthy/recoverable>", True, True),
                         (u"fake", u"healthy", u"recoverable"))
        status.add_check(
            FakeCheckResults("<healthy/unrecoverable>", True, False),
            (u"fake", u"healthy", u"unrecoverable"))
        # Label fixed: this entry is healthy=False, recoverable=True and is
        # filed under .../unhealthy/recoverable, so its label should say
        # "recoverable" (was mislabeled "<unhealthy/unrecoverable>").
        status.add_check(
            FakeCheckResults("<unhealthy/recoverable>", False, True),
            (u"fake", u"unhealthy", u"recoverable"))

        monitor = Monitor()
        monitor.set_status(status)

        elem = web_check_results.DeepCheckResultsRendererElement(monitor)
        doc = self.render_element(elem)
        soup = BeautifulSoup(doc, 'html5lib')

        assert_soup_has_favicon(self, soup)

        assert_soup_has_tag_with_content(self, soup, u"title",
                                         u"Tahoe-LAFS - Deep Check Results")

        assert_soup_has_tag_with_content(self, soup, u"h1",
                                         "Deep-Check Results for root SI=")

        # Summary counts: 4 checked, 2 healthy, 2 unhealthy, 2 unrecoverable,
        # and 4 corrupt shares (one per FakeCheckResults).
        assert_soup_has_tag_with_content(self, soup, u"li",
                                         u"Objects Checked: 4")

        assert_soup_has_tag_with_content(self, soup, u"li",
                                         u"Objects Healthy: 2")

        assert_soup_has_tag_with_content(self, soup, u"li",
                                         u"Objects Unhealthy: 2")

        assert_soup_has_tag_with_content(self, soup, u"li",
                                         u"Objects Unrecoverable: 2")

        assert_soup_has_tag_with_content(self, soup, u"li",
                                         u"Corrupt Shares: 4")

        assert_soup_has_tag_with_content(
            self, soup, u"h2", u"Files/Directories That Had Problems:")

        # The two unhealthy objects are listed by their paths.
        assert_soup_has_tag_with_content(
            self, soup, u"li", u"fake/unhealthy/recoverable: fake summary")

        assert_soup_has_tag_with_content(
            self, soup, u"li", u"fake/unhealthy/unrecoverable: fake summary")

        assert_soup_has_tag_with_content(
            self, soup, u"h2", u"Servers on which corrupt shares were found")

        assert_soup_has_tag_with_content(self, soup, u"h2", u"Corrupt Shares")

        assert_soup_has_tag_with_content(self, soup, u"h2", u"All Results")
示例#2
0
    def test_merge(self):
        """Repair of a grid holding two recoverable versions with identical
        seqnums must be forced; the forced repair then converges every
        share on a single newer version (seqnum 5)."""
        self.old_shares = []
        d = self.publish_multiple()
        # repair will refuse to merge multiple highest seqnums unless you
        # pass force=True
        # Split the shares: even-numbered shares carry version 3, odd ones
        # carry version 4, giving two recoverable versions at one seqnum.
        d.addCallback(lambda res: self._set_versions({
            0: 3,
            2: 3,
            4: 3,
            6: 3,
            8: 3,
            1: 4,
            3: 4,
            5: 4,
            7: 4,
            9: 4
        }))
        d.addCallback(self.copy_shares)
        d.addCallback(lambda res: self._fn.check(Monitor()))

        def _try_repair(check_results):
            # Unforced repair must fail with MustForceRepairError and must
            # leave the shares untouched; pass the check results through so
            # the forced repair below can reuse them.
            ex = "There were multiple recoverable versions with identical seqnums, so force=True must be passed to the repair() operation"
            d2 = self.shouldFail(MustForceRepairError, "test_merge", ex,
                                 self._fn.repair, check_results)
            d2.addCallback(self.copy_shares)
            d2.addCallback(self.failIfSharesChanged)
            d2.addCallback(lambda res: check_results)
            return d2

        d.addCallback(_try_repair)
        d.addCallback(
            lambda check_results: self._fn.repair(check_results, force=True))

        # this should give us 10 shares of the highest roothash
        def _check_repair_results(rres):
            self.assertThat(rres.get_successful(), Equals(True))
            pass  # TODO

        d.addCallback(_check_repair_results)
        d.addCallback(lambda res: self._fn.get_servermap(MODE_CHECK))

        def _check_smap(smap):
            # Exactly one recoverable version remains, nothing unrecoverable.
            self.assertThat(smap.recoverable_versions(), HasLength(1))
            self.assertThat(smap.unrecoverable_versions(), HasLength(0))
            # now, which should have won?
            # The winner is decided by roothash comparison between the two
            # competing versions (indices 3 and 4).
            roothash_s4a = self.get_roothash_for(3)
            roothash_s4b = self.get_roothash_for(4)
            if roothash_s4b > roothash_s4a:
                expected_contents = self.CONTENTS[4]
            else:
                expected_contents = self.CONTENTS[3]
            new_versionid = smap.best_recoverable_version()
            self.assertThat(new_versionid[0], Equals(5))  # seqnum 5
            d2 = self._fn.download_version(smap, new_versionid)
            d2.addCallback(self.assertEqual, expected_contents)
            return d2

        d.addCallback(_check_smap)
        return d
示例#3
0
 def test_verify_one_bad_block(self):
     """Verifier flags a share whose block data was corrupted (share 9)."""
     # Corrupt block data; a plain check could miss it, but verify=True
     # examines every byte and must notice.
     d = corrupt(None, self._storage, "share_data", [9])
     # the Verifier *will* notice this, since it examines every byte
     d.addCallback(
         lambda ign: self._fn.check(Monitor(), verify=True))
     d.addCallback(self.check_bad, "test_verify_one_bad_block")
     d.addCallback(self.check_expected_failure, CorruptShareError,
                   "block hash tree failure", "test_verify_one_bad_block")
     return d
示例#4
0
 def test_check_not_enough_shares(self):
     """Check reports a problem when only share 0 remains on each server."""
     # Strip every share except shnum 0 from every simulated server.
     for shareset in list(self._storage._peers.values()):
         doomed = [num for num in shareset.keys() if num > 0]
         for num in doomed:
             del shareset[num]
     d = self._fn.check(Monitor())
     d.addCallback(self.check_bad, "test_check_not_enough_shares")
     return d
示例#5
0
 def make_servermap(self, mode=MODE_READ, oldmap=None, sb=None):
     """Build a ServermapUpdater for self._fn and run it; returns the
     Deferred from update(). Defaults: fresh ServerMap, our broker."""
     target_map = ServerMap() if oldmap is None else oldmap
     broker = self._storage_broker if sb is None else sb
     updater = ServermapUpdater(self._fn, broker, Monitor(), target_map, mode)
     return updater.update()
示例#6
0
    def test_repair_from_deletion_of_7(self):
        """ Repair replaces seven shares that got deleted. """
        self.basedir = "repairer/Repairer/repair_from_deletion_of_7"
        self.set_up_grid(num_clients=2)
        d = self.upload_and_stash()
        # Delete shares 0-6, leaving only 3 of the original 10.
        d.addCallback(
            lambda ignored: self.delete_shares_numbered(self.uri, range(7)))
        d.addCallback(lambda ignored: self._stash_counts())
        d.addCallback(lambda ignored: self.c0_filenode.check_and_repair(
            Monitor(), verify=False))

        def _check_results(crr):
            # Pre-repair must be unhealthy, post-repair healthy, and the
            # repair must stay within the read/allocate I/O budgets.
            self.failUnlessIsInstance(crr, check_results.CheckAndRepairResults)
            pre = crr.get_pre_repair_results()
            self.failUnlessIsInstance(pre, check_results.CheckResults)
            post = crr.get_post_repair_results()
            self.failUnlessIsInstance(post, check_results.CheckResults)
            delta_reads, delta_allocates, delta_writes = self._get_delta_counts(
            )

            self.failIfBigger(delta_reads, MAX_DELTA_READS)
            self.failIfBigger(delta_allocates, (DELTA_WRITES_PER_SHARE * 7))
            self.failIf(pre.is_healthy())
            self.failUnless(post.is_healthy(), post.as_dict())

            # Make sure we really have 10 shares.
            shares = self.find_uri_shares(self.uri)
            self.failIf(len(shares) < 10)

        d.addCallback(_check_results)

        # A verifying check on the second client confirms full health.
        d.addCallback(
            lambda ignored: self.c0_filenode.check(Monitor(), verify=True))
        d.addCallback(lambda vr: self.failUnless(vr.is_healthy()))

        # Now we delete seven of the other shares, then try to download the
        # file and assert that it succeeds at downloading and has the right
        # contents. This can't work unless it has already repaired the
        # previously-deleted share #2.

        # NOTE(review): range(3, 10 + 1) names shnums 3..10, but a 10-share
        # file only has shnums 0..9; presumably the nonexistent shnum 10 is
        # ignored, so this deletes the seven shares 3..9 — confirm.
        d.addCallback(lambda ignored: self.delete_shares_numbered(
            self.uri, range(3, 10 + 1)))
        d.addCallback(lambda ignored: download_to_data(self.c1_filenode))
        d.addCallback(
            lambda newdata: self.failUnlessEqual(newdata, common.TEST_DATA))
        return d
示例#7
0
 def _then(ign):
     """Delete the share held by server #0, then start check-and-repair."""
     server = self.g.servers_by_number[0]
     # Find which share number that server holds for this file, so the
     # deletion lines up with the server we're making not-respond.
     buckets = server._get_bucket_shares(self.c0_filenode.get_storage_index())
     shnum = next(buckets)[0]
     self.delete_shares_numbered(self.uri, [shnum])
     return self.c0_filenode.check_and_repair(Monitor())
示例#8
0
    def test_harness(self):
        # This test is actually to make sure our test harness works, rather
        # than testing anything about Tahoe code itself.

        self.basedir = "repairer/Repairer/test_code"
        self.set_up_grid(num_clients=2)
        d = self.upload_and_stash()

        # find_uri_shares must be stable: two consecutive calls agree.
        d.addCallback(lambda ignored: self.find_uri_shares(self.uri))
        def _stash_shares(oldshares):
            self.oldshares = oldshares
        d.addCallback(_stash_shares)
        d.addCallback(lambda ignored: self.find_uri_shares(self.uri))
        def _compare(newshares):
            self.failUnlessEqual(newshares, self.oldshares)
        d.addCallback(_compare)

        # Deleting 8 of the 10 shares (one by shnum, seven by share tuple)
        # must leave exactly 2 behind.
        def _delete_8(ignored):
            shnum = self.oldshares[0][0]
            self.delete_shares_numbered(self.uri, [shnum])
            for sh in self.oldshares[1:8]:
                self.delete_share(sh)
        d.addCallback(_delete_8)
        d.addCallback(lambda ignored: self.find_uri_shares(self.uri))
        d.addCallback(lambda shares: self.failUnlessEqual(len(shares), 2))

        # With only 2 shares, both download and repair must fail with
        # NotEnoughSharesError.
        d.addCallback(lambda ignored:
                      self.shouldFail(NotEnoughSharesError, "then_download",
                                      None,
                                      download_to_data, self.c1_filenode))

        d.addCallback(lambda ignored:
                      self.shouldFail(NotEnoughSharesError, "then_repair",
                                      None,
                                      self.c1_filenode.check_and_repair,
                                      Monitor(), verify=False))

        # test share corruption
        def _test_corrupt(ignored):
            # corrupt_share must actually change the bytes on disk.
            olddata = {}
            shares = self.find_uri_shares(self.uri)
            for (shnum, serverid, sharefile) in shares:
                olddata[ (shnum, serverid) ] = open(sharefile, "rb").read()
            for sh in shares:
                self.corrupt_share(sh, common._corrupt_uri_extension)
            for (shnum, serverid, sharefile) in shares:
                newdata = open(sharefile, "rb").read()
                self.failIfEqual(olddata[ (shnum, serverid) ], newdata)
        d.addCallback(_test_corrupt)

        # Deleting every share must leave find_uri_shares empty.
        def _remove_all(ignored):
            for sh in self.find_uri_shares(self.uri):
                self.delete_share(sh)
        d.addCallback(_remove_all)
        d.addCallback(lambda ignored: self.find_uri_shares(self.uri))
        d.addCallback(lambda shares: self.failUnlessEqual(shares, []))

        return d
示例#9
0
 def test_verify_one_bad_encprivkey_uncheckable(self):
     """A read-only node cannot validate the encrypted private key, so a
     corrupt privkey in share 9 passes a verifying check."""
     d = corrupt(None, self._storage, "enc_privkey", [9])
     ro_node = self._fn.get_readonly()
     # a read-only node has no way to validate the privkey
     d.addCallback(lambda ign: ro_node.check(Monitor(), verify=True))
     d.addCallback(self.check_good,
                   "test_verify_one_bad_encprivkey_uncheckable")
     return d
示例#10
0
 def test_verify_mdmf_bad_encprivkey(self):
     """Verifying an MDMF file whose share 0 has a corrupt privkey fails
     with a privkey-related CorruptShareError."""
     d = self.publish_mdmf()
     d.addCallback(
         lambda ign: corrupt(None, self._storage, "enc_privkey", [0]))
     d.addCallback(lambda ign: self._fn.check(Monitor(), verify=True))
     d.addCallback(self.check_bad, "test_verify_mdmf_bad_encprivkey")
     d.addCallback(self.check_expected_failure, CorruptShareError,
                   "privkey", "test_verify_mdmf_bad_encprivkey")
     return d
示例#11
0
 def test_verify_mdmf_bad_encprivkey_uncheckable(self):
     """An MDMF read-only cap cannot validate the privkey, so a corrupt
     privkey in share 1 still verifies as good."""
     d = self.publish_mdmf()
     d.addCallback(
         lambda ign: corrupt(None, self._storage, "enc_privkey", [1]))
     d.addCallback(lambda ign: self._fn.get_readonly())
     d.addCallback(lambda ro_node: ro_node.check(Monitor(), verify=True))
     d.addCallback(self.check_good,
                   "test_verify_mdmf_bad_encprivkey_uncheckable")
     return d
示例#12
0
 def _start(ign):
     """Chain corrupt / verifying-check / inspect / restore over every
     byte offset of share 0, one offset at a time."""
     d = defer.succeed(None)
     for offset in range(len(self.sh0_orig)):
         # Offset is bound as an extra callback argument, so the loop
         # variable is captured correctly for each link in the chain.
         d.addCallback(_corrupt, offset)
         d.addCallback(
             lambda ign: self.c1_filenode.check(Monitor(), verify=True))
         d.addCallback(_did_check, offset)
         d.addCallback(_fix_sh0)
     return d
示例#13
0
 def test_cancellation(self):
     """The monitor can be cancelled."""
     monitor = Monitor()
     # A fresh monitor is not cancelled and raise_if_cancelled is a no-op.
     self.assertFalse(monitor.is_cancelled())
     monitor.raise_if_cancelled()
     # After cancel(), the flag flips and raise_if_cancelled raises.
     monitor.cancel()
     self.assertTrue(monitor.is_cancelled())
     with self.assertRaises(OperationCancelledError):
         monitor.raise_if_cancelled()
示例#14
0
    def test_875(self):
        """Ticket #875: a failure in the server's add_lease call must be
        ignored, so check(add_lease=True) still reports healthy files."""
        self.basedir = "checker/AddLease/875"
        self.set_up_grid(num_servers=1)
        c0 = self.g.clients[0]
        c0.encoding_params['happy'] = 1
        self.uris = {}
        # NOTE(review): upload/MutableData elsewhere in this file take bytes
        # (cf. publish_sdmf(b"")) — confirm these literals shouldn't be b"...".
        DATA = "data" * 100
        d = c0.upload(Data(DATA, convergence=""))
        def _stash_immutable(ur):
            self.imm = c0.create_node_from_uri(ur.get_uri())
        d.addCallback(_stash_immutable)
        d.addCallback(lambda ign:
            c0.create_mutable_file(MutableData("contents")))
        def _stash_mutable(node):
            self.mut = node
        d.addCallback(_stash_mutable)

        def _check_cr(cr, which):
            self.failUnless(cr.is_healthy(), which)

        # these two should work normally
        d.addCallback(lambda ign: self.imm.check(Monitor(), add_lease=True))
        d.addCallback(_check_cr, "immutable-normal")
        d.addCallback(lambda ign: self.mut.check(Monitor(), add_lease=True))
        d.addCallback(_check_cr, "mutable-normal")

        really_did_break = []
        # now break the server's remote_add_lease call
        def _break_add_lease(ign):
            def broken_add_lease(*args, **kwargs):
                # Record that the broken path was exercised before raising.
                really_did_break.append(1)
                raise KeyError("intentional failure, should be ignored")
            assert self.g.servers_by_number[0].remote_add_lease
            self.g.servers_by_number[0].remote_add_lease = broken_add_lease
        d.addCallback(_break_add_lease)

        # and confirm that the files still look healthy
        d.addCallback(lambda ign: self.mut.check(Monitor(), add_lease=True))
        d.addCallback(_check_cr, "mutable-broken")
        d.addCallback(lambda ign: self.imm.check(Monitor(), add_lease=True))
        d.addCallback(_check_cr, "immutable-broken")

        # Sanity check: the broken add_lease really was invoked.
        d.addCallback(lambda ign: self.failUnless(really_did_break))
        return d
示例#15
0
 def _update_servermap(self, mode=MODE_WRITE, update_range=None):
     """
     I update the servermap. I return a Deferred that fires when the
     servermap update is done.
     """
     # Forward update_range only when the caller supplied one, so a single
     # ServermapUpdater construction covers both cases.
     extra = {}
     if update_range:
         extra["update_range"] = update_range
     updater = ServermapUpdater(self._node,
                                self._storage_broker,
                                Monitor(),
                                self._servermap,
                                mode=mode,
                                **extra)
     return updater.update()
示例#16
0
            def _after_repair(checkandrepairresults):
                """Assert the repair succeeded within the I/O budgets, then
                verify the file and prove it survives deletion of seven
                more shares."""
                prerepairres = checkandrepairresults.get_pre_repair_results()
                postrepairres = checkandrepairresults.get_post_repair_results()
                after_repair_reads = self._count_reads()
                after_repair_allocates = self._count_writes()

                # The "* 2" in reads is because you might read a whole share
                # before figuring out that it is corrupted. It might be
                # possible to make this delta reads number a little tighter.
                self.failIf(
                    after_repair_reads - before_repair_reads >
                    (MAX_DELTA_READS * 2),
                    (after_repair_reads, before_repair_reads))
                # The "* 2" in writes is because each server has two shares,
                # and it is reasonable for repairer to conclude that there
                # are two shares that it should upload, if the server fails
                # to serve the first share.
                self.failIf(
                    after_repair_allocates - before_repair_allocates >
                    (DELTA_WRITES_PER_SHARE * 2),
                    (after_repair_allocates, before_repair_allocates))
                self.failIf(prerepairres.is_healthy(),
                            (prerepairres.data, corruptor_func))
                self.failUnless(postrepairres.is_healthy(),
                                (postrepairres.data, corruptor_func))

                # Now we inspect the filesystem to make sure that it has 10
                # shares.
                shares = self.find_all_shares()
                self.failIf(len(shares) < 10)

                # Now we assert that the verifier reports the file as healthy.
                d3 = self.filenode.check(Monitor(), verify=True)

                def _after_verify(verifyresults):
                    self.failUnless(verifyresults.is_healthy())

                d3.addCallback(_after_verify)

                # Now we delete seven of the other shares, then try to
                # download the file and assert that it succeeds at
                # downloading and has the right contents. This can't work
                # unless it has already repaired the previously-corrupted share.
                def _then_delete_7_and_try_a_download(unused=None):
                    # Fix: range() has no .remove() on Python 3; materialize
                    # the list before mutating it.
                    shnums = list(range(10))
                    shnums.remove(shnum)
                    random.shuffle(shnums)
                    for sharenum in shnums[:7]:
                        self._delete_a_share(sharenum=sharenum)

                    return self._download_and_check_plaintext()

                d3.addCallback(_then_delete_7_and_try_a_download)
                return d3
示例#17
0
    def test_check_mdmf_no_shares(self):
        """Check on an MDMF file reports bad when every share is gone."""
        d = self.publish_mdmf()

        def _wipe(ign):
            # Empty each simulated server's share container entirely.
            for shareset in list(self._storage._peers.values()):
                shareset.clear()

        d.addCallback(_wipe)
        d.addCallback(lambda ign: self._fn.check(Monitor()))
        d.addCallback(self.check_bad, "test_check_mdmf_no_shares")
        return d
示例#18
0
 def test_verify_mdmf_one_bad_block(self):
     """Verifier detects corrupted block data in share 1 of an MDMF file."""
     d = self.publish_mdmf()
     d.addCallback(
         lambda ign: corrupt(None, self._storage, "share_data", [1]))
     d.addCallback(lambda ign: self._fn.check(Monitor(), verify=True))
     # We should find one bad block here
     d.addCallback(self.check_bad, "test_verify_mdmf_one_bad_block")
     d.addCallback(self.check_expected_failure, CorruptShareError,
                   "block hash tree failure",
                   "test_verify_mdmf_one_bad_block")
     return d
示例#19
0
    def test_check_mdmf_not_enough_shares(self):
        """Check on an MDMF file reports bad when only share 0 survives."""
        d = self.publish_mdmf()

        def _then(ignored):
            # Delete every share except shnum 0 on every server, leaving
            # too few shares for the file to be recoverable.
            for shares in list(self._storage._peers.values()):
                for shnum in list(shares.keys()):
                    if shnum > 0:
                        del shares[shnum]

        d.addCallback(_then)
        d.addCallback(lambda ignored: self._fn.check(Monitor()))
        # Label typo fixed: was "test_check_mdmf_not_enougH_shares".
        d.addCallback(self.check_bad, "test_check_mdmf_not_enough_shares")
        return d
示例#20
0
    def test_check_without_verify(self):
        """Check says the file is healthy when none of the shares have been
        touched. It says that the file is unhealthy when all of them have
        been removed. It doesn't use any reads.
        """
        self.basedir = "repairer/Verifier/check_without_verify"
        self.set_up_grid(num_clients=2)
        d = self.upload_and_stash()
        d.addCallback(lambda ign: self._stash_counts())
        d.addCallback(
            lambda ign: self.c0_filenode.check(Monitor(), verify=False))

        def _healthy_without_reads(cr):
            # Untouched file: healthy, and a non-verifying check reads
            # nothing at all.
            self.failUnless(cr.is_healthy())
            counts = self._get_delta_counts()
            self.failIfBigger(counts[0], 0)

        d.addCallback(_healthy_without_reads)

        def _delete_everything(ign):
            for sh in self.find_uri_shares(self.uri):
                self.delete_share(sh)

        d.addCallback(_delete_everything)

        d.addCallback(lambda ign: self._stash_counts())
        d.addCallback(
            lambda ign: self.c0_filenode.check(Monitor(), verify=False))

        def _unhealthy_without_reads(cr):
            # All shares gone: unhealthy, and still zero reads.
            self.failIf(cr.is_healthy())
            counts = self._get_delta_counts()
            self.failIfBigger(counts[0], 0)

        d.addCallback(_unhealthy_without_reads)
        return d
示例#21
0
    def test_finish(self):
        """The monitor can finish."""
        monitor = Monitor()
        # Not finished yet, and when_done() must not have fired.
        self.assertFalse(monitor.is_finished())
        done = monitor.when_done()
        self.assertNoResult(done)

        # finish() returns its argument, records it as the status, and
        # marks the monitor as finished.
        outcome = monitor.finish(300)
        self.assertEqual(outcome, 300)
        self.assertEqual(monitor.get_status(), 300)
        self.assertTrue(monitor.is_finished())

        # The earlier when_done() Deferred fires with the same value.
        done.addBoth(self.assertEqual, 300)
        return done
    def test_multiple_versions(self):
        # if we see a mix of versions in the grid, download_best_version
        # should get the latest one
        # Shares 0,2,4,6,8 downgraded to version 2; the rest stay at 4.
        self._set_versions(dict([(i,2) for i in (0,2,4,6,8)]))
        d = self._fn.download_best_version()
        d.addCallback(lambda res: self.failUnlessEqual(res, self.CONTENTS[4]))
        # and the checker should report problems
        d.addCallback(lambda res: self._fn.check(Monitor()))
        d.addCallback(self.check_bad, "test_multiple_versions")

        # but if everything is at version 2, that's what we should download
        d.addCallback(lambda res:
                      self._set_versions(dict([(i,2) for i in range(10)])))
        d.addCallback(lambda res: self._fn.download_best_version())
        d.addCallback(lambda res: self.failUnlessEqual(res, self.CONTENTS[2]))
        # if exactly one share is at version 3, we should still get v2
        d.addCallback(lambda res:
                      self._set_versions({0:3}))
        d.addCallback(lambda res: self._fn.download_best_version())
        d.addCallback(lambda res: self.failUnlessEqual(res, self.CONTENTS[2]))
        # but the servermap should see the unrecoverable version. This
        # depends upon the single newer share being queried early.
        d.addCallback(lambda res: self._fn.get_servermap(MODE_READ))
        def _check_smap(smap):
            # Exactly one unrecoverable newer version, with seqnum 4 and
            # health (1 share seen, 3 needed); no merge required.
            self.failUnlessEqual(len(smap.unrecoverable_versions()), 1)
            newer = smap.unrecoverable_newer_versions()
            self.failUnlessEqual(len(newer), 1)
            verinfo, health = list(newer.items())[0]
            self.failUnlessEqual(verinfo[0], 4)
            self.failUnlessEqual(health, (1,3))
            self.failIf(smap.needs_merge())
        d.addCallback(_check_smap)
        # if we have a mix of two parallel versions (s4a and s4b), we could
        # recover either
        d.addCallback(lambda res:
                      self._set_versions({0:3,2:3,4:3,6:3,8:3,
                                          1:4,3:4,5:4,7:4,9:4}))
        d.addCallback(lambda res: self._fn.get_servermap(MODE_READ))
        def _check_smap_mixed(smap):
            # Both versions are recoverable, so nothing is unrecoverable,
            # but the map must report that a merge is needed.
            self.failUnlessEqual(len(smap.unrecoverable_versions()), 0)
            newer = smap.unrecoverable_newer_versions()
            self.failUnlessEqual(len(newer), 0)
            self.failUnless(smap.needs_merge())
        d.addCallback(_check_smap_mixed)
        d.addCallback(lambda res: self._fn.download_best_version())
        # Either parallel version is an acceptable download result.
        d.addCallback(lambda res: self.failUnless(res == self.CONTENTS[3] or
                                                  res == self.CONTENTS[4]))
        return d
示例#23
0
 def make_servermap(self,
                    mode=MODE_CHECK,
                    fn=None,
                    sb=None,
                    update_range=None):
     """Run a ServermapUpdater over a fresh ServerMap for the given node
     (default self._fn) and broker; returns the update Deferred."""
     target = self._fn if fn is None else fn
     broker = self._storage_broker if sb is None else sb
     updater = ServermapUpdater(target,
                                broker,
                                Monitor(),
                                ServerMap(),
                                mode,
                                update_range=update_range)
     return updater.update()
示例#24
0
    def deep_traverse(self, walker):
        """Perform a recursive walk, using this dirnode as a root, notifying
        the 'walker' instance of everything I encounter.

        I call walker.enter_directory(parent, children) once for each dirnode
        I visit, immediately after retrieving the list of children. I pass in
        the parent dirnode and the dict of childname->(childnode,metadata).
        This function should *not* traverse the children: I will do that.
        enter_directory() is most useful for the deep-stats number that
        counts how large a directory is.

        I call walker.add_node(node, path) for each node (both files and
        directories) I can reach. Most work should be done here.

        I avoid loops by keeping track of verifier-caps and refusing to call
        walker.add_node() or traverse a node that I've seen before. This
        means that any file or directory will only be given to the walker
        once. If files or directories are referenced multiple times by a
        directory structure, this may appear to under-count or miss some of
        them.

        I return a Monitor which can be used to wait for the operation to
        finish, learn about its progress, or cancel the operation.
        """

        # this is just a tree-walker, except that following each edge
        # requires a Deferred. We used to use a ConcurrencyLimiter to limit
        # fanout to 10 simultaneous operations, but the memory load of the
        # queued operations was excessive (in one case, with 330k dirnodes,
        # it caused the process to run into the 3.0GB-ish per-process 32bit
        # linux memory limit, and crashed). So we use a single big Deferred
        # chain, and do a strict depth-first traversal, one node at a time.
        # This can be slower, because we aren't pipelining directory reads,
        # but it brought the memory footprint down by roughly 50%.

        monitor = Monitor()
        walker.set_monitor(monitor)

        # Seed the seen-set with our own verify-cap so the root is never
        # revisited through a cycle.
        found = set([self.get_verify_cap()])
        d = self._deep_traverse_dirnode(self, [], walker, monitor, found)
        d.addCallback(lambda ignored: walker.finish())
        # addBoth: the monitor is finished with either the walker's result
        # or the failure; the final errback swallows the failure so it is
        # not reported as an unhandled error (callers watch the monitor).
        d.addBoth(monitor.finish)
        d.addErrback(lambda f: None)

        return monitor
示例#25
0
    def _help_test_verify(self, corruptor, judgement, shnum=0, debug=False):
        """Corrupt share `shnum` with `corruptor`, run a verifying check via
        the second client, and apply `judgement` to the results. A failing
        judgement is re-raised with vr.as_dict() appended for diagnosis."""
        self.set_up_grid(num_clients=2)
        d = self.upload_and_stash()
        d.addCallback(lambda ignored: self._stash_counts())

        d.addCallback(lambda ignored:
                      self.corrupt_shares_numbered(self.uri, [shnum],corruptor,debug=debug))
        d.addCallback(lambda ignored:
                      self.c1_filenode.check(Monitor(), verify=True))
        def _check(vr):
            delta_reads, delta_allocates, delta_writes = self._get_delta_counts()
            self.failIfBigger(delta_reads, MAX_DELTA_READS)
            try:
                judgement(vr)
            # Fix: "except X, e" is Python 2 syntax (SyntaxError on Py3);
            # use the "as" form.
            except unittest.FailTest as e:
                # FailTest just uses e.args[0] == str
                new_arg = str(e.args[0]) + "\nvr.data is: " + str(vr.as_dict())
                e.args = (new_arg,)
                raise
示例#26
0
    def test_non_merge(self):
        """A repair that does not need to merge (one recoverable version
        plus an older one) succeeds without force=True and converges on
        the newer version."""
        self.old_shares = []
        d = self.publish_multiple()
        # repair should not refuse a repair that doesn't need to merge. In
        # this case, we combine v2 with v3. The repair should ignore v2 and
        # copy v3 into a new v5.
        d.addCallback(lambda res: self._set_versions({
            0: 2,
            2: 2,
            4: 2,
            6: 2,
            8: 2,
            1: 3,
            3: 3,
            5: 3,
            7: 3,
            9: 3
        }))
        d.addCallback(lambda res: self._fn.check(Monitor()))
        d.addCallback(lambda check_results: self._fn.repair(check_results))

        # this should give us 10 shares of v3
        def _check_repair_results(rres):
            self.assertThat(rres.get_successful(), Equals(True))
            pass  # TODO

        d.addCallback(_check_repair_results)
        d.addCallback(lambda res: self._fn.get_servermap(MODE_CHECK))

        def _check_smap(smap):
            self.assertThat(smap.recoverable_versions(), HasLength(1))
            self.assertThat(smap.unrecoverable_versions(), HasLength(0))
            # now, which should have won?
            expected_contents = self.CONTENTS[3]
            new_versionid = smap.best_recoverable_version()
            self.assertThat(new_versionid[0], Equals(5))  # seqnum 5
            d2 = self._fn.download_version(smap, new_versionid)
            # Consistency: use assertEqual (assertEquals is a deprecated
            # alias), matching test_merge.
            d2.addCallback(self.assertEqual, expected_contents)
            return d2

        d.addCallback(_check_smap)
        return d
示例#27
0
    def _test_whether_checkandrepairable(self, publisher, nshares,
                                         expected_result):
        """
        Like the _test_whether_repairable tests, but invoking check_and_repair
        instead of invoking check and then invoking repair.
        """
        d = publisher()

        def _trim_shares(ign):
            # Keep only share numbers below nshares on every server.
            peers = self._storage._peers
            for peerid in peers:
                for shnum in list(peers[peerid]):
                    if shnum >= nshares:
                        del peers[peerid][shnum]

        d.addCallback(_trim_shares)
        d.addCallback(lambda ign: self._fn.check_and_repair(Monitor()))
        d.addCallback(lambda crr: self.assertThat(crr.get_repair_successful(),
                                                  Equals(expected_result)))
        return d
示例#28
0
    def test_repair_nop(self):
        """Repairing a healthy file succeeds; the shares stay in place but
        are republished at the next seqnum (with a new IV/roothash)."""
        self.old_shares = []
        d = self.publish_one()
        d.addCallback(self.copy_shares)
        d.addCallback(lambda res: self._fn.check(Monitor()))
        d.addCallback(lambda check_results: self._fn.repair(check_results))

        def _check_results(rres):
            self.assertThat(IRepairResults.providedBy(rres), Equals(True))
            self.assertThat(rres.get_successful(), Equals(True))
            # TODO: examine results

            # Snapshot the post-repair shares alongside the pre-repair copy.
            self.copy_shares()

            initial_shares = self.old_shares[0]
            new_shares = self.old_shares[1]
            # TODO: this really shouldn't change anything. When we implement
            # a "minimal-bandwidth" repairer", change this test to assert:
            #self.assertThat(new_shares, Equals(initial_shares))

            # all shares should be in the same place as before
            self.assertThat(set(initial_shares.keys()),
                            Equals(set(new_shares.keys())))
            # but they should all be at a newer seqnum. The IV will be
            # different, so the roothash will be too.
            for key in initial_shares:
                # Unpack the SDMF share headers and compare field by field:
                # version/k/N/segsize/datalen unchanged, seqnum bumped by 1.
                (version0, seqnum0, root_hash0, IV0, k0, N0, segsize0,
                 datalen0, o0) = unpack_header(initial_shares[key])
                (version1, seqnum1, root_hash1, IV1, k1, N1, segsize1,
                 datalen1, o1) = unpack_header(new_shares[key])
                self.assertThat(version0, Equals(version1))
                self.assertThat(seqnum0 + 1, Equals(seqnum1))
                self.assertThat(k0, Equals(k1))
                self.assertThat(N0, Equals(N1))
                self.assertThat(segsize0, Equals(segsize1))
                self.assertThat(datalen0, Equals(datalen1))

        d.addCallback(_check_results)
        return d
示例#29
0
    def test_repair_empty(self):
        # bug 1689: delete one share of an empty mutable file, then repair.
        # In the buggy version, the check that precedes the retrieve+publish
        # cycle uses MODE_READ, instead of MODE_REPAIR, and fails to get the
        # privkey that repair needs.
        d = self.publish_sdmf(b"")

        def _drop_share_zero(ign):
            # Remove shnum 0 from every server that holds it.
            peers = self._storage._peers
            for peerid in peers:
                for shnum in list(peers[peerid]):
                    if shnum == 0:
                        del peers[peerid][shnum]

        d.addCallback(_drop_share_zero)
        d.addCallback(lambda ign: self._fn2.check(Monitor()))
        d.addCallback(lambda cr: self._fn2.repair(cr))

        def _repair_succeeded(crr):
            self.assertThat(crr.get_successful(), Equals(True))

        d.addCallback(_repair_succeeded)
        return d
示例#30
0
    def _test_whether_repairable(self, publisher, nshares, expected_result):
        """Publish, trim down to nshares shares, then check and attempt a
        repair, asserting that recoverability and repair success both match
        expected_result."""
        d = publisher()

        def _trim_shares(ign):
            # Keep only share numbers below nshares on every server.
            peers = self._storage._peers
            for peerid in peers:
                for shnum in list(peers[peerid]):
                    if shnum >= nshares:
                        del peers[peerid][shnum]

        d.addCallback(_trim_shares)
        d.addCallback(lambda ign: self._fn.check(Monitor()))

        def _inspect(cr):
            self.assertThat(cr.is_healthy(), Equals(False))
            self.assertThat(cr.is_recoverable(), Equals(expected_result))
            return cr

        d.addCallback(_inspect)
        d.addCallback(lambda cr: self._fn.repair(cr))
        d.addCallback(lambda crr: self.assertThat(crr.get_successful(),
                                                  Equals(expected_result)))
        return d