    def test_check_without_verify(self):
        """Check says the file is healthy when none of the shares have been
        touched. It says that the file is unhealthy when all of them have
        been removed. It doesn't use any reads.
        """
        self.basedir = "repairer/Verifier/check_without_verify"
        self.set_up_grid(num_clients=2)
        d = self.upload_and_stash()
        d.addCallback(lambda ignored: self._stash_counts())
        d.addCallback(lambda ignored:
                      self.c0_filenode.check(Monitor(), verify=False))
        def _check(cr):
            self.failUnless(cr.is_healthy())
            delta_reads, delta_allocates, delta_writes = self._get_delta_counts()
            self.failIfBigger(delta_reads, 0)
        d.addCallback(_check)

        def _remove_all(ignored):
            for sh in self.find_uri_shares(self.uri):
                self.delete_share(sh)
        d.addCallback(_remove_all)

        d.addCallback(lambda ignored: self._stash_counts())
        d.addCallback(lambda ignored:
                      self.c0_filenode.check(Monitor(), verify=False))
        def _check2(cr):
            self.failIf(cr.is_healthy())
            delta_reads, delta_allocates, delta_writes = self._get_delta_counts()
            self.failIfBigger(delta_reads, 0)
        d.addCallback(_check2)
        return d
Example #2
    def test_deep_check_renderer(self):
        status = check_results.DeepCheckResults(b"fake-root-si")
        status.add_check(
            FakeCheckResults(b"<unhealthy/unrecoverable>", False, False),
            (u"fake", u"unhealthy", u"unrecoverable"))
        status.add_check(
            FakeCheckResults(b"<healthy/recoverable>", True, True),
            (u"fake", u"healthy", u"recoverable"))
        status.add_check(
            FakeCheckResults(b"<healthy/unrecoverable>", True, False),
            (u"fake", u"healthy", u"unrecoverable"))
        status.add_check(
            FakeCheckResults(b"<unhealthy/unrecoverable>", False, True),
            (u"fake", u"unhealthy", u"recoverable"))

        monitor = Monitor()
        monitor.set_status(status)

        elem = web_check_results.DeepCheckResultsRendererElement(monitor)
        doc = self.render_element(elem)
        soup = BeautifulSoup(doc, 'html5lib')

        assert_soup_has_favicon(self, soup)

        assert_soup_has_tag_with_content(self, soup, u"title",
                                         u"Tahoe-LAFS - Deep Check Results")

        assert_soup_has_tag_with_content(self, soup, u"h1",
                                         "Deep-Check Results for root SI=")

        assert_soup_has_tag_with_content(self, soup, u"li",
                                         u"Objects Checked: 4")

        assert_soup_has_tag_with_content(self, soup, u"li",
                                         u"Objects Healthy: 2")

        assert_soup_has_tag_with_content(self, soup, u"li",
                                         u"Objects Unhealthy: 2")

        assert_soup_has_tag_with_content(self, soup, u"li",
                                         u"Objects Unrecoverable: 2")

        assert_soup_has_tag_with_content(self, soup, u"li",
                                         u"Corrupt Shares: 4")

        assert_soup_has_tag_with_content(
            self, soup, u"h2", u"Files/Directories That Had Problems:")

        assert_soup_has_tag_with_content(
            self, soup, u"li", u"fake/unhealthy/recoverable: fake summary")

        assert_soup_has_tag_with_content(
            self, soup, u"li", u"fake/unhealthy/unrecoverable: fake summary")

        assert_soup_has_tag_with_content(
            self, soup, u"h2", u"Servers on which corrupt shares were found")

        assert_soup_has_tag_with_content(self, soup, u"h2", u"Corrupt Shares")

        assert_soup_has_tag_with_content(self, soup, u"h2", u"All Results")
Example #3
 def test_cancellation(self):
     """The monitor can be cancelled."""
     m = Monitor()
     self.assertFalse(m.is_cancelled())
     m.raise_if_cancelled()
     m.cancel()
     self.assertTrue(m.is_cancelled())
     with self.assertRaises(OperationCancelledError):
         m.raise_if_cancelled()
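
A minimal sketch (not from the Tahoe-LAFS test suite; the import path and helper names are assumptions) of how a long-running operation can cooperate with the cancellation API exercised above: it polls raise_if_cancelled() between units of work, so a caller's cancel() aborts it at the next check.

from allmydata.monitor import Monitor, OperationCancelledError  # assumed import path

def process_shares(monitor, shares):
    # Hypothetical worker loop: poll the monitor between units of work.
    processed = []
    for share in shares:
        monitor.raise_if_cancelled()   # raises OperationCancelledError once cancel() was called
        processed.append(share)        # stand-in for the real per-share work
    return processed

m = Monitor()
m.cancel()
try:
    process_shares(m, ["sh0", "sh1"])
except OperationCancelledError:
    pass  # a cancelled operation surfaces here instead of completing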
Example #4
 def _POST_check(self, req):
     verify = boolean_of_arg(get_arg(req, "verify", "false"))
     repair = boolean_of_arg(get_arg(req, "repair", "false"))
     add_lease = boolean_of_arg(get_arg(req, "add-lease", "false"))
     if repair:
         d = self.node.check_and_repair(Monitor(), verify, add_lease)
         d.addCallback(self._maybe_literal, CheckAndRepairResultsRenderer)
     else:
         d = self.node.check(Monitor(), verify, add_lease)
         d.addCallback(self._maybe_literal, CheckResultsRenderer)
     return d
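
For context, the handler above is normally reached through the Tahoe-LAFS web API's t=check operation. The sketch below (node URL and filecap are placeholders; the endpoint details are an assumption inferred from the arguments _POST_check reads) shows how the verify/repair/add-lease query arguments map onto an HTTP request.

import urllib.parse
import urllib.request

node_url = "http://127.0.0.1:3456"   # assumed local node web port
filecap = "URI:CHK:..."              # placeholder capability
url = "%s/uri/%s?t=check&verify=true&repair=false&add-lease=true" % (
    node_url, urllib.parse.quote(filecap, safe=""))
req = urllib.request.Request(url, data=b"", method="POST")
with urllib.request.urlopen(req) as resp:
    print(resp.read())               # check (or check-and-repair) results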
Example #5
    def test_875(self):
        self.basedir = "checker/AddLease/875"
        self.set_up_grid(num_servers=1)
        c0 = self.g.clients[0]
        c0.encoding_params['happy'] = 1
        self.uris = {}
        DATA = "data" * 100
        d = c0.upload(Data(DATA, convergence=""))

        def _stash_immutable(ur):
            self.imm = c0.create_node_from_uri(ur.get_uri())

        d.addCallback(_stash_immutable)
        d.addCallback(
            lambda ign: c0.create_mutable_file(MutableData("contents")))

        def _stash_mutable(node):
            self.mut = node

        d.addCallback(_stash_mutable)

        def _check_cr(cr, which):
            self.failUnless(cr.is_healthy(), which)

        # these two should work normally
        d.addCallback(lambda ign: self.imm.check(Monitor(), add_lease=True))
        d.addCallback(_check_cr, "immutable-normal")
        d.addCallback(lambda ign: self.mut.check(Monitor(), add_lease=True))
        d.addCallback(_check_cr, "mutable-normal")

        really_did_break = []

        # now break the server's remote_add_lease call
        def _break_add_lease(ign):
            def broken_add_lease(*args, **kwargs):
                really_did_break.append(1)
                raise KeyError("intentional failure, should be ignored")

            assert self.g.servers_by_number[0].remote_add_lease
            self.g.servers_by_number[0].remote_add_lease = broken_add_lease

        d.addCallback(_break_add_lease)

        # and confirm that the files still look healthy
        d.addCallback(lambda ign: self.mut.check(Monitor(), add_lease=True))
        d.addCallback(_check_cr, "mutable-broken")
        d.addCallback(lambda ign: self.imm.check(Monitor(), add_lease=True))
        d.addCallback(_check_cr, "immutable-broken")

        d.addCallback(lambda ign: self.failUnless(really_did_break))
        return d
Example #6
    def test_literal_filenode(self):
        DATA = "I am a short file."
        u = uri.LiteralFileURI(data=DATA)
        fn1 = LiteralFileNode(u)

        d = fn1.check(Monitor())
        def _check_checker_results(cr):
            self.failUnlessEqual(cr, None)
        d.addCallback(_check_checker_results)

        d.addCallback(lambda res: fn1.check(Monitor(), verify=True))
        d.addCallback(_check_checker_results)

        return d
Example #7
 def _update_servermap(self, mode=MODE_WRITE, update_range=None):
     """
     I update the servermap. I return a Deferred that fires when the
     servermap update is done.
     """
     if update_range:
         u = ServermapUpdater(self._node, self._storage_broker, Monitor(),
                              self._servermap,
                              mode=mode,
                              update_range=update_range)
     else:
         u = ServermapUpdater(self._node, self._storage_broker, Monitor(),
                              self._servermap,
                              mode=mode)
     return u.update()
Example #8
 def test_verify_one_bad_encprivkey(self):
     d = corrupt(None, self._storage, "enc_privkey", [9])  # bad privkey
     d.addCallback(lambda ignored: self._fn.check(Monitor(), verify=True))
     d.addCallback(self.check_bad, "test_verify_one_bad_encprivkey")
     d.addCallback(self.check_expected_failure, CorruptShareError,
                   "invalid privkey", "test_verify_one_bad_encprivkey")
     return d
Example #9
 def test_verify_one_bad_sharehash(self):
     d = corrupt(None, self._storage, "share_hash_chain", [9], 5)
     d.addCallback(lambda ignored: self._fn.check(Monitor(), verify=True))
     d.addCallback(self.check_bad, "test_verify_one_bad_sharehash")
     d.addCallback(self.check_expected_failure, CorruptShareError,
                   "corrupt hashes", "test_verify_one_bad_sharehash")
     return d
Example #10
 def test_check_mdmf_all_bad_blocks(self):
     d = self.publish_mdmf()
     d.addCallback(
         lambda ignored: corrupt(None, self._storage, "share_data"))
     d.addCallback(lambda ignored: self._fn.check(Monitor()))
     d.addCallback(self.check_good, "test_check_mdmf_all_bad_blocks")
     return d
Example #11
    def test_multiple_versions(self):
        # if we see a mix of versions in the grid, download_best_version
        # should get the latest one
        self._set_versions(dict([(i, 2) for i in (0, 2, 4, 6, 8)]))
        d = self._fn.download_best_version()
        d.addCallback(lambda res: self.failUnlessEqual(res, self.CONTENTS[4]))
        # and the checker should report problems
        d.addCallback(lambda res: self._fn.check(Monitor()))
        d.addCallback(self.check_bad, "test_multiple_versions")

        # but if everything is at version 2, that's what we should download
        d.addCallback(
            lambda res: self._set_versions(dict([(i, 2) for i in range(10)])))
        d.addCallback(lambda res: self._fn.download_best_version())
        d.addCallback(lambda res: self.failUnlessEqual(res, self.CONTENTS[2]))
        # if exactly one share is at version 3, we should still get v2
        d.addCallback(lambda res: self._set_versions({0: 3}))
        d.addCallback(lambda res: self._fn.download_best_version())
        d.addCallback(lambda res: self.failUnlessEqual(res, self.CONTENTS[2]))
        # but the servermap should see the unrecoverable version. This
        # depends upon the single newer share being queried early.
        d.addCallback(lambda res: self._fn.get_servermap(MODE_READ))

        def _check_smap(smap):
            self.failUnlessEqual(len(smap.unrecoverable_versions()), 1)
            newer = smap.unrecoverable_newer_versions()
            self.failUnlessEqual(len(newer), 1)
            verinfo, health = list(newer.items())[0]
            self.failUnlessEqual(verinfo[0], 4)
            self.failUnlessEqual(health, (1, 3))
            self.failIf(smap.needs_merge())

        d.addCallback(_check_smap)
        # if we have a mix of two parallel versions (s4a and s4b), we could
        # recover either
        d.addCallback(lambda res: self._set_versions({
            0: 3,
            2: 3,
            4: 3,
            6: 3,
            8: 3,
            1: 4,
            3: 4,
            5: 4,
            7: 4,
            9: 4
        }))
        d.addCallback(lambda res: self._fn.get_servermap(MODE_READ))

        def _check_smap_mixed(smap):
            self.failUnlessEqual(len(smap.unrecoverable_versions()), 0)
            newer = smap.unrecoverable_newer_versions()
            self.failUnlessEqual(len(newer), 0)
            self.failUnless(smap.needs_merge())

        d.addCallback(_check_smap_mixed)
        d.addCallback(lambda res: self._fn.download_best_version())
        d.addCallback(lambda res: self.failUnless(res == self.CONTENTS[3] or
                                                  res == self.CONTENTS[4]))
        return d
Example #12
    def test_repairer_servers_of_happiness(self):
        # The repairer is supposed to generate and place as many of the
        # missing shares as possible without caring about how they are
        # distributed.
        self.basedir = "repairer/Repairer/repairer_servers_of_happiness"
        self.set_up_grid(num_clients=2, num_servers=10)
        d = self.upload_and_stash()

        # Now delete some servers. We want to leave 3 servers, which
        # will allow us to restore the file to a healthy state without
        # distributing the shares widely enough to satisfy the default
        # happiness setting.
        def _delete_some_servers(ignored):
            for i in range(7):
                self.g.remove_server(self.g.servers_by_number[i].my_nodeid)

            assert len(self.g.servers_by_number) == 3

        d.addCallback(_delete_some_servers)
        # Now try to repair the file.
        d.addCallback(lambda ignored: self.c0_filenode.check_and_repair(
            Monitor(), verify=False))

        def _check_results(crr):
            self.failUnlessIsInstance(crr, check_results.CheckAndRepairResults)
            pre = crr.get_pre_repair_results()
            post = crr.get_post_repair_results()
            for p in (pre, post):
                self.failUnlessIsInstance(p, check_results.CheckResults)

            self.failIf(pre.is_healthy())
            self.failUnless(post.is_healthy())

        d.addCallback(_check_results)
        return d
Example #13
 def _then(ign):
     ss = self.g.servers_by_number[0]
     # we want to delete the share corresponding to the server
     # we're making not-respond
     share = next(ss._get_bucket_shares(self.c0_filenode.get_storage_index()))[0]
     self.delete_shares_numbered(self.uri, [share])
     return self.c0_filenode.check_and_repair(Monitor())
Example #14
 def make_servermap(self, mode=MODE_READ, oldmap=None):
     if oldmap is None:
         oldmap = ServerMap()
     smu = ServermapUpdater(self._fn, self._storage_broker, Monitor(),
                            oldmap, mode)
     d = smu.update()
     return d
Example #15
    def test_check_and_repair_readcap(self):
        # we can't currently repair from a mutable readcap: #625
        self.old_shares = []
        d = self.publish_one()
        d.addCallback(self.copy_shares)

        def _get_readcap(res):
            self._fn3 = self._fn.get_readonly()
            # also delete some shares
            for peerid, shares in list(self._storage._peers.items()):
                shares.pop(0, None)

        d.addCallback(_get_readcap)
        d.addCallback(lambda res: self._fn3.check_and_repair(Monitor()))

        def _check_results(crr):
            self.assertThat(ICheckAndRepairResults.providedBy(crr),
                            Equals(True))
            # we should detect the unhealthy, but skip over mutable-readcap
            # repairs until #625 is fixed
            self.assertThat(crr.get_pre_repair_results().is_healthy(),
                            Equals(False))
            self.assertThat(crr.get_repair_attempted(), Equals(False))
            self.assertThat(crr.get_post_repair_results().is_healthy(),
                            Equals(False))

        d.addCallback(_check_results)
        return d
Example #16
    def test_verify_mdmf_all_bad_sharedata(self):
        d = self.publish_mdmf()
        # On 8 of the shares, corrupt the beginning of the share data.
        # The signature check during the servermap update won't catch this.
        d.addCallback(lambda ignored: corrupt(None, self._storage,
                                              "share_data", list(range(8))))
        # On 2 of the shares, corrupt the end of the share data.
        # The signature check during the servermap update won't catch
        # this either, and the retrieval process will have to process
        # all of the segments before it notices.
        d.addCallback(
            lambda ignored:
            # the block hash tree comes right after the share data, so if we
            # corrupt a little before the block hash tree, we'll corrupt in the
            # last block of each share.
            corrupt(None, self._storage, "block_hash_tree", [8, 9], -5))
        d.addCallback(lambda ignored: self._fn.check(Monitor(), verify=True))
        # The verifier should flag the file as unhealthy, and should
        # list all 10 shares as bad.
        d.addCallback(self.check_bad, "test_verify_mdmf_all_bad_sharedata")

        def _check_num_bad(r):
            self.failIf(r.is_recoverable())
            smap = r.get_servermap()
            self.failUnlessEqual(len(smap.get_bad_shares()), 10)

        d.addCallback(_check_num_bad)
        return d
Example #17
    def test_repair_from_deletion_of_1(self):
        """ Repair replaces a share that got deleted. """
        self.basedir = "repairer/Repairer/repair_from_deletion_of_1"
        self.set_up_grid(num_clients=2)
        d = self.upload_and_stash()

        d.addCallback(
            lambda ignored: self.delete_shares_numbered(self.uri, [2]))
        d.addCallback(lambda ignored: self._stash_counts())
        d.addCallback(lambda ignored: self.c0_filenode.check_and_repair(
            Monitor(), verify=False))

        def _check_results(crr):
            self.failUnlessIsInstance(crr, check_results.CheckAndRepairResults)
            pre = crr.get_pre_repair_results()
            self.failUnlessIsInstance(pre, check_results.CheckResults)
            post = crr.get_post_repair_results()
            self.failUnlessIsInstance(post, check_results.CheckResults)
            delta_reads, delta_allocates, delta_writes = self._get_delta_counts()
            self.failIfBigger(delta_reads, MAX_DELTA_READS)
            self.failIfBigger(delta_allocates, DELTA_WRITES_PER_SHARE)
            self.failIf(pre.is_healthy())
            self.failUnless(post.is_healthy())

            # Now we inspect the filesystem to make sure that it has 10
            # shares.
            shares = self.find_uri_shares(self.uri)
            self.failIf(len(shares) < 10)

        d.addCallback(_check_results)

        d.addCallback(
            lambda ignored: self.c0_filenode.check(Monitor(), verify=True))
        d.addCallback(lambda vr: self.failUnless(vr.is_healthy()))

        # Now we delete seven of the other shares, then try to download the
        # file and assert that it succeeds at downloading and has the right
        # contents. This can't work unless it has already repaired the
        # previously-deleted share #2.

        d.addCallback(lambda ignored: self.delete_shares_numbered(
            self.uri, range(3, 10 + 1)))
        d.addCallback(lambda ignored: download_to_data(self.c1_filenode))
        d.addCallback(
            lambda newdata: self.failUnlessEqual(newdata, common.TEST_DATA))
        return d
Example #18
    def test_merge(self):
        self.old_shares = []
        d = self.publish_multiple()
        # repair will refuse to merge multiple highest seqnums unless you
        # pass force=True
        d.addCallback(lambda res: self._set_versions({
            0: 3,
            2: 3,
            4: 3,
            6: 3,
            8: 3,
            1: 4,
            3: 4,
            5: 4,
            7: 4,
            9: 4
        }))
        d.addCallback(self.copy_shares)
        d.addCallback(lambda res: self._fn.check(Monitor()))

        def _try_repair(check_results):
            ex = "There were multiple recoverable versions with identical seqnums, so force=True must be passed to the repair() operation"
            d2 = self.shouldFail(MustForceRepairError, "test_merge", ex,
                                 self._fn.repair, check_results)
            d2.addCallback(self.copy_shares)
            d2.addCallback(self.failIfSharesChanged)
            d2.addCallback(lambda res: check_results)
            return d2

        d.addCallback(_try_repair)
        d.addCallback(
            lambda check_results: self._fn.repair(check_results, force=True))

        # this should give us 10 shares of the highest roothash
        def _check_repair_results(rres):
            self.assertThat(rres.get_successful(), Equals(True))
            pass  # TODO

        d.addCallback(_check_repair_results)
        d.addCallback(lambda res: self._fn.get_servermap(MODE_CHECK))

        def _check_smap(smap):
            self.assertThat(smap.recoverable_versions(), HasLength(1))
            self.assertThat(smap.unrecoverable_versions(), HasLength(0))
            # now, which should have won?
            roothash_s4a = self.get_roothash_for(3)
            roothash_s4b = self.get_roothash_for(4)
            if roothash_s4b > roothash_s4a:
                expected_contents = self.CONTENTS[4]
            else:
                expected_contents = self.CONTENTS[3]
            new_versionid = smap.best_recoverable_version()
            self.assertThat(new_versionid[0], Equals(5))  # seqnum 5
            d2 = self._fn.download_version(smap, new_versionid)
            d2.addCallback(self.assertEqual, expected_contents)
            return d2

        d.addCallback(_check_smap)
        return d
Example #19
 def test_verify_one_bad_block(self):
     d = corrupt(None, self._storage, "share_data", [9])  # bad blocks
     # the Verifier *will* notice this, since it examines every byte
     d.addCallback(lambda ignored: self._fn.check(Monitor(), verify=True))
     d.addCallback(self.check_bad, "test_verify_one_bad_block")
     d.addCallback(self.check_expected_failure, CorruptShareError,
                   "block hash tree failure", "test_verify_one_bad_block")
     return d
Example #20
 def test_check_not_enough_shares(self):
     for shares in list(self._storage._peers.values()):
         for shnum in list(shares.keys()):
             if shnum > 0:
                 del shares[shnum]
     d = self._fn.check(Monitor())
     d.addCallback(self.check_bad, "test_check_not_enough_shares")
     return d
Example #21
    def test_harness(self):
        # This test is actually to make sure our test harness works, rather
        # than testing anything about Tahoe code itself.

        self.basedir = "repairer/Repairer/test_code"
        self.set_up_grid(num_clients=2)
        d = self.upload_and_stash()

        d.addCallback(lambda ignored: self.find_uri_shares(self.uri))
        def _stash_shares(oldshares):
            self.oldshares = oldshares
        d.addCallback(_stash_shares)
        d.addCallback(lambda ignored: self.find_uri_shares(self.uri))
        def _compare(newshares):
            self.failUnlessEqual(newshares, self.oldshares)
        d.addCallback(_compare)

        def _delete_8(ignored):
            shnum = self.oldshares[0][0]
            self.delete_shares_numbered(self.uri, [shnum])
            for sh in self.oldshares[1:8]:
                self.delete_share(sh)
        d.addCallback(_delete_8)
        d.addCallback(lambda ignored: self.find_uri_shares(self.uri))
        d.addCallback(lambda shares: self.failUnlessEqual(len(shares), 2))

        d.addCallback(lambda ignored:
                      self.shouldFail(NotEnoughSharesError, "then_download",
                                      None,
                                      download_to_data, self.c1_filenode))

        d.addCallback(lambda ignored:
                      self.shouldFail(NotEnoughSharesError, "then_repair",
                                      None,
                                      self.c1_filenode.check_and_repair,
                                      Monitor(), verify=False))

        # test share corruption
        def _test_corrupt(ignored):
            olddata = {}
            shares = self.find_uri_shares(self.uri)
            for (shnum, serverid, sharefile) in shares:
                olddata[ (shnum, serverid) ] = open(sharefile, "rb").read()
            for sh in shares:
                self.corrupt_share(sh, common._corrupt_uri_extension)
            for (shnum, serverid, sharefile) in shares:
                newdata = open(sharefile, "rb").read()
                self.failIfEqual(olddata[ (shnum, serverid) ], newdata)
        d.addCallback(_test_corrupt)

        def _remove_all(ignored):
            for sh in self.find_uri_shares(self.uri):
                self.delete_share(sh)
        d.addCallback(_remove_all)
        d.addCallback(lambda ignored: self.find_uri_shares(self.uri))
        d.addCallback(lambda shares: self.failUnlessEqual(shares, []))

        return d
Example #22
 def _start(ign):
     d = defer.succeed(None)
     for i in range(len(self.sh0_orig)):
         d.addCallback(_corrupt, i)
         d.addCallback(
             lambda ign: self.c1_filenode.check(Monitor(), verify=True))
         d.addCallback(_did_check, i)
         d.addCallback(_fix_sh0)
     return d
Example #23
 def test_verify_one_bad_encprivkey_uncheckable(self):
     d = corrupt(None, self._storage, "enc_privkey", [9])  # bad privkey
     readonly_fn = self._fn.get_readonly()
     # a read-only node has no way to validate the privkey
     d.addCallback(
         lambda ignored: readonly_fn.check(Monitor(), verify=True))
     d.addCallback(self.check_good,
                   "test_verify_one_bad_encprivkey_uncheckable")
     return d
Example #24
 def test_verify_mdmf_bad_encprivkey_uncheckable(self):
     d = self.publish_mdmf()
     d.addCallback(
         lambda ignored: corrupt(None, self._storage, "enc_privkey", [1]))
     d.addCallback(lambda ignored: self._fn.get_readonly())
     d.addCallback(lambda fn: fn.check(Monitor(), verify=True))
     d.addCallback(self.check_good,
                   "test_verify_mdmf_bad_encprivkey_uncheckable")
     return d
Example #25
 def test_verify_mdmf_bad_encprivkey(self):
     d = self.publish_mdmf()
     d.addCallback(
         lambda ignored: corrupt(None, self._storage, "enc_privkey", [0]))
     d.addCallback(lambda ignored: self._fn.check(Monitor(), verify=True))
     d.addCallback(self.check_bad, "test_verify_mdmf_bad_encprivkey")
     d.addCallback(self.check_expected_failure, CorruptShareError,
                   "privkey", "test_verify_mdmf_bad_encprivkey")
     return d
Example #26
 def test_verify_mdmf_one_bad_block(self):
     d = self.publish_mdmf()
     d.addCallback(
         lambda ignored: corrupt(None, self._storage, "share_data", [1]))
     d.addCallback(lambda ignored: self._fn.check(Monitor(), verify=True))
     # We should find one bad block here
     d.addCallback(self.check_bad, "test_verify_mdmf_one_bad_block")
     d.addCallback(self.check_expected_failure, CorruptShareError,
                   "block hash tree failure",
                   "test_verify_mdmf_one_bad_block")
     return d
Example #27
    def test_check_mdmf_no_shares(self):
        d = self.publish_mdmf()

        def _then(ignored):
            for share in list(self._storage._peers.values()):
                share.clear()

        d.addCallback(_then)
        d.addCallback(lambda ignored: self._fn.check(Monitor()))
        d.addCallback(self.check_bad, "test_check_mdmf_no_shares")
        return d
Example #28
            def _after_repair(checkandrepairresults):
                prerepairres = checkandrepairresults.get_pre_repair_results()
                postrepairres = checkandrepairresults.get_post_repair_results()
                after_repair_reads = self._count_reads()
                after_repair_allocates = self._count_writes()

                # The "* 2" in reads is because you might read a whole share
                # before figuring out that it is corrupted. It might be
                # possible to make this delta reads number a little tighter.
                self.failIf(
                    after_repair_reads - before_repair_reads >
                    (MAX_DELTA_READS * 2),
                    (after_repair_reads, before_repair_reads))
                # The "* 2" in writes is because each server has two shares,
                # and it is reasonable for repairer to conclude that there
                # are two shares that it should upload, if the server fails
                # to serve the first share.
                self.failIf(
                    after_repair_allocates - before_repair_allocates >
                    (DELTA_WRITES_PER_SHARE * 2),
                    (after_repair_allocates, before_repair_allocates))
                self.failIf(prerepairres.is_healthy(),
                            (prerepairres.data, corruptor_func))
                self.failUnless(postrepairres.is_healthy(),
                                (postrepairres.data, corruptor_func))

                # Now we inspect the filesystem to make sure that it has 10
                # shares.
                shares = self.find_all_shares()
                self.failIf(len(shares) < 10)

                # Now we assert that the verifier reports the file as healthy.
                d3 = self.filenode.check(Monitor(), verify=True)

                def _after_verify(verifyresults):
                    self.failUnless(verifyresults.is_healthy())

                d3.addCallback(_after_verify)

                # Now we delete seven of the other shares, then try to
                # download the file and assert that it succeeds at
                # downloading and has the right contents. This can't work
                # unless it has already repaired the previously-corrupted share.
                def _then_delete_7_and_try_a_download(unused=None):
                    shnums = list(range(10))
                    shnums.remove(shnum)
                    random.shuffle(shnums)
                    for sharenum in shnums[:7]:
                        self._delete_a_share(sharenum=sharenum)

                    return self._download_and_check_plaintext()

                d3.addCallback(_then_delete_7_and_try_a_download)
                return d3
Example #29
    def test_check_mdmf_not_enough_shares(self):
        d = self.publish_mdmf()

        def _then(ignored):
            for shares in list(self._storage._peers.values()):
                for shnum in list(shares.keys()):
                    if shnum > 0:
                        del shares[shnum]

        d.addCallback(_then)
        d.addCallback(lambda ignored: self._fn.check(Monitor()))
        d.addCallback(self.check_bad, "test_check_mdmf_not_enougH_shares")
        return d
Example #30
    def test_finish(self):
        """The monitor can finish."""
        m = Monitor()
        self.assertFalse(m.is_finished())
        d = m.when_done()
        self.assertNoResult(d)

        result = m.finish(300)
        self.assertEqual(result, 300)
        self.assertEqual(m.get_status(), 300)
        self.assertTrue(m.is_finished())

        d.addBoth(self.assertEqual, 300)
        return d
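
A minimal sketch (not from the test suite; the function and variable names are illustrative) of the producer side of the protocol that test_finish exercises: the operation records progress with set_status() and hands its final result to finish(), which fires the Deferred returned by when_done().

from allmydata.monitor import Monitor  # assumed import path

def run_operation(monitor, items):
    # Hypothetical operation body: report progress, then finish with a result.
    for n, item in enumerate(items, start=1):
        monitor.set_status("processed %d of %d" % (n, len(items)))
    return monitor.finish("done: %d items" % len(items))

m = Monitor()
d = m.when_done()
d.addCallback(print)             # fires with the value passed to finish()
run_operation(m, ["a", "b", "c"])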