Example 1
    def test_good_share_hosts(self):
        self.basedir = "checker/BalancingAct/1115"
        self.set_up_grid(num_servers=1)
        c0 = self.g.clients[0]
        c0.encoding_params['happy'] = 1
        c0.encoding_params['n'] = 4
        c0.encoding_params['k'] = 3

        DATA = "data" * 100
        d = c0.upload(Data(DATA, convergence=""))

        def _stash_immutable(ur):
            self.imm = c0.create_node_from_uri(ur.get_uri())
            self.uri = self.imm.get_uri()

        d.addCallback(_stash_immutable)
        d.addCallback(lambda ign: self.find_uri_shares(self.uri))

        def _store_shares(shares):
            self.shares = shares

        d.addCallback(_store_shares)

        def add_three(_, i):
            # Add a new server with just share 3
            self.add_server_with_share(i, self.uri, 3)
            #print self._pretty_shares_chart(self.uri)

        for i in range(1, 5):
            d.addCallback(add_three, i)

        def _check_and_repair(_):
            return self.imm.check_and_repair(Monitor())

        def _check_counts(crr, shares_good, good_share_hosts):
            prr = crr.get_post_repair_results()
            #print self._pretty_shares_chart(self.uri)
            self.failUnlessEqual(prr.get_share_counter_good(), shares_good)
            self.failUnlessEqual(prr.get_host_counter_good_shares(),
                                 good_share_hosts)

        """
        Initial sharemap:
            0:[A] 1:[A] 2:[A] 3:[A,B,C,D,E]
          4 good shares, but 5 good hosts
        After deleting all instances of share #3 and repairing:
            0:[A,B], 1:[A,C], 2:[A,D], 3:[E]
          Still 4 good shares and 5 good hosts
            """
        d.addCallback(_check_and_repair)
        d.addCallback(_check_counts, 4, 5)
        d.addCallback(lambda _: self.delete_shares_numbered(self.uri, [3]))
        d.addCallback(_check_and_repair)
        d.addCallback(_check_counts, 4, 5)
        d.addCallback(
            lambda _:
            [self.g.break_server(sid) for sid in self.g.get_all_serverids()])
        d.addCallback(_check_and_repair)
        d.addCallback(_check_counts, 0, 0)
        return d
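The assertions above turn on the difference between good shares and good
hosts. A minimal sketch of that arithmetic in plain Python, with no Tahoe
dependency (here `sharemap` maps a share number to the list of servers
holding it, as in the comment block inside the test):

    def count_good(sharemap):
        shares_good = len([shnum for shnum, servers in sharemap.items() if servers])
        hosts_good = len(set(s for servers in sharemap.values() for s in servers))
        return shares_good, hosts_good

    # The initial layout {0: ['A'], 1: ['A'], 2: ['A'], 3: ['A','B','C','D','E']}
    # yields (4, 5): four distinct shares spread across five distinct hosts.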
Example 2
    def create_immutable_directory(self, children, convergence=None):
        if convergence is None:
            convergence = self.secret_holder.get_convergence_secret()
        packed = pack_children(children, None, deep_immutable=True)
        uploadable = Data(packed, convergence)
        d = self.uploader.upload(uploadable, history=self.history)
        d.addCallback(lambda results: self.create_from_cap(None, results.uri))
        d.addCallback(self._create_dirnode)
        return d
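A hypothetical usage sketch (the names `nodemaker` and `filenode` below are
assumptions, not part of the example): `children` maps unicode child names to
(node, metadata) tuples, the shape Tahoe's dirnode layer expects, and the
returned Deferred fires with the new immutable directory node.

    children = {u"file1": (filenode, {})}
    d = nodemaker.create_immutable_directory(children)
    d.addCallback(lambda dirnode: dirnode.get_uri())  # the new dircap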
Example 3
    def _start(ign):
        self.set_up_grid(num_servers=4)
        self.c0 = self.g.clients[0]
        self.c0.encoding_params = {
            "k": 1,
            "happy": 4,
            "n": 4,
            "max_segment_size": 5,
        }
        self.uris = {}
        DATA = "data" * 100  # 400/5 = 80 blocks
        return self.c0.upload(Data(DATA, convergence=""))
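A worked check of the "400/5 = 80 blocks" comment (pure arithmetic, no Tahoe
dependency): with k=1, each 5-byte segment is encoded into one block per
share, so every one of the 4 shares carries one block per segment.

    DATA_LEN = 4 * 100             # len("data" * 100)
    MAX_SEGMENT_SIZE = 5
    num_segments = DATA_LEN // MAX_SEGMENT_SIZE
    assert num_segments == 80      # 80 segments -> 80 blocks per share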
Example 4
    def test_875(self):
        self.basedir = "checker/AddLease/875"
        self.set_up_grid(num_servers=1)
        c0 = self.g.clients[0]
        c0.encoding_params['happy'] = 1
        self.uris = {}
        DATA = "data" * 100
        d = c0.upload(Data(DATA, convergence=""))

        def _stash_immutable(ur):
            self.imm = c0.create_node_from_uri(ur.get_uri())

        d.addCallback(_stash_immutable)
        d.addCallback(
            lambda ign: c0.create_mutable_file(MutableData("contents")))

        def _stash_mutable(node):
            self.mut = node

        d.addCallback(_stash_mutable)

        def _check_cr(cr, which):
            self.failUnless(cr.is_healthy(), which)

        # these two should work normally
        d.addCallback(lambda ign: self.imm.check(Monitor(), add_lease=True))
        d.addCallback(_check_cr, "immutable-normal")
        d.addCallback(lambda ign: self.mut.check(Monitor(), add_lease=True))
        d.addCallback(_check_cr, "mutable-normal")

        really_did_break = []

        # now break the server's remote_add_lease call
        def _break_add_lease(ign):
            def broken_add_lease(*args, **kwargs):
                really_did_break.append(1)
                raise KeyError("intentional failure, should be ignored")

            assert self.g.servers_by_number[0].remote_add_lease
            self.g.servers_by_number[0].remote_add_lease = broken_add_lease

        d.addCallback(_break_add_lease)

        # and confirm that the files still look healthy
        d.addCallback(lambda ign: self.mut.check(Monitor(), add_lease=True))
        d.addCallback(_check_cr, "mutable-broken")
        d.addCallback(lambda ign: self.imm.check(Monitor(), add_lease=True))
        d.addCallback(_check_cr, "immutable-broken")

        d.addCallback(lambda ign: self.failUnless(really_did_break))
        return d
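The break-and-record trick used above, shown in isolation (generic Python,
not a Tahoe API; `obj` is a hypothetical stand-in): the mutable `calls` list
proves the stub was actually reached before its exception was swallowed.

    calls = []
    def broken_method(*args, **kwargs):
        calls.append(1)   # record the call before failing
        raise KeyError("intentional failure, should be ignored")
    obj.method = broken_method
    # ... exercise the code under test, then check: assert calls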
Example 5
    def startup(self, basedir):
        self.basedir = basedir
        self.set_up_grid(num_clients=2, num_servers=5)
        c1 = self.g.clients[1]
        # We need multiple segments to test crypttext hash trees that are
        # non-trivial (i.e. they have more than just one hash in them).
        c1.encoding_params['max_segment_size'] = 12
        # Tests that need to test servers of happiness using this should
        # set their own value for happy -- the default (7) breaks stuff.
        c1.encoding_params['happy'] = 1
        d = c1.upload(Data(TEST_DATA, convergence=""))
        def _after_upload(ur):
            self.uri = ur.get_uri()
            self.filenode = self.g.clients[0].create_node_from_uri(ur.get_uri())
            return self.uri
        d.addCallback(_after_upload)
        return d
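A sketch of why max_segment_size=12 forces a non-trivial crypttext hash tree:
any payload longer than one segment produces multiple leaves. TEST_DATA is
defined elsewhere in the module, so the 100-byte figure below is only an
illustration.

    import math

    def num_segments(payload_len, max_segment_size=12):
        return int(math.ceil(payload_len / float(max_segment_size)))

    assert num_segments(100) == 9   # a 100-byte payload -> 9 segments/leaves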
Example 6
    def test_upload(self):
        basedir = "no_network/Harness/upload"
        g = self.grid(basedir)
        g.setServiceParent(self.s)

        c0 = g.clients[0]
        DATA = "Data to upload" * 100
        data = Data(DATA, "")
        d = c0.upload(data)
        def _uploaded(res):
            n = c0.create_node_from_uri(res.get_uri())
            return download_to_data(n)
        d.addCallback(_uploaded)
        def _check(res):
            self.failUnlessEqual(res, DATA)
        d.addCallback(_check)

        return d
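The core round-trip pattern, pulled out of the harness (a sketch: `client`
stands in for any allmydata client object, and `download_to_data` is assumed
to be in scope, as it is in the test module above):

    from allmydata.immutable.upload import Data

    def roundtrip(client, payload):
        d = client.upload(Data(payload, ""))
        d.addCallback(lambda ur: client.create_node_from_uri(ur.get_uri()))
        d.addCallback(download_to_data)   # fires with the original payload
        return d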
Example 7
    def test_good_share_hosts(self):
        self.basedir = "checker/BalancingAct/1115"
        self.set_up_grid(num_servers=1)
        c0 = self.g.clients[0]
        c0.encoding_params['happy'] = 1
        c0.encoding_params['n'] = 4
        c0.encoding_params['k'] = 3

        DATA = "data" * 100
        d = c0.upload(Data(DATA, convergence=""))

        def _stash_immutable(ur):
            self.imm = c0.create_node_from_uri(ur.get_uri())
            self.uri = self.imm.get_uri()

        d.addCallback(_stash_immutable)
        d.addCallback(lambda ign: self.find_uri_shares(self.uri))

        def _store_shares(shares):
            self.shares = shares

        d.addCallback(_store_shares)

        def add_three(_, i):
            # Add a new server with just share 3
            self.add_server_with_share(i, self.uri, 3)
            #print self._pretty_shares_chart(self.uri)

        for i in range(1, 5):
            d.addCallback(add_three, i)

        def _check_and_repair(_):
            return self.imm.check_and_repair(Monitor())

        def _check_counts(crr, shares_good, good_share_hosts):
            prr = crr.get_post_repair_results()
            self.failUnlessEqual(prr.get_share_counter_good(), shares_good)
            self.failUnlessEqual(prr.get_host_counter_good_shares(),
                                 good_share_hosts)

        """
        Initial sharemap:
            0:[A] 1:[A] 2:[A] 3:[A,B,C,D,E]
          4 good shares, but 5 good hosts
        After deleting all instances of share #3 and repairing:
            0:[A], 1:[A,B], 2:[C,A], 3:[E]
# actually: {0: ['E', 'A'], 1: ['C', 'A'], 2: ['A', 'B'], 3: ['D']}
          Still 4 good shares but now 4 good hosts
            """
        d.addCallback(_check_and_repair)
        d.addCallback(_check_counts, 4, 5)
        d.addCallback(lambda _: self.delete_shares_numbered(self.uri, [3]))
        d.addCallback(_check_and_repair)

        # it can happen that our uploader will choose, e.g., to upload
        # to servers B, C, D, E .. which will mean that all 5 servers
        # now contain our shares (and thus "respond").

        def _check_happy(crr):
            prr = crr.get_post_repair_results()
            self.assertTrue(prr.get_host_counter_good_shares() >= 4)
            return crr

        d.addCallback(_check_happy)
        d.addCallback(lambda _: all(
            [self.g.break_server(sid) for sid in self.g.get_all_serverids()]))
        d.addCallback(_check_and_repair)
        d.addCallback(_check_counts, 0, 0)
        return d
Example 8
        def _maybe_upload(ign, now=None):
            self._log("_maybe_upload: relpath_u=%r, now=%r" % (relpath_u, now))
            if now is None:
                now = time.time()
            fp = self._get_filepath(relpath_u)
            pathinfo = get_pathinfo(unicode_from_filepath(fp))

            self._log("about to remove %r from pending set %r" %
                      (relpath_u, self._pending))
            try:
                self._pending.remove(relpath_u)
            except KeyError:
                self._log("WRONG that %r wasn't in pending" % (relpath_u,))
            encoded_path_u = magicpath.path2magic(relpath_u)

            if not pathinfo.exists:
                # FIXME merge this with the 'isfile' case.
                self._log("notified object %s disappeared (this is normal)" % quote_filepath(fp))
                self._count('objects_disappeared')

                db_entry = self._db.get_db_entry(relpath_u)
                if db_entry is None:
                    return False

                last_downloaded_timestamp = now  # is this correct?

                if is_new_file(pathinfo, db_entry):
                    new_version = db_entry.version + 1
                else:
                    self._log("Not uploading %r" % (relpath_u,))
                    self._count('objects_not_uploaded')
                    return False

                metadata = {
                    'version': new_version,
                    'deleted': True,
                    'last_downloaded_timestamp': last_downloaded_timestamp,
                }
                if db_entry.last_downloaded_uri is not None:
                    metadata['last_downloaded_uri'] = db_entry.last_downloaded_uri

                empty_uploadable = Data("", self._client.convergence)
                d2 = self._upload_dirnode.add_file(
                    encoded_path_u, empty_uploadable,
                    metadata=metadata,
                    overwrite=True,
                    progress=item.progress,
                )

                def _add_db_entry(filenode):
                    filecap = filenode.get_uri()
                    last_downloaded_uri = metadata.get('last_downloaded_uri', None)
                    self._db.did_upload_version(relpath_u, new_version, filecap,
                                                last_downloaded_uri, last_downloaded_timestamp,
                                                pathinfo)
                    self._count('files_uploaded')
                d2.addCallback(_add_db_entry)
                d2.addCallback(lambda ign: True)
                return d2
            elif pathinfo.islink:
                self.warn("WARNING: cannot upload symlink %s" % quote_filepath(fp))
                return False
            elif pathinfo.isdir:
                self._log("ISDIR")
                if not getattr(self._notifier, 'recursive_includes_new_subdirectories', False):
                    self._notifier.watch(fp, mask=self.mask, callbacks=[self._notify], recursive=True)

                db_entry = self._db.get_db_entry(relpath_u)
                self._log("isdir dbentry %r" % (db_entry,))
                if not is_new_file(pathinfo, db_entry):
                    self._log("NOT A NEW FILE")
                    return False

                uploadable = Data("", self._client.convergence)
                encoded_path_u += magicpath.path2magic(u"/")
                self._log("encoded_path_u =  %r" % (encoded_path_u,))
                upload_d = self._upload_dirnode.add_file(
                    encoded_path_u, uploadable,
                    metadata={"version": 0},
                    overwrite=True,
                    progress=item.progress,
                )
                def _dir_succeeded(ign):
                    self._log("created subdirectory %r" % (relpath_u,))
                    self._count('directories_created')
                def _dir_failed(f):
                    self._log("failed to create subdirectory %r" % (relpath_u,))
                    return f
                upload_d.addCallbacks(_dir_succeeded, _dir_failed)
                upload_d.addCallback(lambda ign: self._scan(relpath_u))
                upload_d.addCallback(lambda ign: True)
                return upload_d
            elif pathinfo.isfile:
                db_entry = self._db.get_db_entry(relpath_u)

                last_downloaded_timestamp = now

                if db_entry is None:
                    new_version = 0
                elif is_new_file(pathinfo, db_entry):
                    new_version = db_entry.version + 1
                else:
                    self._log("Not uploading %r" % (relpath_u,))
                    self._count('objects_not_uploaded')
                    return False

                metadata = {
                    'version': new_version,
                    'last_downloaded_timestamp': last_downloaded_timestamp,
                }
                if db_entry is not None and db_entry.last_downloaded_uri is not None:
                    metadata['last_downloaded_uri'] = db_entry.last_downloaded_uri

                uploadable = FileName(unicode_from_filepath(fp), self._client.convergence)
                d2 = self._upload_dirnode.add_file(
                    encoded_path_u, uploadable,
                    metadata=metadata,
                    overwrite=True,
                    progress=item.progress,
                )

                def _add_db_entry(filenode):
                    filecap = filenode.get_uri()
                    last_downloaded_uri = metadata.get('last_downloaded_uri', None)
                    self._db.did_upload_version(relpath_u, new_version, filecap,
                                                last_downloaded_uri, last_downloaded_timestamp,
                                                pathinfo)
                    self._count('files_uploaded')
                    return True
                d2.addCallback(_add_db_entry)
                return d2
            else:
                self.warn("WARNING: cannot process special file %s" % quote_filepath(fp))
                return False
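All three upload branches above defer to is_new_file to decide whether a
version bump is warranted. A plausible shape for that helper, offered as an
assumption based on the stat fields the db entry evidently records (the real
helper lives elsewhere in the magic-folder module):

    def is_new_file(pathinfo, db_entry):
        if db_entry is None:
            return True   # nothing recorded yet, so definitely new
        # changed iff the recorded stat triple no longer matches the file
        return ((pathinfo.size, pathinfo.ctime, pathinfo.mtime) !=
                (db_entry.size, db_entry.ctime, db_entry.mtime))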