Example #1
 def make_servermap(self, mode=MODE_READ, oldmap=None):
     if oldmap is None:
         oldmap = ServerMap()
     smu = ServermapUpdater(self._fn, self._storage_broker, Monitor(),
                            oldmap, mode)
     d = smu.update()
     return d
Example #2
 def _get_servermap(self, mode):
     """
     I am a serialized twin to get_servermap.
     """
     servermap = ServerMap()
     d = self._update_servermap(servermap, mode)
     # The servermap will tell us about the most recent size of the
     # file, so we may as well record that size so that callers can
     # get more data about us.
     if not self._most_recent_size:
         d.addCallback(self._get_size_from_servermap)
     return d
Example #3
    def _download_best_version(self):
        servermap = ServerMap()
        d = self._try_once_to_download_best_version(servermap, MODE_READ)

        def _maybe_retry(f):
            f.trap(NotEnoughSharesError)
            # the download is worth retrying once. Make sure to use the
            # old servermap, since it is what remembers the bad shares,
            # but use MODE_WRITE to make it look for even more shares.
            # TODO: consider allowing this to retry multiple times. This
            # approach will let us tolerate about 8 bad shares, I think.
            return self._try_once_to_download_best_version(
                servermap, MODE_WRITE)

        d.addErrback(_maybe_retry)
        return d
Example #4
 def make_servermap(self,
                    mode=MODE_CHECK,
                    fn=None,
                    sb=None,
                    update_range=None):
     if fn is None:
         fn = self._fn
     if sb is None:
         sb = self._storage_broker
     smu = ServermapUpdater(fn,
                            sb,
                            Monitor(),
                            ServerMap(),
                            mode,
                            update_range=update_range)
     d = smu.update()
     return d
Example #5
 def check(self, verify=False, add_lease=False):
     servermap = ServerMap()
     # Updating the servermap in MODE_CHECK will stand a good chance
     # of finding all of the shares, and getting a good idea of
     # recoverability, etc, without verifying.
     u = ServermapUpdater(self._node, self._storage_broker, self._monitor,
                          servermap, self.SERVERMAP_MODE,
                          add_lease=add_lease)
     if self._history:
         self._history.notify_mapupdate(u.get_status())
     d = u.update()
     d.addCallback(self._got_mapupdate_results)
     if verify:
         d.addCallback(self._verify_all_shares)
     d.addCallback(lambda res: servermap)
     d.addCallback(self._make_checker_results)
     return d
Example #6
 def check(self, verify=False, add_lease=False):
     servermap = ServerMap()
     u = ServermapUpdater(self._node,
                          self._storage_broker,
                          self._monitor,
                          servermap,
                          MODE_CHECK,
                          add_lease=add_lease)
     if self._history:
         self._history.notify_mapupdate(u.get_status())
     d = u.update()
     d.addCallback(self._got_mapupdate_results)
     if verify:
         d.addCallback(self._verify_all_shares)
     d.addCallback(lambda res: servermap)
     d.addCallback(self._fill_checker_results, self.results)
     d.addCallback(lambda res: self.results)
     return d
Example #7
    def start(self, force=False):
        # download, then re-publish. If a server had a bad share, try to
        # replace it with a good one of the same shnum.

        # The normal repair operation should not be used to replace
        # application-specific merging of alternate versions: i.e. if there
        # are multiple highest seqnums with different roothashes. In this
        # case, the application must use node.upload() (referencing the
        # servermap that indicates the multiple-heads condition), or
        # node.overwrite(). The repair() operation will refuse to run in
        # these conditions unless a force=True argument is provided. If
        # force=True is used, then the highest root hash will be reinforced.

        # Likewise, the presence of an unrecoverable latest version is an
        # unusual event, and should ideally be handled by retrying a couple
        # times (spaced out over hours or days) and hoping that new shares
        # will become available. If repair(force=True) is called, data will
        # be lost: a new seqnum will be generated with the same contents as
        # the most recent recoverable version, skipping over the lost
        # version. repair(force=False) will refuse to run in a situation like
        # this.

        # Repair is designed to fix the following injuries:
        #  missing shares: add new ones to get at least N distinct ones
        #  old shares: replace old shares with the latest version
        #  bogus shares (bad sigs): replace the bad one with a good one

        # first, update the servermap in MODE_REPAIR, which finds all shares
        # and makes sure we get the privkey.
        u = ServermapUpdater(self.node, self._storage_broker, self._monitor,
                             ServerMap(), MODE_REPAIR)
        if self._history:
            self._history.notify_mapupdate(u.get_status())
        d = u.update()
        d.addCallback(self._got_full_servermap, force)
        return d
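The long comment at the top of start() describes a simple policy: repair refuses to run when the servermap shows multiple highest-seqnum heads or an unrecoverable latest version, unless the caller passes force=True and accepts the consequences. The sketch below is only an illustrative restatement of that policy; the function name and boolean inputs are assumptions, not the actual Tahoe-LAFS API.

def repair_should_run(has_multiple_heads, latest_is_unrecoverable, force=False):
    # Hypothetical restatement of the policy described in the comments
    # above: refuse to repair a multiple-heads or unrecoverable-latest-
    # version condition unless force=True was explicitly requested.
    if (has_multiple_heads or latest_is_unrecoverable) and not force:
        return False
    return True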
Example #8
    def publish(self, newdata):
        """Publish the filenode's current contents.  Returns a Deferred that
        fires (with None) when the publish has done as much work as it's ever
        going to do, or errbacks with ConsistencyError if it detects a
        simultaneous write.
        """

        # 1: generate shares (SDMF: files are small, so we can do it in RAM)
        # 2: perform peer selection, get candidate servers
        #  2a: send queries to n+epsilon servers, to determine current shares
        #  2b: based upon responses, create target map
        # 3: send slot_testv_and_readv_and_writev messages
        # 4: as responses return, update share-dispatch table
        # 4a: may need to run recovery algorithm
        # 5: when enough responses are back, we're done

        self.log("starting publish, datalen is %s" % len(newdata))
        self._status.set_size(len(newdata))
        self._status.set_status("Started")
        self._started = time.time()

        self.done_deferred = defer.Deferred()

        self._writekey = self._node.get_writekey()
        assert self._writekey, "need write capability to publish"

        # first, which servers will we publish to? We require that the
        # servermap was updated in MODE_WRITE, so we can depend upon the
        # peerlist computed by that process instead of computing our own.
        if self._servermap:
            assert self._servermap.last_update_mode in (MODE_WRITE, MODE_CHECK)
            # we will push a version that is one larger than anything present
            # in the grid, according to the servermap.
            self._new_seqnum = self._servermap.highest_seqnum() + 1
        else:
            # If we don't have a servermap, that's because we're doing the
            # initial publish
            self._new_seqnum = 1
            self._servermap = ServerMap()
        self._status.set_servermap(self._servermap)

        self.log(format="new seqnum will be %(seqnum)d",
                 seqnum=self._new_seqnum,
                 level=log.NOISY)

        # having an up-to-date servermap (or using a filenode that was just
        # created for the first time) also guarantees that the following
        # fields are available
        self.readkey = self._node.get_readkey()
        self.required_shares = self._node.get_required_shares()
        assert self.required_shares is not None
        self.total_shares = self._node.get_total_shares()
        assert self.total_shares is not None
        self._status.set_encoding(self.required_shares, self.total_shares)

        self._pubkey = self._node.get_pubkey()
        assert self._pubkey
        self._privkey = self._node.get_privkey()
        assert self._privkey
        self._encprivkey = self._node.get_encprivkey()

        sb = self._storage_broker
        full_peerlist = [(s.get_serverid(), s.get_rref())
                         for s in sb.get_servers_for_psi(self._storage_index)]
        self.full_peerlist = full_peerlist  # for use later, immutable
        self.bad_peers = set()  # peerids who have errbacked/refused requests

        self.newdata = newdata
        self.salt = os.urandom(16)

        self.setup_encoding_parameters()

        # if we experience any surprises (writes which were rejected because
        # our test vector did not match, or shares which we didn't expect to
        # see), we set this flag and report an UncoordinatedWriteError at the
        # end of the publish process.
        self.surprised = False

        # as a failsafe, refuse to iterate through self.loop more than a
        # thousand times.
        self.looplimit = 1000

        # we keep track of three tables. The first is our goal: which shares
        # we want to see on which servers. This is initially populated by the
        # existing servermap.
        self.goal = set()  # pairs of (peerid, shnum) tuples

        # the second table is our list of outstanding queries: those which
        # are in flight and may or may not be delivered, accepted, or
        # acknowledged. Items are added to this table when the request is
        # sent, and removed when the response returns (or errbacks).
        self.outstanding = set()  # (peerid, shnum) tuples

        # the third is a table of successes: shares which have actually been
        # placed. These are populated when responses come back with success.
        # When self.placed == self.goal, we're done.
        self.placed = set()  # (peerid, shnum) tuples

        # we also keep a mapping from peerid to RemoteReference. Each time we
        # pull a connection out of the full peerlist, we add it to this for
        # use later.
        self.connections = {}

        self.bad_share_checkstrings = {}

        # we use the servermap to populate the initial goal: this way we will
        # try to update each existing share in place.
        for (peerid, shnum) in self._servermap.servermap:
            self.goal.add((peerid, shnum))
            self.connections[peerid] = self._servermap.connections[peerid]
        # then we add in all the shares that were bad (corrupted, bad
        # signatures, etc). We want to replace these.
        for key, old_checkstring in self._servermap.bad_shares.items():
            (peerid, shnum) = key
            self.goal.add(key)
            self.bad_share_checkstrings[key] = old_checkstring
            self.connections[peerid] = self._servermap.connections[peerid]

        # create the shares. We'll discard these as they are delivered. SDMF:
        # we're allowed to hold everything in memory.

        self._status.timings["setup"] = time.time() - self._started
        d = self._encrypt_and_encode()
        d.addCallback(self._generate_shares)

        def _start_pushing(res):
            self._started_pushing = time.time()
            return res

        d.addCallback(_start_pushing)
        d.addCallback(self.loop)  # trigger delivery
        d.addErrback(self._fatal_error)

        return self.done_deferred
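The three bookkeeping tables described in the comments of publish() drive the delivery loop: the operation is finished once every (peerid, shnum) pair in the goal has been placed and no queries remain outstanding. The following is a simplified sketch of that termination condition; the function name is hypothetical, and the real loop in Tahoe-LAFS also handles surprises, bad peers, and the loop limit.

def publish_is_complete(goal, outstanding, placed):
    # Done when no queries are still in flight and every targeted
    # (peerid, shnum) pair has been successfully placed.
    return not outstanding and goal.issubset(placed)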
Example #9
 def _get_servermap(self, mode):
     servermap = ServerMap()
     return self._update_servermap(servermap, mode)
Example #10
 def _modify(self, modifier, backoffer):
     servermap = ServerMap()
     if backoffer is None:
         backoffer = BackoffAgent().delay
     return self._modify_and_retry(servermap, modifier, backoffer, True)
Example #11
 def _overwrite(self, new_contents):
     servermap = ServerMap()
     d = self._update_servermap(servermap, mode=MODE_WRITE)
     d.addCallback(lambda ignored: self._upload(new_contents, servermap))
     return d
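All of the examples above follow the same core pattern: start from an empty ServerMap, hand it to a ServermapUpdater together with the mutable filenode, the storage broker, a Monitor, and the desired mode, then chain further work onto the Deferred returned by update(). A minimal sketch of that pattern, assuming node and storage_broker are already available and that the names used in the snippets (ServerMap, ServermapUpdater, Monitor, MODE_READ) are imported:

def refresh_servermap(node, storage_broker, mode=MODE_READ):
    servermap = ServerMap()
    u = ServermapUpdater(node, storage_broker, Monitor(),
                         servermap, mode)
    d = u.update()
    # As in Example #5, return the (now updated) ServerMap rather than
    # whatever the updater's Deferred happens to fire with.
    d.addCallback(lambda res: servermap)
    return d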