Esempio n. 1
0
 def __init__(self, node, segnum, k, logparent):
     """Track per-segment fetch state: candidate shares, per-server
     limits, outstanding requests, and validated blocks."""
     self._node = node  # _Node
     self.segnum = segnum
     self._k = k
     # Unused Share instances, sorted by "goodness" (RTT), then shnum.
     # Populated when DYHB responses arrive, or (for later segments) at
     # startup. Shares are removed when we call sh.get_block() on them.
     self._shares = []
     # Maps server to the set of Shares on that server for which we have
     # outstanding get_block() calls.
     self._shares_from_server = DictOfSets()
     # How many Shares we're allowed to pull from each server. Starts at
     # 1 and grows if we don't have sufficient diversity.
     self._max_shares_per_server = 1
     # Maps shnum to the outstanding (and not OVERDUE) Share providing it.
     self._active_share_map = {}
     # Shares currently in the OVERDUE state.
     self._overdue_share_map = DictOfSets()
     self._lp = logparent
     # Maps Share to EventStreamObserver, for active shares only.
     self._share_observers = {}
     # Maps shnum to validated block data.
     self._blocks = {}
     self._no_more_shares = False
     self._last_failure = None
     self._running = True
Esempio n. 2
0
 def __init__(self, node, segnum, k, logparent):
     """Track per-segment fetch state: candidate shares, per-server
     limits, outstanding requests, and validated blocks."""
     self._node = node # _Node
     self.segnum = segnum
     self._k = k
     self._shares = [] # unused Share instances, sorted by "goodness"
                       # (RTT), then shnum. This is populated when DYHB
                       # responses arrive, or (for later segments) at
                       # startup. We remove shares from it when we call
                       # sh.get_block() on them.
     self._shares_from_server = DictOfSets() # maps serverid to set of
                                             # Shares on that server for
                                             # which we have outstanding
                                             # get_block() calls.
     self._max_shares_per_server = 1 # how many Shares we're allowed to
                                     # pull from each server. This starts
                                     # at 1 and grows if we don't have
                                     # sufficient diversity.
     self._active_share_map = {} # maps shnum to outstanding (and not
                                 # OVERDUE) Share that provides it.
     self._overdue_share_map = DictOfSets() # shares in the OVERDUE state
     self._lp = logparent
     self._share_observers = {} # maps Share to EventStreamObserver for
                                # active ones
     self._blocks = {} # maps shnum to validated block data
     self._no_more_shares = False
     self._last_failure = None
     self._running = True
Esempio n. 3
0
 def make_versionmap(self):
     """Return a dict that maps versionid to sets of (shnum, peerid,
     timestamp) tuples."""
     vmap = DictOfSets()
     for key, value in self.servermap.items():
         (peerid, shnum) = key
         (verinfo, timestamp) = value
         vmap.add(verinfo, (shnum, peerid, timestamp))
     return vmap
Esempio n. 4
0
    def _setup_download(self):
        """Prepare this download: pick usable shares from the servermap,
        build (or reuse) one SlotReader per share, and create the hash
        trees that will validate downloaded blocks.

        Calls self._raise_notenoughshareserror() if fewer than k distinct
        share numbers are available.
        """
        self._started = time.time()
        self._status.set_status("Retrieving Shares")

        # how many shares do we need?
        (seqnum,
         root_hash,
         IV,
         segsize,
         datalength,
         k,
         N,
         prefix,
         offsets_tuple) = self.verinfo

        # first, which servers can we use?
        versionmap = self.servermap.make_versionmap()
        shares = versionmap[self.verinfo]
        # this sharemap is consumed as we decide to send requests
        self.remaining_sharemap = DictOfSets()
        for (shnum, server, timestamp) in shares:
            self.remaining_sharemap.add(shnum, server)
            # Reuse the SlotReader from the servermap.
            key = (self.verinfo, server.get_serverid(),
                   self._storage_index, shnum)
            if key in self.servermap.proxies:
                reader = self.servermap.proxies[key]
            else:
                reader = MDMFSlotReadProxy(server.get_rref(),
                                           self._storage_index, shnum, None)
            reader.server = server
            self.readers[shnum] = reader

        if len(self.remaining_sharemap) < k:
            self._raise_notenoughshareserror()

        self.shares = {} # maps shnum to validated blocks
        self._active_readers = [] # list of active readers for this dl.
        self._block_hash_trees = {} # shnum => hashtree

        for i in xrange(self._total_shares):
            # So we don't have to do this later.
            self._block_hash_trees[i] = hashtree.IncompleteHashTree(self._num_segments)

        # We need one share hash tree for the entire file; its leaves
        # are the roots of the block hash trees for the shares that
        # comprise it, and its root is in the verinfo.
        self.share_hash_tree = hashtree.IncompleteHashTree(N)
        self.share_hash_tree.set_hashes({0: root_hash})
Esempio n. 5
0
    def _setup_download(self):
        """Prepare this download: pick usable shares from the servermap,
        build one SlotReadProxy per share (pre-seeded from the node's
        cache), and create the share hash tree."""
        self._started = time.time()
        self._status.set_status("Retrieving Shares")

        # how many shares do we need?
        (seqnum, root_hash, IV, segsize, datalength,
         k, N, prefix, offsets_tuple) = self.verinfo

        # first, which servers can we use?
        versionmap = self.servermap.make_versionmap()
        usable_shares = versionmap[self.verinfo]
        # this sharemap is consumed as we decide to send requests
        self.remaining_sharemap = DictOfSets()
        for (shnum, server, timestamp) in usable_shares:
            self.remaining_sharemap.add(shnum, server)
            # If the servermap update fetched anything, it fetched at
            # least 1 KiB, so that is how much we ask the cache for.
            # TODO: Change the cache methods to allow us to fetch all of
            # the data that they have, then change this method to do that.
            any_cache = self._node._read_from_cache(self.verinfo, shnum,
                                                    0, 1000)
            reader = MDMFSlotReadProxy(server.get_rref(),
                                       self._storage_index, shnum,
                                       any_cache)
            reader.server = server
            self.readers[shnum] = reader
        assert len(self.remaining_sharemap) >= k

        self.shares = {}  # maps shnum to validated blocks
        self._active_readers = []  # list of active readers for this dl.
        self._block_hash_trees = {}  # shnum => hashtree

        # One share hash tree covers the entire file; its leaves are the
        # roots of the per-share block hash trees, and its root is in the
        # verinfo.
        self.share_hash_tree = hashtree.IncompleteHashTree(N)
        self.share_hash_tree.set_hashes({0: root_hash})
Esempio n. 6
0
    def download(self):
        """Begin the download and return a Deferred that fires when it
        completes (or errbacks on failure)."""
        self._done_deferred = defer.Deferred()
        self._started = time.time()
        self._status.set_status("Retrieving Shares")

        # first, which servers can we use?
        versionmap = self.servermap.make_versionmap()
        usable_shares = versionmap[self.verinfo]
        # this sharemap is consumed as we decide to send requests
        self.remaining_sharemap = DictOfSets()
        for (shnum, peerid, timestamp) in usable_shares:
            self.remaining_sharemap.add(shnum, peerid)

        self.shares = {}  # maps shnum to validated blocks

        # how many shares do we need?
        (seqnum, root_hash, IV, segsize, datalength, k, N, prefix,
         offsets_tuple) = self.verinfo
        assert len(self.remaining_sharemap) >= k
        # Start with the lowest shnums available: FEC is faster when it
        # can work with "primary shares".
        self.active_shnums = set(sorted(self.remaining_sharemap.keys())[:k])
        for shnum in self.active_shnums:
            # Use an arbitrary peer holding the share. If shares are
            # doubled up (more than one share per peer), this could be
            # made faster by spreading the load across peers, but that
            # algorithm is more complicated than is worth writing now,
            # and a well-provisioned grid shouldn't have multiple shares
            # per peer anyway.
            peerid = list(self.remaining_sharemap[shnum])[0]
            self.get_data(shnum, peerid)

        # Control flow beyond this point is a state machine: query
        # responses are the input; we may send more queries or produce
        # a result.

        return self._done_deferred
Esempio n. 7
0
    def _setup_download(self):
        """Prepare this download: pick usable shares from the servermap,
        build one SlotReadProxy per share (pre-seeded from the node's
        cache), and create the share hash tree.

        Asserts that at least k distinct share numbers are available.
        """
        self._started = time.time()
        self._status.set_status("Retrieving Shares")

        # how many shares do we need?
        (seqnum, root_hash, IV, segsize, datalength, k, N, prefix,
         offsets_tuple) = self.verinfo

        # first, which servers can we use?
        versionmap = self.servermap.make_versionmap()
        shares = versionmap[self.verinfo]
        # this sharemap is consumed as we decide to send requests
        self.remaining_sharemap = DictOfSets()
        for (shnum, server, timestamp) in shares:
            self.remaining_sharemap.add(shnum, server)
            # If the servermap update fetched anything, it fetched at least 1
            # KiB, so we ask for that much.
            # TODO: Change the cache methods to allow us to fetch all of the
            # data that they have, then change this method to do that.
            any_cache = self._node._read_from_cache(self.verinfo, shnum, 0,
                                                    1000)
            reader = MDMFSlotReadProxy(server.get_rref(), self._storage_index,
                                       shnum, any_cache)
            reader.server = server
            self.readers[shnum] = reader
        assert len(self.remaining_sharemap) >= k

        self.shares = {}  # maps shnum to validated blocks
        self._active_readers = []  # list of active readers for this dl.
        self._block_hash_trees = {}  # shnum => hashtree

        # We need one share hash tree for the entire file; its leaves
        # are the roots of the block hash trees for the shares that
        # comprise it, and its root is in the verinfo.
        self.share_hash_tree = hashtree.IncompleteHashTree(N)
        self.share_hash_tree.set_hashes({0: root_hash})
Esempio n. 8
0
    def _gather_repair_results(self, ur, cr, crr):
        """Merge the upload results (ur) into the pre-repair check
        results (cr) to build post-repair results, attach them to crr,
        and return crr."""
        assert IUploadResults.providedBy(ur), ur

        verifycap = self._verifycap
        responding = set(cr.get_servers_responding())

        # Merge the share locations seen by the checker with those
        # reported by the repair upload.
        merged = DictOfSets()
        assert isinstance(cr.get_sharemap(), DictOfSets)
        for shnum, servers in cr.get_sharemap().items():
            for server in servers:
                merged.add(shnum, server)
        for shnum, servers in ur.get_sharemap().items():
            for server in servers:
                merged.add(shnum, server)
                responding.add(server)
        responding = sorted(responding)

        # Distinct servers that hold at least one share.
        good_hosts = len(set().union(*merged.values()))
        is_healthy = bool(len(merged) >= verifycap.total_shares)
        is_recoverable = bool(len(merged) >= verifycap.needed_shares)

        count_happiness = servers_of_happiness(merged)

        corrupt = cr.get_corrupt_shares()
        incompatible = cr.get_incompatible_shares()
        prr = CheckResults(
            cr.get_uri(),
            cr.get_storage_index(),
            healthy=is_healthy,
            recoverable=is_recoverable,
            count_happiness=count_happiness,
            count_shares_needed=verifycap.needed_shares,
            count_shares_expected=verifycap.total_shares,
            count_shares_good=len(merged),
            count_good_share_hosts=good_hosts,
            count_recoverable_versions=int(is_recoverable),
            count_unrecoverable_versions=int(not is_recoverable),
            servers_responding=list(responding),
            sharemap=merged,
            count_wrong_shares=0,  # no such thing as wrong, for immutable
            list_corrupt_shares=corrupt,
            count_corrupt_shares=len(corrupt),
            list_incompatible_shares=incompatible,
            count_incompatible_shares=len(incompatible),
            summary="",
            report=[],
            share_problems=[],
            servermap=None)
        crr.repair_successful = is_healthy
        crr.post_repair_results = prr
        return crr
Esempio n. 9
0
    def _gather_repair_results(self, ur, cr, crr):
        """Merge the upload results (ur) into the pre-repair check
        results (cr) to build post-repair results, attach them to crr,
        and return crr."""
        assert IUploadResults.providedBy(ur), ur
        # clone the cr (check results) to form the basis of the
        # prr (post-repair results)

        verifycap = self._verifycap
        servers_responding = set(cr.get_servers_responding())
        # merge the checker's sharemap with the one reported by the upload
        sm = DictOfSets()
        assert isinstance(cr.get_sharemap(), DictOfSets)
        for shnum, servers in cr.get_sharemap().items():
            for server in servers:
                sm.add(shnum, server)
        for shnum, servers in ur.get_sharemap().items():
            for server in servers:
                sm.add(shnum, server)
                servers_responding.add(server)
        servers_responding = sorted(servers_responding)

        # distinct servers that hold at least one share
        good_hosts = len(reduce(set.union, sm.values(), set()))
        is_healthy = bool(len(sm) >= verifycap.total_shares)
        is_recoverable = bool(len(sm) >= verifycap.needed_shares)

        # TODO: this may be wrong, see ticket #1115 comment:27 and ticket #1784.
        needs_rebalancing = bool(len(sm) >= verifycap.total_shares)

        prr = CheckResults(cr.get_uri(), cr.get_storage_index(),
                           healthy=is_healthy, recoverable=is_recoverable,
                           needs_rebalancing=needs_rebalancing,
                           count_shares_needed=verifycap.needed_shares,
                           count_shares_expected=verifycap.total_shares,
                           count_shares_good=len(sm),
                           count_good_share_hosts=good_hosts,
                           count_recoverable_versions=int(is_recoverable),
                           count_unrecoverable_versions=int(not is_recoverable),
                           servers_responding=list(servers_responding),
                           sharemap=sm,
                           count_wrong_shares=0, # no such thing as wrong, for immutable
                           list_corrupt_shares=cr.get_corrupt_shares(),
                           count_corrupt_shares=len(cr.get_corrupt_shares()),
                           list_incompatible_shares=cr.get_incompatible_shares(),
                           count_incompatible_shares=len(cr.get_incompatible_shares()),
                           summary="",
                           report=[],
                           share_problems=[],
                           servermap=None)
        crr.repair_successful = is_healthy
        crr.post_repair_results = prr
        return crr
Esempio n. 10
0
    def download(self):
        """Begin the download and return a Deferred that fires when it
        completes (or errbacks on failure)."""
        self._done_deferred = defer.Deferred()
        self._started = time.time()
        self._status.set_status("Retrieving Shares")

        # first, which servers can we use?
        versionmap = self.servermap.make_versionmap()
        shares = versionmap[self.verinfo]
        # this sharemap is consumed as we decide to send requests
        self.remaining_sharemap = DictOfSets()
        for (shnum, peerid, timestamp) in shares:
            self.remaining_sharemap.add(shnum, peerid)

        self.shares = {} # maps shnum to validated blocks

        # how many shares do we need?
        (seqnum, root_hash, IV, segsize, datalength, k, N, prefix,
         offsets_tuple) = self.verinfo
        assert len(self.remaining_sharemap) >= k
        # we start with the lowest shnums we have available, since FEC is
        # faster if we're using "primary shares"
        self.active_shnums = set(sorted(self.remaining_sharemap.keys())[:k])
        for shnum in self.active_shnums:
            # we use an arbitrary peer who has the share. If shares are
            # doubled up (more than one share per peer), we could make this
            # run faster by spreading the load among multiple peers. But the
            # algorithm to do that is more complicated than I want to write
            # right now, and a well-provisioned grid shouldn't have multiple
            # shares per peer.
            peerid = list(self.remaining_sharemap[shnum])[0]
            self.get_data(shnum, peerid)

        # control flow beyond this point: state machine. Receiving responses
        # from queries is the input. We might send out more queries, or we
        # might produce a result.

        return self._done_deferred
Esempio n. 11
0
class Retrieve:
    # this class is currently single-use. Eventually (in MDMF) we will make
    # it multi-use, in which case you can call download(range) multiple
    # times, and each will have a separate response chain. However the
    # Retrieve object will remain tied to a specific version of the file, and
    # will use a single ServerMap instance.
    implements(IPushProducer)

    def __init__(self, filenode, servermap, verinfo, fetch_privkey=False,
                 verify=False):
        """Prepare to retrieve one specific version (verinfo) of a mutable
        file (filenode), using share locations from servermap.

        If fetch_privkey is True, also fetch the encrypted private key
        (unless the node already has it). If verify is True, download and
        validate all shares instead of producing plaintext.
        """
        self._node = filenode
        assert self._node.get_pubkey()
        self._storage_index = filenode.get_storage_index()
        assert self._node.get_readkey()
        self._last_failure = None
        prefix = si_b2a(self._storage_index)[:5]
        self._log_number = log.msg("Retrieve(%s): starting" % prefix)
        self._outstanding_queries = {} # maps (peerid,shnum) to start_time
        self._running = True
        self._decoding = False
        self._bad_shares = set()

        self.servermap = servermap
        assert self._node.get_pubkey()
        self.verinfo = verinfo
        # during repair, we may be called upon to grab the private key, since
        # it wasn't picked up during a verify=False checker run, and we'll
        # need it for repair to generate a new version.
        self._need_privkey = verify or (fetch_privkey
                                        and not self._node.get_privkey())

        if self._need_privkey:
            # TODO: Evaluate the need for this. We'll use it if we want
            # to limit how many queries are on the wire for the privkey
            # at once.
            self._privkey_query_markers = [] # one Marker for each time we've
                                             # tried to get the privkey.

        # verify means that we are using the downloader logic to verify all
        # of our shares. This tells the downloader a few things.
        #
        # 1. We need to download all of the shares.
        # 2. We don't need to decode or decrypt the shares, since our
        #    caller doesn't care about the plaintext, only the
        #    information about which shares are or are not valid.
        # 3. When we are validating readers, we need to validate the
        #    signature on the prefix. Do we? We already do this in the
        #    servermap update?
        self._verify = verify

        self._status = RetrieveStatus()
        self._status.set_storage_index(self._storage_index)
        self._status.set_helper(False)
        self._status.set_progress(0.0)
        self._status.set_active(True)
        (seqnum, root_hash, IV, segsize, datalength, k, N, prefix,
         offsets_tuple) = self.verinfo
        self._status.set_size(datalength)
        self._status.set_encoding(k, N)
        self.readers = {}
        self._stopped = False
        self._pause_deferred = None
        self._offset = None
        self._read_length = None
        self.log("got seqnum %d" % self.verinfo[0])


    def get_status(self):
        """Return the RetrieveStatus object tracking this download."""
        return self._status

    def log(self, *args, **kwargs):
        """Log a message, defaulting parent to this Retrieve's log entry
        and facility to tahoe.mutable.retrieve."""
        kwargs.setdefault("parent", self._log_number)
        kwargs.setdefault("facility", "tahoe.mutable.retrieve")
        return log.msg(*args, **kwargs)

    def _set_current_status(self, state):
        """Show segment progress plus *state* on the status line."""
        self._status.set_status("segment %d/%d (%s)"
                                % (self._current_segment,
                                   self._last_segment, state))

    ###################
    # IPushProducer

    def pauseProducing(self):
        """
        IPushProducer: called by the download target when it has received
        too much data. Stops producing until resumeProducing() is called.
        A no-op if we are already paused.
        """
        if self._pause_deferred is not None:
            return  # already paused

        # Remember the current status line so resumeProducing() can
        # restore it.
        self._old_status = self._status.get_status()
        self._set_current_status("paused")

        # Fired when the download is unpaused.
        self._pause_deferred = defer.Deferred()


    def resumeProducing(self):
        """
        IPushProducer: called by the download target once it is ready to
        receive data again. A no-op if we are not paused.
        """
        paused = self._pause_deferred
        if paused is None:
            return  # not paused

        self._pause_deferred = None
        self._status.set_status(self._old_status)

        # Release anything blocked in _check_for_paused.
        eventually(paused.callback, None)

    def stopProducing(self):
        # IPushProducer: the consumer wants no more data. Mark the
        # download stopped, then unpause so that anything blocked in
        # _check_for_paused runs and raises DownloadStopped.
        self._stopped = True
        self.resumeProducing()


    def _check_for_paused(self, res):
        """
        Called just before each write to the consumer. Returns a Deferred
        that fires with *res*: immediately if the download is not paused,
        otherwise when it is unpaused. Raises DownloadStopped if the
        consumer has called stopProducing().
        """
        if self._stopped:
            raise DownloadStopped("our Consumer called stopProducing()")
        if self._pause_deferred is None:
            return defer.succeed(res)
        # Paused: chain a fresh Deferred off the pause Deferred so it
        # fires with res once we unpause.
        d = defer.Deferred()
        self._pause_deferred.addCallback(lambda ignored: d.callback(res))
        return d


    def download(self, consumer=None, offset=0, size=None):
        """Start the download, delivering plaintext to *consumer* (an
        IConsumer; may be None only when verifying). offset/size select a
        byte range; size=None means read to the end. Returns a Deferred
        that fires when the download completes (or errbacks on failure).
        """
        assert IConsumer.providedBy(consumer) or self._verify

        if consumer:
            self._consumer = consumer
            # we provide IPushProducer, so streaming=True, per
            # IConsumer.
            self._consumer.registerProducer(self, streaming=True)

        self._done_deferred = defer.Deferred()
        self._offset = offset
        self._read_length = size
        self._setup_download()
        self._setup_encoding_parameters()
        self.log("starting download")
        self._started_fetching = time.time()
        # The download process beyond this is a state machine.
        # _add_active_peers will select the peers that we want to use
        # for the download, and then attempt to start downloading. After
        # each segment, it will check for doneness, reacting to broken
        # peers and corrupt shares as necessary. If it runs out of good
        # peers before downloading all of the segments, _done_deferred
        # will errback.  Otherwise, it will eventually callback with the
        # contents of the mutable file.
        self.loop()
        return self._done_deferred

    def loop(self):
        """Run one turn of the download state machine: activate enough
        peers, then fetch the current segment. When finished,
        _download_current_segment calls _done; otherwise it calls loop()
        again. Errors are routed to self._error."""
        d = fireEventually(None)  # avoid #237 recursion limit problem
        d.addCallback(lambda ign: self._activate_enough_peers())
        d.addCallback(lambda ign: self._download_current_segment())
        d.addErrback(self._error)

    def _setup_download(self):
        """Prepare this download: pick usable shares from the servermap,
        build one SlotReadProxy per share (pre-seeded from the node's
        cache), and create the share hash tree.

        Asserts that at least k distinct share numbers are available.
        """
        self._started = time.time()
        self._status.set_status("Retrieving Shares")

        # how many shares do we need?
        (seqnum,
         root_hash,
         IV,
         segsize,
         datalength,
         k,
         N,
         prefix,
         offsets_tuple) = self.verinfo

        # first, which servers can we use?
        versionmap = self.servermap.make_versionmap()
        shares = versionmap[self.verinfo]
        # this sharemap is consumed as we decide to send requests
        self.remaining_sharemap = DictOfSets()
        for (shnum, peerid, timestamp) in shares:
            self.remaining_sharemap.add(shnum, peerid)
            # If the servermap update fetched anything, it fetched at least 1
            # KiB, so we ask for that much.
            # TODO: Change the cache methods to allow us to fetch all of the
            # data that they have, then change this method to do that.
            any_cache = self._node._read_from_cache(self.verinfo, shnum,
                                                    0, 1000)
            ss = self.servermap.connections[peerid]
            reader = MDMFSlotReadProxy(ss,
                                       self._storage_index,
                                       shnum,
                                       any_cache)
            reader.peerid = peerid
            self.readers[shnum] = reader
        assert len(self.remaining_sharemap) >= k

        self.shares = {} # maps shnum to validated blocks
        self._active_readers = [] # list of active readers for this dl.
        self._block_hash_trees = {} # shnum => hashtree

        # We need one share hash tree for the entire file; its leaves
        # are the roots of the block hash trees for the shares that
        # comprise it, and its root is in the verinfo.
        self.share_hash_tree = hashtree.IncompleteHashTree(N)
        self.share_hash_tree.set_hashes({0: root_hash})

    def decode(self, blocks_and_salts, segnum):
        """
        Shortcut used by the mutable-file update process: decode and
        decrypt segment *segnum* from an already-fetched collection of
        blocks and salts, returning a Deferred that fires with the
        plaintext for that segment.
        """
        # _setup_encoding_parameters wants _block_hash_trees to be set,
        # but we have no use for block hash trees here.
        self._block_hash_trees = None
        self._setup_encoding_parameters()

        # Reshape the input into the form _decode_blocks expects.
        packed = [(True, [item]) for item in blocks_and_salts.items()]

        d = self._decode_blocks(packed, segnum)
        d.addCallback(self._decrypt_segment)
        return d


    def _setup_encoding_parameters(self):
        """
        I set up the encoding parameters, including k, n, the number
        of segments associated with this file, the segment decoders,
        and the start/current/last segment indices for the requested
        byte range (self._offset / self._read_length).
        """
        (seqnum,
         root_hash,
         IV,
         segsize,
         datalength,
         k,
         n,
         known_prefix,
         offsets_tuple) = self.verinfo
        self._required_shares = k
        self._total_shares = n
        self._segment_size = segsize
        self._data_length = datalength

        # An empty IV marks an MDMF version; SDMF carries a real IV.
        if not IV:
            self._version = MDMF_VERSION
        else:
            self._version = SDMF_VERSION

        if datalength and segsize:
            self._num_segments = mathutil.div_ceil(datalength, segsize)
            self._tail_data_size = datalength % segsize
        else:
            self._num_segments = 0
            self._tail_data_size = 0

        self._segment_decoder = codec.CRSDecoder()
        self._segment_decoder.set_params(segsize, k, n)

        # A zero remainder means the tail segment is a full segment.
        if  not self._tail_data_size:
            self._tail_data_size = segsize

        # Pad the tail up to a multiple of k so it splits evenly into
        # shares; use a separate decoder if its size differs.
        self._tail_segment_size = mathutil.next_multiple(self._tail_data_size,
                                                         self._required_shares)
        if self._tail_segment_size == self._segment_size:
            self._tail_decoder = self._segment_decoder
        else:
            self._tail_decoder = codec.CRSDecoder()
            self._tail_decoder.set_params(self._tail_segment_size,
                                          self._required_shares,
                                          self._total_shares)

        self.log("got encoding parameters: "
                 "k: %d "
                 "n: %d "
                 "%d segments of %d bytes each (%d byte tail segment)" % \
                 (k, n, self._num_segments, self._segment_size,
                  self._tail_segment_size))

        if self._block_hash_trees is not None:
            for i in xrange(self._total_shares):
                # So we don't have to do this later.
                self._block_hash_trees[i] = hashtree.IncompleteHashTree(self._num_segments)

        # Our last task is to tell the downloader where to start and
        # where to stop. We use three parameters for that:
        #   - self._start_segment: the segment that we need to start
        #     downloading from.
        #   - self._current_segment: the next segment that we need to
        #     download.
        #   - self._last_segment: The last segment that we were asked to
        #     download.
        #
        #  We say that the download is complete when
        #  self._current_segment > self._last_segment. We use
        #  self._start_segment and self._last_segment to know when to
        #  strip things off of segments, and how much to strip.
        if self._offset:
            self.log("got offset: %d" % self._offset)
            # our start segment is the first segment containing the
            # offset we were given.
            start = self._offset // self._segment_size

            assert start < self._num_segments
            self._start_segment = start
            self.log("got start segment: %d" % self._start_segment)
        else:
            self._start_segment = 0


        # If self._read_length is None, then we want to read the whole
        # file. Otherwise, we want to read only part of the file, and
        # need to figure out where to stop reading.
        if self._read_length is not None:
            # our end segment is the last segment containing part of the
            # segment that we were asked to read.
            self.log("got read length %d" % self._read_length)
            if self._read_length != 0:
                end_data = self._offset + self._read_length

                # We don't actually need to read the byte at end_data,
                # but the one before it.
                end = (end_data - 1) // self._segment_size

                assert end < self._num_segments
                self._last_segment = end
            else:
                self._last_segment = self._start_segment
            self.log("got end segment: %d" % self._last_segment)
        else:
            self._last_segment = self._num_segments - 1

        self._current_segment = self._start_segment

    def _activate_enough_peers(self):
        """
        I populate self._active_readers with enough active readers to
        retrieve the contents of this mutable file. I am called before
        downloading starts, and (eventually) after each validation
        error, connection error, or other problem in the download.
        """
        # TODO: It would be cool to investigate other heuristics for
        # reader selection. For instance, the cost (in time the user
        # spends waiting for their file) of selecting a really slow peer
        # that happens to have a primary share is probably more than
        # selecting a really fast peer that doesn't have a primary
        # share. Maybe the servermap could be extended to provide this
        # information; it could keep track of latency information while
        # it gathers more important data, and then this routine could
        # use that to select active readers.
        #
        # (these and other questions would be easier to answer with a
        #  robust, configurable tahoe-lafs simulator, which modeled node
        #  failures, differences in node speed, and other characteristics
        #  that we expect storage servers to have.  You could have
        #  presets for really stable grids (like allmydata.com),
        #  friendnets, make it easy to configure your own settings, and
        #  then simulate the effect of big changes on these use cases
        #  instead of just reasoning about what the effect might be. Out
        #  of scope for MDMF, though.)

        # XXX: Why don't format= log messages work here?

        known_shnums = set(self.remaining_sharemap.keys())
        used_shnums = set([r.shnum for r in self._active_readers])
        # shares we know about but are not yet reading from
        unused_shnums = known_shnums - used_shnums

        if self._verify:
            new_shnums = unused_shnums # use them all
        elif len(self._active_readers) < self._required_shares:
            # need more shares
            more = self._required_shares - len(self._active_readers)
            # We favor lower numbered shares, since FEC is faster with
            # primary shares than with other shares, and lower-numbered
            # shares are more likely to be primary than higher numbered
            # shares.
            new_shnums = sorted(unused_shnums)[:more]
            if len(new_shnums) < more:
                # We don't have enough readers to retrieve the file; fail.
                self._raise_notenoughshareserror()
        else:
            new_shnums = []

        self.log("adding %d new peers to the active list" % len(new_shnums))
        for shnum in new_shnums:
            reader = self.readers[shnum]
            self._active_readers.append(reader)
            self.log("added reader for share %d" % shnum)
            # Each time we add a reader, we check to see if we need the
            # private key. If we do, we politely ask for it and then continue
            # computing. If we find that we haven't gotten it at the end of
            # segment decoding, then we'll take more drastic measures.
            if self._need_privkey and not self._node.is_readonly():
                d = reader.get_encprivkey()
                d.addCallback(self._try_to_validate_privkey, reader)
                # XXX: don't just drop the Deferred. We need error-reporting
                # but not flow-control here.
        # _raise_notenoughshareserror() raises, so reaching this point
        # means we have at least k active readers.
        assert len(self._active_readers) >= self._required_shares

    def _try_to_validate_prefix(self, prefix, reader):
        """
        Compare the prefix offered by a candidate server against the
        prefix recorded in the servermap (which was validated earlier).

        A match means the candidate reader may be used for segment
        retrieval and I return normally; a mismatch is treated as
        evidence of an uncoordinated write and raises
        UncoordinatedWriteError.
        """
        # verinfo is (seqnum, root_hash, IV, segsize, datalength, k, N,
        # known_prefix, offsets_tuple): the validated prefix is item 7.
        known_prefix = self.verinfo[7]
        if known_prefix != prefix:
            self.log("prefix from share %d doesn't match" % reader.shnum)
            raise UncoordinatedWriteError("Mismatched prefix -- this could "
                                          "indicate an uncoordinated write")
        # A matching prefix needs no further action.


    def _remove_reader(self, reader):
        """
        Retire `reader` from further use in this download.

        Callers invoke me after connection errors, mismatched prefixes,
        or failed integrity checks. I make sure the (shnum, reader)
        combination is never selected again, but I do NOT advise the
        remote server of any corruption; callers that want that must do
        it themselves (see _mark_bad_share).
        """
        # TODO: When you're done writing this, see if this is ever
        # actually used for something that _mark_bad_share isn't. I have
        # a feeling that they will be used for very similar things, and
        # that having them both here is just going to be an epic amount
        # of code duplication.
        #
        # (well, okay, not epic, but meaningful)
        self.log("removing reader %s" % reader)
        # Deactivate the reader ...
        self._active_readers.remove(reader)
        # TODO: self.readers.remove(reader)?
        # ... and forget every (shnum, peer) pairing that points at its
        # server, so the selection logic never considers it again.
        peerid = reader.peerid
        for shnum in list(self.remaining_sharemap.keys()):
            self.remaining_sharemap.discard(shnum, peerid)


    def _mark_bad_share(self, reader, f):
        """
        I mark the (peerid, shnum) encapsulated by my reader argument as
        a bad share, which means that it will not be used anywhere else.

        There are several reasons to want to mark something as a bad
        share. These include:

            - A connection error to the peer.
            - A mismatched prefix (that is, a prefix that does not match
              our local conception of the version information string).
            - A failing block hash, salt hash, share hash, or other
              integrity check.

        This method will ensure that readers that we wish to mark bad
        (for these reasons or other reasons) are not used for the rest
        of the download. Additionally, it will attempt to tell the
        remote peer (with no guarantee of success) that its share is
        corrupt.
        """
        # NOTE(review): the message says "server" but interpolates the
        # reader itself; presumably the reader's repr names its server
        # -- confirm.
        self.log("marking share %d on server %s as bad" % \
                 (reader.shnum, reader))
        # verinfo[-2] is the validated prefix for this version.
        prefix = self.verinfo[-2]
        self.servermap.mark_bad_share(reader.peerid,
                                      reader.shnum,
                                      prefix)
        # Retire the reader locally, then record the failure for status
        # reporting before notifying the remote server.
        self._remove_reader(reader)
        self._bad_shares.add((reader.peerid, reader.shnum, f))
        self._status.problems[reader.peerid] = f
        self._last_failure = f
        self.notify_server_corruption(reader.peerid, reader.shnum,
                                      str(f.value))


    def _download_current_segment(self):
        """
        Fetch, validate, decode, decrypt, and deliver the segment this
        Retrieve is currently positioned at, then schedule another pass
        through the download loop. When every requested segment has
        been delivered, finish via _done().
        """
        assert len(self._active_readers) >= self._required_shares
        segnum = self._current_segment
        if segnum > self._last_segment:
            # Nothing left to fetch; wrap up the download.
            self.log("got plaintext, done")
            return self._done()
        self.log("on segment %d of %d" %
                 (segnum + 1, self._num_segments))
        d = self._process_segment(segnum)
        d.addCallback(lambda ign: self.loop())
        return d

    def _process_segment(self, segnum):
        """
        I download, validate, decode, and decrypt one segment of the
        file that this Retrieve is retrieving. This means coordinating
        the process of getting k blocks of that file, validating them,
        assembling them into one segment with the decoder, and then
        decrypting them.
        """
        self.log("processing segment %d" % segnum)

        # TODO: The old code uses a marker. Should this code do that
        # too? What did the Marker do?
        assert len(self._active_readers) >= self._required_shares

        # We need to ask each of our active readers for its block and
        # salt. We will then validate those. If validation is
        # successful, we will assemble the results into plaintext.
        ds = []
        for reader in self._active_readers:
            started = time.time()
            # Fetch the block+salt and the needed hash-chain evidence
            # in parallel. consumeErrors=True keeps one reader's
            # failure from aborting the others; the per-reader errback
            # below handles it instead.
            d = reader.get_block_and_salt(segnum)
            d2 = self._get_needed_hashes(reader, segnum)
            dl = defer.DeferredList([d, d2], consumeErrors=True)
            dl.addCallback(self._validate_block, segnum, reader, started)
            dl.addErrback(self._validation_or_decoding_failed, [reader])
            ds.append(dl)
        dl = defer.DeferredList(ds)
        if self._verify:
            # In verify mode we skip decode/decrypt entirely and feed
            # an empty "segment" to _set_segment, which just advances
            # the segment counter (it discards plaintext when verifying).
            dl.addCallback(lambda ignored: "")
            dl.addCallback(self._set_segment)
        else:
            dl.addCallback(self._maybe_decode_and_decrypt_segment, segnum)
        return dl


    def _maybe_decode_and_decrypt_segment(self, blocks_and_salts, segnum):
        """
        I take the results of fetching and validating the blocks from a
        callback chain in another method. If the results are such that
        they tell me that validation and fetching succeeded without
        incident, I will proceed with decoding and decryption.
        Otherwise, I will do nothing.

        blocks_and_salts is a DeferredList-style sequence of
        (success, result) pairs, one per active reader.

        Returns a Deferred firing after the segment has been delivered
        (or firing with None if any fetch/validation failed; the
        failing readers were already handled by the errback chain).
        """
        self.log("trying to decode and decrypt segment %d" % segnum)
        for (success, result) in blocks_and_salts:
            # Use `is None` rather than `== None`: the original equality
            # test would invoke an arbitrary __eq__ on the result object.
            if not success or result is None:
                self.log("some validation operations failed; not proceeding")
                return defer.succeed(None)
        self.log("everything looks ok, building segment %d" % segnum)
        d = self._decode_blocks(blocks_and_salts, segnum)
        d.addCallback(self._decrypt_segment)
        d.addErrback(self._validation_or_decoding_failed,
                     self._active_readers)
        # check to see whether we've been paused before writing
        # anything.
        d.addCallback(self._check_for_paused)
        d.addCallback(self._set_segment)
        return d


    def _set_segment(self, segment):
        """
        Given a plaintext segment, I register that segment with the
        target that is handling the file download, trimming the first
        and last segments down to exactly the byte range the caller
        asked for.
        """
        self.log("got plaintext for segment %d" % self._current_segment)
        if self._current_segment == self._start_segment:
            # We're on the first segment. It's possible that we want
            # only some part of the end of this segment, and that we
            # just downloaded the whole thing to get that part. If so,
            # we need to account for that and give the reader just the
            # data that they want.
            n = self._offset % self._segment_size
            self.log("stripping %d bytes off of the first segment" % n)
            self.log("original segment length: %d" % len(segment))
            segment = segment[n:]
            self.log("new segment length: %d" % len(segment))

        if self._current_segment == self._last_segment and self._read_length is not None:
            # We're on the last segment. It's possible that we only want
            # part of the beginning of this segment, and that we
            # downloaded the whole thing anyway. Make sure to give the
            # caller only the portion of the segment that they want to
            # receive.
            extra = self._read_length
            if self._start_segment != self._last_segment:
                # Subtract the bytes the (already-trimmed) first segment
                # contributed ...
                extra -= self._segment_size - \
                            (self._offset % self._segment_size)
            # ... and reduce modulo the segment size, since whole middle
            # segments contribute full segment_size chunks.
            extra %= self._segment_size
            self.log("original segment length: %d" % len(segment))
            segment = segment[:extra]
            self.log("new segment length: %d" % len(segment))
            self.log("only taking %d bytes of the last segment" % extra)

        if not self._verify:
            self._consumer.write(segment)
        else:
            # we don't care about the plaintext if we are doing a verify.
            segment = None
        self._current_segment += 1


    def _validation_or_decoding_failed(self, f, readers):
        """
        React to a failed block/salt validation, or a failed decode or
        decrypt, by marking every implicated reader's share as bad.
        Marking a share bad also notifies its server of the corruption
        and retires the reader from further use in this download.
        """
        assert isinstance(readers, list)
        bad_shnums = [r.shnum for r in readers]

        self.log("validation or decoding failed on share(s) %s, peer(s) %s "
                 ", segment %d: %s" %
                 (bad_shnums, readers, self._current_segment, str(f)))
        for r in readers:
            self._mark_bad_share(r, f)


    def _validate_block(self, results, segnum, reader, started):
        """
        I validate a block from one share on a remote server.
        """
        # Grab the part of the block hash tree that is necessary to
        # validate this block, then generate the block hash root.
        self.log("validating share %d for segment %d" % (reader.shnum,
                                                             segnum))
        elapsed = time.time() - started
        self._status.add_fetch_timing(reader.peerid, elapsed)
        self._set_current_status("validating blocks")
        # Did we fail to fetch either of the things that we were
        # supposed to? Fail if so.
        if not results[0][0] and results[1][0]:
            # handled by the errback handler.

            # These all get batched into one query, so the resulting
            # failure should be the same for all of them, so we can just
            # use the first one.
            assert isinstance(results[0][1], failure.Failure)

            f = results[0][1]
            raise CorruptShareError(reader.peerid,
                                    reader.shnum,
                                    "Connection error: %s" % str(f))

        block_and_salt, block_and_sharehashes = results
        block, salt = block_and_salt[1]
        blockhashes, sharehashes = block_and_sharehashes[1]

        blockhashes = dict(enumerate(blockhashes[1]))
        self.log("the reader gave me the following blockhashes: %s" % \
                 blockhashes.keys())
        self.log("the reader gave me the following sharehashes: %s" % \
                 sharehashes[1].keys())
        bht = self._block_hash_trees[reader.shnum]

        if bht.needed_hashes(segnum, include_leaf=True):
            try:
                bht.set_hashes(blockhashes)
            except (hashtree.BadHashError, hashtree.NotEnoughHashesError, \
                    IndexError), e:
                raise CorruptShareError(reader.peerid,
                                        reader.shnum,
                                        "block hash tree failure: %s" % e)

        if self._version == MDMF_VERSION:
            blockhash = hashutil.block_hash(salt + block)
        else:
            blockhash = hashutil.block_hash(block)
        # If this works without an error, then validation is
        # successful.
        try:
           bht.set_hashes(leaves={segnum: blockhash})
        except (hashtree.BadHashError, hashtree.NotEnoughHashesError, \
                IndexError), e:
            raise CorruptShareError(reader.peerid,
                                    reader.shnum,
                                    "block hash tree failure: %s" % e)
# Esempio n. 12
# 0
class Retrieve:
    # this class is currently single-use. Eventually (in MDMF) we will make
    # it multi-use, in which case you can call download(range) multiple
    # times, and each will have a separate response chain. However the
    # Retrieve object will remain tied to a specific version of the file, and
    # will use a single ServerMap instance.
    implements(IPushProducer)

    def __init__(self,
                 filenode,
                 storage_broker,
                 servermap,
                 verinfo,
                 fetch_privkey=False,
                 verify=False):
        """
        I prepare to retrieve one version of a mutable file.

        filenode: the mutable file node whose contents I download.
        storage_broker: used to reach storage servers.
        servermap: a ServerMap describing where shares of this version
            live; its slot-reader proxies are reused where possible.
        verinfo: the (seqnum, root_hash, IV, segsize, datalength, k, N,
            prefix, offsets_tuple) tuple identifying the version to
            retrieve.
        fetch_privkey: if True, also fetch the encrypted private key
            when the node doesn't already have it (needed for repair).
        verify: if True, download every share and check integrity
            instead of producing plaintext.
        """
        self._node = filenode
        assert self._node.get_pubkey()
        self._storage_broker = storage_broker
        self._storage_index = filenode.get_storage_index()
        assert self._node.get_readkey()
        self._last_failure = None
        # short storage-index tag, used only for log messages
        prefix = si_b2a(self._storage_index)[:5]
        self._log_number = log.msg("Retrieve(%s): starting" % prefix)
        self._running = True
        self._decoding = False
        self._bad_shares = set()  # (peerid, shnum, failure) triples

        self.servermap = servermap
        assert self._node.get_pubkey()
        self.verinfo = verinfo
        # during repair, we may be called upon to grab the private key, since
        # it wasn't picked up during a verify=False checker run, and we'll
        # need it for repair to generate a new version.
        self._need_privkey = verify or (fetch_privkey
                                        and not self._node.get_privkey())

        if self._need_privkey:
            # TODO: Evaluate the need for this. We'll use it if we want
            # to limit how many queries are on the wire for the privkey
            # at once.
            self._privkey_query_markers = []  # one Marker for each time we've
            # tried to get the privkey.

        # verify means that we are using the downloader logic to verify all
        # of our shares. This tells the downloader a few things.
        #
        # 1. We need to download all of the shares.
        # 2. We don't need to decode or decrypt the shares, since our
        #    caller doesn't care about the plaintext, only the
        #    information about which shares are or are not valid.
        # 3. When we are validating readers, we need to validate the
        #    signature on the prefix. Do we? We already do this in the
        #    servermap update?
        self._verify = verify

        self._status = RetrieveStatus()
        self._status.set_storage_index(self._storage_index)
        self._status.set_helper(False)
        self._status.set_progress(0.0)
        self._status.set_active(True)
        # note: this `prefix` (from verinfo) shadows the log prefix above
        (seqnum, root_hash, IV, segsize, datalength, k, N, prefix,
         offsets_tuple) = self.verinfo
        self._status.set_size(datalength)
        self._status.set_encoding(k, N)
        self.readers = {}  # maps shnum to its slot-reader proxy
        self._stopped = False
        self._pause_deferred = None
        # offset/read_length are filled in by download()
        self._offset = None
        self._read_length = None
        self.log("got seqnum %d" % self.verinfo[0])

    def get_status(self):
        """Return the RetrieveStatus object tracking this download."""
        return self._status

    def log(self, *args, **kwargs):
        """
        Emit a log message, defaulting the parent to this Retrieve's
        log number and the facility to tahoe.mutable.retrieve. Any
        explicit parent/facility in kwargs wins.
        """
        kwargs.setdefault("parent", self._log_number)
        kwargs.setdefault("facility", "tahoe.mutable.retrieve")
        return log.msg(*args, **kwargs)

    def _set_current_status(self, state):
        """Show "segment <current>/<last> (<state>)" in the status."""
        self._status.set_status("segment %d/%d (%s)"
                                % (self._current_segment,
                                   self._last_segment, state))

    ###################
    # IPushProducer

    def pauseProducing(self):
        """
        IPushProducer: the consumer has received too much data. Stop
        producing until resumeProducing() is called. Calling me while
        already paused is a no-op.
        """
        if self._pause_deferred is not None:
            return  # already paused

        # Park a Deferred that will be fired when the download is
        # unpaused, and remember the pre-pause status text so that
        # resumeProducing() can restore it.
        self._pause_deferred = defer.Deferred()
        self._old_status = self._status.get_status()
        self._set_current_status("paused")

    def resumeProducing(self):
        """
        IPushProducer: the consumer is ready for more data. Restore the
        pre-pause status text and fire the pause Deferred on a later
        reactor turn (via eventually) to avoid re-entrancy. Calling me
        while not paused is a no-op.
        """
        paused, self._pause_deferred = self._pause_deferred, None
        if paused is None:
            return

        self._status.set_status(self._old_status)
        eventually(paused.callback, None)

    def stopProducing(self):
        """
        IPushProducer: the consumer wants no more data. Mark the
        download stopped and unpause it, so that a pending pause does
        not keep the download hanging; _check_for_stopped() will then
        raise DownloadStopped.
        """
        self._stopped = True
        self.resumeProducing()

    def _check_for_paused(self, res):
        """
        Gate `res` on the pause state. I am called just before a write
        to the consumer: if the download is not paused I hand `res`
        back immediately; otherwise I return a Deferred that fires with
        `res` once the download is unpaused.
        """
        if self._pause_deferred is None:
            return res
        gate = defer.Deferred()
        self._pause_deferred.addCallback(lambda ignored: gate.callback(res))
        return gate

    def _check_for_stopped(self, res):
        """
        Pass `res` through unchanged unless the consumer has called
        stopProducing(), in which case abort by raising DownloadStopped.
        """
        if not self._stopped:
            return res
        raise DownloadStopped("our Consumer called stopProducing()")

    def download(self, consumer=None, offset=0, size=None):
        """
        Start the download.

        consumer: an IConsumer to receive the plaintext. May be None
            only when self._verify is set (verification produces no
            plaintext).
        offset: byte offset within the file at which to start reading.
        size: number of bytes to read, or None to read to the end.

        Returns a Deferred that fires with the contents of the mutable
        file when the download completes, or errbacks on failure.
        """
        assert IConsumer.providedBy(consumer) or self._verify

        if consumer:
            self._consumer = consumer
            # we provide IPushProducer, so streaming=True, per
            # IConsumer.
            self._consumer.registerProducer(self, streaming=True)

        self._done_deferred = defer.Deferred()
        self._offset = offset
        self._read_length = size
        self._setup_encoding_parameters()
        self._setup_download()
        self.log("starting download")
        self._started_fetching = time.time()
        # The download process beyond this is a state machine.
        # _add_active_servers will select the servers that we want to use
        # for the download, and then attempt to start downloading. After
        # each segment, it will check for doneness, reacting to broken
        # servers and corrupt shares as necessary. If it runs out of good
        # servers before downloading all of the segments, _done_deferred
        # will errback.  Otherwise, it will eventually callback with the
        # contents of the mutable file.
        self.loop()
        return self._done_deferred

    def loop(self):
        """
        Run one turn of the download state machine: activate enough
        servers, then download the current segment.
        _download_current_segment() either finishes via _done() or
        re-invokes loop(); any failure along the way is routed to
        _error(). Completion is reported through self._done_deferred,
        not through a return value.
        """
        def _turn(ign):
            self._activate_enough_servers()
            return self._download_current_segment()
        # fireEventually avoids the #237 recursion-limit problem.
        d = fireEventually(None)
        d.addCallback(_turn)
        d.addErrback(self._error)

    def _setup_download(self):
        """
        Prepare per-download state: build the map of remaining shares
        for this version, create (or reuse from the servermap) a slot
        reader for each share, and initialize the block- and share-hash
        trees used for validation. Calls
        self._raise_notenoughshareserror() if fewer than k shares of
        this version are available.
        """
        self._started = time.time()
        self._status.set_status("Retrieving Shares")

        # how many shares do we need?
        (seqnum, root_hash, IV, segsize, datalength, k, N, prefix,
         offsets_tuple) = self.verinfo

        # first, which servers can we use?
        versionmap = self.servermap.make_versionmap()
        shares = versionmap[self.verinfo]
        # this sharemap is consumed as we decide to send requests
        self.remaining_sharemap = DictOfSets()
        for (shnum, server, timestamp) in shares:
            self.remaining_sharemap.add(shnum, server)
            # Reuse the SlotReader from the servermap.
            key = (self.verinfo, server.get_serverid(), self._storage_index,
                   shnum)
            if key in self.servermap.proxies:
                reader = self.servermap.proxies[key]
            else:
                reader = MDMFSlotReadProxy(server.get_rref(),
                                           self._storage_index, shnum, None)
            # remember which server this reader talks to, for later
            # error reporting
            reader.server = server
            self.readers[shnum] = reader

        if len(self.remaining_sharemap) < k:
            self._raise_notenoughshareserror()

        self.shares = {}  # maps shnum to validated blocks
        self._active_readers = []  # list of active readers for this dl.
        self._block_hash_trees = {}  # shnum => hashtree

        for i in xrange(self._total_shares):
            # So we don't have to do this later.
            self._block_hash_trees[i] = hashtree.IncompleteHashTree(
                self._num_segments)

        # We need one share hash tree for the entire file; its leaves
        # are the roots of the block hash trees for the shares that
        # comprise it, and its root is in the verinfo.
        self.share_hash_tree = hashtree.IncompleteHashTree(N)
        self.share_hash_tree.set_hashes({0: root_hash})

    def decode(self, blocks_and_salts, segnum):
        """
        Decode and decrypt one segment directly from already-fetched
        blocks and salts. The mutable-file update process uses me as a
        shortcut to recover the segments it needs for a file update.
        Returns a Deferred firing with the segment plaintext.
        """
        # Block hash trees are not needed here; the caller supplies
        # blocks it already trusts.
        self._block_hash_trees = None
        self._setup_encoding_parameters()

        # _decode_blocks() expects a gatherResults-style list whose
        # entries are dicts mapping shnum to (block, salt) bytestrings,
        # as produced by _validate_block().
        decoded = self._decode_blocks([blocks_and_salts], segnum)
        decoded.addCallback(self._decrypt_segment)
        return decoded

    def _setup_encoding_parameters(self):
        """
        I set up the encoding parameters, including k, n, the number
        of segments associated with this file, and the segment decoders.
        I also compute the start/current/last segment counters from
        self._offset and self._read_length.
        """
        (seqnum, root_hash, IV, segsize, datalength, k, n, known_prefix,
         offsets_tuple) = self.verinfo
        self._required_shares = k
        self._total_shares = n
        self._segment_size = segsize
        self._data_length = datalength

        # MDMF verinfo carries no IV; the presence of one marks SDMF.
        if not IV:
            self._version = MDMF_VERSION
        else:
            self._version = SDMF_VERSION

        if datalength and segsize:
            self._num_segments = mathutil.div_ceil(datalength, segsize)
            self._tail_data_size = datalength % segsize
        else:
            self._num_segments = 0
            self._tail_data_size = 0

        self._segment_decoder = codec.CRSDecoder()
        self._segment_decoder.set_params(segsize, k, n)

        # a zero remainder means the tail segment is a full segment
        if not self._tail_data_size:
            self._tail_data_size = segsize

        # pad the tail up to a multiple of k -- presumably so it splits
        # evenly into k shares for the decoder (TODO confirm)
        self._tail_segment_size = mathutil.next_multiple(
            self._tail_data_size, self._required_shares)
        if self._tail_segment_size == self._segment_size:
            self._tail_decoder = self._segment_decoder
        else:
            self._tail_decoder = codec.CRSDecoder()
            self._tail_decoder.set_params(self._tail_segment_size,
                                          self._required_shares,
                                          self._total_shares)

        self.log("got encoding parameters: "
                 "k: %d "
                 "n: %d "
                 "%d segments of %d bytes each (%d byte tail segment)" % \
                 (k, n, self._num_segments, self._segment_size,
                  self._tail_segment_size))

        # Our last task is to tell the downloader where to start and
        # where to stop. We use three parameters for that:
        #   - self._start_segment: the segment that we need to start
        #     downloading from.
        #   - self._current_segment: the next segment that we need to
        #     download.
        #   - self._last_segment: The last segment that we were asked to
        #     download.
        #
        #  We say that the download is complete when
        #  self._current_segment > self._last_segment. We use
        #  self._start_segment and self._last_segment to know when to
        #  strip things off of segments, and how much to strip.
        if self._offset:
            self.log("got offset: %d" % self._offset)
            # our start segment is the first segment containing the
            # offset we were given.
            start = self._offset // self._segment_size

            assert start < self._num_segments
            self._start_segment = start
            self.log("got start segment: %d" % self._start_segment)
        else:
            self._start_segment = 0

        # If self._read_length is None, then we want to read the whole
        # file. Otherwise, we want to read only part of the file, and
        # need to figure out where to stop reading.
        if self._read_length is not None:
            # our end segment is the last segment containing part of the
            # segment that we were asked to read.
            self.log("got read length %d" % self._read_length)
            if self._read_length != 0:
                end_data = self._offset + self._read_length

                # We don't actually need to read the byte at end_data,
                # but the one before it.
                end = (end_data - 1) // self._segment_size

                assert end < self._num_segments
                self._last_segment = end
            else:
                # zero-length read: start and end on the same segment
                self._last_segment = self._start_segment
            self.log("got end segment: %d" % self._last_segment)
        else:
            self._last_segment = self._num_segments - 1

        self._current_segment = self._start_segment

    def _activate_enough_servers(self):
        """
        I populate self._active_readers with enough active readers to
        retrieve the contents of this mutable file. I am called before
        downloading starts, and (eventually) after each validation
        error, connection error, or other problem in the download.
        """
        # TODO: It would be cool to investigate other heuristics for
        # reader selection. For instance, the cost (in time the user
        # spends waiting for their file) of selecting a really slow server
        # that happens to have a primary share is probably more than
        # selecting a really fast server that doesn't have a primary
        # share. Maybe the servermap could be extended to provide this
        # information; it could keep track of latency information while
        # it gathers more important data, and then this routine could
        # use that to select active readers.
        #
        # (these and other questions would be easier to answer with a
        #  robust, configurable tahoe-lafs simulator, which modeled node
        #  failures, differences in node speed, and other characteristics
        #  that we expect storage servers to have.  You could have
        #  presets for really stable grids (like allmydata.com),
        #  friendnets, make it easy to configure your own settings, and
        #  then simulate the effect of big changes on these use cases
        #  instead of just reasoning about what the effect might be. Out
        #  of scope for MDMF, though.)

        # XXX: Why don't format= log messages work here?

        known_shnums = set(self.remaining_sharemap.keys())
        used_shnums = set([r.shnum for r in self._active_readers])
        # shares we know about but are not yet reading from
        unused_shnums = known_shnums - used_shnums

        if self._verify:
            new_shnums = unused_shnums  # use them all
        elif len(self._active_readers) < self._required_shares:
            # need more shares
            more = self._required_shares - len(self._active_readers)
            # We favor lower numbered shares, since FEC is faster with
            # primary shares than with other shares, and lower-numbered
            # shares are more likely to be primary than higher numbered
            # shares.
            new_shnums = sorted(unused_shnums)[:more]
            if len(new_shnums) < more:
                # We don't have enough readers to retrieve the file; fail.
                self._raise_notenoughshareserror()
        else:
            new_shnums = []

        self.log("adding %d new servers to the active list" % len(new_shnums))
        for shnum in new_shnums:
            reader = self.readers[shnum]
            self._active_readers.append(reader)
            self.log("added reader for share %d" % shnum)
            # Each time we add a reader, we check to see if we need the
            # private key. If we do, we politely ask for it and then continue
            # computing. If we find that we haven't gotten it at the end of
            # segment decoding, then we'll take more drastic measures.
            if self._need_privkey and not self._node.is_readonly():
                d = reader.get_encprivkey()
                d.addCallback(self._try_to_validate_privkey, reader,
                              reader.server)
                # XXX: don't just drop the Deferred. We need error-reporting
                # but not flow-control here.

    def _try_to_validate_prefix(self, prefix, reader):
        """
        I check that the prefix returned by a candidate server for
        retrieval matches the prefix that the servermap knows about
        (and, hence, the prefix that was validated earlier). If it does,
        I return True, which means that I approve of the use of the
        candidate server for segment retrieval. If it doesn't, I return
        False, which means that another server must be chosen.
        """
        (seqnum, root_hash, IV, segsize, datalength, k, N, known_prefix,
         offsets_tuple) = self.verinfo
        if known_prefix != prefix:
            self.log("prefix from share %d doesn't match" % reader.shnum)
            raise UncoordinatedWriteError("Mismatched prefix -- this could "
                                          "indicate an uncoordinated write")
        # Otherwise, we're okay -- no issues.

    def _mark_bad_share(self, server, shnum, reader, f):
        """
        Mark the given (server, shnum) as a bad share, so that it will
        not be used anywhere else for this download.

        There are several reasons to want to mark something as a bad
        share. These include:

            - A connection error to the server.
            - A mismatched prefix (that is, a prefix that does not match
              our local conception of the version information string).
            - A failing block hash, salt hash, share hash, or other
              integrity check.

        The reader is removed from the active set and the server is
        dropped from the remaining-shares map. If the failure indicates
        on-disk corruption (BadShareError), we additionally make a
        best-effort attempt to tell the remote server that its share is
        corrupt.

        :param server: the server holding the bad share
        :param shnum: the number of the share being marked bad
        :param reader: the reader currently attached to that share
        :param f: the Failure that triggered the marking
        """
        self.log("marking share %d on server %s as bad" % \
                 (shnum, server.get_name()))
        prefix = self.verinfo[-2]
        self.servermap.mark_bad_share(server, shnum, prefix)
        self._bad_shares.add((server, shnum, f))
        self._status.add_problem(server, f)
        self._last_failure = f

        # Remove the reader from _active_readers, and forget that this
        # server can supply any of the remaining shares.
        self._active_readers.remove(reader)
        # BUGFIX: use a distinct loop variable here. The original loop
        # reused the name 'shnum', clobbering the parameter before the
        # corruption notification below, so the wrong share number was
        # reported to the server.
        for remaining_shnum in list(self.remaining_sharemap.keys()):
            self.remaining_sharemap.discard(remaining_shnum, reader.server)

        if f.check(BadShareError):
            self.notify_server_corruption(server, shnum, str(f.value))

    def _download_current_segment(self):
        """
        I download, validate, decode, decrypt, and assemble the segment
        that this Retrieve is currently responsible for downloading.
        """
        if self._current_segment > self._last_segment:
            # No more segments to download, we're done.
            self.log("got plaintext, done")
            return self._done()
        elif self._verify and len(self._active_readers) == 0:
            self.log("no more good shares, no need to keep verifying")
            return self._done()
        self.log("on segment %d of %d" %
                 (self._current_segment + 1, self._num_segments))
        d = self._process_segment(self._current_segment)
        d.addCallback(lambda ign: self.loop())
        return d

    def _process_segment(self, segnum):
        """
        Download, validate, decode, and decrypt one segment of the file.

        This coordinates fetching k blocks (with their salts and the
        hash-chain nodes needed to validate them), assembling them into
        one segment with the decoder, and decrypting the result.
        """
        self.log("processing segment %d" % segnum)

        # TODO: The old code uses a marker. Should this code do that
        # too? What did the Marker do?

        # Ask every active reader for its block and salt, plus the hashes
        # we still need, and validate each response as it arrives. If
        # validation succeeds, the results are assembled into plaintext.
        fetches = []
        for reader in self._active_readers:
            fetch_started = time.time()
            block_d = reader.get_block_and_salt(segnum)
            blockhashes_d, sharehashes_d = self._get_needed_hashes(reader,
                                                                   segnum)
            d = deferredutil.gatherResults([block_d,
                                            blockhashes_d,
                                            sharehashes_d])
            d.addCallback(self._validate_block, segnum, reader, reader.server,
                          fetch_started)
            # _handle_bad_share takes care of recoverable errors (by dropping
            # that share and returning None). Any other errors (i.e. code
            # bugs) are passed through and cause the retrieve to fail.
            d.addErrback(self._handle_bad_share, [reader])
            fetches.append(d)
        dl = deferredutil.gatherResults(fetches)
        if self._verify:
            dl.addCallback(lambda ignored: "")
            dl.addCallback(self._set_segment)
        else:
            dl.addCallback(self._maybe_decode_and_decrypt_segment, segnum)
        return dl

    def _maybe_decode_and_decrypt_segment(self, results, segnum):
        """
        Take the per-block results gathered by _process_segment. If every
        fetch and validation succeeded without incident, proceed with
        decoding and decryption; otherwise do nothing.
        """
        self.log("trying to decode and decrypt segment %d" % segnum)

        # 'results' comes from the gatherResults set up in
        # _process_segment(). Each entry is either the non-Failure output
        # of _validate_block() for a single block (i.e.
        # {segnum:(block,salt)}), or None if _validate_block threw and
        # _handle_bad_share dealt with it (by dropping that server).
        if None in results:
            self.log("some validation operations failed; not proceeding")
            return defer.succeed(None)
        self.log("everything looks ok, building segment %d" % segnum)
        d = self._decode_blocks(results, segnum)
        # decrypt, honor any pause/stop request before handing out
        # plaintext, then deliver the segment
        for step in (self._decrypt_segment,
                     self._check_for_paused,
                     self._check_for_stopped,
                     self._set_segment):
            d.addCallback(step)
        return d

    def _set_segment(self, segment):
        """
        Given a plaintext segment, I register that segment with the
        target that is handling the file download.
        """
        self.log("got plaintext for segment %d" % self._current_segment)
        if self._current_segment == self._start_segment:
            # We're on the first segment. It's possible that we want
            # only some part of the end of this segment, and that we
            # just downloaded the whole thing to get that part. If so,
            # we need to account for that and give the reader just the
            # data that they want.
            n = self._offset % self._segment_size
            self.log("stripping %d bytes off of the first segment" % n)
            self.log("original segment length: %d" % len(segment))
            segment = segment[n:]
            self.log("new segment length: %d" % len(segment))

        if self._current_segment == self._last_segment and self._read_length is not None:
            # We're on the last segment. It's possible that we only want
            # part of the beginning of this segment, and that we
            # downloaded the whole thing anyway. Make sure to give the
            # caller only the portion of the segment that they want to
            # receive.
            extra = self._read_length
            if self._start_segment != self._last_segment:
                extra -= self._segment_size - \
                            (self._offset % self._segment_size)
            extra %= self._segment_size
            self.log("original segment length: %d" % len(segment))
            segment = segment[:extra]
            self.log("new segment length: %d" % len(segment))
            self.log("only taking %d bytes of the last segment" % extra)

        if not self._verify:
            self._consumer.write(segment)
        else:
            # we don't care about the plaintext if we are doing a verify.
            segment = None
        self._current_segment += 1

    def _handle_bad_share(self, f, readers):
        """
        React to a block/salt validation failure (or a decryption or
        decoding failure) by notifying the remote server(s) of corruption
        and removing them from further activity. Returns None so that the
        caller's gatherResults sees a placeholder instead of a Failure.
        """
        # Tolerable errors: give up on this share and find others to
        # replace it. Anything else (i.e. coding bugs) is re-raised by
        # trap(), causing the download to fail.
        f.trap(DeadReferenceError, RemoteException, BadShareError)

        # DeadReferenceError: the server went away mid-request.
        # RemoteException: the server hit an internal error.
        # BadShareError covers (UnknownVersionError, LayoutInvalid,
        # struct.error), which happen when we get obviously wrong data,
        # plus CorruptShareError, raised later by the integrity checks.

        assert isinstance(readers, list)
        bad_shnums = [r.shnum for r in readers]
        self.log("validation or decoding failed on share(s) %s, server(s) %s "
                 ", segment %d: %s" % \
                 (bad_shnums, readers, self._current_segment, str(f)))
        for r in readers:
            self._mark_bad_share(r.server, r.shnum, r, f)
        return None

    def _validate_block(self, results, segnum, reader, server, started):
        """
        I validate a block from one share on a remote server.

        'results' is the gathered triple (block_and_salt, blockhashes,
        sharehashes) produced by _process_segment. On a block hash tree
        failure I raise CorruptShareError so the errback can mark the
        share bad.
        """
        # Grab the part of the block hash tree that is necessary to
        # validate this block, then generate the block hash root.
        self.log("validating share %d for segment %d" % (reader.shnum, segnum))
        # record how long the fetch took, for the status display
        elapsed = time.time() - started
        self._status.add_fetch_timing(server, elapsed)
        self._set_current_status("validating blocks")

        block_and_salt, blockhashes, sharehashes = results
        block, salt = block_and_salt
        assert type(block) is str, (block, salt)

        # the hash tree wants a dict of {index: hash}, not a list
        blockhashes = dict(enumerate(blockhashes))
        self.log("the reader gave me the following blockhashes: %s" % \
                 blockhashes.keys())
        self.log("the reader gave me the following sharehashes: %s" % \
                 sharehashes.keys())
        bht = self._block_hash_trees[reader.shnum]

        # install the chain nodes first, if this tree still needs them
        if bht.needed_hashes(segnum, include_leaf=True):
            try:
                bht.set_hashes(blockhashes)
            except (hashtree.BadHashError, hashtree.NotEnoughHashesError, \
                    IndexError), e:
                raise CorruptShareError(server, reader.shnum,
                                        "block hash tree failure: %s" % e)

        # MDMF hashes the salt together with the block; SDMF hashes the
        # block alone
        if self._version == MDMF_VERSION:
            blockhash = hashutil.block_hash(salt + block)
        else:
            blockhash = hashutil.block_hash(block)
        # If this works without an error, then validation is
        # successful.
        try:
            bht.set_hashes(leaves={segnum: blockhash})
        except (hashtree.BadHashError, hashtree.NotEnoughHashesError, \
                IndexError), e:
            raise CorruptShareError(server, reader.shnum,
                                    "block hash tree failure: %s" % e)
        # NOTE(review): the method appears to end here without validating
        # the share hash chain or returning the validated (block, salt) --
        # this snippet looks truncated; verify against the upstream source.
# ---- Esempio n. 13 (scraped example separator; vote count: 0) ----
    def _send_shares(self, needed):
        """
        Push the new share contents out to the storage servers.

        'needed' is a set of (peerid, shnum) tuples. For each one we
        build a test vector (so the write only succeeds if the server
        still holds what our servermap says it holds -- or holds nothing,
        for a fresh placement), a write vector carrying the new share
        data, and a read vector that fetches the checkstring back. The
        test-and-write requests are then sent to each server, with
        _got_write_answer / _got_write_error handling the responses.
        """
        self.log("_send_shares")

        # we're finally ready to send out our shares. If we encounter any
        # surprises here, it's because somebody else is writing at the same
        # time. (Note: in the future, when we remove the _query_peers() step
        # and instead speculate about [or remember] which shares are where,
        # surprises here are *not* indications of UncoordinatedWriteError,
        # and we'll need to respond to them more gracefully.)

        # needed is a set of (peerid, shnum) tuples. The first thing we do is
        # organize it by peerid.

        peermap = DictOfSets()
        for (peerid, shnum) in needed:
            peermap.add(peerid, shnum)

        # the next thing is to build up a bunch of test vectors. The
        # semantics of Publish are that we perform the operation if the world
        # hasn't changed since the ServerMap was constructed (more or less).
        # For every share we're trying to place, we create a test vector that
        # tests to see if the server*share still corresponds to the
        # map.

        all_tw_vectors = {} # maps peerid to tw_vectors
        sm = self._servermap.servermap

        for key in needed:
            (peerid, shnum) = key

            if key in sm:
                # an old version of that share already exists on the
                # server, according to our servermap. We will create a
                # request that attempts to replace it.
                old_versionid, old_timestamp = sm[key]
                (old_seqnum, old_root_hash, old_salt, old_segsize,
                 old_datalength, old_k, old_N, old_prefix,
                 old_offsets_tuple) = old_versionid
                old_checkstring = pack_checkstring(old_seqnum,
                                                   old_root_hash,
                                                   old_salt)
                testv = (0, len(old_checkstring), "eq", old_checkstring)

            elif key in self.bad_share_checkstrings:
                # a share we previously saw to be bad: require that its
                # (bad) checkstring is still in place before overwriting
                old_checkstring = self.bad_share_checkstrings[key]
                testv = (0, len(old_checkstring), "eq", old_checkstring)

            else:
                # add a testv that requires the share not exist

                # Unfortunately, foolscap-0.2.5 has a bug in the way inbound
                # constraints are handled. If the same object is referenced
                # multiple times inside the arguments, foolscap emits a
                # 'reference' token instead of a distinct copy of the
                # argument. The bug is that these 'reference' tokens are not
                # accepted by the inbound constraint code. To work around
                # this, we need to prevent python from interning the
                # (constant) tuple, by creating a new copy of this vector
                # each time.

                # This bug is fixed in foolscap-0.2.6, and even though this
                # version of Tahoe requires foolscap-0.3.1 or newer, we are
                # supposed to be able to interoperate with older versions of
                # Tahoe which are allowed to use older versions of foolscap,
                # including foolscap-0.2.5 . In addition, I've seen other
                # foolscap problems triggered by 'reference' tokens (see #541
                # for details). So we must keep this workaround in place.

                #testv = (0, 1, 'eq', "")
                testv = tuple([0, 1, 'eq', ""])

            testvs = [testv]
            # the write vector is simply the share
            writev = [(0, self.shares[shnum])]

            if peerid not in all_tw_vectors:
                all_tw_vectors[peerid] = {}
                # maps shnum to (testvs, writevs, new_length)
            assert shnum not in all_tw_vectors[peerid]

            all_tw_vectors[peerid][shnum] = (testvs, writev, None)

        # we read the checkstring back from each share, however we only use
        # it to detect whether there was a new share that we didn't know
        # about. The success or failure of the write will tell us whether
        # there was a collision or not. If there is a collision, the first
        # thing we'll do is update the servermap, which will find out what
        # happened. We could conceivably reduce a roundtrip by using the
        # readv checkstring to populate the servermap, but really we'd have
        # to read enough data to validate the signatures too, so it wouldn't
        # be an overall win.
        read_vector = [(0, struct.calcsize(SIGNED_PREFIX))]

        # ok, send the messages!
        # NOTE(review): len(all_tw_vectors) counts servers, not shares, so
        # this log message's wording is approximate.
        self.log("sending %d shares" % len(all_tw_vectors), level=log.NOISY)
        started = time.time()
        for (peerid, tw_vectors) in all_tw_vectors.items():

            # per-server secrets authorize the write and manage leases
            write_enabler = self._node.get_write_enabler(peerid)
            renew_secret = self._node.get_renewal_secret(peerid)
            cancel_secret = self._node.get_cancel_secret(peerid)
            secrets = (write_enabler, renew_secret, cancel_secret)
            shnums = tw_vectors.keys()

            for shnum in shnums:
                self.outstanding.add( (peerid, shnum) )

            d = self._do_testreadwrite(peerid, secrets,
                                       tw_vectors, read_vector)
            d.addCallbacks(self._got_write_answer, self._got_write_error,
                           callbackArgs=(peerid, shnums, started),
                           errbackArgs=(peerid, shnums, started))
            # tolerate immediate errback, like with DeadReferenceError
            d.addBoth(fireEventually)
            d.addCallback(self.loop)
            d.addErrback(self._fatal_error)

        self._update_status()
        self.log("%d shares sent" % len(all_tw_vectors), level=log.NOISY)
# ---- Esempio n. 14 (scraped example separator; vote count: 0) ----
class SegmentFetcher:
    """I am responsible for acquiring blocks for a single segment. I will use
    the Share instances passed to my add_shares() method to locate, retrieve,
    and validate those blocks. I expect my parent node to call my
    no_more_shares() method when there are no more shares available. I will
    call my parent's want_more_shares() method when I want more: I expect to
    see at least one call to add_shares or no_more_shares afterwards.

    When I have enough validated blocks, I will call my parent's
    process_blocks() method with a dictionary that maps shnum to blockdata.
    If I am unable to provide enough blocks, I will call my parent's
    fetch_failed() method with (self, f). After either of these events, I
    will shut down and do no further work. My parent can also call my stop()
    method to have me shut down early."""

    def __init__(self, node, segnum, k, logparent):
        """
        :param node: the parent _Node we report results/failures back to
        :param segnum: which segment we are fetching blocks for
        :param k: how many distinct validated blocks we need to decode
        :param logparent: log entry to parent our log messages to
        """
        self._node = node # _Node
        self.segnum = segnum
        self._k = k
        self._shares = [] # unused Share instances, sorted by "goodness"
                          # (RTT), then shnum. This is populated when DYHB
                          # responses arrive, or (for later segments) at
                          # startup. We remove shares from it when we call
                          # sh.get_block() on them.
        self._shares_from_server = DictOfSets() # maps serverid to set of
                                                # Shares on that server for
                                                # which we have outstanding
                                                # get_block() calls.
        self._max_shares_per_server = 1 # how many Shares we're allowed to
                                        # pull from each server. This starts
                                        # at 1 and grows if we don't have
                                        # sufficient diversity.
        self._active_share_map = {} # maps shnum to outstanding (and not
                                    # OVERDUE) Share that provides it.
        self._overdue_share_map = DictOfSets() # shares in the OVERDUE state
        self._lp = logparent
        self._share_observers = {} # maps Share to EventStreamObserver for
                                   # active ones
        self._blocks = {} # maps shnum to validated block data
        self._no_more_shares = False
        self._last_failure = None
        self._running = True

    def stop(self):
        """Shut down: cancel all outstanding block requests and release
        our share-tracking state. Called by the parent or internally once
        the segment's outcome is decided."""
        log.msg("SegmentFetcher(%s).stop" % self._node._si_prefix,
                level=log.NOISY, parent=self._lp, umid="LWyqpg")
        self._cancel_all_requests()
        self._running = False
        # help GC ??? XXX
        del self._shares, self._shares_from_server, self._active_share_map
        del self._share_observers


    # called by our parent _Node

    def add_shares(self, shares):
        """Accept new Share instances to draw blocks from, keeping the
        pool sorted by (RTT, shnum), and schedule a loop pass."""
        # called when ShareFinder locates a new share, and when a non-initial
        # segment fetch is started and we already know about shares from the
        # previous segment
        self._shares.extend(shares)
        self._shares.sort(key=lambda s: (s._dyhb_rtt, s._shnum) )
        eventually(self.loop)

    def no_more_shares(self):
        """Record that no further shares will arrive, then re-evaluate
        whether we can still succeed."""
        # ShareFinder tells us it's reached the end of its list
        self._no_more_shares = True
        eventually(self.loop)

    # internal methods

    def loop(self):
        """Run one pass of the state machine, converting any unexpected
        exception into a fetch_failed() report before re-raising it."""
        try:
            # if any exception occurs here, kill the download
            self._do_loop()
        except BaseException:
            self._node.fetch_failed(self, Failure())
            raise

    def _do_loop(self):
        """Main state machine: keep enough get_block() requests in flight
        to cover k shares, raising the per-server limit when diversity is
        lacking, asking the parent for more shares when needed, and
        finishing (or failing) once the outcome is decided."""
        k = self._k
        if not self._running:
            return
        numsegs, authoritative = self._node.get_num_segments()
        if authoritative and self.segnum >= numsegs:
            # oops, we were asking for a segment number beyond the end of the
            # file. This is an error.
            self.stop()
            e = BadSegmentNumberError("segnum=%d, numsegs=%d" %
                                      (self.segnum, self._node.num_segments))
            f = Failure(e)
            self._node.fetch_failed(self, f)
            return

        #print "LOOP", self._blocks.keys(), "active:", self._active_share_map, "overdue:", self._overdue_share_map, "unused:", self._shares
        # Should we send out more requests?
        while len(set(self._blocks.keys())
                  | set(self._active_share_map.keys())
                  ) < k:
            # we don't have data or active requests for enough shares. Are
            # there any unused shares we can start using?
            (sent_something, want_more_diversity) = self._find_and_use_share()
            if sent_something:
                # great. loop back around in case we need to send more.
                continue
            if want_more_diversity:
                # we could have sent something if we'd been allowed to pull
                # more shares per server. Increase the limit and try again.
                self._max_shares_per_server += 1
                log.msg("SegmentFetcher(%s) increasing diversity limit to %d"
                        % (self._node._si_prefix, self._max_shares_per_server),
                        level=log.NOISY, umid="xY2pBA")
                # Also ask for more shares, in the hopes of achieving better
                # diversity for the next segment.
                self._ask_for_more_shares()
                continue
            # we need more shares than the ones in self._shares to make
            # progress
            self._ask_for_more_shares()
            if self._no_more_shares:
                # But there are no more shares to be had. If we're going to
                # succeed, it will be with the shares we've already seen.
                # Will they be enough?
                if len(set(self._blocks.keys())
                       | set(self._active_share_map.keys())
                       | set(self._overdue_share_map.keys())
                       ) < k:
                    # nope. bail.
                    self._no_shares_error() # this calls self.stop()
                    return
                # our outstanding or overdue requests may yet work.
            # more shares may be coming. Wait until then.
            return

        # are we done?
        if len(set(self._blocks.keys())) >= k:
            # yay!
            self.stop()
            self._node.process_blocks(self.segnum, self._blocks)
            return

    def _no_shares_error(self):
        """Report a terminal failure to the parent: NoSharesError when we
        never saw any shares at all, otherwise NotEnoughSharesError with a
        breakdown of where the shares went. Calls stop()."""
        if not (self._shares or self._active_share_map or
                self._overdue_share_map or self._blocks):
            format = ("no shares (need %(k)d)."
                      " Last failure: %(last_failure)s")
            args = { "k": self._k,
                     "last_failure": self._last_failure }
            error = NoSharesError
        else:
            format = ("ran out of shares: complete=%(complete)s"
                      " pending=%(pending)s overdue=%(overdue)s"
                      " unused=%(unused)s need %(k)d."
                      " Last failure: %(last_failure)s")
            def join(shnums): return ",".join(["sh%d" % shnum
                                               for shnum in sorted(shnums)])
            pending_s = ",".join([str(sh)
                                  for sh in self._active_share_map.values()])
            overdue = set()
            for shares in self._overdue_share_map.values():
                overdue |= shares
            overdue_s = ",".join([str(sh) for sh in overdue])
            args = {"complete": join(self._blocks.keys()),
                    "pending": pending_s,
                    "overdue": overdue_s,
                    # 'unused' should be zero
                    "unused": ",".join([str(sh) for sh in self._shares]),
                    "k": self._k,
                    "last_failure": self._last_failure,
                    }
            error = NotEnoughSharesError
        log.msg(format=format,
                level=log.UNUSUAL, parent=self._lp, umid="1DsnTg",
                **args)
        e = error(format % args)
        f = Failure(e)
        self.stop()
        self._node.fetch_failed(self, f)

    def _find_and_use_share(self):
        """Scan the sorted share pool for the best share we can use and
        start fetching its block. Returns (sent_something,
        want_more_diversity): the latter is True when a usable share was
        skipped only because of the per-server limit."""
        sent_something = False
        want_more_diversity = False
        for sh in self._shares: # find one good share to fetch
            shnum = sh._shnum ; serverid = sh._peerid
            if shnum in self._blocks:
                continue # don't request data we already have
            if shnum in self._active_share_map:
                # note: OVERDUE shares are removed from _active_share_map
                # and added to _overdue_share_map instead.
                continue # don't send redundant requests
            sfs = self._shares_from_server
            if len(sfs.get(serverid,set())) >= self._max_shares_per_server:
                # don't pull too much from a single server
                want_more_diversity = True
                continue
            # ok, we can use this share
            self._shares.remove(sh)
            self._active_share_map[shnum] = sh
            self._shares_from_server.add(serverid, sh)
            self._start_share(sh, shnum)
            sent_something = True
            break
        return (sent_something, want_more_diversity)

    def _start_share(self, share, shnum):
        """Kick off the block fetch for one share and subscribe to its
        progress events."""
        self._share_observers[share] = o = share.get_block(self.segnum)
        o.subscribe(self._block_request_activity, share=share, shnum=shnum)

    def _ask_for_more_shares(self):
        """Ask the parent for more shares, unless it already told us the
        supply is exhausted."""
        if not self._no_more_shares:
            self._node.want_more_shares()
            # that will trigger the ShareFinder to keep looking, and call our
            # add_shares() or no_more_shares() later.

    def _cancel_all_requests(self):
        """Cancel every outstanding get_block() observer."""
        for o in self._share_observers.values():
            o.cancel()
        self._share_observers = {}

    def _block_request_activity(self, share, shnum, state, block=None, f=None):
        """Handle a state-change event for one outstanding block request:
        update the tracking maps, record a completed block or a failure,
        and schedule another loop pass."""
        # called by Shares, in response to our s.send_request() calls.
        if not self._running:
            return
        log.msg("SegmentFetcher(%s)._block_request_activity:"
                " Share(sh%d-on-%s) -> %s" %
                (self._node._si_prefix, shnum, share._peerid_s, state),
                level=log.NOISY, parent=self._lp, umid="vilNWA")
        # COMPLETE, CORRUPT, DEAD, BADSEGNUM are terminal. Remove the share
        # from all our tracking lists.
        if state in (COMPLETE, CORRUPT, DEAD, BADSEGNUM):
            self._share_observers.pop(share, None)
            self._shares_from_server.discard(shnum, share)
            if self._active_share_map.get(shnum) is share:
                del self._active_share_map[shnum]
            self._overdue_share_map.discard(shnum, share)

        if state is COMPLETE:
            # 'block' is fully validated and complete
            self._blocks[shnum] = block

        if state is OVERDUE:
            # no longer active, but still might complete
            del self._active_share_map[shnum]
            self._overdue_share_map.add(shnum, share)
            # OVERDUE is not terminal: it will eventually transition to
            # COMPLETE, CORRUPT, or DEAD.

        if state is DEAD:
            self._last_failure = f
        if state is BADSEGNUM:
            # our main loop will ask the DownloadNode each time for the
            # number of segments, so we'll deal with this in the top of
            # _do_loop
            pass

        eventually(self.loop)
# ---- Esempio n. 15 (scraped example separator; vote count: 0) ----
 def _clear(self):
     """Reset the share cache to empty (used only by unit tests)."""
     # used by unit tests
     self.cache = DictOfSets()
# ---- Esempio n. 16 (scraped example separator; vote count: 0) ----
class Retrieve:
    # this class is currently single-use. Eventually (in MDMF) we will make
    # it multi-use, in which case you can call download(range) multiple
    # times, and each will have a separate response chain. However the
    # Retrieve object will remain tied to a specific version of the file, and
    # will use a single ServerMap instance.

    def __init__(self, filenode, servermap, verinfo, fetch_privkey=False):
        """Prepare to download one specific version of a mutable file.

        filenode: the mutable file node being read; it must already know
                  its pubkey and readkey (asserted below).
        servermap: a ServerMap describing which servers hold which shares.
        verinfo: the (seqnum, root_hash, IV, segsize, datalength, k, N,
                 prefix, offsets_tuple) identifier of the version to fetch.
        fetch_privkey: if True, also fetch the encrypted private key
                       (needed for repair), unless the node already has it.
        """
        self._node = filenode
        assert self._node.get_pubkey()
        self._storage_index = filenode.get_storage_index()
        assert self._node.get_readkey()
        self._last_failure = None
        prefix = si_b2a(self._storage_index)[:5]
        self._log_number = log.msg("Retrieve(%s): starting" % prefix)
        self._outstanding_queries = {} # maps (peerid,shnum) to start_time
        self._running = True
        self._decoding = False
        self._bad_shares = set()

        self.servermap = servermap
        self.verinfo = verinfo
        # during repair, we may be called upon to grab the private key, since
        # it wasn't picked up during a verify=False checker run, and we'll
        # need it for repair to generate a new version.
        self._need_privkey = fetch_privkey
        if self._node.get_privkey():
            self._need_privkey = False

        self._status = RetrieveStatus()
        self._status.set_storage_index(self._storage_index)
        self._status.set_helper(False)
        self._status.set_progress(0.0)
        self._status.set_active(True)
        (seqnum, root_hash, IV, segsize, datalength, k, N, prefix,
         offsets_tuple) = self.verinfo
        self._status.set_size(datalength)
        self._status.set_encoding(k, N)

    def get_status(self):
        """Return the RetrieveStatus object tracking this download."""
        return self._status

    def log(self, *args, **kwargs):
        """Forward to log.msg, defaulting parent/facility for this Retrieve."""
        kwargs.setdefault("parent", self._log_number)
        kwargs.setdefault("facility", "tahoe.mutable.retrieve")
        return log.msg(*args, **kwargs)

    def download(self):
        """Begin the retrieval; return a Deferred firing when it completes."""
        self._done_deferred = defer.Deferred()
        self._started = time.time()
        self._status.set_status("Retrieving Shares")

        # figure out which servers can give us shares of this version
        versionmap = self.servermap.make_versionmap()
        # remaining_sharemap is consumed as requests are sent out
        self.remaining_sharemap = DictOfSets()
        for (shnum, peerid, _timestamp) in versionmap[self.verinfo]:
            self.remaining_sharemap.add(shnum, peerid)

        self.shares = {} # maps shnum to validated blocks

        # how many shares do we need?
        (seqnum, root_hash, IV, segsize, datalength, k, N, prefix,
         offsets_tuple) = self.verinfo
        assert len(self.remaining_sharemap) >= k
        # FEC decodes fastest with "primary" shares, so start with the
        # lowest-numbered shnums we know about.
        self.active_shnums = set(sorted(self.remaining_sharemap.keys())[:k])
        for shnum in self.active_shnums:
            # use an arbitrary peer who has the share. If shares are
            # doubled up (more than one share per peer), spreading the load
            # among multiple peers would be faster, but the algorithm for
            # that is more complicated, and a well-provisioned grid
            # shouldn't have multiple shares per peer anyway.
            peerid = list(self.remaining_sharemap[shnum])[0]
            self.get_data(shnum, peerid)

        # control flow beyond this point: state machine. Receiving responses
        # from queries is the input. We might send out more queries, or we
        # might produce a result.

        return self._done_deferred

    def get_data(self, shnum, peerid):
        """Request share `shnum` from `peerid` (or satisfy it from cache).

        Reads the signed prefix (checkstring), the hash chains plus share
        data, and optionally the encrypted private key. Results flow into
        _got_results / _query_failed, and _check_for_done runs afterwards
        either way. The returned Deferred is for testing convenience only.
        """
        self.log(format="sending sh#%(shnum)d request to [%(peerid)s]",
                 shnum=shnum,
                 peerid=idlib.shortnodeid_b2a(peerid),
                 level=log.NOISY)
        ss = self.servermap.connections[peerid]
        started = time.time()
        (seqnum, root_hash, IV, segsize, datalength, k, N, prefix,
         offsets_tuple) = self.verinfo
        offsets = dict(offsets_tuple)

        # we read the checkstring, to make sure that the data we grab is from
        # the right version.
        readv = [ (0, struct.calcsize(SIGNED_PREFIX)) ]

        # We also read the data, and the hashes necessary to validate them
        # (share_hash_chain, block_hash_tree, share_data). We don't read the
        # signature or the pubkey, since that was handled during the
        # servermap phase, and we'll be comparing the share hash chain
        # against the roothash that was validated back then.

        readv.append( (offsets['share_hash_chain'],
                       offsets['enc_privkey'] - offsets['share_hash_chain'] ) )

        # if we need the private key (for repair), we also fetch that
        if self._need_privkey:
            readv.append( (offsets['enc_privkey'],
                           offsets['EOF'] - offsets['enc_privkey']) )

        # the Marker identifies this query in _outstanding_queries
        m = Marker()
        self._outstanding_queries[m] = (peerid, shnum, started)

        # ask the cache first
        got_from_cache = False
        datavs = []
        for (offset, length) in readv:
            data = self._node._read_from_cache(self.verinfo, shnum, offset, length)
            if data is not None:
                datavs.append(data)
        if len(datavs) == len(readv):
            # every read was satisfied from the cache: skip the network
            self.log("got data from cache")
            got_from_cache = True
            d = fireEventually({shnum: datavs})
            # datavs is a dict mapping shnum to a pair of strings
        else:
            d = self._do_read(ss, peerid, self._storage_index, [shnum], readv)
        self.remaining_sharemap.discard(shnum, peerid)

        d.addCallback(self._got_results, m, peerid, started, got_from_cache)
        d.addErrback(self._query_failed, m, peerid)
        # errors that aren't handled by _query_failed (and errors caused by
        # _query_failed) get logged, but we still want to check for doneness.
        def _oops(f):
            self.log(format="problem in _query_failed for sh#%(shnum)d to %(peerid)s",
                     shnum=shnum,
                     peerid=idlib.shortnodeid_b2a(peerid),
                     failure=f,
                     level=log.WEIRD, umid="W0xnQA")
        d.addErrback(_oops)
        d.addBoth(self._check_for_done)
        # any error during _check_for_done means the download fails. If the
        # download is successful, _check_for_done will fire _done by itself.
        d.addErrback(self._done)
        d.addErrback(log.err)
        return d # purely for testing convenience

    def _do_read(self, ss, peerid, storage_index, shnums, readv):
        """Issue the remote slot_readv call.

        Isolated in its own method so tests can subclass and override it.
        """
        return ss.callRemote("slot_readv", storage_index, shnums, readv)

    def remove_peer(self, peerid):
        """Stop considering any shares offered by the given peer."""
        sharemap = self.remaining_sharemap
        for shnum in list(sharemap.keys()):
            sharemap.discard(shnum, peerid)

    def _got_results(self, datavs, marker, peerid, started, got_from_cache):
        """Process readv results from one server (or from the cache).

        datavs: dict mapping shnum to a list of strings: the signed prefix,
                the hash-chain+share data, and (if requested in get_data)
                the encrypted private key.
        marker: the Marker identifying this query in _outstanding_queries.
        peerid/started: who answered, and when the query was sent.
        got_from_cache: if True, skip fetch-timing bookkeeping.
        """
        now = time.time()
        elapsed = now - started
        if not got_from_cache:
            self._status.add_fetch_timing(peerid, elapsed)
        self.log(format="got results (%(shares)d shares) from [%(peerid)s]",
                 shares=len(datavs),
                 peerid=idlib.shortnodeid_b2a(peerid),
                 level=log.NOISY)
        self._outstanding_queries.pop(marker, None)
        if not self._running:
            return

        # note that we only ask for a single share per query, so we only
        # expect a single share back. On the other hand, we use the extra
        # shares if we get them.. seems better than an assert().

        for shnum,datav in datavs.items():
            (prefix, hash_and_data) = datav[:2]
            try:
                self._got_results_one_share(shnum, peerid,
                                            prefix, hash_and_data)
            # "except E as e" (valid on Python 2.6+) instead of the legacy
            # "except E, e" form, which Python 3 rejects.
            except CorruptShareError as e:
                # log it and give the other shares a chance to be processed
                f = failure.Failure()
                self.log(format="bad share: %(f_value)s",
                         f_value=str(f.value), failure=f,
                         level=log.WEIRD, umid="7fzWZw")
                self.notify_server_corruption(peerid, shnum, str(e))
                self.remove_peer(peerid)
                self.servermap.mark_bad_share(peerid, shnum, prefix)
                self._bad_shares.add( (peerid, shnum) )
                self._status.problems[peerid] = f
                self._last_failure = f
            if self._need_privkey and len(datav) > 2:
                lp = None
                self._try_to_validate_privkey(datav[2], peerid, shnum, lp)
Esempio n. 17
0
    def update_goal(self):
        """Recompute self.goal: drop bad peers, then home the homeless shares.

        Shares not currently assigned to any non-bad peer are distributed
        across the surviving peers, preferring servers that currently hold
        the fewest shares, then permuted-list order. Raises
        NotEnoughServersError if no non-bad peers remain.
        """
        # if log.recording_noisy
        if True:
            self.log_goal(self.goal, "before update: ")

        # first, remove any bad peers from our goal
        self.goal = set([ (peerid, shnum)
                          for (peerid, shnum) in self.goal
                          if peerid not in self.bad_peers ])

        # find the homeless shares:
        homefull_shares = set([shnum for (peerid, shnum) in self.goal])
        homeless_shares = set(range(self.total_shares)) - homefull_shares
        homeless_shares = sorted(list(homeless_shares))
        # place them somewhere. We prefer unused servers at the beginning of
        # the available peer list.

        if not homeless_shares:
            return

        # if an old share X is on a node, put the new share X there too.
        # TODO: 1: redistribute shares to achieve one-per-peer, by copying
        #       shares from existing peers to new (less-crowded) ones. The
        #       old shares must still be updated.
        # TODO: 2: move those shares instead of copying them, to reduce future
        #       update work

        # this is a bit CPU intensive but easy to analyze. We create a sort
        # order for each peerid. If the peerid is marked as bad, we don't
        # even put them in the list. Then we care about the number of shares
        # which have already been assigned to them. After that we care about
        # their permutation order.
        old_assignments = DictOfSets()
        for (peerid, shnum) in self.goal:
            old_assignments.add(peerid, shnum)

        peerlist = []
        for i, (peerid, ss) in enumerate(self.full_peerlist):
            if peerid in self.bad_peers:
                continue
            # sort key: (#shares already assigned, permuted order, ...)
            entry = (len(old_assignments.get(peerid, [])), i, peerid, ss)
            peerlist.append(entry)
        peerlist.sort()

        if not peerlist:
            raise NotEnoughServersError("Ran out of non-bad servers, "
                                        "first_error=%s" %
                                        str(self._first_write_error),
                                        self._first_write_error)

        # we then index this peerlist with an integer, because we may have to
        # wrap. We update the goal as we go.
        i = 0
        for shnum in homeless_shares:
            (ignored1, ignored2, peerid, ss) = peerlist[i]
            # if we are forced to send a share to a server that already has
            # one, we may have two write requests in flight, and the
            # servermap (which was computed before either request was sent)
            # won't reflect the new shares, so the second response will be
            # surprising. There is code in _got_write_answer() to tolerate
            # this, otherwise it would cause the publish to fail with an
            # UncoordinatedWriteError. See #546 for details of the trouble
            # this used to cause.
            self.goal.add( (peerid, shnum) )
            self.connections[peerid] = ss
            i += 1
            if i >= len(peerlist):
                i = 0
        if True:
            self.log_goal(self.goal, "after update: ")
Esempio n. 18
0
 def make_sharemap(self):
     """Return a DictOfSets that maps shnum to the set of peerids holding it."""
     sharemap = DictOfSets()
     for (peerid, shnum) in self.servermap:
         sharemap.add(shnum, peerid)
     return sharemap
Esempio n. 19
0
class SegmentFetcher:
    """I am responsible for acquiring blocks for a single segment. I will use
    the Share instances passed to my add_shares() method to locate, retrieve,
    and validate those blocks. I expect my parent node to call my
    no_more_shares() method when there are no more shares available. I will
    call my parent's want_more_shares() method when I want more: I expect to
    see at least one call to add_shares or no_more_shares afterwards.

    When I have enough validated blocks, I will call my parent's
    process_blocks() method with a dictionary that maps shnum to blockdata.
    If I am unable to provide enough blocks, I will call my parent's
    fetch_failed() method with (self, f). After either of these events, I
    will shut down and do no further work. My parent can also call my stop()
    method to have me shut down early."""
    def __init__(self, node, segnum, k, logparent):
        self._node = node  # _Node
        self.segnum = segnum  # which segment of the file we are fetching
        self._k = k  # how many distinct blocks are needed to decode a segment
        self._shares = []  # unused Share instances, sorted by "goodness"
        # (RTT), then shnum. This is populated when DYHB
        # responses arrive, or (for later segments) at
        # startup. We remove shares from it when we call
        # sh.get_block() on them.
        self._shares_from_server = DictOfSets()  # maps server to set of
        # Shares on that server for
        # which we have outstanding
        # get_block() calls.
        self._max_shares_per_server = 1  # how many Shares we're allowed to
        # pull from each server. This starts
        # at 1 and grows if we don't have
        # sufficient diversity.
        self._active_share_map = {}  # maps shnum to outstanding (and not
        # OVERDUE) Share that provides it.
        self._overdue_share_map = DictOfSets()  # shares in the OVERDUE state
        self._lp = logparent
        self._share_observers = {}  # maps Share to EventStreamObserver for
        # active ones
        self._blocks = {}  # maps shnum to validated block data
        self._no_more_shares = False  # set when the ShareFinder runs dry
        self._last_failure = None  # most recent Failure, for error reports
        self._running = True

    def stop(self):
        """Shut down early: cancel outstanding requests and halt the loop."""
        log.msg("SegmentFetcher(%s).stop" % self._node._si_prefix,
                level=log.NOISY,
                parent=self._lp,
                umid="LWyqpg")
        self._cancel_all_requests()
        self._running = False
        # help GC ??? XXX
        del self._shares, self._shares_from_server, self._active_share_map
        del self._share_observers

    # called by our parent _Node

    def add_shares(self, shares):
        """Accept newly-located Shares, keep them sorted, wake the loop."""
        # called when ShareFinder locates a new share, and when a non-initial
        # segment fetch is started and we already know about shares from the
        # previous segment
        self._shares.extend(shares)
        self._shares.sort(key=lambda s: (s._dyhb_rtt, s._shnum))
        eventually(self.loop)

    def no_more_shares(self):
        """Note that no further shares will arrive; wake the loop."""
        # ShareFinder tells us it's reached the end of its list
        self._no_more_shares = True
        eventually(self.loop)

    # internal methods

    def loop(self):
        """Run one pass of the state machine, failing the download on error."""
        try:
            # if any exception occurs here, kill the download
            self._do_loop()
        except BaseException:
            self._node.fetch_failed(self, Failure())
            raise

    def _do_loop(self):
        # The body of the state machine: send more requests until we have k
        # blocks in flight or in hand (or give up), then deliver them.
        k = self._k
        if not self._running:
            return
        numsegs, authoritative = self._node.get_num_segments()
        if authoritative and self.segnum >= numsegs:
            # oops, we were asking for a segment number beyond the end of the
            # file. This is an error.
            self.stop()
            e = BadSegmentNumberError("segnum=%d, numsegs=%d" %
                                      (self.segnum, self._node.num_segments))
            f = Failure(e)
            self._node.fetch_failed(self, f)
            return

        #print "LOOP", self._blocks.keys(), "active:", self._active_share_map, "overdue:", self._overdue_share_map, "unused:", self._shares
        # Should we send out more requests?
        while len(
                set(self._blocks.keys())
                | set(self._active_share_map.keys())) < k:
            # we don't have data or active requests for enough shares. Are
            # there any unused shares we can start using?
            (sent_something, want_more_diversity) = self._find_and_use_share()
            if sent_something:
                # great. loop back around in case we need to send more.
                continue
            if want_more_diversity:
                # we could have sent something if we'd been allowed to pull
                # more shares per server. Increase the limit and try again.
                self._max_shares_per_server += 1
                log.msg("SegmentFetcher(%s) increasing diversity limit to %d" %
                        (self._node._si_prefix, self._max_shares_per_server),
                        level=log.NOISY,
                        umid="xY2pBA")
                # Also ask for more shares, in the hopes of achieving better
                # diversity for the next segment.
                self._ask_for_more_shares()
                continue
            # we need more shares than the ones in self._shares to make
            # progress
            self._ask_for_more_shares()
            if self._no_more_shares:
                # But there are no more shares to be had. If we're going to
                # succeed, it will be with the shares we've already seen.
                # Will they be enough?
                if len(
                        set(self._blocks.keys())
                        | set(self._active_share_map.keys())
                        | set(self._overdue_share_map.keys())) < k:
                    # nope. bail.
                    self._no_shares_error()  # this calls self.stop()
                    return
                # our outstanding or overdue requests may yet work.
            # more shares may be coming. Wait until then.
            return

        # are we done?
        if len(set(self._blocks.keys())) >= k:
            # yay!
            self.stop()
            self._node.process_blocks(self.segnum, self._blocks)
            return

    def _no_shares_error(self):
        """Report a fatal lack of shares: log, stop, and tell the parent.

        Raises nothing itself; picks NoSharesError (never saw any shares)
        vs NotEnoughSharesError (saw some, but fewer than k usable) and
        passes the Failure to the parent's fetch_failed().
        """
        if not (self._shares or self._active_share_map
                or self._overdue_share_map or self._blocks):
            format = ("no shares (need %(k)d)."
                      " Last failure: %(last_failure)s")
            args = {"k": self._k, "last_failure": self._last_failure}
            error = NoSharesError
        else:
            format = ("ran out of shares: complete=%(complete)s"
                      " pending=%(pending)s overdue=%(overdue)s"
                      " unused=%(unused)s need %(k)d."
                      " Last failure: %(last_failure)s")

            def join(shnums):
                # render a share-number list as "sh0,sh3,sh7"
                return ",".join(["sh%d" % shnum for shnum in sorted(shnums)])

            pending_s = ",".join(
                [str(sh) for sh in self._active_share_map.values()])
            overdue = set()
            for shares in self._overdue_share_map.values():
                overdue |= shares
            overdue_s = ",".join([str(sh) for sh in overdue])
            args = {
                "complete": join(self._blocks.keys()),
                "pending": pending_s,
                "overdue": overdue_s,
                # 'unused' should be zero
                "unused": ",".join([str(sh) for sh in self._shares]),
                "k": self._k,
                "last_failure": self._last_failure,
            }
            error = NotEnoughSharesError
        log.msg(format=format,
                level=log.UNUSUAL,
                parent=self._lp,
                umid="1DsnTg",
                **args)
        e = error(format % args)
        f = Failure(e)
        self.stop()
        self._node.fetch_failed(self, f)

    def _find_and_use_share(self):
        """Pick the best unused share and start fetching its block.

        Returns (sent_something, want_more_diversity): sent_something is
        True if a get_block() call was started; want_more_diversity is True
        if we were blocked only by the per-server share limit.
        """
        sent_something = False
        want_more_diversity = False
        for sh in self._shares:  # find one good share to fetch
            shnum = sh._shnum
            server = sh._server  # XXX
            if shnum in self._blocks:
                continue  # don't request data we already have
            if shnum in self._active_share_map:
                # note: OVERDUE shares are removed from _active_share_map
                # and added to _overdue_share_map instead.
                continue  # don't send redundant requests
            sfs = self._shares_from_server
            if len(sfs.get(server, set())) >= self._max_shares_per_server:
                # don't pull too much from a single server
                want_more_diversity = True
                continue
            # ok, we can use this share
            self._shares.remove(sh)
            self._active_share_map[shnum] = sh
            self._shares_from_server.add(server, sh)
            self._start_share(sh, shnum)
            sent_something = True
            break
        return (sent_something, want_more_diversity)

    def _start_share(self, share, shnum):
        """Kick off get_block() on `share` and subscribe to its progress."""
        self._share_observers[share] = o = share.get_block(self.segnum)
        o.subscribe(self._block_request_activity, share=share, shnum=shnum)

    def _ask_for_more_shares(self):
        """Ask the parent for more shares, unless the ShareFinder is done."""
        if not self._no_more_shares:
            self._node.want_more_shares()
            # that will trigger the ShareFinder to keep looking, and call our
            # add_shares() or no_more_shares() later.

    def _cancel_all_requests(self):
        """Cancel every outstanding block-request observer."""
        for o in self._share_observers.values():
            o.cancel()
        self._share_observers = {}

    def _block_request_activity(self, share, shnum, state, block=None, f=None):
        """Observer callback fired by a Share as a block request progresses.

        state is COMPLETE, CORRUPT, DEAD, or BADSEGNUM (terminal), or
        OVERDUE (non-terminal). `block` accompanies COMPLETE; `f` (a
        Failure) accompanies DEAD.
        """
        # called by Shares, in response to our s.send_request() calls.
        if not self._running:
            return
        log.msg("SegmentFetcher(%s)._block_request_activity: %s -> %s" %
                (self._node._si_prefix, repr(share), state),
                level=log.NOISY,
                parent=self._lp,
                umid="vilNWA")
        # COMPLETE, CORRUPT, DEAD, BADSEGNUM are terminal. Remove the share
        # from all our tracking lists.
        if state in (COMPLETE, CORRUPT, DEAD, BADSEGNUM):
            self._share_observers.pop(share, None)
            server = share._server  # XXX
            self._shares_from_server.discard(server, share)
            if self._active_share_map.get(shnum) is share:
                del self._active_share_map[shnum]
            self._overdue_share_map.discard(shnum, share)

        if state is COMPLETE:
            # 'block' is fully validated and complete
            self._blocks[shnum] = block

        if state is OVERDUE:
            # no longer active, but still might complete
            del self._active_share_map[shnum]
            self._overdue_share_map.add(shnum, share)
            # OVERDUE is not terminal: it will eventually transition to
            # COMPLETE, CORRUPT, or DEAD.

        if state is DEAD:
            self._last_failure = f
        if state is BADSEGNUM:
            # our main loop will ask the DownloadNode each time for the
            # number of segments, so we'll deal with this in the top of
            # _do_loop
            pass

        eventually(self.loop)
Esempio n. 20
0
    def _send_shares(self, needed):
        """Send test-and-write vectors for every (peerid, shnum) in `needed`.

        For each share we build a test vector (the expected old checkstring,
        or a must-not-exist test) plus a write vector holding the new share
        data, then fire the slot writes to each peer. Responses are routed
        to _got_write_answer / _got_write_error, with loop() run afterwards.
        """
        self.log("_send_shares")

        # we're finally ready to send out our shares. If we encounter any
        # surprises here, it's because somebody else is writing at the same
        # time. (Note: in the future, when we remove the _query_peers() step
        # and instead speculate about [or remember] which shares are where,
        # surprises here are *not* indications of UncoordinatedWriteError,
        # and we'll need to respond to them more gracefully.)

        # needed is a set of (peerid, shnum) tuples. The first thing we do is
        # organize it by peerid.

        peermap = DictOfSets()
        for (peerid, shnum) in needed:
            peermap.add(peerid, shnum)

        # the next thing is to build up a bunch of test vectors. The
        # semantics of Publish are that we perform the operation if the world
        # hasn't changed since the ServerMap was constructed (more or less).
        # For every share we're trying to place, we create a test vector that
        # tests to see if the server*share still corresponds to the
        # map.

        all_tw_vectors = {}  # maps peerid to tw_vectors
        sm = self._servermap.servermap

        for key in needed:
            (peerid, shnum) = key

            if key in sm:
                # an old version of that share already exists on the
                # server, according to our servermap. We will create a
                # request that attempts to replace it.
                old_versionid, old_timestamp = sm[key]
                (old_seqnum, old_root_hash, old_salt, old_segsize,
                 old_datalength, old_k, old_N, old_prefix,
                 old_offsets_tuple) = old_versionid
                old_checkstring = pack_checkstring(old_seqnum, old_root_hash,
                                                   old_salt)
                testv = (0, len(old_checkstring), "eq", old_checkstring)

            elif key in self.bad_share_checkstrings:
                # we know this share is bad: require its recorded (bad)
                # checkstring so we can overwrite it
                old_checkstring = self.bad_share_checkstrings[key]
                testv = (0, len(old_checkstring), "eq", old_checkstring)

            else:
                # add a testv that requires the share not exist

                # Unfortunately, foolscap-0.2.5 has a bug in the way inbound
                # constraints are handled. If the same object is referenced
                # multiple times inside the arguments, foolscap emits a
                # 'reference' token instead of a distinct copy of the
                # argument. The bug is that these 'reference' tokens are not
                # accepted by the inbound constraint code. To work around
                # this, we need to prevent python from interning the
                # (constant) tuple, by creating a new copy of this vector
                # each time.

                # This bug is fixed in foolscap-0.2.6, and even though this
                # version of Tahoe requires foolscap-0.3.1 or newer, we are
                # supposed to be able to interoperate with older versions of
                # Tahoe which are allowed to use older versions of foolscap,
                # including foolscap-0.2.5 . In addition, I've seen other
                # foolscap problems triggered by 'reference' tokens (see #541
                # for details). So we must keep this workaround in place.

                #testv = (0, 1, 'eq', "")
                testv = tuple([0, 1, 'eq', ""])

            testvs = [testv]
            # the write vector is simply the share
            writev = [(0, self.shares[shnum])]

            if peerid not in all_tw_vectors:
                all_tw_vectors[peerid] = {}
                # maps shnum to (testvs, writevs, new_length)
            assert shnum not in all_tw_vectors[peerid]

            all_tw_vectors[peerid][shnum] = (testvs, writev, None)

        # we read the checkstring back from each share, however we only use
        # it to detect whether there was a new share that we didn't know
        # about. The success or failure of the write will tell us whether
        # there was a collision or not. If there is a collision, the first
        # thing we'll do is update the servermap, which will find out what
        # happened. We could conceivably reduce a roundtrip by using the
        # readv checkstring to populate the servermap, but really we'd have
        # to read enough data to validate the signatures too, so it wouldn't
        # be an overall win.
        read_vector = [(0, struct.calcsize(SIGNED_PREFIX))]

        # ok, send the messages!
        self.log("sending %d shares" % len(all_tw_vectors), level=log.NOISY)
        started = time.time()
        for (peerid, tw_vectors) in all_tw_vectors.items():

            write_enabler = self._node.get_write_enabler(peerid)
            renew_secret = self._node.get_renewal_secret(peerid)
            cancel_secret = self._node.get_cancel_secret(peerid)
            secrets = (write_enabler, renew_secret, cancel_secret)
            shnums = tw_vectors.keys()

            for shnum in shnums:
                self.outstanding.add((peerid, shnum))

            d = self._do_testreadwrite(peerid, secrets, tw_vectors,
                                       read_vector)
            d.addCallbacks(self._got_write_answer,
                           self._got_write_error,
                           callbackArgs=(peerid, shnums, started),
                           errbackArgs=(peerid, shnums, started))
            # tolerate immediate errback, like with DeadReferenceError
            d.addBoth(fireEventually)
            d.addCallback(self.loop)
            d.addErrback(self._fatal_error)

        self._update_status()
        self.log("%d shares sent" % len(all_tw_vectors), level=log.NOISY)
Esempio n. 21
0
    def update_goal(self):
        """Recompute self.goal: drop bad peers, then home the homeless shares.

        Shares not currently assigned to any non-bad peer are distributed
        across the surviving peers, preferring servers that currently hold
        the fewest shares, then permuted-list order. Raises
        NotEnoughServersError if no non-bad peers remain.
        """
        # if log.recording_noisy
        if True:
            self.log_goal(self.goal, "before update: ")

        # first, remove any bad peers from our goal
        self.goal = set([(peerid, shnum) for (peerid, shnum) in self.goal
                         if peerid not in self.bad_peers])

        # find the homeless shares:
        homefull_shares = set([shnum for (peerid, shnum) in self.goal])
        homeless_shares = set(range(self.total_shares)) - homefull_shares
        homeless_shares = sorted(list(homeless_shares))
        # place them somewhere. We prefer unused servers at the beginning of
        # the available peer list.

        if not homeless_shares:
            return

        # if an old share X is on a node, put the new share X there too.
        # TODO: 1: redistribute shares to achieve one-per-peer, by copying
        #       shares from existing peers to new (less-crowded) ones. The
        #       old shares must still be updated.
        # TODO: 2: move those shares instead of copying them, to reduce future
        #       update work

        # this is a bit CPU intensive but easy to analyze. We create a sort
        # order for each peerid. If the peerid is marked as bad, we don't
        # even put them in the list. Then we care about the number of shares
        # which have already been assigned to them. After that we care about
        # their permutation order.
        old_assignments = DictOfSets()
        for (peerid, shnum) in self.goal:
            old_assignments.add(peerid, shnum)

        peerlist = []
        for i, (peerid, ss) in enumerate(self.full_peerlist):
            if peerid in self.bad_peers:
                continue
            # sort key: (#shares already assigned, permuted order, ...)
            entry = (len(old_assignments.get(peerid, [])), i, peerid, ss)
            peerlist.append(entry)
        peerlist.sort()

        if not peerlist:
            raise NotEnoughServersError(
                "Ran out of non-bad servers, "
                "first_error=%s" % str(self._first_write_error),
                self._first_write_error)

        # we then index this peerlist with an integer, because we may have to
        # wrap. We update the goal as we go.
        i = 0
        for shnum in homeless_shares:
            (ignored1, ignored2, peerid, ss) = peerlist[i]
            # if we are forced to send a share to a server that already has
            # one, we may have two write requests in flight, and the
            # servermap (which was computed before either request was sent)
            # won't reflect the new shares, so the second response will be
            # surprising. There is code in _got_write_answer() to tolerate
            # this, otherwise it would cause the publish to fail with an
            # UncoordinatedWriteError. See #546 for details of the trouble
            # this used to cause.
            self.goal.add((peerid, shnum))
            self.connections[peerid] = ss
            i += 1
            if i >= len(peerlist):
                i = 0
        if True:
            self.log_goal(self.goal, "after update: ")
Esempio n. 22
0
class Retrieve:
    # this class is currently single-use. Eventually (in MDMF) we will make
    # it multi-use, in which case you can call download(range) multiple
    # times, and each will have a separate response chain. However the
    # Retrieve object will remain tied to a specific version of the file, and
    # will use a single ServerMap instance.
    implements(IPushProducer)

    def __init__(self, filenode, storage_broker, servermap, verinfo,
                 fetch_privkey=False, verify=False):
        """
        Prepare (but do not start) a retrieval of one version of a mutable
        file. Call download() to actually begin producing data.

        :param filenode: the mutable file node to read; must already have
            its pubkey and readkey (asserted below).
        :param storage_broker: used to reach storage servers.
        :param servermap: ServerMap describing which servers hold which
            shares of which versions.
        :param verinfo: the version-info tuple (seqnum, root_hash, IV,
            segsize, datalength, k, N, prefix, offsets_tuple) identifying
            the exact version to fetch.
        :param fetch_privkey: if True, also fetch the encrypted private
            key (needed for repair) unless the node already has it.
        :param verify: if True, run in verification mode: download and
            integrity-check every share instead of producing plaintext.
        """
        self._node = filenode
        _assert(self._node.get_pubkey())
        self._storage_broker = storage_broker
        self._storage_index = filenode.get_storage_index()
        _assert(self._node.get_readkey())
        self._last_failure = None
        prefix = si_b2a(self._storage_index)[:5]
        self._log_number = log.msg("Retrieve(%s): starting" % prefix)
        self._running = True
        self._decoding = False
        self._bad_shares = set()  # (server, shnum, failure) triples

        self.servermap = servermap
        self.verinfo = verinfo
        # TODO: make it possible to use self.verinfo.datalength instead
        # NOTE: this unpacking rebinds 'prefix' (previously the storage-index
        # prefix used in the log message above); harmless since the log
        # message has already been emitted.
        (seqnum, root_hash, IV, segsize, datalength, k, N, prefix,
         offsets_tuple) = self.verinfo
        self._data_length = datalength
        # during repair, we may be called upon to grab the private key, since
        # it wasn't picked up during a verify=False checker run, and we'll
        # need it for repair to generate a new version.
        self._need_privkey = verify or (fetch_privkey
                                        and not self._node.get_privkey())

        if self._need_privkey:
            # TODO: Evaluate the need for this. We'll use it if we want
            # to limit how many queries are on the wire for the privkey
            # at once.
            self._privkey_query_markers = [] # one Marker for each time we've
                                             # tried to get the privkey.

        # verify means that we are using the downloader logic to verify all
        # of our shares. This tells the downloader a few things.
        #
        # 1. We need to download all of the shares.
        # 2. We don't need to decode or decrypt the shares, since our
        #    caller doesn't care about the plaintext, only the
        #    information about which shares are or are not valid.
        # 3. When we are validating readers, we need to validate the
        #    signature on the prefix. Do we? We already do this in the
        #    servermap update?
        self._verify = verify

        self._status = RetrieveStatus()
        self._status.set_storage_index(self._storage_index)
        self._status.set_helper(False)
        self._status.set_progress(0.0)
        self._status.set_active(True)
        self._status.set_size(datalength)
        self._status.set_encoding(k, N)
        self.readers = {}  # maps shnum to MDMFSlotReadProxy
        self._stopped = False          # set by stopProducing()
        self._pause_deferred = None    # non-None while paused
        self._offset = None            # set by _start_download()
        self._read_length = None       # set by _start_download()
        self.log("got seqnum %d" % self.verinfo[0])


    def get_status(self):
        """Return the RetrieveStatus object tracking this retrieval."""
        return self._status

    def log(self, *args, **kwargs):
        """
        Emit a log message, defaulting the 'parent' to this Retrieve's log
        number and the 'facility' to tahoe.mutable.retrieve when the caller
        has not supplied them.
        """
        kwargs.setdefault("parent", self._log_number)
        kwargs.setdefault("facility", "tahoe.mutable.retrieve")
        return log.msg(*args, **kwargs)

    def _set_current_status(self, state):
        """
        Update the status line to show the current segment progress and the
        given state label.
        """
        progress = "%d/%d" % (self._current_segment, self._last_segment)
        message = "segment %s (%s)" % (progress, state)
        self._status.set_status(message)

    ###################
    # IPushProducer

    def pauseProducing(self):
        """
        I am called by my download target if we have produced too much
        data for it to handle. I make the downloader stop producing new
        data until my resumeProducing method is called.
        """
        # Already paused; pausing is idempotent.
        if self._pause_deferred is not None:
            return

        # Remember the pre-pause status line so resumeProducing() can
        # restore it.
        self._old_status = self._status.get_status()
        self._set_current_status("paused")

        # Fired (by resumeProducing) when the download is unpaused.
        self._pause_deferred = defer.Deferred()


    def resumeProducing(self):
        """
        I am called by my download target once it is ready to begin
        receiving data again.
        """
        # Not paused: nothing to resume.
        if self._pause_deferred is None:
            return

        unpause_d = self._pause_deferred
        self._pause_deferred = None
        self._status.set_status(self._old_status)

        # Fire on a later reactor turn so callbacks queued on the pause
        # deferred do not run reentrantly inside resumeProducing().
        eventually(unpause_d.callback, None)

    def stopProducing(self):
        """
        IPushProducer interface: stop this download permanently. The stop
        is detected later by _check_for_stopped(); we also resume so that
        a download paused at the time of the stop can make progress and
        notice it has been stopped.
        """
        self._stopped = True
        self.resumeProducing()


    def _check_for_paused(self, res):
        """
        I am called just before a write to the consumer. I return the data
        (or a Deferred for it) that is to be written to the consumer. If
        the download has not been paused, the value is returned
        immediately. Otherwise, a Deferred is returned that fires with the
        value once the downloader is unpaused.
        """
        if self._pause_deferred is None:
            # Not paused: hand the value straight back.
            return res
        # Paused: gate the value on the unpause event.
        gated = defer.Deferred()
        self._pause_deferred.addCallback(lambda ignored: gated.callback(res))
        return gated

    def _check_for_stopped(self, res):
        """
        Passthrough callback: raise DownloadStopped if the consumer called
        stopProducing(), otherwise forward the value unchanged.
        """
        if self._stopped:
            raise DownloadStopped("our Consumer called stopProducing()")
        return res


    def download(self, consumer=None, offset=0, size=None):
        """
        Start the download and return a Deferred that fires when it
        completes (or errbacks on failure).

        :param consumer: an IConsumer that receives plaintext via write();
            required unless we are in verify mode.
        :param offset: byte offset within the file at which to start.
        :param size: number of bytes to read; defaults to the rest of the
            file. Verify mode requires the whole file.
        """
        precondition(self._verify or IConsumer.providedBy(consumer))
        if size is None:
            size = self._data_length - offset
        if self._verify:
            _assert(size == self._data_length, (size, self._data_length))
        self.log("starting download")
        self._done_deferred = defer.Deferred()
        if consumer:
            self._consumer = consumer
            # we provide IPushProducer, so streaming=True, per IConsumer.
            self._consumer.registerProducer(self, streaming=True)
        self._started = time.time()
        self._started_fetching = time.time()
        if size == 0:
            # short-circuit the rest of the process
            self._done()
        else:
            self._start_download(consumer, offset, size)
        return self._done_deferred

    def _start_download(self, consumer, offset, size):
        """
        Record the requested byte range, compute encoding parameters, set
        up the per-share readers, and kick off the download state machine.
        Called only from download() with a non-zero size.
        """
        precondition((0 <= offset < self._data_length)
                     and (size > 0)
                     and (offset+size <= self._data_length),
                     (offset, size, self._data_length))

        self._offset = offset
        self._read_length = size
        # Order matters: _setup_download() reads attributes (e.g.
        # _total_shares, _num_segments) that _setup_encoding_parameters()
        # computes.
        self._setup_encoding_parameters()
        self._setup_download()

        # The download process beyond this is a state machine.
        # _add_active_servers will select the servers that we want to use
        # for the download, and then attempt to start downloading. After
        # each segment, it will check for doneness, reacting to broken
        # servers and corrupt shares as necessary. If it runs out of good
        # servers before downloading all of the segments, _done_deferred
        # will errback.  Otherwise, it will eventually callback with the
        # contents of the mutable file.
        self.loop()

    def loop(self):
        """
        Run one iteration of the download state machine: (re)select enough
        active servers, then fetch the current segment.
        """
        d = fireEventually(None) # avoid #237 recursion limit problem
        d.addCallback(lambda ign: self._activate_enough_servers())
        d.addCallback(lambda ign: self._download_current_segment())
        # when we're done, _download_current_segment will call _done. If we
        # aren't, it will call loop() again.
        d.addErrback(self._error)

    def _setup_download(self):
        """
        Build the per-share bookkeeping needed before fetching: which
        servers hold which shares, one read proxy per share, the per-share
        block hash trees, and the file-wide share hash tree. Requires
        _setup_encoding_parameters() to have run first (it sets
        _total_shares and _num_segments, used below).
        """
        self._status.set_status("Retrieving Shares")

        # how many shares do we need?
        (seqnum,
         root_hash,
         IV,
         segsize,
         datalength,
         k,
         N,
         prefix,
         offsets_tuple) = self.verinfo

        # first, which servers can we use?
        versionmap = self.servermap.make_versionmap()
        shares = versionmap[self.verinfo]
        # this sharemap is consumed as we decide to send requests
        self.remaining_sharemap = DictOfSets()
        for (shnum, server, timestamp) in shares:
            self.remaining_sharemap.add(shnum, server)
            # Reuse the SlotReader from the servermap.
            key = (self.verinfo, server.get_serverid(),
                   self._storage_index, shnum)
            if key in self.servermap.proxies:
                reader = self.servermap.proxies[key]
            else:
                reader = MDMFSlotReadProxy(server.get_rref(),
                                           self._storage_index, shnum, None)
            reader.server = server
            self.readers[shnum] = reader

        # fewer than k distinct shares: the file cannot be reconstructed.
        if len(self.remaining_sharemap) < k:
            self._raise_notenoughshareserror()

        self.shares = {} # maps shnum to validated blocks
        self._active_readers = [] # list of active readers for this dl.
        self._block_hash_trees = {} # shnum => hashtree

        for i in xrange(self._total_shares):
            # So we don't have to do this later.
            self._block_hash_trees[i] = hashtree.IncompleteHashTree(self._num_segments)

        # We need one share hash tree for the entire file; its leaves
        # are the roots of the block hash trees for the shares that
        # comprise it, and its root is in the verinfo.
        self.share_hash_tree = hashtree.IncompleteHashTree(N)
        self.share_hash_tree.set_hashes({0: root_hash})

    def decode(self, blocks_and_salts, segnum):
        """
        I am a helper method that the mutable file update process uses
        as a shortcut to decode and decrypt the segments that it needs
        to fetch in order to perform a file update. I take in a
        collection of blocks and salts, and pick some of those to make a
        segment with. I return the plaintext associated with that
        segment.

        Side effects: disables the block hash trees and resets
        _offset/_read_length to cover the whole file before recomputing
        encoding parameters.

        :param blocks_and_salts: a dict mapping shnum to (block, salt),
            as produced by _validate_block().
        :param segnum: the segment number to decode.
        :return: a Deferred firing with the decrypted segment plaintext.
        """
        # We don't need the block hash trees in this case.
        self._block_hash_trees = None
        self._offset = 0
        self._read_length = self._data_length
        self._setup_encoding_parameters()

        # _decode_blocks() expects the output of a gatherResults that
        # contains the outputs of _validate_block() (each of which is a dict
        # mapping shnum to (block,salt) bytestrings).
        d = self._decode_blocks([blocks_and_salts], segnum)
        d.addCallback(self._decrypt_segment)
        return d


    def _setup_encoding_parameters(self):
        """
        I set up the encoding parameters, including k, n, the number
        of segments associated with this file, and the segment decoders.
        I also compute the segment range [_start_segment, _last_segment]
        implied by _offset and _read_length.
        """
        (seqnum,
         root_hash,
         IV,
         segsize,
         datalength,
         k,
         n,
         known_prefix,
         offsets_tuple) = self.verinfo
        self._required_shares = k
        self._total_shares = n
        self._segment_size = segsize
        #self._data_length = datalength # set during __init__()

        # SDMF shares carry a per-file IV; MDMF shares use per-segment
        # salts instead, so an empty IV marks an MDMF file.
        if not IV:
            self._version = MDMF_VERSION
        else:
            self._version = SDMF_VERSION

        if datalength and segsize:
            self._num_segments = mathutil.div_ceil(datalength, segsize)
            self._tail_data_size = datalength % segsize
        else:
            self._num_segments = 0
            self._tail_data_size = 0

        self._segment_decoder = codec.CRSDecoder()
        self._segment_decoder.set_params(segsize, k, n)

        # A zero remainder means the tail segment is full-sized.
        if  not self._tail_data_size:
            self._tail_data_size = segsize

        # The tail segment is padded up to a multiple of k for FEC.
        self._tail_segment_size = mathutil.next_multiple(self._tail_data_size,
                                                         self._required_shares)
        if self._tail_segment_size == self._segment_size:
            self._tail_decoder = self._segment_decoder
        else:
            self._tail_decoder = codec.CRSDecoder()
            self._tail_decoder.set_params(self._tail_segment_size,
                                          self._required_shares,
                                          self._total_shares)

        self.log("got encoding parameters: "
                 "k: %d "
                 "n: %d "
                 "%d segments of %d bytes each (%d byte tail segment)" % \
                 (k, n, self._num_segments, self._segment_size,
                  self._tail_segment_size))

        # Our last task is to tell the downloader where to start and
        # where to stop. We use three parameters for that:
        #   - self._start_segment: the segment that we need to start
        #     downloading from.
        #   - self._current_segment: the next segment that we need to
        #     download.
        #   - self._last_segment: The last segment that we were asked to
        #     download.
        #
        #  We say that the download is complete when
        #  self._current_segment > self._last_segment. We use
        #  self._start_segment and self._last_segment to know when to
        #  strip things off of segments, and how much to strip.
        if self._offset:
            self.log("got offset: %d" % self._offset)
            # our start segment is the first segment containing the
            # offset we were given.
            start = self._offset // self._segment_size

            _assert(start <= self._num_segments,
                    start=start, num_segments=self._num_segments,
                    offset=self._offset, segment_size=self._segment_size)
            self._start_segment = start
            self.log("got start segment: %d" % self._start_segment)
        else:
            self._start_segment = 0

        # We might want to read only part of the file, and need to figure out
        # where to stop reading. Our end segment is the last segment
        # containing part of the segment that we were asked to read.
        _assert(self._read_length > 0, self._read_length)
        end_data = self._offset + self._read_length

        # We don't actually need to read the byte at end_data, but the one
        # before it.
        end = (end_data - 1) // self._segment_size
        _assert(0 <= end < self._num_segments,
                end=end, num_segments=self._num_segments,
                end_data=end_data, offset=self._offset,
                read_length=self._read_length, segment_size=self._segment_size)
        self._last_segment = end
        self.log("got end segment: %d" % self._last_segment)

        self._current_segment = self._start_segment

    def _activate_enough_servers(self):
        """
        I populate self._active_readers with enough active readers to
        retrieve the contents of this mutable file. I am called before
        downloading starts, and (eventually) after each validation
        error, connection error, or other problem in the download.

        In verify mode, every known share gets a reader; otherwise only
        k readers are kept active. Raises (via
        _raise_notenoughshareserror) when fewer than k shares remain.
        """
        # TODO: It would be cool to investigate other heuristics for
        # reader selection. For instance, the cost (in time the user
        # spends waiting for their file) of selecting a really slow server
        # that happens to have a primary share is probably more than
        # selecting a really fast server that doesn't have a primary
        # share. Maybe the servermap could be extended to provide this
        # information; it could keep track of latency information while
        # it gathers more important data, and then this routine could
        # use that to select active readers.
        #
        # (these and other questions would be easier to answer with a
        #  robust, configurable tahoe-lafs simulator, which modeled node
        #  failures, differences in node speed, and other characteristics
        #  that we expect storage servers to have.  You could have
        #  presets for really stable grids (like allmydata.com),
        #  friendnets, make it easy to configure your own settings, and
        #  then simulate the effect of big changes on these use cases
        #  instead of just reasoning about what the effect might be. Out
        #  of scope for MDMF, though.)

        # XXX: Why don't format= log messages work here?

        known_shnums = set(self.remaining_sharemap.keys())
        used_shnums = set([r.shnum for r in self._active_readers])
        unused_shnums = known_shnums - used_shnums

        if self._verify:
            new_shnums = unused_shnums # use them all
        elif len(self._active_readers) < self._required_shares:
            # need more shares
            more = self._required_shares - len(self._active_readers)
            # We favor lower numbered shares, since FEC is faster with
            # primary shares than with other shares, and lower-numbered
            # shares are more likely to be primary than higher numbered
            # shares.
            new_shnums = sorted(unused_shnums)[:more]
            if len(new_shnums) < more:
                # We don't have enough readers to retrieve the file; fail.
                self._raise_notenoughshareserror()
        else:
            new_shnums = []

        self.log("adding %d new servers to the active list" % len(new_shnums))
        for shnum in new_shnums:
            reader = self.readers[shnum]
            self._active_readers.append(reader)
            self.log("added reader for share %d" % shnum)
            # Each time we add a reader, we check to see if we need the
            # private key. If we do, we politely ask for it and then continue
            # computing. If we find that we haven't gotten it at the end of
            # segment decoding, then we'll take more drastic measures.
            if self._need_privkey and not self._node.is_readonly():
                d = reader.get_encprivkey()
                d.addCallback(self._try_to_validate_privkey, reader, reader.server)
                # XXX: don't just drop the Deferred. We need error-reporting
                # but not flow-control here.

    def _try_to_validate_prefix(self, prefix, reader):
        """
        I check that the prefix returned by a candidate server for
        retrieval matches the prefix that the servermap knows about
        (and, hence, the prefix that was validated earlier). If it
        matches, I return None, which means the candidate server may be
        used for segment retrieval. If it does not match, I raise
        UncoordinatedWriteError, since a mismatch suggests another
        writer changed the file underneath us.

        (Note: despite older documentation, this method does not return
        True/False; it returns None on success and raises on mismatch.)
        """
        (seqnum,
         root_hash,
         IV,
         segsize,
         datalength,
         k,
         N,
         known_prefix,
         offsets_tuple) = self.verinfo
        if known_prefix != prefix:
            self.log("prefix from share %d doesn't match" % reader.shnum)
            raise UncoordinatedWriteError("Mismatched prefix -- this could "
                                          "indicate an uncoordinated write")
        # Otherwise, we're okay -- no issues.

    def _mark_bad_share(self, server, shnum, reader, f):
        """
        I mark the given (server, shnum) as a bad share, which means that it
        will not be used anywhere else.

        There are several reasons to want to mark something as a bad
        share. These include:

            - A connection error to the server.
            - A mismatched prefix (that is, a prefix that does not match
              our local conception of the version information string).
            - A failing block hash, salt hash, share hash, or other
              integrity check.

        This method will ensure that readers that we wish to mark bad
        (for these reasons or other reasons) are not used for the rest
        of the download. Additionally, it will attempt to tell the
        remote server (with no guarantee of success) that its share is
        corrupt.

        :param server: the server holding the bad share.
        :param shnum: the bad share's number.
        :param reader: the reader to retire from _active_readers.
        :param f: the Failure that triggered this.
        """
        self.log("marking share %d on server %s as bad" % \
                 (shnum, server.get_name()))
        prefix = self.verinfo[-2]
        self.servermap.mark_bad_share(server, shnum, prefix)
        self._bad_shares.add((server, shnum, f))
        self._status.add_problem(server, f)
        self._last_failure = f

        # Remove the reader from _active_readers
        self._active_readers.remove(reader)
        # Drop this server from the candidate set for every share it holds.
        # BUGFIX: the loop variable must not be named 'shnum' -- the
        # original code shadowed the parameter here, so the corruption
        # notification below reported the last iterated share number
        # instead of the bad share's number.
        for remaining_shnum in list(self.remaining_sharemap.keys()):
            self.remaining_sharemap.discard(remaining_shnum, reader.server)

        if f.check(BadShareError):
            self.notify_server_corruption(server, shnum, str(f.value))

    def _download_current_segment(self):
        """
        I download, validate, decode, decrypt, and assemble the segment
        that this Retrieve is currently responsible for downloading.
        Returns None (after calling _done) when all segments are
        finished, or a Deferred that re-enters loop() otherwise.
        """

        if self._current_segment > self._last_segment:
            # No more segments to download, we're done.
            self.log("got plaintext, done")
            return self._done()
        elif self._verify and len(self._active_readers) == 0:
            # Verify mode keeps going until it runs out of shares.
            self.log("no more good shares, no need to keep verifying")
            return self._done()
        self.log("on segment %d of %d" %
                 (self._current_segment + 1, self._num_segments))
        d = self._process_segment(self._current_segment)
        d.addCallback(lambda ign: self.loop())
        return d

    def _process_segment(self, segnum):
        """
        I download, validate, decode, and decrypt one segment of the
        file that this Retrieve is retrieving. This means coordinating
        the process of getting k blocks of that file, validating them,
        assembling them into one segment with the decoder, and then
        decrypting them.

        :param segnum: which segment to process.
        :return: a Deferred firing when the segment has been handled.
        """
        self.log("processing segment %d" % segnum)

        # TODO: The old code uses a marker. Should this code do that
        # too? What did the Marker do?

        # We need to ask each of our active readers for its block and
        # salt. We will then validate those. If validation is
        # successful, we will assemble the results into plaintext.
        ds = []
        for reader in self._active_readers:
            started = time.time()
            d1 = reader.get_block_and_salt(segnum)
            d2,d3 = self._get_needed_hashes(reader, segnum)
            d = deferredutil.gatherResults([d1,d2,d3])
            d.addCallback(self._validate_block, segnum, reader, reader.server, started)
            # _handle_bad_share takes care of recoverable errors (by dropping
            # that share and returning None). Any other errors (i.e. code
            # bugs) are passed through and cause the retrieve to fail.
            d.addErrback(self._handle_bad_share, [reader])
            ds.append(d)
        dl = deferredutil.gatherResults(ds)
        if self._verify:
            # Verify mode discards the plaintext; feed an empty segment
            # through _set_segment just to advance the segment counter.
            dl.addCallback(lambda ignored: "")
            dl.addCallback(self._set_segment)
        else:
            dl.addCallback(self._maybe_decode_and_decrypt_segment, segnum)
        return dl


    def _maybe_decode_and_decrypt_segment(self, results, segnum):
        """
        I take the results of fetching and validating the blocks from
        _process_segment. If validation and fetching succeeded without
        incident, I will proceed with decoding and decryption. Otherwise, I
        will do nothing.

        :param results: list produced by the gatherResults in
            _process_segment (see comment below for element shapes).
        :param segnum: the segment number being processed.
        """
        self.log("trying to decode and decrypt segment %d" % segnum)

        # 'results' is the output of a gatherResults set up in
        # _process_segment(). Each component Deferred will either contain the
        # non-Failure output of _validate_block() for a single block (i.e.
        # {segnum:(block,salt)}), or None if _validate_block threw an
        # exception and _validation_or_decoding_failed handled it (by
        # dropping that server).

        if None in results:
            self.log("some validation operations failed; not proceeding")
            return defer.succeed(None)
        self.log("everything looks ok, building segment %d" % segnum)
        d = self._decode_blocks(results, segnum)
        d.addCallback(self._decrypt_segment)
        # check to see whether we've been paused before writing
        # anything.
        d.addCallback(self._check_for_paused)
        d.addCallback(self._check_for_stopped)
        d.addCallback(self._set_segment)
        return d


    def _set_segment(self, segment):
        """
        Given a plaintext segment, I register that segment with the
        target that is handling the file download. I trim the segment to
        the requested byte range (tail first, then head -- the order
        matters when start == last) and advance _current_segment.
        """
        self.log("got plaintext for segment %d" % self._current_segment)

        if self._read_length == 0:
            self.log("on first+last segment, size=0, using 0 bytes")
            segment = b""

        if self._current_segment == self._last_segment:
            # trim off the tail
            wanted = (self._offset + self._read_length) % self._segment_size
            if wanted != 0:
                self.log("on the last segment: using first %d bytes" % wanted)
                segment = segment[:wanted]
            else:
                self.log("on the last segment: using all %d bytes" %
                         len(segment))

        if self._current_segment == self._start_segment:
            # Trim off the head, if offset != 0. This should also work if
            # start==last, because we trim the tail first.
            skip = self._offset % self._segment_size
            self.log("on the first segment: skipping first %d bytes" % skip)
            segment = segment[skip:]

        if not self._verify:
            self._consumer.write(segment)
        else:
            # we don't care about the plaintext if we are doing a verify.
            segment = None
        self._current_segment += 1


    def _handle_bad_share(self, f, readers):
        """
        I am called when a block or a salt fails to correctly validate, or when
        the decryption or decoding operation fails for some reason.  I react to
        this failure by notifying the remote server of corruption, and then
        removing the remote server from further activity.

        :param f: the Failure; must be one of the tolerable types below,
            otherwise it is re-raised by f.trap().
        :param readers: list of readers whose shares failed.
        :return: None (the gatherResults slot for this share becomes None).
        """
        # these are the errors we can tolerate: by giving up on this share
        # and finding others to replace it. Any other errors (i.e. coding
        # bugs) are re-raised, causing the download to fail.
        f.trap(DeadReferenceError, RemoteException, BadShareError)

        # DeadReferenceError happens when we try to fetch data from a server
        # that has gone away. RemoteException happens if the server had an
        # internal error. BadShareError encompasses: (UnknownVersionError,
        # LayoutInvalid, struct.error) which happen when we get obviously
        # wrong data, and CorruptShareError which happens later, when we
        # perform integrity checks on the data.

        precondition(isinstance(readers, list), readers)
        bad_shnums = [reader.shnum for reader in readers]

        self.log("validation or decoding failed on share(s) %s, server(s) %s "
                 ", segment %d: %s" % \
                 (bad_shnums, readers, self._current_segment, str(f)))
        for reader in readers:
            self._mark_bad_share(reader.server, reader.shnum, reader, f)
        return None


    def _validate_block(self, results, segnum, reader, server, started):
        """
        I validate a block from one share on a remote server. A failing
        hash check raises CorruptShareError, which _handle_bad_share
        traps.

        NOTE(review): the body visible here ends after leaf validation
        without returning the validated block; the method may be
        truncated in this excerpt -- confirm against the full file.
        """
        # Grab the part of the block hash tree that is necessary to
        # validate this block, then generate the block hash root.
        self.log("validating share %d for segment %d" % (reader.shnum,
                                                             segnum))
        elapsed = time.time() - started
        self._status.add_fetch_timing(server, elapsed)
        self._set_current_status("validating blocks")

        block_and_salt, blockhashes, sharehashes = results
        block, salt = block_and_salt
        _assert(type(block) is str, (block, salt))

        blockhashes = dict(enumerate(blockhashes))
        self.log("the reader gave me the following blockhashes: %s" % \
                 blockhashes.keys())
        self.log("the reader gave me the following sharehashes: %s" % \
                 sharehashes.keys())
        bht = self._block_hash_trees[reader.shnum]

        if bht.needed_hashes(segnum, include_leaf=True):
            try:
                bht.set_hashes(blockhashes)
            except (hashtree.BadHashError, hashtree.NotEnoughHashesError, \
                    IndexError), e:
                raise CorruptShareError(server,
                                        reader.shnum,
                                        "block hash tree failure: %s" % e)

        # MDMF hashes the salt together with the block; SDMF hashes the
        # block alone.
        if self._version == MDMF_VERSION:
            blockhash = hashutil.block_hash(salt + block)
        else:
            blockhash = hashutil.block_hash(block)
        # If this works without an error, then validation is
        # successful.
        try:
           bht.set_hashes(leaves={segnum: blockhash})
        except (hashtree.BadHashError, hashtree.NotEnoughHashesError, \
                IndexError), e:
            raise CorruptShareError(server,
                                    reader.shnum,
                                    "block hash tree failure: %s" % e)
Esempio n. 23
0
 def __init__(self):
     # Start with an empty DictOfSets cache (maps each key to a set of
     # values). The enclosing class is not visible in this excerpt.
     self.cache = DictOfSets()
Esempio n. 24
0
class Retrieve:
    # this class is currently single-use. Eventually (in MDMF) we will make
    # it multi-use, in which case you can call download(range) multiple
    # times, and each will have a separate response chain. However the
    # Retrieve object will remain tied to a specific version of the file, and
    # will use a single ServerMap instance.

    def __init__(self, filenode, servermap, verinfo, fetch_privkey=False):
        """Prepare to retrieve one specific version (verinfo) of a mutable
        file, using shares located by the given ServerMap. Set
        fetch_privkey=True (used by repair) to also grab the encrypted
        private key."""
        self._node = filenode  # the mutable filenode being read
        assert self._node.get_pubkey()
        self._storage_index = filenode.get_storage_index()
        assert self._node.get_readkey()
        self._last_failure = None
        si_abbrev = si_b2a(self._storage_index)[:5]
        self._log_number = log.msg("Retrieve(%s): starting" % si_abbrev)
        # state-machine bookkeeping
        self._outstanding_queries = {}  # (peerid,shnum) -> start_time
        self._running = True
        self._decoding = False
        self._bad_shares = set()  # (peerid, shnum) pairs that proved corrupt

        self.servermap = servermap
        assert self._node.get_pubkey()
        self.verinfo = verinfo
        # During repair we may be asked to grab the private key, since a
        # verify=False checker run won't have picked it up, and repair needs
        # it to generate a new version. If the node already holds the key,
        # there is nothing extra to fetch.
        self._need_privkey = False if self._node.get_privkey() else fetch_privkey

        self._status = RetrieveStatus()
        self._status.set_storage_index(self._storage_index)
        self._status.set_helper(False)
        self._status.set_progress(0.0)
        self._status.set_active(True)
        # verinfo = (seqnum, root_hash, IV, segsize, datalength, k, N,
        #            prefix, offsets_tuple)
        datalength, k, N = self.verinfo[4], self.verinfo[5], self.verinfo[6]
        self._status.set_size(datalength)
        self._status.set_encoding(k, N)

    def get_status(self):
        """Return the RetrieveStatus object that tracks this download's
        progress, timings, and problems."""
        return self._status

    def log(self, *args, **kwargs):
        """Emit a log message, defaulting the parent to this Retrieve's
        operation and the facility to mutable-retrieve."""
        kwargs.setdefault("parent", self._log_number)
        kwargs.setdefault("facility", "tahoe.mutable.retrieve")
        return log.msg(*args, **kwargs)

    def download(self):
        """Start the retrieval. Returns a Deferred that fires when the
        download completes; _check_for_done drives it to completion."""
        self._done_deferred = defer.Deferred()
        self._started = time.time()
        self._status.set_status("Retrieving Shares")

        # Which servers hold shares of the version we want?
        versionmap = self.servermap.make_versionmap()
        # remaining_sharemap (shnum -> set of peerids) is consumed as we
        # decide which requests to send.
        self.remaining_sharemap = DictOfSets()
        for (shnum, peerid, timestamp) in versionmap[self.verinfo]:
            self.remaining_sharemap.add(shnum, peerid)

        self.shares = {}  # shnum -> validated blocks

        # k of N shares are required to reconstruct the file.
        k = self.verinfo[5]
        assert len(self.remaining_sharemap) >= k
        # Prefer the lowest-numbered shares available: FEC decoding is
        # faster when it is fed "primary" shares.
        self.active_shnums = set(sorted(self.remaining_sharemap.keys())[:k])
        for shnum in self.active_shnums:
            # Pick an arbitrary peer holding this share. When shares are
            # doubled up (more than one share per peer) we could spread the
            # load across peers, but that algorithm is more complicated than
            # is worth writing now, and a well-provisioned grid shouldn't
            # have multiple shares per peer anyway.
            peerid = list(self.remaining_sharemap[shnum])[0]
            self.get_data(shnum, peerid)

        # Control flow beyond this point is a state machine: query
        # responses are the input; we may send more queries or produce a
        # result.

        return self._done_deferred

    def get_data(self, shnum, peerid):
        """Request share `shnum` from `peerid` (serving from the node's
        cache when possible), and wire the response into the download state
        machine. Returns the request Deferred, purely for tests."""
        self.log(format="sending sh#%(shnum)d request to [%(peerid)s]",
                 shnum=shnum,
                 peerid=idlib.shortnodeid_b2a(peerid),
                 level=log.NOISY)
        ss = self.servermap.connections[peerid]
        started = time.time()
        (seqnum, root_hash, IV, segsize, datalength, k, N, prefix,
         offsets_tuple) = self.verinfo
        offsets = dict(offsets_tuple)

        # we read the checkstring, to make sure that the data we grab is from
        # the right version.
        readv = [(0, struct.calcsize(SIGNED_PREFIX))]

        # We also read the data, and the hashes necessary to validate them
        # (share_hash_chain, block_hash_tree, share_data). We don't read the
        # signature or the pubkey, since that was handled during the
        # servermap phase, and we'll be comparing the share hash chain
        # against the roothash that was validated back then.

        readv.append((offsets['share_hash_chain'],
                      offsets['enc_privkey'] - offsets['share_hash_chain']))

        # if we need the private key (for repair), we also fetch that
        if self._need_privkey:
            readv.append((offsets['enc_privkey'],
                          offsets['EOF'] - offsets['enc_privkey']))

        # a unique token identifying this query in _outstanding_queries
        m = Marker()
        self._outstanding_queries[m] = (peerid, shnum, started)

        # ask the cache first; only go to the wire if every readv span is
        # already cached
        got_from_cache = False
        datavs = []
        for (offset, length) in readv:
            data = self._node._read_from_cache(self.verinfo, shnum, offset,
                                               length)
            if data is not None:
                datavs.append(data)
        if len(datavs) == len(readv):
            self.log("got data from cache")
            got_from_cache = True
            d = fireEventually({shnum: datavs})
            # datavs is a dict mapping shnum to a pair of strings
        else:
            d = self._do_read(ss, peerid, self._storage_index, [shnum], readv)
        # the share is spoken for now, whichever way we fetch it
        self.remaining_sharemap.discard(shnum, peerid)

        d.addCallback(self._got_results, m, peerid, started, got_from_cache)
        d.addErrback(self._query_failed, m, peerid)

        # errors that aren't handled by _query_failed (and errors caused by
        # _query_failed) get logged, but we still want to check for doneness.
        def _oops(f):
            self.log(format=
                     "problem in _query_failed for sh#%(shnum)d to %(peerid)s",
                     shnum=shnum,
                     peerid=idlib.shortnodeid_b2a(peerid),
                     failure=f,
                     level=log.WEIRD,
                     umid="W0xnQA")

        d.addErrback(_oops)
        d.addBoth(self._check_for_done)
        # any error during _check_for_done means the download fails. If the
        # download is successful, _check_for_done will fire _done by itself.
        d.addErrback(self._done)
        d.addErrback(log.err)
        return d  # purely for testing convenience

    def _do_read(self, ss, peerid, storage_index, shnums, readv):
        # The callRemote lives in its own method so that unit tests can
        # subclass and override the wire call.
        return ss.callRemote("slot_readv", storage_index, shnums, readv)

    def remove_peer(self, peerid):
        """Forget every share this peer was offering us."""
        # Snapshot the keys first: discard() can delete map entries as
        # their peer-sets empty out.
        shnums = list(self.remaining_sharemap.keys())
        for sh in shnums:
            self.remaining_sharemap.discard(sh, peerid)

    def _got_results(self, datavs, marker, peerid, started, got_from_cache):
        """Process a query response: record timing, validate each returned
        share (marking corrupt ones bad), and optionally validate the
        private key if we asked for it."""
        now = time.time()
        elapsed = now - started
        # cache hits would skew the fetch-timing stats, so skip them
        if not got_from_cache:
            self._status.add_fetch_timing(peerid, elapsed)
        self.log(format="got results (%(shares)d shares) from [%(peerid)s]",
                 shares=len(datavs),
                 peerid=idlib.shortnodeid_b2a(peerid),
                 level=log.NOISY)
        # this query is no longer outstanding (pop with default: the marker
        # may already have been removed, e.g. on timeout)
        self._outstanding_queries.pop(marker, None)
        if not self._running:
            return

        # note that we only ask for a single share per query, so we only
        # expect a single share back. On the other hand, we use the extra
        # shares if we get them.. seems better than an assert().

        for shnum, datav in datavs.items():
            (prefix, hash_and_data) = datav[:2]
            try:
                self._got_results_one_share(shnum, peerid, prefix,
                                            hash_and_data)
            except CorruptShareError, e:
                # log it and give the other shares a chance to be processed
                f = failure.Failure()
                self.log(format="bad share: %(f_value)s",
                         f_value=str(f.value),
                         failure=f,
                         level=log.WEIRD,
                         umid="7fzWZw")
                self.notify_server_corruption(peerid, shnum, str(e))
                self.remove_peer(peerid)
                self.servermap.mark_bad_share(peerid, shnum, prefix)
                self._bad_shares.add((peerid, shnum))
                self._status.problems[peerid] = f
                self._last_failure = f
                pass
            # the privkey, when requested, rides along as a third element
            if self._need_privkey and len(datav) > 2:
                lp = None
                self._try_to_validate_privkey(datav[2], peerid, shnum, lp)
Esempio n. 25
0
class ResponseCache:
    """I cache share data, to reduce the number of round trips used during
    mutable file operations. All of the data in my cache is for a single
    storage index, but I will keep information on multiple shares (and
    multiple versions) for that storage index.

    My cache is indexed by a (verinfo, shnum) tuple.

    My cache entries contain a set of non-overlapping byteranges: (start,
    data, timestamp) tuples.
    """

    def __init__(self):
        # (verinfo, shnum) -> set of (start, data, timestamp) tuples
        self.cache = DictOfSets()

    def _clear(self):
        # test hook: discard every cached byterange
        self.cache = DictOfSets()

    def _does_overlap(self, x_start, x_length, y_start, y_length):
        """Return True if ranges x and y share at least one byte.
        This just returns a boolean; eventually we'll want a form that
        returns the overlapping range."""
        # normalize so that x is the range that starts later
        if x_start < y_start:
            x_start, x_length, y_start, y_length = (y_start, y_length,
                                                    x_start, x_length)
        # an empty range overlaps nothing
        if not x_length or not y_length:
            return False
        return x_start < y_start + y_length and y_start < x_start + x_length

    def _inside(self, x_start, x_length, y_start, y_length):
        """Return True if range x lies entirely within range y."""
        x_end = x_start + x_length
        y_end = y_start + y_length
        return (y_start <= x_start < y_end) and (y_start <= x_end <= y_end)

    def add(self, verinfo, shnum, offset, data, timestamp):
        """Record one byterange of share data for later reads."""
        self.cache.add((verinfo, shnum), (offset, data, timestamp))

    def read(self, verinfo, shnum, offset, length):
        """Try to satisfy a read request from cache.
        Returns (data, timestamp), or (None, None) if the cache did not hold
        the requested data.
        """

        # TODO: join multiple fragments, instead of only returning a hit if
        # we have a single fragment that contains the whole request

        for (e_start, e_data, e_timestamp) in self.cache.get((verinfo, shnum),
                                                             set()):
            if self._inside(offset, length, e_start, len(e_data)):
                lo = offset - e_start
                return (e_data[lo:lo + length], e_timestamp)
        return None, None