Example #1
File: common.py Project: rezamt/tahoe-lafs
 def get_size(self):
     if isinstance(self.my_uri, uri.LiteralFileURI):
         return self.my_uri.get_size()
     try:
         data = self.all_contents[self.my_uri.to_string()]
     except KeyError as le:
         raise NotEnoughSharesError(le, 0, 3)
     return len(data)  # size of the stored contents
Example #2
 def _read(self, ignored, consumer, offset, size):
     if isinstance(self.my_uri, uri.LiteralFileURI):
         data = self.my_uri.data
     else:
         if self.my_uri.to_string() not in self.all_contents:
             raise NotEnoughSharesError(None, 0, 3)
         data = self.all_contents[self.my_uri.to_string()]
     start = offset
     if size is not None:
         end = offset + size
     else:
         end = len(data)
     consumer.write(data[start:end])
     return consumer
Example #3
    def _raise_notenoughshareserror(self):
        """
        I am called when there are not enough active servers left to complete
        the download. After making some useful logging statements, I throw an
        exception to that effect to the caller of this Retrieve object through
        self._done_deferred.
        """

        format = ("ran out of servers: "
                  "have %(have)d of %(total)d segments; "
                  "found %(bad)d bad shares; "
                  "have %(remaining)d remaining shares of the right version; "
                  "encoding %(k)d-of-%(n)d")
        args = {"have": self._current_segment,
                "total": self._num_segments,
                "need": self._last_segment,
                "k": self._required_shares,
                "n": self._total_shares,
                "bad": len(self._bad_shares),
                "remaining": len(self.remaining_sharemap),
               }
        raise NotEnoughSharesError("%s, last failure: %s" %
                                   (format % args, str(self._last_failure)))
Example #4
 def _download_best_version(self, ignored=None, progress=None):
     if isinstance(self.my_uri, uri.LiteralFileURI):
         return self.my_uri.data
     if self.storage_index not in self.all_contents:
         raise NotEnoughSharesError(None, 0, 3)
     return self.all_contents[self.storage_index]
Example #5
File: retrieve.py Project: cpelsser/tamias
    def _maybe_send_more_queries(self, k):
        # we don't have enough shares yet. Should we send out more queries?
        # There are some number of queries outstanding, each for a single
        # share. If we can generate 'needed_shares' additional queries, we do
        # so. If we can't, then we know this file is a goner, and we raise
        # NotEnoughSharesError.
        self.log(format=("_maybe_send_more_queries, have=%(have)d, k=%(k)d, "
                         "outstanding=%(outstanding)d"),
                 have=len(self.shares),
                 k=k,
                 outstanding=len(self._outstanding_queries),
                 level=log.NOISY)

        remaining_shares = k - len(self.shares)
        needed = remaining_shares - len(self._outstanding_queries)
        if not needed:
            # we have enough queries in flight already

            # TODO: but if they've been in flight for a long time, and we
            # have reason to believe that new queries might respond faster
            # (i.e. we've seen other queries come back faster), then consider
            # sending out new queries. This could help with peers which have
            # silently gone away since the servermap was updated, for which
            # we're still waiting for the 15-minute TCP disconnect to happen.
            self.log("enough queries are in flight, no more are needed",
                     level=log.NOISY)
            return

        outstanding_shnums = set([
            shnum
            for (peerid, shnum, started) in self._outstanding_queries.values()
        ])
        # prefer low-numbered shares, they are more likely to be primary
        available_shnums = sorted(self.remaining_sharemap.keys())
        for shnum in available_shnums:
            if shnum in outstanding_shnums:
                # skip ones that are already in transit
                continue
            if shnum not in self.remaining_sharemap:
                # no servers for that shnum. note that DictOfSets removes
                # empty sets from the dict for us.
                continue
            peerid = list(self.remaining_sharemap[shnum])[0]
            # get_data will remove that peerid from the sharemap, and add the
            # query to self._outstanding_queries
            self._status.set_status("Retrieving More Shares")
            self.get_data(shnum, peerid)
            needed -= 1
            if not needed:
                break

        # at this point, we have as many outstanding queries as we can. If
        # needed!=0 then we might not have enough to recover the file.
        if needed:
            format = ("ran out of peers: "
                      "have %(have)d shares (k=%(k)d), "
                      "%(outstanding)d queries in flight, "
                      "need %(need)d more, "
                      "found %(bad)d bad shares")
            args = {
                "have": len(self.shares),
                "k": k,
                "outstanding": len(self._outstanding_queries),
                "need": needed,
                "bad": len(self._bad_shares),
            }
            self.log(format=format, level=log.WEIRD, umid="ezTfjw", **args)
            err = NotEnoughSharesError("%s, last failure: %s" %
                                       (format % args, self._last_failure))
            if self._bad_shares:
                self.log(
                    "We found some bad shares this pass. You should "
                    "update the servermap and try again to check "
                    "more peers",
                    level=log.WEIRD,
                    umid="EFkOlA")
                err.servermap = self.servermap
            raise err

        return
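For reference, here is a minimal, self-contained sketch (not taken from any of the projects above) of the raise-and-catch pattern these examples illustrate. The NotEnoughSharesError class below is a local stand-in; in Tahoe-LAFS the real exception lives in allmydata.interfaces. The fetch_contents helper and its arguments are hypothetical.

# Local stand-in for allmydata.interfaces.NotEnoughSharesError; a bare
# Exception subclass is enough to show the control flow.
class NotEnoughSharesError(Exception):
    pass

def fetch_contents(all_contents, key):
    # Mirrors the lookup in Examples #1 and #2: a missing key stands for
    # "not enough shares were found to reconstruct this file".
    try:
        return all_contents[key]
    except KeyError as le:
        # (why, got, needed) mirrors the 3-argument form used in Example #1
        raise NotEnoughSharesError(le, 0, 3)

if __name__ == "__main__":
    contents = {"URI:CHK:example": b"hello world"}
    try:
        fetch_contents(contents, "URI:CHK:missing")
    except NotEnoughSharesError as e:
        print("download failed, not enough shares:", e)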