Beispiel #1
0
    def _add_lease_failed(self, f, peerid, storage_index):
        # Classify an add-lease failure. Several historical Tahoe versions
        # respond badly to the add-lease message: <=1.1.0 raises NameError
        # (remote_add_lease not implemented), 1.2.0/1.3.0 raise IndexError
        # for unknown buckets (which is most of them, since add-lease goes
        # to every server before we know who holds shares), and 1.2.0
        # raises KeyError even for known buckets due to an internal bug in
        # the latency-measuring code.
        #
        # Known-harmless remote errors are ignored; everything else gets
        # logged, especially local errors caused by coding problems.
        if f.check(DeadReferenceError):
            return
        if not f.check(RemoteException):
            # local errors are cause for alarm
            log.err(f,
                    format="local error in add_lease to [%(peerid)s]: %(f_value)s",
                    peerid=idlib.shortnodeid_b2a(peerid),
                    f_value=str(f.value),
                    level=log.WEIRD, umid="ZWh6HA")
            return
        if f.value.failure.check(KeyError, IndexError, NameError):
            # this may ignore a bit too much, but that only hurts us
            # during debugging
            return
        self.log(format="error in add_lease from [%(peerid)s]: %(f_value)s",
                 peerid=idlib.shortnodeid_b2a(peerid),
                 f_value=str(f.value),
                 failure=f,
                 level=log.WEIRD, umid="iqg3mw")
Beispiel #2
0
    def _got_results_one_share(self, shnum, data, peerid, lp):
        """Parse and validate one share received during a servermap update.

        Extracts the prefix and signature, checks (and possibly learns) the
        node's pubkey, verifies the version signature the first time each
        verinfo is seen, then records the share in the servermap and
        versionmap.  Returns the verinfo tuple.  Raises CorruptShareError
        on a bad pubkey fingerprint or bad signature; may raise
        NeedMoreDataError from unpack_prefix_and_signature.
        """
        self.log(format="_got_results: got shnum #%(shnum)d from peerid %(peerid)s",
                 shnum=shnum,
                 peerid=idlib.shortnodeid_b2a(peerid),
                 level=log.NOISY,
                 parent=lp)

        # this might raise NeedMoreDataError, if the pubkey and signature
        # live at some weird offset. That shouldn't happen, so I'm going to
        # treat it as a bad share.
        (seqnum, root_hash, IV, k, N, segsize, datalength,
         pubkey_s, signature, prefix) = unpack_prefix_and_signature(data)

        if not self._node.get_pubkey():
            # we don't know the pubkey yet: learn it from this share, but
            # only after checking it against the node's known fingerprint
            fingerprint = hashutil.ssk_pubkey_fingerprint_hash(pubkey_s)
            assert len(fingerprint) == 32
            if fingerprint != self._node.get_fingerprint():
                raise CorruptShareError(peerid, shnum,
                                        "pubkey doesn't match fingerprint")
            self._node._populate_pubkey(self._deserialize_pubkey(pubkey_s))

        if self._need_privkey:
            # opportunistically try to pull the encrypted privkey too
            self._try_to_extract_privkey(data, peerid, shnum, lp)

        (ig_version, ig_seqnum, ig_root_hash, ig_IV, ig_k, ig_N,
         ig_segsize, ig_datalen, offsets) = unpack_header(data)
        # tuple-of-pairs form so verinfo stays hashable (used as a set
        # member in self._valid_versions below)
        offsets_tuple = tuple( [(key,value) for key,value in offsets.items()] )

        verinfo = (seqnum, root_hash, IV, segsize, datalength, k, N, prefix,
                   offsets_tuple)

        if verinfo not in self._valid_versions:
            # it's a new pair. Verify the signature.
            valid = self._node.get_pubkey().verify(prefix, signature)
            if not valid:
                raise CorruptShareError(peerid, shnum, "signature is invalid")

            # ok, it's a valid verinfo. Add it to the list of validated
            # versions.
            self.log(" found valid version %d-%s from %s-sh%d: %d-%d/%d/%d"
                     % (seqnum, base32.b2a(root_hash)[:4],
                        idlib.shortnodeid_b2a(peerid), shnum,
                        k, N, segsize, datalength),
                     parent=lp)
            self._valid_versions.add(verinfo)
        # We now know that this is a valid candidate verinfo.

        if (peerid, shnum) in self._servermap.bad_shares:
            # we've been told that the rest of the data in this share is
            # unusable, so don't add it to the servermap.
            self.log("but we've been told this is a bad share",
                     parent=lp, level=log.UNUSUAL)
            return verinfo

        # Add the info to our servermap.
        timestamp = time.time()
        self._servermap.add_new_share(peerid, shnum, verinfo, timestamp)
        # and the versionmap
        self.versionmap.add(verinfo, (shnum, peerid, timestamp))
        return verinfo
Beispiel #3
0
    def make_client(self, i, write_config=True):
        """Create NoNetworkClient #i under basedir/clients/<shortid>.

        When write_config is True a minimal tahoe.cfg is written; otherwise
        an existing tahoe.cfg must already be present.  A matching entry in
        self.client_config_hooks may edit the config or return an entirely
        different client instance.
        """
        clientid = hashutil.tagged_hash("clientid", str(i))[:20]
        clientdir = os.path.join(self.basedir, "clients",
                                 idlib.shortnodeid_b2a(clientid))
        fileutil.make_dirs(clientdir)

        tahoe_cfg_path = os.path.join(clientdir, "tahoe.cfg")
        if write_config:
            # context manager guarantees the file is closed even if a
            # write fails partway through
            with open(tahoe_cfg_path, "w") as f:
                f.write("[node]\n")
                f.write("nickname = client-%d\n" % i)
                f.write("web.port = tcp:0:interface=127.0.0.1\n")
                f.write("[storage]\n")
                f.write("enabled = false\n")
        else:
            _assert(os.path.exists(tahoe_cfg_path),
                    tahoe_cfg_path=tahoe_cfg_path)

        c = None
        if i in self.client_config_hooks:
            # this hook can either modify tahoe.cfg, or return an
            # entirely new Client instance
            c = self.client_config_hooks[i](clientdir)

        if not c:
            c = NoNetworkClient(clientdir)
            c.set_default_mutable_keysize(TEST_RSA_KEY_SIZE)

        c.nodeid = clientid
        c.short_nodeid = b32encode(clientid).lower()[:8]
        c._servers = self.all_servers  # can be updated later
        c.setServiceParent(self)
        return c
Beispiel #4
0
    def _got_results_one_share(self, shnum, peerid,
                               got_prefix, got_hash_and_data):
        """Validate one retrieved SDMF share against self.verinfo.

        Raises UncoordinatedWriteError if the share's prefix no longer
        matches the servermap's, or CorruptShareError if the block hash
        tree or the share hash chain fails verification.
        """
        self.log("_got_results: got shnum #%d from peerid %s"
                 % (shnum, idlib.shortnodeid_b2a(peerid)))
        (seqnum, root_hash, IV, segsize, datalength, k, N, prefix,
         offsets_tuple) = self.verinfo
        assert len(got_prefix) == len(prefix), (len(got_prefix), len(prefix))
        if got_prefix != prefix:
            msg = "someone wrote to the data since we read the servermap: prefix changed"
            raise UncoordinatedWriteError(msg)
        (share_hash_chain, block_hash_tree,
         share_data) = unpack_share_data(self.verinfo, got_hash_and_data)

        assert isinstance(share_data, str)
        # build the block hash tree. SDMF has only one leaf.
        leaves = [hashutil.block_hash(share_data)]
        t = hashtree.HashTree(leaves)
        if list(t) != block_hash_tree:
            raise CorruptShareError(peerid, shnum, "block hash tree failure")
        share_hash_leaf = t[0]
        t2 = hashtree.IncompleteHashTree(N)
        # root_hash was checked by the signature
        t2.set_hashes({0: root_hash})
        try:
            t2.set_hashes(hashes=share_hash_chain,
                          leaves={shnum: share_hash_leaf})
        except (hashtree.BadHashError, hashtree.NotEnoughHashesError,
                IndexError), e:
            msg = "corrupt hashes: %s" % (e,)
            raise CorruptShareError(peerid, shnum, msg)
Beispiel #5
0
    def _timing_chart(self):
        """Build a Google Chart <img> visualizing per-server query timing.

        Each queried server contributes three stacked horizontal bars:
        the offset of its first request from the start of the update, the
        elapsed time of that request, and the elapsed time of the second
        (privkey-fetch) request when one was sent.
        """
        started = self.update_status.get_started()
        total = self.update_status.timings.get("total")
        per_server = self.update_status.timings.get("per_server")
        base = "http://chart.apis.google.com/chart?"
        pieces = ["cht=bhs"]
        pieces.append("chco=ffffff,4d89f9,c6d9fd")  # colors
        data0 = []  # start offset of first query, relative to `started`
        data1 = []  # elapsed time of first query
        data2 = []  # elapsed time of second (privkey) query, or 0.0
        nb_nodes = 0
        graph_bottom_margin = 21
        graph_top_margin = 5
        peerids_s = []
        top_abs = started
        # we sort the queries by the time at which we sent the first request
        sorttable = [(times[0][1], peerid) for peerid, times in per_server.items()]
        sorttable.sort()
        peerids = [t[1] for t in sorttable]

        for peerid in peerids:
            nb_nodes += 1
            times = per_server[peerid]
            peerid_s = idlib.shortnodeid_b2a(peerid)
            peerids_s.append(peerid_s)
            # for servermap updates, there are either one or two queries per
            # peer. The second (if present) is to get the privkey.
            op, q_started, q_elapsed = times[0]
            data0.append("%.3f" % (q_started - started))
            data1.append("%.3f" % q_elapsed)
            top_abs = max(top_abs, q_started + q_elapsed)
            if len(times) > 1:
                # BUGFIX: chart the second (privkey) query's timing; the
                # original re-read times[0] here, duplicating the first bar
                op, p_started, p_elapsed = times[1]
                data2.append("%.3f" % p_elapsed)
                top_abs = max(top_abs, p_started + p_elapsed)
            else:
                data2.append("0.0")
        finished = self.update_status.get_finished()
        if finished:
            top_abs = max(top_abs, finished)
        top_rel = top_abs - started
        chs = "chs=400x%d" % ((nb_nodes * 28) + graph_top_margin + graph_bottom_margin)
        chd = "chd=t:" + "|".join([",".join(data0), ",".join(data1), ",".join(data2)])
        pieces.append(chd)
        pieces.append(chs)
        chds = "chds=0,%0.3f" % top_rel
        pieces.append(chds)
        pieces.append("chxt=x,y")
        pieces.append("chxr=0,0.0,%0.3f" % top_rel)
        pieces.append("chxl=1:|" + "|".join(reversed(peerids_s)))
        # use up to 10 grid lines, at decimal multiples.
        # mathutil.next_power_of_k doesn't handle numbers smaller than one,
        # unfortunately.
        # pieces.append("chg="

        if total is not None:
            finished_f = 1.0 * total / top_rel
            pieces.append("chm=r,FF0000,0,%0.3f,%0.3f" % (finished_f, finished_f + 0.01))
        url = base + "&".join(pieces)
        return T.img(src=url, border="1", align="right", float="right")
Beispiel #6
0
 def render_privkey_from(self, ctx, data):
     """Render which peer (if any) supplied the private key."""
     peerid = data.get_privkey_from()
     if not peerid:
         return ""
     return ctx.tag["Got privkey from: [%s]"
                    % idlib.shortnodeid_b2a(peerid)]
Beispiel #7
0
    def make_client(self, i, write_config=True):
        """Create client #i (inlineCallbacks-style generator).

        Writes a minimal tahoe.cfg with a freshly-assigned web.port when
        write_config is True; otherwise an existing tahoe.cfg must already
        be present.  A matching client_config_hooks entry may edit the
        config or replace the client entirely.  Delivers the client via
        defer.returnValue.
        """
        clientid = hashutil.tagged_hash("clientid", str(i))[:20]
        clientdir = os.path.join(self.basedir, "clients",
                                 idlib.shortnodeid_b2a(clientid))
        fileutil.make_dirs(clientdir)

        tahoe_cfg_path = os.path.join(clientdir, "tahoe.cfg")
        if write_config:
            from twisted.internet import reactor
            _, port_endpoint = self.port_assigner.assign(reactor)
            # context manager guarantees the file is closed even if a
            # write fails partway through
            with open(tahoe_cfg_path, "w") as f:
                f.write("[node]\n")
                f.write("nickname = client-%d\n" % i)
                f.write("web.port = {}\n".format(port_endpoint))
                f.write("[storage]\n")
                f.write("enabled = false\n")
        else:
            _assert(os.path.exists(tahoe_cfg_path), tahoe_cfg_path=tahoe_cfg_path)

        c = None
        if i in self.client_config_hooks:
            # this hook can either modify tahoe.cfg, or return an
            # entirely new Client instance
            c = self.client_config_hooks[i](clientdir)

        if not c:
            c = yield create_no_network_client(clientdir)
            c.set_default_mutable_keysize(TEST_RSA_KEY_SIZE)

        c.nodeid = clientid
        c.short_nodeid = b32encode(clientid).lower()[:8]
        c._servers = self.all_servers # can be updated later
        c.setServiceParent(self)
        defer.returnValue(c)
Beispiel #8
0
 def render_privkey_from(self, ctx, data):
     """Show the peer that supplied the privkey, or nothing."""
     peerid = data.get_privkey_from()
     if peerid:
         peerid_s = idlib.shortnodeid_b2a(peerid)
         return ctx.tag["Got privkey from: [%s]" % peerid_s]
     return ""
Beispiel #9
0
    def _got_results_one_share(self, shnum, peerid, got_prefix,
                               got_hash_and_data):
        """Check one fetched SDMF share against the expected verinfo.

        Raises UncoordinatedWriteError when the prefix has changed since
        the servermap was read, or CorruptShareError when hash
        verification fails.
        """
        self.log("_got_results: got shnum #%d from peerid %s" %
                 (shnum, idlib.shortnodeid_b2a(peerid)))
        (seqnum, root_hash, IV, segsize, datalength, k, N, prefix,
         offsets_tuple) = self.verinfo
        assert len(got_prefix) == len(prefix), (len(got_prefix), len(prefix))
        if got_prefix != prefix:
            msg = "someone wrote to the data since we read the servermap: prefix changed"
            raise UncoordinatedWriteError(msg)
        (share_hash_chain, block_hash_tree,
         share_data) = unpack_share_data(self.verinfo, got_hash_and_data)

        assert isinstance(share_data, str)
        # build the block hash tree. SDMF has only one leaf.
        leaves = [hashutil.block_hash(share_data)]
        t = hashtree.HashTree(leaves)
        if list(t) != block_hash_tree:
            raise CorruptShareError(peerid, shnum, "block hash tree failure")
        share_hash_leaf = t[0]
        t2 = hashtree.IncompleteHashTree(N)
        # root_hash was checked by the signature
        t2.set_hashes({0: root_hash})
        try:
            t2.set_hashes(hashes=share_hash_chain,
                          leaves={shnum: share_hash_leaf})
        except (hashtree.BadHashError, hashtree.NotEnoughHashesError,
                IndexError), e:
            msg = "corrupt hashes: %s" % (e, )
            raise CorruptShareError(peerid, shnum, msg)
Beispiel #10
0
    def make_client(self, i, write_config=True):
        """Create no-network client #i (inlineCallbacks-style generator).

        Optionally writes a minimal tahoe.cfg; a client_config_hooks entry
        for i may edit the config or supply a replacement client.  The
        result is delivered via defer.returnValue.
        """
        clientid = hashutil.tagged_hash(b"clientid", b"%d" % i)[:20]
        clientdir = os.path.join(self.basedir, "clients",
                                 idlib.shortnodeid_b2a(clientid))
        fileutil.make_dirs(clientdir)

        tahoe_cfg_path = os.path.join(clientdir, "tahoe.cfg")
        if write_config:
            from twisted.internet import reactor
            _, port_endpoint = self.port_assigner.assign(reactor)
            config_lines = [
                "[node]\n",
                "nickname = client-%d\n" % i,
                "web.port = {}\n".format(port_endpoint),
                "[storage]\n",
                "enabled = false\n",
            ]
            with open(tahoe_cfg_path, "w") as f:
                f.writelines(config_lines)
        else:
            _assert(os.path.exists(tahoe_cfg_path),
                    tahoe_cfg_path=tahoe_cfg_path)

        client = None
        if i in self.client_config_hooks:
            # the hook may edit tahoe.cfg in place, or hand back an
            # entirely new Client instance
            client = self.client_config_hooks[i](clientdir)

        if not client:
            client = yield create_no_network_client(clientdir)
            client.set_default_mutable_keysize(TEST_RSA_KEY_SIZE)

        client.nodeid = clientid
        client.short_nodeid = b32encode(clientid).lower()[:8]
        client._servers = self.all_servers  # can be updated later
        client.setServiceParent(self)
        defer.returnValue(client)
Beispiel #11
0
    def make_client(self, i, write_config=True):
        """Create NoNetworkClient #i under basedir/clients/<shortid>.

        Writes a minimal tahoe.cfg when write_config is True; otherwise an
        existing tahoe.cfg must already be present.  A client_config_hooks
        entry for i may edit the config or return a replacement client.
        """
        clientid = hashutil.tagged_hash("clientid", str(i))[:20]
        clientdir = os.path.join(self.basedir, "clients",
                                 idlib.shortnodeid_b2a(clientid))
        fileutil.make_dirs(clientdir)

        tahoe_cfg_path = os.path.join(clientdir, "tahoe.cfg")
        if write_config:
            # context manager guarantees the file is closed even if a
            # write fails partway through
            with open(tahoe_cfg_path, "w") as f:
                f.write("[node]\n")
                f.write("nickname = client-%d\n" % i)
                f.write("web.port = tcp:0:interface=127.0.0.1\n")
                f.write("[storage]\n")
                f.write("enabled = false\n")
        else:
            _assert(os.path.exists(tahoe_cfg_path), tahoe_cfg_path=tahoe_cfg_path)

        c = None
        if i in self.client_config_hooks:
            # this hook can either modify tahoe.cfg, or return an
            # entirely new Client instance
            c = self.client_config_hooks[i](clientdir)

        if not c:
            c = NoNetworkClient(clientdir)
            c.set_default_mutable_keysize(TEST_RSA_KEY_SIZE)

        c.nodeid = clientid
        c.short_nodeid = b32encode(clientid).lower()[:8]
        c._servers = self.all_servers # can be updated later
        c.setServiceParent(self)
        return c
Beispiel #12
0
 def log_goal(self, goal, message=""):
     """Log the current share-placement goal and the new seqnum (NOISY)."""
     pieces = [message]
     pieces.extend(["sh%d to [%s]" % (shnum, idlib.shortnodeid_b2a(peerid))
                    for (shnum, peerid) in sorted((s, p) for (p, s) in goal)])
     self.log("current goal: %s" % (", ".join(pieces)), level=log.NOISY)
     self.log("we are planning to push new seqnum=#%d" % self._new_seqnum,
              level=log.NOISY)
Beispiel #13
0
 def render_server_problem(self, ctx, data):
     """Render a problem server as its short nodeid, plus nickname if known."""
     serverid = data
     pieces = [idlib.shortnodeid_b2a(serverid)]
     sb = self.client.get_storage_broker()
     nickname = sb.get_nickname_for_serverid(serverid)
     if nickname:
         pieces.append(" (%s)" % self._html(nickname))
     return ctx.tag[pieces]
Beispiel #14
0
 def __init__(self, rref, peerid, storage_index):
     # Keep the remote reference and peer for later bucket reads.
     self._rref = rref
     self._peerid = peerid
     self._reprstr = ("<ReadBucketProxy %s to peer [%s] SI %s>"
                      % (id(self), idlib.shortnodeid_b2a(peerid),
                         si_b2a(storage_index)))
     self._started = False # sent request to server
     self._ready = observer.OneShotObserverList() # got response from server
Beispiel #15
0
 def log_goal(self, goal, message=""):
     """Emit NOISY log lines describing the upload goal and new seqnum."""
     entries = [message]
     for shnum, peerid in sorted([(s, p) for (p, s) in goal]):
         entries.append("sh%d to [%s]"
                        % (shnum, idlib.shortnodeid_b2a(peerid)))
     self.log("current goal: %s" % (", ".join(entries)), level=log.NOISY)
     self.log("we are planning to push new seqnum=#%d" % self._new_seqnum,
              level=log.NOISY)
Beispiel #16
0
 def _got(server_problems):
     # Render per-server problems as a nested list, or nothing at all.
     if not server_problems:
         return ""
     items = T.ul()
     for peerid in sorted(server_problems):
         items[T.li["[%s]: %s" % (idlib.shortnodeid_b2a(peerid),
                                  server_problems[peerid])]]
     return T.li["Server Problems:", items]
Beispiel #17
0
 def _got(servers_used):
     # List the servers that were used, as bracketed short nodeids.
     if not servers_used:
         return ""
     formatted = []
     for peerid in servers_used:
         formatted.append("[%s]" % idlib.shortnodeid_b2a(peerid))
     return T.li["Servers Used: ", ", ".join(formatted)]
Beispiel #18
0
 def _render(sharemap):
     # Map each share number to the servers it was placed on ("None" if absent).
     if sharemap is None:
         return "None"
     listing = T.ul()
     for shnum, peerids in sorted(sharemap.items()):
         peerids_s = ', '.join(idlib.shortnodeid_b2a(p) for p in peerids)
         listing[T.li["%d -> placed on [%s]" % (shnum, peerids_s)]]
     return listing
Beispiel #19
0
 def _got(server_problems):
     # Nothing to render when there were no server problems.
     if not server_problems:
         return ""
     problem_list = T.ul()
     for peerid in sorted(server_problems.keys()):
         short_id = idlib.shortnodeid_b2a(peerid)
         problem_list[T.li["[%s]: %s" % (short_id, server_problems[peerid])]]
     return T.li["Server Problems:", problem_list]
Beispiel #20
0
 def _oops(f):
     # Last-ditch errback: record (at WEIRD level) any failure raised while
     # running _query_failed itself, so coding errors in the error-handling
     # path are not silently dropped. `shnum` and `peerid` are captured
     # from the enclosing scope.
     self.log(format=
              "problem in _query_failed for sh#%(shnum)d to %(peerid)s",
              shnum=shnum,
              peerid=idlib.shortnodeid_b2a(peerid),
              failure=f,
              level=log.WEIRD,
              umid="W0xnQA")
Beispiel #21
0
 def render_server_problem(self, ctx, data):
     """Show a problem server's short id, with its nickname when known."""
     serverid = data
     parts = [idlib.shortnodeid_b2a(serverid)]
     sb = self.client.get_storage_broker()
     nick = sb.get_nickname_for_serverid(serverid)
     if nick:
         parts.append(" (%s)" % self._html(nick))
     return ctx.tag[parts]
Beispiel #22
0
 def render_problems(self, ctx, data):
     """Render the per-server problem map, or nothing when it is empty."""
     problems = data.problems
     if not problems:
         return ""
     ul = T.ul()
     for peerid in sorted(problems):
         ul[T.li["[%s]: %s" % (idlib.shortnodeid_b2a(peerid),
                               problems[peerid])]]
     return ctx.tag["Server Problems:", ul]
Beispiel #23
0
 def _render(per_server):
     # Per-server segment-fetch response times, one entry per server.
     if per_server is None:
         return ""
     ul = T.ul()
     for peerid in sorted(per_server):
         rendered = [self.render_time(None, t) for t in per_server[peerid]]
         ul[T.li["[%s]: %s" % (idlib.shortnodeid_b2a(peerid),
                               ", ".join(rendered))]]
     return T.li["Per-Server Segment Fetch Response Times: ", ul]
Beispiel #24
0
 def _render(sharemap):
     # "None" when there is no sharemap; otherwise shnum -> server list.
     if sharemap is None:
         return "None"
     placement_list = T.ul()
     for shnum, peerids in sorted(sharemap.items()):
         joined = ', '.join([idlib.shortnodeid_b2a(p) for p in peerids])
         placement_list[T.li["%d -> placed on [%s]" % (shnum, joined)]]
     return placement_list
Beispiel #25
0
 def render_problems(self, ctx, data):
     """Show server problems as a list; empty string when there are none."""
     problems = data.problems
     if not problems:
         return ""
     problem_items = T.ul()
     for peerid in sorted(problems.keys()):
         line = "[%s]: %s" % (idlib.shortnodeid_b2a(peerid), problems[peerid])
         problem_items[T.li[line]]
     return ctx.tag["Server Problems:", problem_items]
Beispiel #26
0
 def make_server(self, i, readonly=False):
     """Create StorageServer #i under basedir/servers/<shortid>/storage."""
     serverid = hashutil.tagged_hash("serverid", str(i))[:20]
     storagedir = os.path.join(self.basedir, "servers",
                               idlib.shortnodeid_b2a(serverid), "storage")
     fileutil.make_dirs(storagedir)
     server = StorageServer(storagedir, serverid,
                            stats_provider=SimpleStats(),
                            readonly_storage=readonly)
     server._no_network_server_number = i
     return server
Beispiel #27
0
 def _render(servermap):
     # One entry per server: "[peerid] got share(s): #a,#b".
     if servermap is None:
         return "None"
     ul = T.ul()
     for peerid in sorted(servermap):
         shares_s = ",".join("#%d" % shnum for shnum in servermap[peerid])
         ul[T.li["[%s] got share%s: %s" % (idlib.shortnodeid_b2a(peerid),
                                           plural(servermap[peerid]),
                                           shares_s)]]
     return ul
Beispiel #28
0
 def problems(self, req, tag):
     """Render update-status server problems; the bare tag when none."""
     problems = self._update_status.problems
     if not problems:
         return tag
     ul = tags.ul()
     for peerid in sorted(problems):
         ul(tags.li("[%s]: %s" % (idlib.shortnodeid_b2a(peerid),
                                  problems[peerid])))
     return tag("Server Problems:", ul)
Beispiel #29
0
 def problems(self, req, tag):
     """Render retrieve-status server problems; empty string when none."""
     problems = self._retrieve_status.get_problems()
     if not problems:
         return ""
     problem_list = tags.ul()
     for peerid in sorted(problems.keys()):
         entry = "[%s]: %s" % (idlib.shortnodeid_b2a(peerid), problems[peerid])
         problem_list(tags.li(entry))
     return tag("Server Problems:", problem_list)
Beispiel #30
0
 def make_server(self, i, readonly=False):
     """Build the i'th no-network StorageServer (optionally read-only)."""
     sid = hashutil.tagged_hash("serverid", str(i))[:20]
     storage_path = os.path.join(self.basedir, "servers",
                                 idlib.shortnodeid_b2a(sid), "storage")
     fileutil.make_dirs(storage_path)
     srv = StorageServer(storage_path, sid, stats_provider=SimpleStats(),
                         readonly_storage=readonly)
     srv._no_network_server_number = i  # index kept for test introspection
     return srv
Beispiel #31
0
    def _process_announcement(self, ann):
        """Handle one introducer announcement tuple.

        Ignores services we are not subscribed to and exact duplicates of
        the currently-recorded announcement; otherwise stores the
        announcement (keyed by (service_name, nodeid)) and notifies local
        subscribers for that service via eventually().
        """
        self._debug_counts["inbound_announcement"] += 1
        (furl, service_name, ri_name, nickname_utf8, ver, oldest) = ann
        if service_name not in self._subscribed_service_names:
            self.log(
                "announcement for a service we don't care about [%s]" % (service_name,),
                level=log.UNUSUAL,
                umid="dIpGNA",
            )
            self._debug_counts["wrong_service"] += 1
            return
        self.log("announcement for [%s]: %s" % (service_name, ann), umid="BoKEag")
        # wire-format sanity checks (Python 2: str is bytes-like,
        # nickname is decoded to unicode)
        assert type(furl) is str
        assert type(service_name) is str
        assert type(ri_name) is str
        assert type(nickname_utf8) is str
        nickname = nickname_utf8.decode("utf-8")
        assert type(nickname) is unicode
        assert type(ver) is str
        assert type(oldest) is str

        # the tubID embedded in the FURL identifies the announcing node
        nodeid = b32decode(SturdyRef(furl).tubID.upper())
        nodeid_s = idlib.shortnodeid_b2a(nodeid)

        ann_d = {
            "version": 0,
            "service-name": service_name,
            "FURL": furl,
            "nickname": nickname,
            "app-versions": {},  # need #466 and v2 introducer
            "my-version": ver,
            "oldest-supported": oldest,
        }

        index = (service_name, nodeid)
        if self._current_announcements.get(index, None) == ann_d:
            # identical re-announcement: count it but change nothing
            self.log(
                "reannouncement for [%(service)s]:%(nodeid)s, ignoring",
                service=service_name,
                nodeid=nodeid_s,
                level=log.UNUSUAL,
                umid="B1MIdA",
            )
            self._debug_counts["duplicate_announcement"] += 1
            return
        if index in self._current_announcements:
            self._debug_counts["update"] += 1
        else:
            self._debug_counts["new_announcement"] += 1

        self._current_announcements[index] = ann_d
        # note: we never forget an index, but we might update its value

        for (service_name2, cb, args, kwargs) in self._local_subscribers:
            if service_name2 == service_name:
                eventually(cb, nodeid, ann_d, *args, **kwargs)
Beispiel #32
0
 def render_server_timings(self, ctx, data):
     """Per-server publish response times as a nested list."""
     per_server = self.publish_status.timings.get("send_per_server")
     if not per_server:
         return ""
     ul = T.ul()
     for peerid in sorted(per_server):
         times_s = ", ".join(self.render_time(None, t)
                             for t in per_server[peerid])
         ul[T.li["[%s]: %s" % (idlib.shortnodeid_b2a(peerid), times_s)]]
     return T.li["Per-Server Response Times: ", ul]
Beispiel #33
0
 def render_share_problem(self, ctx, data):
     """Fill template slots for one (server, SI, sharenum) problem row."""
     serverid, storage_index, sharenum = data
     nickname = self.client.get_storage_broker().get_nickname_for_serverid(serverid)
     ctx.fillSlots("serverid", idlib.shortnodeid_b2a(serverid))
     if nickname:
         ctx.fillSlots("nickname", self._html(nickname))
     ctx.fillSlots("si", self._render_si_link(ctx, storage_index))
     ctx.fillSlots("shnum", str(sharenum))
     return ctx.tag
Beispiel #34
0
 def _render(per_server):
     # Empty string when no per-server timing data was collected.
     if per_server is None:
         return ""
     timing_list = T.ul()
     for peerid in sorted(per_server.keys()):
         formatted = ", ".join([self.render_time(None, t)
                                for t in per_server[peerid]])
         timing_list[T.li["[%s]: %s" % (idlib.shortnodeid_b2a(peerid),
                                        formatted)]]
     return T.li["Per-Server Segment Fetch Response Times: ", timing_list]
Beispiel #35
0
 def render_share_problem(self, ctx, data):
     """Populate the serverid/nickname/si/shnum slots for a share problem."""
     serverid, storage_index, sharenum = data
     sb = self.client.get_storage_broker()
     nick = sb.get_nickname_for_serverid(serverid)
     ctx.fillSlots("serverid", idlib.shortnodeid_b2a(serverid))
     if nick:
         ctx.fillSlots("nickname", self._html(nick))
     ctx.fillSlots("si", self._render_si_link(ctx, storage_index))
     ctx.fillSlots("shnum", str(sharenum))
     return ctx.tag
Beispiel #36
0
 def render_sharemap(self, ctx, data):
     """Render shnum -> server placements derived from the servermap."""
     servermap = data.get_servermap()
     if servermap is None:
         return ctx.tag["None"]
     ul = T.ul()
     sharemap = servermap.make_sharemap()
     for shnum in sorted(sharemap):
         placed = ", ".join("[%s]" % idlib.shortnodeid_b2a(peerid)
                            for peerid in sharemap[shnum])
         ul[T.li["%d -> Placed on " % shnum, placed]]
     return ctx.tag["Sharemap:", ul]
Beispiel #37
0
    def _process_announcement(self, ann):
        """Handle one introducer announcement tuple.

        Ignores services we are not subscribed to and exact duplicates of
        the currently-recorded announcement; otherwise stores the
        announcement (keyed by (service_name, nodeid)) and notifies local
        subscribers for that service via eventually().
        """
        self._debug_counts["inbound_announcement"] += 1
        (furl, service_name, ri_name, nickname_utf8, ver, oldest) = ann
        if service_name not in self._subscribed_service_names:
            self.log("announcement for a service we don't care about [%s]" %
                     (service_name, ),
                     level=log.UNUSUAL,
                     umid="dIpGNA")
            self._debug_counts["wrong_service"] += 1
            return
        self.log("announcement for [%s]: %s" % (service_name, ann),
                 umid="BoKEag")
        # wire-format sanity checks (Python 2: str is bytes-like,
        # nickname is decoded to unicode)
        assert type(furl) is str
        assert type(service_name) is str
        assert type(ri_name) is str
        assert type(nickname_utf8) is str
        nickname = nickname_utf8.decode("utf-8")
        assert type(nickname) is unicode
        assert type(ver) is str
        assert type(oldest) is str

        # the tubID embedded in the FURL identifies the announcing node
        nodeid = b32decode(SturdyRef(furl).tubID.upper())
        nodeid_s = idlib.shortnodeid_b2a(nodeid)

        ann_d = {
            "version": 0,
            "service-name": service_name,
            "FURL": furl,
            "nickname": nickname,
            "app-versions": {},  # need #466 and v2 introducer
            "my-version": ver,
            "oldest-supported": oldest,
        }

        index = (service_name, nodeid)
        if self._current_announcements.get(index, None) == ann_d:
            # identical re-announcement: count it but change nothing
            self.log("reannouncement for [%(service)s]:%(nodeid)s, ignoring",
                     service=service_name,
                     nodeid=nodeid_s,
                     level=log.UNUSUAL,
                     umid="B1MIdA")
            self._debug_counts["duplicate_announcement"] += 1
            return
        if index in self._current_announcements:
            self._debug_counts["update"] += 1
        else:
            self._debug_counts["new_announcement"] += 1

        self._current_announcements[index] = ann_d
        # note: we never forget an index, but we might update its value

        for (service_name2, cb, args, kwargs) in self._local_subscribers:
            if service_name2 == service_name:
                eventually(cb, nodeid, ann_d, *args, **kwargs)
Beispiel #38
0
 def problems(self, req, tag):
     """Render publish-status server problems; an empty tag when none."""
     problems = self._publish_status.get_problems()
     if not problems:
         return tag()
     ul = tags.ul()
     # XXX: is this exercised? I don't think PublishStatus.problems is
     # ever populated
     for peerid in sorted(problems):
         ul(tags.li("[%s]: %s" % (idlib.shortnodeid_b2a(peerid),
                                  problems[peerid])))
     return tag(tags.li("Server Problems:", ul))
Beispiel #39
0
 def _got_response(self, buckets, peerid):
     """Record which shares this peer holds (buckets: shnum -> rref)."""
     shnums_s = ",".join(str(shnum) for shnum in buckets)
     self.log("got_response: [%s] has %d shares (%s)" %
              (idlib.shortnodeid_b2a(peerid), len(buckets), shnums_s),
              level=log.NOISY)
     self._found_shares.update(buckets.keys())
     for shnum in buckets:
         self._sharemap.add(shnum, peerid)
     self._readers.update([(bucket, peerid)
                           for bucket in buckets.values()])
Beispiel #40
0
 def render_server_timings(self, ctx, data):
     """List each server's publish send times, or "" if none recorded."""
     per_server = self.publish_status.timings.get("send_per_server")
     if not per_server:
         return ""
     timing_list = T.ul()
     for peerid in sorted(per_server.keys()):
         peerid_s = idlib.shortnodeid_b2a(peerid)
         rendered = [self.render_time(None, t) for t in per_server[peerid]]
         timing_list[T.li["[%s]: %s" % (peerid_s, ", ".join(rendered))]]
     return T.li["Per-Server Response Times: ", timing_list]
Beispiel #41
0
 def render_problems(self, ctx, data):
     """Render publish problems keyed by peerid; "" when empty."""
     problems = data.get_problems()
     if not problems:
         return ""
     ul = T.ul()
     # XXX: is this exercised? I don't think PublishStatus.problems is
     # ever populated
     for peerid in sorted(problems):
         ul[T.li["[%s]: %s" % (idlib.shortnodeid_b2a(peerid),
                               problems[peerid])]]
     return ctx.tag["Server Problems:", ul]
Beispiel #42
0
 def _render(servermap):
     # "[peerid] has share(s): #a,#b" per server, or "None" when absent.
     if servermap is None:
         return "None"
     ul = T.ul()
     for peerid in sorted(servermap):
         share_list = ",".join("#%d" % shnum for shnum in servermap[peerid])
         ul[T.li["[%s] has share%s: %s" %
                 (idlib.shortnodeid_b2a(peerid),
                  plural(servermap[peerid]), share_list)]]
     return ul
Beispiel #43
0
 def render_problems(self, ctx, data):
     """List any recorded server problems; empty string when none."""
     problems = data.get_problems()
     if not problems:
         return ""
     # XXX: is this exercised? I don't think PublishStatus.problems is
     # ever populated
     ul = T.ul()
     for peerid in sorted(problems):
         peerid_s = idlib.shortnodeid_b2a(peerid)
         ul[T.li["[%s]: %s" % (peerid_s, problems[peerid])]]
     return ctx.tag["Server Problems:", ul]
Beispiel #44
0
 def _got_write_error(self, f, peerid, shnums, started):
     """Note a failed share-write: clear outstanding entries, mark peer bad."""
     self.bad_peers.add(peerid)
     for shnum in shnums:
         self.outstanding.discard((peerid, shnum))
     # remember only the first failure, for later reporting
     if self._first_write_error is None:
         self._first_write_error = f
     self.log(format="error while writing shares %(shnums)s to peerid %(peerid)s",
              shnums=list(shnums), peerid=idlib.shortnodeid_b2a(peerid),
              failure=f,
              level=log.UNUSUAL)
     # self.loop() will take care of checking to see if we're done
Beispiel #45
0
 def _got_response(self, buckets, server_version, peerid, req, d_ev,
                   time_sent, lp):
     """Handle a DYHB reply: log it and turn each bucket into a share."""
     shnums = sorted(buckets)
     time_received = now()
     d_ev.finished(shnums, time_received)
     dyhb_rtt = time_received - time_sent
     if not buckets:
         self.log(format="no shares from [%(peerid)s]",
                  peerid=idlib.shortnodeid_b2a(peerid),
                  level=log.NOISY, parent=lp, umid="U7d4JA")
         return
     self.log(format="got shnums [%(shnums)s] from [%(peerid)s]",
              shnums=",".join(str(shnum) for shnum in shnums),
              peerid=idlib.shortnodeid_b2a(peerid),
              level=log.NOISY, parent=lp, umid="0fcEZw")
     shares = [self._create_share(shnum, bucket, server_version, peerid,
                                  dyhb_rtt)
               for shnum, bucket in buckets.iteritems()]
     self._deliver_shares(shares)
Beispiel #46
0
    def __init__(self, serverid, ann_d, min_shares=1):
        """Track one announced storage server.

        serverid: this server's nodeid (passed to idlib.shortnodeid_b2a, so
        presumably a binary nodeid -- confirm). ann_d: the server's
        announcement. min_shares: stored verbatim; semantics not visible here.
        """
        self.serverid = serverid
        self.announcement = ann_d
        self.min_shares = min_shares

        # printable abbreviation of the nodeid, handy for log messages
        self.serverid_s = idlib.shortnodeid_b2a(self.serverid)
        self.announcement_time = time.time()
        # connection-tracking state, filled in as we connect/lose the server
        self.last_connect_time = None
        self.last_loss_time = None
        self.remote_host = None
        self.rref = None
        self._reconnector = None
        self._trigger_cb = None
Beispiel #47
0
    def __init__(self, serverid, ann_d, min_shares=1):
        """Track one announced storage server.

        Like the sibling constructor, but also records the serverid as the
        tubid. serverid: binary nodeid (fed to idlib.shortnodeid_b2a).
        ann_d: the server's announcement. min_shares: stored verbatim.
        """
        self.serverid = serverid
        # here the serverid doubles as the foolscap tubid -- TODO confirm
        self._tubid = serverid
        self.announcement = ann_d
        self.min_shares = min_shares

        # printable abbreviation of the nodeid, for logging
        self.serverid_s = idlib.shortnodeid_b2a(self.serverid)
        self.announcement_time = time.time()
        # connection-tracking state, filled in as we connect/lose the server
        self.last_connect_time = None
        self.last_loss_time = None
        self.remote_host = None
        self.rref = None
        self._reconnector = None
        self._trigger_cb = None
Beispiel #48
0
 def _got_write_error(self, f, peerid, shnums, started):
     """Record that writing 'shnums' to this peer failed."""
     outstanding = self.outstanding
     for shnum in shnums:
         outstanding.discard((peerid, shnum))
     self.bad_peers.add(peerid)
     # keep only the first failure for later reporting
     if self._first_write_error is None:
         self._first_write_error = f
     self.log(format="error while writing shares %(shnums)s to peerid %(peerid)s",
              shnums=list(shnums),
              peerid=idlib.shortnodeid_b2a(peerid),
              failure=f, level=log.UNUSUAL)
     # self.loop() will take care of checking to see if we're done
Beispiel #49
0
    def dump(self, out=sys.stdout):
        """Write a human-readable summary of the servermap to 'out'."""
        out.write("servermap:\n")
        for ((peerid, shnum), (verinfo, timestamp)) in self.servermap.items():
            (seqnum, root_hash, IV, segsize, datalength, k, N, prefix,
             offsets_tuple) = verinfo
            out.write("[%s]: sh#%d seq%d-%s %d-of-%d len%d\n" %
                      (idlib.shortnodeid_b2a(peerid), shnum,
                       seqnum, base32.b2a(root_hash)[:4], k, N,
                       datalength))
        if self.problems:
            out.write("%d PROBLEMS\n" % len(self.problems))
            for f in self.problems:
                out.write("%s\n" % str(f))
        return out
Beispiel #50
0
 def _query_failed(self, f, marker, peerid):
     """Record a failed servermap query and drop the offending peer."""
     self.log(format="query to [%(peerid)s] failed",
              peerid=idlib.shortnodeid_b2a(peerid),
              level=log.NOISY)
     self._status.problems[peerid] = f
     self._outstanding_queries.pop(marker, None)
     if not self._running:
         return
     self._last_failure = f
     self.remove_peer(peerid)
     # dead references happen routinely, so log them as merely UNUSUAL
     severity = log.UNUSUAL if f.check(DeadReferenceError) else log.WEIRD
     self.log(format="error during query: %(f_value)s",
              f_value=str(f.value), failure=f, level=severity, umid="gOJB5g")
Beispiel #51
0
    def __init__(self,
                 basedir,
                 num_clients=1,
                 num_servers=10,
                 client_config_hooks=None):
        """Build a no-network grid under 'basedir'.

        Creates 'num_servers' storage servers and 'num_clients' clients.
        client_config_hooks maps client number -> callable(clientdir); a
        hook may edit tahoe.cfg in place or return a replacement Client.
        """
        service.MultiService.__init__(self)
        # avoid the shared-mutable-default-argument pitfall
        if client_config_hooks is None:
            client_config_hooks = {}
        self.basedir = basedir
        fileutil.make_dirs(basedir)

        self.servers_by_number = {}  # maps to StorageServer instance
        self.wrappers_by_id = {}  # maps to wrapped StorageServer instance
        self.proxies_by_id = {}  # maps to IServer on which .rref is a wrapped
        # StorageServer
        self.clients = []

        for i in range(num_servers):
            ss = self.make_server(i)
            self.add_server(i, ss)
        self.rebuild_serverlist()

        for i in range(num_clients):
            clientid = hashutil.tagged_hash("clientid", str(i))[:20]
            clientdir = os.path.join(basedir, "clients",
                                     idlib.shortnodeid_b2a(clientid))
            fileutil.make_dirs(clientdir)
            # 'with' guarantees the config file is closed even on error
            with open(os.path.join(clientdir, "tahoe.cfg"), "w") as f:
                f.write("[node]\n")
                f.write("nickname = client-%d\n" % i)
                f.write("web.port = tcp:0:interface=127.0.0.1\n")
                f.write("[storage]\n")
                f.write("enabled = false\n")
            c = None
            if i in client_config_hooks:
                # this hook can either modify tahoe.cfg, or return an
                # entirely new Client instance
                c = client_config_hooks[i](clientdir)
            if not c:
                c = NoNetworkClient(clientdir)
                c.set_default_mutable_keysize(TEST_RSA_KEY_SIZE)
            c.nodeid = clientid
            c.short_nodeid = b32encode(clientid).lower()[:8]
            c._servers = self.all_servers  # can be updated later
            c.setServiceParent(self)
            self.clients.append(c)
Beispiel #52
0
    def _try_to_validate_privkey(self, enc_privkey, peerid, shnum, lp):
        """Decrypt the fetched privkey and install it if the writekey matches."""
        decrypted = self._node._decrypt_privkey(enc_privkey)
        expected = self._node.get_writekey()
        if hashutil.ssk_writekey_hash(decrypted) != expected:
            # mismatch: this share is lying about the privkey
            self.log("invalid privkey from %s shnum %d" %
                     (idlib.nodeid_b2a(peerid)[:8], shnum),
                     parent=lp, level=log.WEIRD, umid="YIw4tA")
            return

        # it's good
        self.log("got valid privkey from shnum %d on peerid %s" %
                 (shnum, idlib.shortnodeid_b2a(peerid)),
                 parent=lp)
        signing_key = rsa.create_signing_key_from_string(decrypted)
        self._node._populate_encprivkey(enc_privkey)
        self._node._populate_privkey(signing_key)
        self._need_privkey = False
Beispiel #53
0
 def _query_failed(self, f, marker, peerid):
     """Handle a failed query: log it, record the problem, retire the peer."""
     self.log(format="query to [%(peerid)s] failed",
              peerid=idlib.shortnodeid_b2a(peerid),
              level=log.NOISY)
     self._status.problems[peerid] = f
     self._outstanding_queries.pop(marker, None)
     if not self._running:
         return
     self._last_failure = f
     self.remove_peer(peerid)
     if f.check(DeadReferenceError):
         # dead references happen routinely; don't treat them as WEIRD
         level = log.UNUSUAL
     else:
         level = log.WEIRD
     self.log(format="error during query: %(f_value)s",
              f_value=str(f.value), failure=f, level=level, umid="gOJB5g")
Beispiel #54
0
 def _do_query(self, ss, peerid, storage_index, readsize):
     """Issue one read query to a peer and attach all result handlers."""
     self.log(format="sending query to [%(peerid)s], readsize=%(readsize)d",
              peerid=idlib.shortnodeid_b2a(peerid),
              readsize=readsize,
              level=log.NOISY)
     started = time.time()
     self._servermap.connections[peerid] = ss
     self._queries_outstanding.add(peerid)
     query_d = self._do_read(ss, peerid, storage_index, [], [(0, readsize)])
     query_d.addCallback(self._got_results, peerid, readsize,
                         (ss, storage_index), started)
     query_d.addErrback(self._query_failed, peerid)
     # errors that aren't handled by _query_failed (and errors caused by
     # _query_failed) get logged, but we still want to check for doneness.
     query_d.addErrback(log.err)
     query_d.addBoth(self._check_for_done)
     query_d.addErrback(self._fatal_error)
     return query_d
Beispiel #55
0
    def _got_results(self, datavs, peerid, readsize, stuff, started):
        """Process share data returned by one peer during a servermap update."""
        lp = self.log(format="got result from [%(peerid)s], %(numshares)d shares",
                      peerid=idlib.shortnodeid_b2a(peerid),
                      numshares=len(datavs),
                      level=log.NOISY)
        elapsed = time.time() - started
        self._queries_outstanding.discard(peerid)
        self._servermap.reachable_peers.add(peerid)
        self._must_query.discard(peerid)
        self._queries_completed += 1
        if not self._running:
            self.log("but we're not running, so we'll ignore it", parent=lp,
                     level=log.NOISY)
            self._status.add_per_server_time(peerid, "late", started, elapsed)
            return
        self._status.add_per_server_time(peerid, "query", started, elapsed)

        if datavs:
            self._good_peers.add(peerid)
        else:
            self._empty_peers.add(peerid)

        last_verinfo = None
        last_shnum = None
        for shnum, datav in datavs.items():
            data = datav[0]
            try:
                verinfo = self._got_results_one_share(shnum, data, peerid, lp)
                last_verinfo = verinfo
                last_shnum = shnum
                self._node._add_to_cache(verinfo, shnum, 0, data)
            except CorruptShareError as e:
                # log it and give the other shares a chance to be processed
                f = failure.Failure()
                self.log(format="bad share: %(f_value)s", f_value=str(f.value),
                         failure=f, parent=lp, level=log.WEIRD, umid="h5llHg")
                self.notify_server_corruption(peerid, shnum, str(e))
                self._bad_peers.add(peerid)
                self._last_failure = f
                checkstring = data[:SIGNED_PREFIX_LENGTH]
                self._servermap.mark_bad_share(peerid, shnum, checkstring)
                self._servermap.problems.append(f)
Beispiel #56
0
    def _try_to_validate_privkey(self, enc_privkey, peerid, shnum, lp):
        """Check an encrypted privkey against our writekey; keep it if good."""
        privkey_s = self._node._decrypt_privkey(enc_privkey)
        if hashutil.ssk_writekey_hash(privkey_s) != self._node.get_writekey():
            # the hash doesn't match: this share's privkey is bogus
            self.log("invalid privkey from %s shnum %d" %
                     (idlib.nodeid_b2a(peerid)[:8], shnum),
                     parent=lp,
                     level=log.WEIRD,
                     umid="YIw4tA")
            return

        # it's good: install both forms on the node
        self.log("got valid privkey from shnum %d on peerid %s" %
                 (shnum, idlib.shortnodeid_b2a(peerid)),
                 parent=lp)
        privkey = rsa.create_signing_key_from_string(privkey_s)
        self._node._populate_encprivkey(enc_privkey)
        self._node._populate_privkey(privkey)
        self._need_privkey = False
Beispiel #57
0
    def _got_results(self, datavs, marker, peerid, started, got_from_cache):
        """Handle share data fetched from one peer during retrieve."""
        elapsed = time.time() - started
        if not got_from_cache:
            self._status.add_fetch_timing(peerid, elapsed)
        self.log(format="got results (%(shares)d shares) from [%(peerid)s]",
                 shares=len(datavs),
                 peerid=idlib.shortnodeid_b2a(peerid),
                 level=log.NOISY)
        self._outstanding_queries.pop(marker, None)
        if not self._running:
            return

        # note that we only ask for a single share per query, so we only
        # expect a single share back. On the other hand, we use the extra
        # shares if we get them.. seems better than an assert().
        for shnum, datav in datavs.items():
            (prefix, hash_and_data) = datav[:2]
            try:
                self._got_results_one_share(shnum, peerid, prefix,
                                            hash_and_data)
            except CorruptShareError as e:
                # log it and give the other shares a chance to be processed
                f = failure.Failure()
                self.log(format="bad share: %(f_value)s",
                         f_value=str(f.value),
                         failure=f,
                         level=log.WEIRD,
                         umid="7fzWZw")
                self.notify_server_corruption(peerid, shnum, str(e))
                self.remove_peer(peerid)
                self.servermap.mark_bad_share(peerid, shnum, prefix)
                self._bad_shares.add((peerid, shnum))
                self._status.problems[peerid] = f
                self._last_failure = f
            if self._need_privkey and len(datav) > 2:
                # the reply carried our encrypted privkey; try to validate it
                self._try_to_validate_privkey(datav[2], peerid, shnum, None)
Beispiel #58
0
 def render_server_timings(self, ctx, data):
     """Render per-server query timings for the servermap update."""
     per_server = self.update_status.timings.get("per_server")
     if not per_server:
         return ""
     ul = T.ul()
     for peerid in sorted(per_server.keys()):
         peerid_s = idlib.shortnodeid_b2a(peerid)
         descriptions = []
         for op, started, t in per_server[peerid]:
             elapsed = self.render_time(None, t)
             if op == "query":
                 descriptions.append(elapsed)
             elif op == "late":
                 descriptions.append("late(" + elapsed + ")")
             else:
                 # remaining ops are privkey fetches
                 descriptions.append("privkey(" + elapsed + ")")
         ul[T.li["[%s]: %s" % (peerid_s, ", ".join(descriptions))]]
     return T.li["Per-Server Response Times: ", ul]
Beispiel #59
0
 def get_name(self):
     """Return the abbreviated printable form of this server's nodeid."""
     return idlib.shortnodeid_b2a(self.serverid)