Example #1
def json_check_counts(d):
    r = {}
    r["count-shares-good"] = d["count-shares-good"]
    r["count-shares-needed"] = d["count-shares-needed"]
    r["count-shares-expected"] = d["count-shares-expected"]
    r["count-good-share-hosts"] = d["count-good-share-hosts"]
    r["count-corrupt-shares"] = d["count-corrupt-shares"]
    r["list-corrupt-shares"] = [
        (idlib.nodeid_b2a(serverid), base32.b2a(si), shnum)
        for (serverid, si, shnum) in d["list-corrupt-shares"]
    ]
    r["servers-responding"] = [
        idlib.nodeid_b2a(serverid) for serverid in d["servers-responding"]
    ]
    sharemap = {}
    for (shareid, serverids) in d["sharemap"].items():
        sharemap[shareid] = [
            idlib.nodeid_b2a(serverid) for serverid in serverids
        ]
    r["sharemap"] = sharemap

    r["count-wrong-shares"] = d["count-wrong-shares"]
    r["count-recoverable-versions"] = d["count-recoverable-versions"]
    r["count-unrecoverable-versions"] = d["count-unrecoverable-versions"]

    return r
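
A minimal usage sketch, with illustrative values only: json_check_counts expects the raw check-results dict in which serverids and storage indexes are byte strings, and returns a copy with those fields base32-encoded so they can be serialized to JSON. It assumes json_check_counts and allmydata.util's idlib and base32 are importable.

d = {
    "count-shares-good": 7,
    "count-shares-needed": 3,
    "count-shares-expected": 10,
    "count-good-share-hosts": 7,
    "count-corrupt-shares": 1,
    "list-corrupt-shares": [(b"\x00" * 20, b"\x01" * 16, 4)],  # (serverid, storage index, shnum)
    "servers-responding": [b"\x00" * 20],
    "sharemap": {"seq1-aaaa-sh0": [b"\x00" * 20]},
    "count-wrong-shares": 0,
    "count-recoverable-versions": 1,
    "count-unrecoverable-versions": 0,
}
print(json_check_counts(d))  # nodeids and storage indexes come back as base32 strings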
Example #2
def dump_mutable_share(options):
    from allmydata.storage.mutable import MutableShareFile
    from allmydata.util import base32, idlib
    out = options.stdout
    m = MutableShareFile(options['filename'])
    f = open(options['filename'], "rb")
    WE, nodeid = m._read_write_enabler_and_nodeid(f)
    num_extra_leases = m._read_num_extra_leases(f)
    data_length = m._read_data_length(f)
    extra_lease_offset = m._read_extra_lease_offset(f)
    container_size = extra_lease_offset - m.DATA_OFFSET
    leases = list(m._enumerate_leases(f))

    share_type = "unknown"
    f.seek(m.DATA_OFFSET)
    version = f.read(1)
    if version == b"\x00":
        # this slot contains an SDMF share
        share_type = "SDMF"
    elif version == b"\x01":
        share_type = "MDMF"
    f.close()

    print(file=out)
    print("Mutable slot found:", file=out)
    print(" share_type: %s" % share_type, file=out)
    print(" write_enabler: %s" % unicode(base32.b2a(WE), "utf-8"), file=out)
    print(" WE for nodeid: %s" % idlib.nodeid_b2a(nodeid), file=out)
    print(" num_extra_leases: %d" % num_extra_leases, file=out)
    print(" container_size: %d" % container_size, file=out)
    print(" data_length: %d" % data_length, file=out)
    if leases:
        for (leasenum, lease) in leases:
            print(file=out)
            print(" Lease #%d:" % leasenum, file=out)
            print("  ownerid: %d" % lease.owner_num, file=out)
            when = format_expiration_time(lease.expiration_time)
            print("  expires in %s" % when, file=out)
            print("  renew_secret: %s" %
                  unicode(base32.b2a(lease.renew_secret), "utf-8"),
                  file=out)
            print("  cancel_secret: %s" %
                  unicode(base32.b2a(lease.cancel_secret), "utf-8"),
                  file=out)
            print("  secrets are for nodeid: %s" %
                  idlib.nodeid_b2a(lease.nodeid),
                  file=out)
    else:
        print("No leases.", file=out)
    print(file=out)

    if share_type == "SDMF":
        dump_SDMF_share(m, data_length, options)
    elif share_type == "MDMF":
        dump_MDMF_share(m, data_length, options)

    return 0
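
dump_mutable_share reads options.stdout as an attribute and the share path via options['filename']; in Tahoe this is a CLI options object, but for experimenting outside the CLI a rough stand-in (purely illustrative, not the real options class) could look like this:

import sys

class FakeOptions(dict):
    # Illustrative stand-in: supports dict-style access to 'filename' plus a
    # .stdout attribute, which is all dump_mutable_share touches before it
    # hands off to dump_SDMF_share/dump_MDMF_share.
    stdout = sys.stdout

opts = FakeOptions(filename="/path/to/a/mutable/share")
# dump_mutable_share(opts)   # prints the slot header and any leases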
Example #3
def dump_mutable_share(options):
    from allmydata.storage.mutable import MutableShareFile
    from allmydata.util import base32, idlib
    out = options.stdout
    m = MutableShareFile(options['filename'])
    f = open(options['filename'], "rb")
    WE, nodeid = m._read_write_enabler_and_nodeid(f)
    num_extra_leases = m._read_num_extra_leases(f)
    data_length = m._read_data_length(f)
    extra_lease_offset = m._read_extra_lease_offset(f)
    container_size = extra_lease_offset - m.DATA_OFFSET
    leases = list(m._enumerate_leases(f))

    share_type = "unknown"
    f.seek(m.DATA_OFFSET)
    version = f.read(1)
    if version == "\x00":
        # this slot contains an SDMF share
        share_type = "SDMF"
    elif version == "\x01":
        share_type = "MDMF"
    f.close()

    print >>out
    print >>out, "Mutable slot found:"
    print >>out, " share_type: %s" % share_type
    print >>out, " write_enabler: %s" % base32.b2a(WE)
    print >>out, " WE for nodeid: %s" % idlib.nodeid_b2a(nodeid)
    print >>out, " num_extra_leases: %d" % num_extra_leases
    print >>out, " container_size: %d" % container_size
    print >>out, " data_length: %d" % data_length
    if leases:
        for (leasenum, lease) in leases:
            print >>out
            print >>out, " Lease #%d:" % leasenum
            print >>out, "  ownerid: %d" % lease.owner_num
            when = format_expiration_time(lease.expiration_time)
            print >>out, "  expires in %s" % when
            print >>out, "  renew_secret: %s" % base32.b2a(lease.renew_secret)
            print >>out, "  cancel_secret: %s" % base32.b2a(lease.cancel_secret)
            print >>out, "  secrets are for nodeid: %s" % idlib.nodeid_b2a(lease.nodeid)
    else:
        print >>out, "No leases."
    print >>out

    if share_type == "SDMF":
        dump_SDMF_share(m, data_length, options)
    elif share_type == "MDMF":
        dump_MDMF_share(m, data_length, options)

    return 0
Example #4
    def render_service_row(self, ctx, descriptor):
        nodeid = descriptor.get_serverid()

        ctx.fillSlots("peerid", idlib.nodeid_b2a(nodeid))
        ctx.fillSlots("nickname", descriptor.get_nickname())
        rhost = descriptor.get_remote_host()
        if rhost:
            if nodeid == self.client.nodeid:
                rhost_s = "(loopback)"
            elif isinstance(rhost, address.IPv4Address):
                rhost_s = "%s:%d" % (rhost.host, rhost.port)
            else:
                rhost_s = str(rhost)
            connected = "Yes: to " + rhost_s
            since = descriptor.get_last_connect_time()
        else:
            connected = "No"
            since = descriptor.get_last_loss_time()
        announced = descriptor.get_announcement_time()
        announcement = descriptor.get_announcement()
        version = announcement["my-version"]
        service_name = announcement["service-name"]

        TIME_FORMAT = "%H:%M:%S %d-%b-%Y"
        ctx.fillSlots("connected", connected)
        ctx.fillSlots("connected-bool", bool(rhost))
        ctx.fillSlots("since", time.strftime(TIME_FORMAT,
                                             time.localtime(since)))
        ctx.fillSlots("announced", time.strftime(TIME_FORMAT,
                                                 time.localtime(announced)))
        ctx.fillSlots("version", version)
        ctx.fillSlots("service_name", service_name)

        return ctx.tag
Example #5
    def renew_lease(self, renew_secret, new_expire_time, allow_backdate=False):
        # type: (bytes, int, bool) -> None
        """
        Update the expiration time on an existing lease.

        :param allow_backdate: If ``True`` then allow the new expiration time
            to be before the current expiration time.  Otherwise, make no
            change when this is the case.

        :raise IndexError: If there is no lease matching the given renew
            secret.
        """
        accepting_nodeids = set()
        with open(self.home, 'rb+') as f:
            for (leasenum,lease) in self._enumerate_leases(f):
                if lease.is_renew_secret(renew_secret):
                    # yup. See if we need to update the owner time.
                    if allow_backdate or new_expire_time > lease.get_expiration_time():
                        # yes
                        lease = lease.renew(new_expire_time)
                        self._write_lease_record(f, leasenum, lease)
                    return
                accepting_nodeids.add(lease.nodeid)
        # Return the accepting_nodeids set, to give the client a chance to
        # update the leases on a share which has been migrated from its
        # original server to a new one.
        msg = ("Unable to renew non-existent lease. I have leases accepted by"
               " nodeids: ")
        msg += ",".join([("'%s'" % idlib.nodeid_b2a(anid))
                         for anid in accepting_nodeids])
        msg += " ."
        raise IndexError(msg)
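
A minimal calling sketch, assuming share is an object exposing the renew_lease method above and that the 32-byte renew secret is already known; the 31-day horizon is illustrative, not something the method requires:

import time

def try_renew(share, renew_secret):
    # Report whether a matching lease was found instead of letting the
    # IndexError documented above escape to the caller.
    try:
        share.renew_lease(renew_secret, int(time.time()) + 31 * 24 * 60 * 60)
        return True
    except IndexError:
        return False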
Example #6
 def check_write_enabler(self, write_enabler, si_s):
     with open(self.home, 'rb+') as f:
         (real_write_enabler, write_enabler_nodeid) = \
                              self._read_write_enabler_and_nodeid(f)
     # avoid a timing attack
     #if write_enabler != real_write_enabler:
     if not timing_safe_compare(write_enabler, real_write_enabler):
         # accommodate share migration by reporting the nodeid used for the
         # old write enabler.
         self.log(format="bad write enabler on SI %(si)s,"
                  " recorded by nodeid %(nodeid)s",
                  facility="tahoe.storage",
                  level=log.WEIRD, umid="cE1eBQ",
                  si=si_s, nodeid=idlib.nodeid_b2a(write_enabler_nodeid))
         msg = "The write enabler was recorded by nodeid '%s'." % \
               (idlib.nodeid_b2a(write_enabler_nodeid),)
         raise BadWriteEnablerError(msg)
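
The timing_safe_compare call above exists so that comparing the supplied write enabler against the stored one does not leak information through response timing. As a rough stand-in (assuming nothing Tahoe-specific beyond constant-time equality of byte strings), the standard library's hmac.compare_digest gives the same guarantee:

import hmac

def timing_safe_compare(a, b):
    # Constant-time comparison: the time taken does not depend on how many
    # leading bytes of a and b happen to match, so an attacker cannot guess
    # the write enabler one byte at a time.
    return hmac.compare_digest(a, b)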
Example #7
 def check_write_enabler(self, write_enabler, si_s):
     f = open(self.home, 'rb+')
     (real_write_enabler, write_enabler_nodeid) = \
                          self._read_write_enabler_and_nodeid(f)
     f.close()
     # avoid a timing attack
     #if write_enabler != real_write_enabler:
     if not timing_safe_compare(write_enabler, real_write_enabler):
         # accommodate share migration by reporting the nodeid used for the
         # old write enabler.
         self.log(format="bad write enabler on SI %(si)s,"
                  " recorded by nodeid %(nodeid)s",
                  facility="tahoe.storage",
                  level=log.WEIRD, umid="cE1eBQ",
                  si=si_s, nodeid=idlib.nodeid_b2a(write_enabler_nodeid))
         msg = "The write enabler was recorded by nodeid '%s'." % \
               (idlib.nodeid_b2a(write_enabler_nodeid),)
         raise BadWriteEnablerError(msg)
Example #8
    def json(self, ctx):
        inevow.IRequest(ctx).setHeader("content-type", "text/plain")
        res = self.monitor.get_status()
        data = {}
        data["finished"] = self.monitor.is_finished()
        data["root-storage-index"] = res.get_root_storage_index_string()
        c = res.get_counters()
        data["count-objects-checked"] = c["count-objects-checked"]

        data["count-objects-healthy-pre-repair"] = c[
            "count-objects-healthy-pre-repair"]
        data["count-objects-unhealthy-pre-repair"] = c[
            "count-objects-unhealthy-pre-repair"]
        data["count-objects-healthy-post-repair"] = c[
            "count-objects-healthy-post-repair"]
        data["count-objects-unhealthy-post-repair"] = c[
            "count-objects-unhealthy-post-repair"]

        data["count-repairs-attempted"] = c["count-repairs-attempted"]
        data["count-repairs-successful"] = c["count-repairs-successful"]
        data["count-repairs-unsuccessful"] = c["count-repairs-unsuccessful"]

        data["count-corrupt-shares-pre-repair"] = c[
            "count-corrupt-shares-pre-repair"]
        data["count-corrupt-shares-post-repair"] = c[
            "count-corrupt-shares-pre-repair"]

        data["list-corrupt-shares"] = [
            (idlib.nodeid_b2a(serverid), base32.b2a(storage_index), shnum)
            for (serverid, storage_index, shnum) in res.get_corrupt_shares()
        ]

        remaining_corrupt = [(idlib.nodeid_b2a(serverid),
                              base32.b2a(storage_index), shnum)
                             for (serverid, storage_index,
                                  shnum) in res.get_remaining_corrupt_shares()]
        data["list-remaining-corrupt-shares"] = remaining_corrupt

        unhealthy = [(path_t, json_check_results(crr.get_pre_repair_results()))
                     for (path_t, crr) in res.get_all_results().items()
                     if not crr.get_pre_repair_results().is_healthy()]
        data["list-unhealthy-files"] = unhealthy
        data["stats"] = res.get_stats()
        return simplejson.dumps(data, indent=1) + "\n"
Example #9
 def __init__(self, introducer_node, introducer_service):
     super(IntroducerRootElement, self).__init__()
     self.introducer_node = introducer_node
     self.introducer_service = introducer_service
     self.node_data_dict = {
         "my_nodeid": idlib.nodeid_b2a(self.introducer_node.nodeid),
         "version": get_package_versions_string(),
         "import_path": str(allmydata).replace("/", "/ "),  # XXX kludge for wrapping
         "rendered_at": render_time(time.time()),
     }
Example #10
    def json(self, ctx):
        inevow.IRequest(ctx).setHeader("content-type", "text/plain")
        res = self.monitor.get_status()
        data = {}
        data["finished"] = self.monitor.is_finished()
        data["root-storage-index"] = res.get_root_storage_index_string()
        c = res.get_counters()
        data["count-objects-checked"] = c["count-objects-checked"]

        data["count-objects-healthy-pre-repair"] = c["count-objects-healthy-pre-repair"]
        data["count-objects-unhealthy-pre-repair"] = c["count-objects-unhealthy-pre-repair"]
        data["count-objects-healthy-post-repair"] = c["count-objects-healthy-post-repair"]
        data["count-objects-unhealthy-post-repair"] = c["count-objects-unhealthy-post-repair"]

        data["count-repairs-attempted"] = c["count-repairs-attempted"]
        data["count-repairs-successful"] = c["count-repairs-successful"]
        data["count-repairs-unsuccessful"] = c["count-repairs-unsuccessful"]

        data["count-corrupt-shares-pre-repair"] = c["count-corrupt-shares-pre-repair"]
        data["count-corrupt-shares-post-repair"] = c["count-corrupt-shares-pre-repair"]

        data["list-corrupt-shares"] = [ (idlib.nodeid_b2a(serverid),
                                         base32.b2a(storage_index),
                                         shnum)
                                        for (serverid, storage_index, shnum)
                                        in res.get_corrupt_shares() ]

        remaining_corrupt = [ (idlib.nodeid_b2a(serverid),
                               base32.b2a(storage_index),
                               shnum)
                              for (serverid, storage_index, shnum)
                              in res.get_remaining_corrupt_shares() ]
        data["list-remaining-corrupt-shares"] = remaining_corrupt

        unhealthy = [ (path_t,
                       json_check_results(crr.get_pre_repair_results()))
                      for (path_t, crr)
                      in res.get_all_results().items()
                      if not crr.get_pre_repair_results().is_healthy() ]
        data["list-unhealthy-files"] = unhealthy
        data["stats"] = res.get_stats()
        return simplejson.dumps(data, indent=1) + "\n"
Example #11
 def _check2(flattened_bytes):
     text = flattened_bytes.decode("utf-8")
     self.assertIn(NICKNAME % "0", text) # a v2 client
     self.assertIn(NICKNAME % "1", text) # another v2 client
     for i in range(NUM_STORAGE):
         self.assertIn(printable_serverids[i], text,
                           (i,printable_serverids[i],text))
         # make sure there isn't a double-base32ed string too
         self.assertNotIn(idlib.nodeid_b2a(printable_serverids[i]), text,
                       (i,printable_serverids[i],text))
     log.msg("_check2 done")
Example #13
        def _check1(res):
            log.msg("doing _check1")
            dc = self.the_introducer._debug_counts
            # each storage server publishes a record. There is also one
            # "boring"
            self.failUnlessEqual(dc["inbound_message"], NUM_STORAGE+1)
            self.failUnlessEqual(dc["inbound_duplicate"], 0)
            self.failUnlessEqual(dc["inbound_update"], 0)
            self.failUnlessEqual(dc["inbound_subscribe"], NUM_CLIENTS)
            # the number of outbound messages is tricky.. I think it depends
            # upon a race between the publish and the subscribe messages.
            self.failUnless(dc["outbound_message"] > 0)
            # each client subscribes to "storage", and each server publishes
            self.failUnlessEqual(dc["outbound_announcements"],
                                 NUM_STORAGE*NUM_CLIENTS)

            for c in subscribing_clients:
                cdc = c._debug_counts
                self.failUnless(cdc["inbound_message"])
                self.failUnlessEqual(cdc["inbound_announcement"],
                                     NUM_STORAGE)
                self.failUnlessEqual(cdc["wrong_service"], 0)
                self.failUnlessEqual(cdc["duplicate_announcement"], 0)
                self.failUnlessEqual(cdc["update"], 0)
                self.failUnlessEqual(cdc["new_announcement"],
                                     NUM_STORAGE)
                anns = received_announcements[c]
                self.failUnlessEqual(len(anns), NUM_STORAGE)

                serverid0 = printable_serverids[0]
                ann = anns[serverid0]
                nick = ann["nickname"]
                self.failUnlessEqual(type(nick), unicode)
                self.failUnlessEqual(nick, NICKNAME % "0")
            for c in publishing_clients:
                cdc = c._debug_counts
                expected = 1
                if c in [clients[2], # boring
                         ]:
                    expected = 2
                self.failUnlessEqual(cdc["outbound_message"], expected)
            # now check the web status, make sure it renders without error
            ir = introweb.IntroducerRoot(self.parent)
            self.parent.nodeid = "NODEID"
            text = ir.renderSynchronously().decode("utf-8")
            self.failUnlessIn(NICKNAME % "0", text) # a v2 client
            self.failUnlessIn(NICKNAME % "1", text) # another v2 client
            for i in range(NUM_STORAGE):
                self.failUnlessIn(printable_serverids[i], text,
                                  (i,printable_serverids[i],text))
                # make sure there isn't a double-base32ed string too
                self.failIfIn(idlib.nodeid_b2a(printable_serverids[i]), text,
                              (i,printable_serverids[i],text))
            log.msg("_check1 done")
Example #14
def json_check_counts(d):
    r = {}
    r["count-shares-good"] = d["count-shares-good"]
    r["count-shares-needed"] = d["count-shares-needed"]
    r["count-shares-expected"] = d["count-shares-expected"]
    r["count-good-share-hosts"] = d["count-good-share-hosts"]
    r["count-corrupt-shares"] = d["count-corrupt-shares"]
    r["list-corrupt-shares"] = [ (idlib.nodeid_b2a(serverid),
                                  base32.b2a(si), shnum)
                                 for (serverid, si, shnum)
                                 in d["list-corrupt-shares"] ]
    r["servers-responding"] = [idlib.nodeid_b2a(serverid)
                               for serverid in d["servers-responding"]]
    sharemap = {}
    for (shareid, serverids) in d["sharemap"].items():
        sharemap[shareid] = [idlib.nodeid_b2a(serverid)
                             for serverid in serverids]
    r["sharemap"] = sharemap

    r["count-wrong-shares"] = d["count-wrong-shares"]
    r["count-recoverable-versions"] = d["count-recoverable-versions"]
    r["count-unrecoverable-versions"] = d["count-unrecoverable-versions"]

    return r
Example #15
 def get_announcements(self, include_stub_clients=True):
     announcements = []
     for index, (ann_t, when) in self._announcements.items():
         (furl, service_name, ri_name, nickname, ver, oldest) = ann_t
         if service_name == "stub_client" and not include_stub_clients:
             continue
         ann_d = {"nickname": nickname.decode("utf-8", "replace"),
                  "my-version": ver,
                  "service-name": service_name,
                  "anonymous-storage-FURL": furl,
                  }
         # the V2 introducer uses (service_name, key_s, tubid_s) as an
         # index, so match that format for AnnouncementDescriptor
         new_index = (index[0], None, idlib.nodeid_b2a(index[1]))
         ad = AnnouncementDescriptor(when, new_index, None, ann_d)
         announcements.append(ad)
     return announcements
Example #16
    def _try_to_validate_privkey(self, enc_privkey, peerid, shnum, lp):

        alleged_privkey_s = self._node._decrypt_privkey(enc_privkey)
        alleged_writekey = hashutil.ssk_writekey_hash(alleged_privkey_s)
        if alleged_writekey != self._node.get_writekey():
            self.log("invalid privkey from %s shnum %d" %
                     (idlib.nodeid_b2a(peerid)[:8], shnum),
                     parent=lp, level=log.WEIRD, umid="YIw4tA")
            return

        # it's good
        self.log("got valid privkey from shnum %d on peerid %s" %
                 (shnum, idlib.shortnodeid_b2a(peerid)),
                 parent=lp)
        privkey = rsa.create_signing_key_from_string(alleged_privkey_s)
        self._node._populate_encprivkey(enc_privkey)
        self._node._populate_privkey(privkey)
        self._need_privkey = False
Example #17
    def _try_to_validate_privkey(self, enc_privkey, peerid, shnum, lp):

        alleged_privkey_s = self._node._decrypt_privkey(enc_privkey)
        alleged_writekey = hashutil.ssk_writekey_hash(alleged_privkey_s)
        if alleged_writekey != self._node.get_writekey():
            self.log("invalid privkey from %s shnum %d" %
                     (idlib.nodeid_b2a(peerid)[:8], shnum),
                     parent=lp,
                     level=log.WEIRD,
                     umid="YIw4tA")
            return

        # it's good
        self.log("got valid privkey from shnum %d on peerid %s" %
                 (shnum, idlib.shortnodeid_b2a(peerid)),
                 parent=lp)
        privkey = rsa.create_signing_key_from_string(alleged_privkey_s)
        self._node._populate_encprivkey(enc_privkey)
        self._node._populate_privkey(privkey)
        self._need_privkey = False
Example #18
 def renew_lease(self, renew_secret, new_expire_time):
     accepting_nodeids = set()
     with open(self.home, 'rb+') as f:
         for (leasenum, lease) in self._enumerate_leases(f):
             if timing_safe_compare(lease.renew_secret, renew_secret):
                 # yup. See if we need to update the owner time.
                 if new_expire_time > lease.expiration_time:
                     # yes
                     lease.expiration_time = new_expire_time
                     self._write_lease_record(f, leasenum, lease)
                 return
             accepting_nodeids.add(lease.nodeid)
     # Return the accepting_nodeids set, to give the client a chance to
     # update the leases on a share which has been migrated from its
     # original server to a new one.
     msg = ("Unable to renew non-existent lease. I have leases accepted by"
            " nodeids: ")
     msg += ",".join([("'%s'" % idlib.nodeid_b2a(anid))
                      for anid in accepting_nodeids])
     msg += " ."
     raise IndexError(msg)
Example #19
 def renew_lease(self, renew_secret, new_expire_time):
     accepting_nodeids = set()
     f = open(self.home, 'rb+')
     for (leasenum,lease) in self._enumerate_leases(f):
         if timing_safe_compare(lease.renew_secret, renew_secret):
             # yup. See if we need to update the owner time.
             if new_expire_time > lease.expiration_time:
                 # yes
                 lease.expiration_time = new_expire_time
                 self._write_lease_record(f, leasenum, lease)
             f.close()
             return
         accepting_nodeids.add(lease.nodeid)
     f.close()
     # Return the accepting_nodeids set, to give the client a chance to
     # update the leases on a share which has been migrated from its
     # original server to a new one.
     msg = ("Unable to renew non-existent lease. I have leases accepted by"
            " nodeids: ")
     msg += ",".join([("'%s'" % idlib.nodeid_b2a(anid))
                      for anid in accepting_nodeids])
     msg += " ."
     raise IndexError(msg)
Example #20
    def cancel_lease(self, cancel_secret):
        """Remove any leases with the given cancel_secret. If the last lease
        is cancelled, the file will be removed. Return the number of bytes
        that were freed (by truncating the list of leases, and possibly by
        deleting the file). Raise IndexError if there was no lease with the
        given cancel_secret."""

        accepting_nodeids = set()
        modified = 0
        remaining = 0
        blank_lease = LeaseInfo(owner_num=0,
                                renew_secret="\x00"*32,
                                cancel_secret="\x00"*32,
                                expiration_time=0,
                                nodeid="\x00"*20)
        f = open(self.home, 'rb+')
        for (leasenum,lease) in self._enumerate_leases(f):
            accepting_nodeids.add(lease.nodeid)
            if timing_safe_compare(lease.cancel_secret, cancel_secret):
                self._write_lease_record(f, leasenum, blank_lease)
                modified += 1
            else:
                remaining += 1
        if modified:
            freed_space = self._pack_leases(f)
            f.close()
            if not remaining:
                freed_space += os.stat(self.home)[stat.ST_SIZE]
                self.unlink()
            return freed_space

        msg = ("Unable to cancel non-existent lease. I have leases "
               "accepted by nodeids: ")
        msg += ",".join([("'%s'" % idlib.nodeid_b2a(anid))
                         for anid in accepting_nodeids])
        msg += " ."
        raise IndexError(msg)
Example #21
    def cancel_lease(self, cancel_secret):
        """Remove any leases with the given cancel_secret. If the last lease
        is cancelled, the file will be removed. Return the number of bytes
        that were freed (by truncating the list of leases, and possibly by
        deleting the file). Raise IndexError if there was no lease with the
        given cancel_secret."""

        accepting_nodeids = set()
        modified = 0
        remaining = 0
        blank_lease = LeaseInfo(owner_num=0,
                                renew_secret="\x00" * 32,
                                cancel_secret="\x00" * 32,
                                expiration_time=0,
                                nodeid="\x00" * 20)
        with open(self.home, 'rb+') as f:
            for (leasenum, lease) in self._enumerate_leases(f):
                accepting_nodeids.add(lease.nodeid)
                if timing_safe_compare(lease.cancel_secret, cancel_secret):
                    self._write_lease_record(f, leasenum, blank_lease)
                    modified += 1
                else:
                    remaining += 1
            if modified:
                freed_space = self._pack_leases(f)
                f.close()
                if not remaining:
                    freed_space += os.stat(self.home)[stat.ST_SIZE]
                    self.unlink()
                return freed_space

        msg = ("Unable to cancel non-existent lease. I have leases "
               "accepted by nodeids: ")
        msg += ",".join([("'%s'" % idlib.nodeid_b2a(anid))
                         for anid in accepting_nodeids])
        msg += " ."
        raise IndexError(msg)
Example #22
 def nodeid(self, req, tag):
     return tag(idlib.nodeid_b2a(self._storage.my_nodeid))
Example #23
    def _fill_checker_results(self, smap, r):
        self._monitor.raise_if_cancelled()
        r.set_servermap(smap.copy())
        healthy = True
        data = {}
        report = []
        summary = []
        vmap = smap.make_versionmap()
        recoverable = smap.recoverable_versions()
        unrecoverable = smap.unrecoverable_versions()
        data["count-recoverable-versions"] = len(recoverable)
        data["count-unrecoverable-versions"] = len(unrecoverable)

        if recoverable:
            report.append("Recoverable Versions: " +
                          "/".join(["%d*%s" % (len(vmap[v]),
                                               smap.summarize_version(v))
                                    for v in recoverable]))
        if unrecoverable:
            report.append("Unrecoverable Versions: " +
                          "/".join(["%d*%s" % (len(vmap[v]),
                                               smap.summarize_version(v))
                                    for v in unrecoverable]))
        if smap.unrecoverable_versions():
            healthy = False
            summary.append("some versions are unrecoverable")
            report.append("Unhealthy: some versions are unrecoverable")
        if len(recoverable) == 0:
            healthy = False
            summary.append("no versions are recoverable")
            report.append("Unhealthy: no versions are recoverable")
        if len(recoverable) > 1:
            healthy = False
            summary.append("multiple versions are recoverable")
            report.append("Unhealthy: there are multiple recoverable versions")

        needs_rebalancing = False
        if recoverable:
            best_version = smap.best_recoverable_version()
            report.append("Best Recoverable Version: " +
                          smap.summarize_version(best_version))
            counters = self._count_shares(smap, best_version)
            data.update(counters)
            s = counters["count-shares-good"]
            k = counters["count-shares-needed"]
            N = counters["count-shares-expected"]
            if s < N:
                healthy = False
                report.append("Unhealthy: best version has only %d shares "
                              "(encoding is %d-of-%d)" % (s, k, N))
                summary.append("%d shares (enc %d-of-%d)" % (s, k, N))
            hosts = smap.all_peers_for_version(best_version)
            needs_rebalancing = bool( len(hosts) < N )
        elif unrecoverable:
            healthy = False
            # find a k and N from somewhere
            first = list(unrecoverable)[0]
            # not exactly the best version, but that doesn't matter too much
            data.update(self._count_shares(smap, first))
            # leave needs_rebalancing=False: the file being unrecoverable is
            # the bigger problem
        else:
            # couldn't find anything at all
            data["count-shares-good"] = 0
            data["count-shares-needed"] = 3 # arbitrary defaults
            data["count-shares-expected"] = 10
            data["count-good-share-hosts"] = 0
            data["count-wrong-shares"] = 0

        if self.bad_shares:
            data["count-corrupt-shares"] = len(self.bad_shares)
            data["list-corrupt-shares"] = locators = []
            report.append("Corrupt Shares:")
            summary.append("Corrupt Shares:")
            for (peerid, shnum, f) in sorted(self.bad_shares):
                locators.append( (peerid, self._storage_index, shnum) )
                s = "%s-sh%d" % (idlib.shortnodeid_b2a(peerid), shnum)
                if f.check(CorruptShareError):
                    ft = f.value.reason
                else:
                    ft = str(f)
                report.append(" %s: %s" % (s, ft))
                summary.append(s)
                p = (peerid, self._storage_index, shnum, f)
                r.problems.append(p)
                msg = ("CorruptShareError during mutable verify, "
                       "peerid=%(peerid)s, si=%(si)s, shnum=%(shnum)d, "
                       "where=%(where)s")
                log.msg(format=msg, peerid=idlib.nodeid_b2a(peerid),
                        si=base32.b2a(self._storage_index),
                        shnum=shnum,
                        where=ft,
                        level=log.WEIRD, umid="EkK8QA")
        else:
            data["count-corrupt-shares"] = 0
            data["list-corrupt-shares"] = []

        sharemap = {}
        for verinfo in vmap:
            for (shnum, peerid, timestamp) in vmap[verinfo]:
                shareid = "%s-sh%d" % (smap.summarize_version(verinfo), shnum)
                if shareid not in sharemap:
                    sharemap[shareid] = []
                sharemap[shareid].append(peerid)
        data["sharemap"] = sharemap
        data["servers-responding"] = list(smap.reachable_peers)

        r.set_healthy(healthy)
        r.set_recoverable(bool(recoverable))
        r.set_needs_rebalancing(needs_rebalancing)
        r.set_data(data)
        if healthy:
            r.set_summary("Healthy")
        else:
            r.set_summary("Unhealthy: " + " ".join(summary))
        r.set_report(report)
Example #24
 def data_my_nodeid(self, ctx, data):
     return idlib.nodeid_b2a(self.client.nodeid)
Example #25
 def get_long_tubid(self):
     return idlib.nodeid_b2a(self.nodeid)
Example #26
 def test_nodeid_b2a(self):
     self.failUnlessEqual(idlib.nodeid_b2a(b"\x00" * 20), "a" * 32)
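The expected value in this test pins down what nodeid_b2a does: a 20-byte nodeid becomes exactly 32 base32 characters with no padding, and all-zero input maps to "a" * 32. A rough equivalent using only the standard library, assuming Tahoe's alphabet is the RFC 4648 base32 alphabet in lower case:

import base64

def nodeid_b2a_equivalent(nodeid):
    # 20 bytes = 160 bits = 32 base32 characters, so no '=' padding appears.
    return base64.b32encode(nodeid).decode("ascii").lower()

assert nodeid_b2a_equivalent(b"\x00" * 20) == "a" * 32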
Example #27
    def _fill_checker_results(self, smap, r):
        self._monitor.raise_if_cancelled()
        r.set_servermap(smap.copy())
        healthy = True
        data = {}
        report = []
        summary = []
        vmap = smap.make_versionmap()
        recoverable = smap.recoverable_versions()
        unrecoverable = smap.unrecoverable_versions()
        data["count-recoverable-versions"] = len(recoverable)
        data["count-unrecoverable-versions"] = len(unrecoverable)

        if recoverable:
            report.append("Recoverable Versions: " + "/".join([
                "%d*%s" % (len(vmap[v]), smap.summarize_version(v))
                for v in recoverable
            ]))
        if unrecoverable:
            report.append("Unrecoverable Versions: " + "/".join([
                "%d*%s" % (len(vmap[v]), smap.summarize_version(v))
                for v in unrecoverable
            ]))
        if smap.unrecoverable_versions():
            healthy = False
            summary.append("some versions are unrecoverable")
            report.append("Unhealthy: some versions are unrecoverable")
        if len(recoverable) == 0:
            healthy = False
            summary.append("no versions are recoverable")
            report.append("Unhealthy: no versions are recoverable")
        if len(recoverable) > 1:
            healthy = False
            summary.append("multiple versions are recoverable")
            report.append("Unhealthy: there are multiple recoverable versions")

        needs_rebalancing = False
        if recoverable:
            best_version = smap.best_recoverable_version()
            report.append("Best Recoverable Version: " +
                          smap.summarize_version(best_version))
            counters = self._count_shares(smap, best_version)
            data.update(counters)
            s = counters["count-shares-good"]
            k = counters["count-shares-needed"]
            N = counters["count-shares-expected"]
            if s < N:
                healthy = False
                report.append("Unhealthy: best version has only %d shares "
                              "(encoding is %d-of-%d)" % (s, k, N))
                summary.append("%d shares (enc %d-of-%d)" % (s, k, N))
            hosts = smap.all_peers_for_version(best_version)
            needs_rebalancing = bool(len(hosts) < N)
        elif unrecoverable:
            healthy = False
            # find a k and N from somewhere
            first = list(unrecoverable)[0]
            # not exactly the best version, but that doesn't matter too much
            data.update(self._count_shares(smap, first))
            # leave needs_rebalancing=False: the file being unrecoverable is
            # the bigger problem
        else:
            # couldn't find anything at all
            data["count-shares-good"] = 0
            data["count-shares-needed"] = 3  # arbitrary defaults
            data["count-shares-expected"] = 10
            data["count-good-share-hosts"] = 0
            data["count-wrong-shares"] = 0

        if self.bad_shares:
            data["count-corrupt-shares"] = len(self.bad_shares)
            data["list-corrupt-shares"] = locators = []
            report.append("Corrupt Shares:")
            summary.append("Corrupt Shares:")
            for (peerid, shnum, f) in sorted(self.bad_shares):
                locators.append((peerid, self._storage_index, shnum))
                s = "%s-sh%d" % (idlib.shortnodeid_b2a(peerid), shnum)
                if f.check(CorruptShareError):
                    ft = f.value.reason
                else:
                    ft = str(f)
                report.append(" %s: %s" % (s, ft))
                summary.append(s)
                p = (peerid, self._storage_index, shnum, f)
                r.problems.append(p)
                msg = ("CorruptShareError during mutable verify, "
                       "peerid=%(peerid)s, si=%(si)s, shnum=%(shnum)d, "
                       "where=%(where)s")
                log.msg(format=msg,
                        peerid=idlib.nodeid_b2a(peerid),
                        si=base32.b2a(self._storage_index),
                        shnum=shnum,
                        where=ft,
                        level=log.WEIRD,
                        umid="EkK8QA")
        else:
            data["count-corrupt-shares"] = 0
            data["list-corrupt-shares"] = []

        sharemap = {}
        for verinfo in vmap:
            for (shnum, peerid, timestamp) in vmap[verinfo]:
                shareid = "%s-sh%d" % (smap.summarize_version(verinfo), shnum)
                if shareid not in sharemap:
                    sharemap[shareid] = []
                sharemap[shareid].append(peerid)
        data["sharemap"] = sharemap
        data["servers-responding"] = list(smap.reachable_peers)

        r.set_healthy(healthy)
        r.set_recoverable(bool(recoverable))
        r.set_needs_rebalancing(needs_rebalancing)
        r.set_data(data)
        if healthy:
            r.set_summary("Healthy")
        else:
            r.set_summary("Unhealthy: " + " ".join(summary))
        r.set_report(report)
Example #28
 def data_my_nodeid(self, ctx, data):
     return idlib.nodeid_b2a(self.introducer_node.nodeid)
Example #29
 def test_nodeid_b2a(self):
     result = idlib.nodeid_b2a(b"\x00" * 20)
     self.assertEqual(result, "a" * 32)
     self.assertIsInstance(result, str)
Example #30
 def __repr__(self):
     if self._nodeid:
         nodeid_s = idlib.nodeid_b2a(self._nodeid)
     else:
         nodeid_s = "[None]"
     return "<WriteBucketProxy for node %s>" % nodeid_s
Example #32
 def longname(self):
     return idlib.nodeid_b2a(self._tubid)
Example #33
 def data_my_nodeid(self, ctx, data):
     return idlib.nodeid_b2a(self.introducer_node.nodeid)
Example #34
 def __str__(self):
     short_peerid = idlib.nodeid_b2a(self.peerid)[:8]
     return "<CorruptShareError peerid=%s shnum[%d]: %s" % (short_peerid,
                                                            self.shnum,
                                                            self.reason)
Example #35
 def data_nodeid(self, ctx, storage):
     return idlib.nodeid_b2a(self.storage.my_nodeid)
Example #36
 def get_longname(self):
     return idlib.nodeid_b2a(self.serverid)
Example #37
 def get_longname(self):
     return idlib.nodeid_b2a(self.serverid)
Example #38
 def data_nodeid(self, ctx, storage):
     return idlib.nodeid_b2a(self.storage.my_nodeid)
Example #39
 def get_long_tubid(self):
     return idlib.nodeid_b2a(self.nodeid)
Example #40
 def __str__(self):
     short_peerid = idlib.nodeid_b2a(self.peerid)[:8]
     return "<CorruptShareError peerid=%s shnum[%d]: %s" % (
         short_peerid, self.shnum, self.reason)
Example #41
        def _check1(res):
            log.msg("doing _check1")
            dc = self.the_introducer._debug_counts
            if server_version == V1:
                # each storage server publishes a record, and (after its
                # 'subscribe' has been ACKed) also publishes a "stub_client".
                # The non-storage client (which subscribes) also publishes a
                # stub_client. There is also one "boring" service. The number
                # of messages is higher, because the stub_clients aren't
                # published until after we get the 'subscribe' ack (since we
                # don't realize that we're dealing with a v1 server [which
                # needs stub_clients] until then), and the act of publishing
                # the stub_client causes us to re-send all previous
                # announcements.
                self.failUnlessEqual(
                    dc["inbound_message"] - dc["inbound_duplicate"],
                    NUM_STORAGE + NUM_CLIENTS + 1)
            else:
                # each storage server publishes a record. There is also one
                # "stub_client" and one "boring"
                self.failUnlessEqual(dc["inbound_message"], NUM_STORAGE + 2)
                self.failUnlessEqual(dc["inbound_duplicate"], 0)
            self.failUnlessEqual(dc["inbound_update"], 0)
            self.failUnlessEqual(dc["inbound_subscribe"], NUM_CLIENTS)
            # the number of outbound messages is tricky.. I think it depends
            # upon a race between the publish and the subscribe messages.
            self.failUnless(dc["outbound_message"] > 0)
            # each client subscribes to "storage", and each server publishes
            self.failUnlessEqual(dc["outbound_announcements"],
                                 NUM_STORAGE * NUM_CLIENTS)

            for c in subscribing_clients:
                cdc = c._debug_counts
                self.failUnless(cdc["inbound_message"])
                self.failUnlessEqual(cdc["inbound_announcement"], NUM_STORAGE)
                self.failUnlessEqual(cdc["wrong_service"], 0)
                self.failUnlessEqual(cdc["duplicate_announcement"], 0)
                self.failUnlessEqual(cdc["update"], 0)
                self.failUnlessEqual(cdc["new_announcement"], NUM_STORAGE)
                anns = received_announcements[c]
                self.failUnlessEqual(len(anns), NUM_STORAGE)

                nodeid0 = tubs[clients[0]].tubID
                ann = anns[nodeid0]
                nick = ann["nickname"]
                self.failUnlessEqual(type(nick), unicode)
                self.failUnlessEqual(nick, NICKNAME % "0")
            if server_version == V1:
                for c in publishing_clients:
                    cdc = c._debug_counts
                    expected = 1  # storage
                    if c is clients[2]:
                        expected += 1  # boring
                    if c is not clients[0]:
                        # the v2 client tries to call publish_v2, which fails
                        # because the server is v1. It then re-sends
                        # everything it has so far, plus a stub_client record
                        expected = 2 * expected + 1
                    if c is clients[0]:
                        # we always tell v1 client to send stub_client
                        expected += 1
                    self.failUnlessEqual(cdc["outbound_message"], expected)
            else:
                for c in publishing_clients:
                    cdc = c._debug_counts
                    expected = 1
                    if c in [
                            clients[0],  # stub_client
                            clients[2],  # boring
                    ]:
                        expected = 2
                    self.failUnlessEqual(cdc["outbound_message"], expected)
            # now check the web status, make sure it renders without error
            ir = introweb.IntroducerRoot(self.parent)
            self.parent.nodeid = "NODEID"
            text = ir.renderSynchronously().decode("utf-8")
            self.failUnlessIn(NICKNAME % "0", text)  # the v1 client
            self.failUnlessIn(NICKNAME % "1", text)  # a v2 client
            for i in range(NUM_STORAGE):
                self.failUnlessIn(printable_serverids[i], text,
                                  (i, printable_serverids[i], text))
                # make sure there isn't a double-base32ed string too
                self.failIfIn(idlib.nodeid_b2a(printable_serverids[i]), text,
                              (i, printable_serverids[i], text))
            log.msg("_check1 done")
Example #42
 def data_my_nodeid(self, ctx, data):
     return idlib.nodeid_b2a(self.client.nodeid)