Example #1
def sign_to_foolscap(ann, sk):
    # return (bytes, sig-str, pubkey-str). A future HTTP-based serialization
    # will use JSON({msg:b64(JSON(msg).utf8), sig:v0-b64(sig),
    # pubkey:v0-b64(pubkey)}).
    msg = json.dumps(ann).encode("utf-8")
    sig = "v0-"+base32.b2a(sk.sign(msg))
    vk_bytes = sk.get_verifying_key_bytes()
    ann_t = (msg, sig, "v0-"+base32.b2a(vk_bytes))
    return ann_t
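
A note on the example above: the comment anticipates an HTTP-based serialization. A minimal sketch of that JSON envelope, assuming the same sk.sign()/get_verifying_key_bytes() API as the example; the helper name is hypothetical and the field layout is taken straight from the comment:

import base64, json

def sign_to_json_sketch(ann, sk):
    # hedged sketch of the JSON({msg, sig, pubkey}) envelope described in
    # the comment; "v0-" marks the key/signature version, as above
    msg = json.dumps(ann).encode("utf-8")
    return json.dumps({
        "msg": base64.b64encode(msg).decode("ascii"),
        "sig": "v0-" + base64.b64encode(sk.sign(msg)).decode("ascii"),
        "pubkey": "v0-" + base64.b64encode(
            sk.get_verifying_key_bytes()).decode("ascii"),
    })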
Example #2
 def test_secrets(self):
     basedir = "test_client.Basic.test_secrets"
     os.mkdir(basedir)
     open(os.path.join(basedir, "introducer.furl"), "w").write("")
     c = client.Client(basedir)
     secret_fname = os.path.join(basedir, "private", "secret")
     self.failUnless(os.path.exists(secret_fname), secret_fname)
     renew_secret = c.get_renewal_secret()
     self.failUnless(base32.b2a(renew_secret))
     cancel_secret = c.get_cancel_secret()
     self.failUnless(base32.b2a(cancel_secret))
Example #3
    def to_string(self):
        assert isinstance(self.needed_shares, int)
        assert isinstance(self.total_shares, int)
        assert isinstance(self.size, (int,long))

        return ('URI:CHK:%s:%s:%d:%d:%d' %
                (base32.b2a(self.key),
                 base32.b2a(self.uri_extension_hash),
                 self.needed_shares,
                 self.total_shares,
                 self.size))
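
Going the other way, splitting such a cap string back into its fields, is mechanical. A minimal sketch, not the project's actual parser (which lives in allmydata.uri):

from allmydata.util import base32

def parse_chk_uri_sketch(s):
    # "URI:CHK:<key>:<ueb-hash>:<k>:<N>:<size>" -- hypothetical hand-rolled
    # parser, for illustration only
    assert s.startswith("URI:CHK:")
    key_s, ueb_s, k, n, size = s.split(":")[2:]
    return (base32.a2b(key_s), base32.a2b(ueb_s), int(k), int(n), int(size))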
Example #4
 def test_secrets(self):
     basedir = "test_client.Basic.test_secrets"
     os.mkdir(basedir)
     fileutil.write(os.path.join(basedir, "tahoe.cfg"), BASECONFIG)
     c = client.Client(basedir)
     secret_fname = os.path.join(basedir, "private", "secret")
     self.failUnless(os.path.exists(secret_fname), secret_fname)
     renew_secret = c.get_renewal_secret()
     self.failUnless(base32.b2a(renew_secret))
     cancel_secret = c.get_cancel_secret()
     self.failUnless(base32.b2a(cancel_secret))
Example #5
 def _check_integrity(self, data):
     h = uri_extension_hash(data)
     if h != self._verifycap.uri_extension_hash:
         msg = ("The copy of uri_extension we received from %s was bad: wanted %s, got %s" %
                (self._readbucketproxy,
                 base32.b2a(self._verifycap.uri_extension_hash),
                 base32.b2a(h)))
         if self._fetch_failures is not None:
             self._fetch_failures["uri_extension"] += 1
         raise BadURIExtensionHashValue(msg)
     else:
         return data
Example #6
def make_storagebroker(s=None, num_peers=10):
    if not s:
        s = FakeStorage()
    peerids = [tagged_hash("peerid", "%d" % i)[:20]
               for i in range(num_peers)]
    storage_broker = StorageFarmBroker(True, None)
    for peerid in peerids:
        fss = FakeStorageServer(peerid, s)
        ann = {"anonymous-storage-FURL": "pb://%s@nowhere/fake" % base32.b2a(peerid),
               "permutation-seed-base32": base32.b2a(peerid) }
        storage_broker.test_add_rref(peerid, fss, ann)
    return storage_broker
Example #7
def dump_mutable_share(options):
    from allmydata.storage.mutable import MutableShareFile
    from allmydata.util import base32, idlib
    out = options.stdout
    m = MutableShareFile(options['filename'])
    f = open(options['filename'], "rb")
    WE, nodeid = m._read_write_enabler_and_nodeid(f)
    num_extra_leases = m._read_num_extra_leases(f)
    data_length = m._read_data_length(f)
    extra_lease_offset = m._read_extra_lease_offset(f)
    container_size = extra_lease_offset - m.DATA_OFFSET
    leases = list(m._enumerate_leases(f))

    share_type = "unknown"
    f.seek(m.DATA_OFFSET)
    version = f.read(1)
    if version == "\x00":
        # this slot contains an SDMF share
        share_type = "SDMF"
    elif version == "\x01":
        share_type = "MDMF"
    f.close()

    print >>out
    print >>out, "Mutable slot found:"
    print >>out, " share_type: %s" % share_type
    print >>out, " write_enabler: %s" % base32.b2a(WE)
    print >>out, " WE for nodeid: %s" % idlib.nodeid_b2a(nodeid)
    print >>out, " num_extra_leases: %d" % num_extra_leases
    print >>out, " container_size: %d" % container_size
    print >>out, " data_length: %d" % data_length
    if leases:
        for (leasenum, lease) in leases:
            print >>out
            print >>out, " Lease #%d:" % leasenum
            print >>out, "  ownerid: %d" % lease.owner_num
            when = format_expiration_time(lease.expiration_time)
            print >>out, "  expires in %s" % when
            print >>out, "  renew_secret: %s" % base32.b2a(lease.renew_secret)
            print >>out, "  cancel_secret: %s" % base32.b2a(lease.cancel_secret)
            print >>out, "  secrets are for nodeid: %s" % idlib.nodeid_b2a(lease.nodeid)
    else:
        print >>out, "No leases."
    print >>out

    if share_type == "SDMF":
        dump_SDMF_share(m, data_length, options)
    elif share_type == "MDMF":
        dump_MDMF_share(m, data_length, options)

    return 0
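
A hedged usage note: dump_mutable_share reads options['filename'] and options.stdout, which suggests it backs a share-dumping debug CLI command; assuming the usual Tahoe-LAFS entry point, an invocation would look like:

tahoe debug dump-share <path-to-share-file>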
Example #8
    def test_pack(self):
        data = {"stuff": "value", "size": 12, "needed_shares": 3, "big_hash": hashutil.tagged_hash("foo", "bar")}
        ext = uri.pack_extension(data)
        d = uri.unpack_extension(ext)
        self.failUnlessReallyEqual(d["stuff"], "value")
        self.failUnlessReallyEqual(d["size"], 12)
        self.failUnlessReallyEqual(d["big_hash"], hashutil.tagged_hash("foo", "bar"))

        readable = uri.unpack_extension_readable(ext)
        self.failUnlessReallyEqual(readable["needed_shares"], 3)
        self.failUnlessReallyEqual(readable["stuff"], "value")
        self.failUnlessReallyEqual(readable["size"], 12)
        self.failUnlessReallyEqual(readable["big_hash"], base32.b2a(hashutil.tagged_hash("foo", "bar")))
        self.failUnlessReallyEqual(readable["UEB_hash"], base32.b2a(hashutil.uri_extension_hash(ext)))
Example #9
 def _clobber_shares(ignored):
     # delete one, corrupt a second
     shares = self.find_uri_shares(self.uri)
     self.failUnlessReallyEqual(len(shares), 10)
     os.unlink(shares[0][2])
     cso = debug.CorruptShareOptions()
     cso.stdout = StringIO()
     cso.parseOptions([shares[1][2]])
     storage_index = uri.from_string(self.uri).get_storage_index()
     self._corrupt_share_line = "  server %s, SI %s, shnum %d" % \
                                (base32.b2a(shares[1][1]),
                                 base32.b2a(storage_index),
                                 shares[1][0])
     debug.corrupt_share(cso)
Example #10
    def run(self, options):
        stderr = options.stderr
        self.options = options
        self.ophandle = ophandle = base32.b2a(os.urandom(16))
        nodeurl = options['node-url']
        if not nodeurl.endswith("/"):
            nodeurl += "/"
        self.nodeurl = nodeurl
        where = options.where
        try:
            rootcap, path = get_alias(options.aliases, where, DEFAULT_ALIAS)
        except UnknownAliasError as e:
            e.display(stderr)
            return 1
        if path == '/':
            path = ''
        url = nodeurl + "uri/%s" % urllib.quote(rootcap)
        if path:
            url += "/" + escape_path(path)
        # todo: should it end with a slash?
        url = self.make_url(url, ophandle)
        resp = do_http("POST", url)
        if resp.status not in (200, 302):
            print(format_http_error("ERROR", resp), file=stderr)
            return 1
        # now we poll for results. We nominally poll at t=1, 5, 10, 30, 60,
        # 90, k*120 seconds, but if the poll takes non-zero time, that will
        # be slightly longer. I'm not worried about trying to make up for
        # that time.

        return self.wait_for_results()
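
A small sketch of the nominal poll schedule the closing comment describes (t=1, 5, 10, 30, 60, 90, then k*120 seconds); this generator is hypothetical and not part of the code above:

def poll_times_sketch():
    # nominal schedule: 1, 5, 10, 30, 60, 90, 120, 240, 360, ...
    for t in (1, 5, 10, 30, 60, 90):
        yield t
    k = 1
    while True:
        yield k * 120
        k += 1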
Example #11
 def abbrev_verinfo_dict(self, verinfo_d):
     output = {}
     for verinfo,value in verinfo_d.items():
         (seqnum, root_hash, IV, segsize, datalength, k, N, prefix,
          offsets_tuple) = verinfo
         output["%d-%s" % (seqnum, base32.b2a(root_hash)[:4])] = value
     return output
Example #12
    def get_segment(self, segnum, logparent=None):
        """Begin downloading a segment. I return a tuple (d, c): 'd' is a
        Deferred that fires with (offset,data) when the desired segment is
        available, and c is an object on which c.cancel() can be called to
        disavow interest in the segment (after which 'd' will never fire).

        You probably need to know the segment size before calling this,
        unless you want the first few bytes of the file. If you ask for a
        segment number which turns out to be too large, the Deferred will
        errback with BadSegmentNumberError.

        The Deferred fires with the offset of the first byte of the data
        segment, so that you can call get_segment() before knowing the
        segment size, and still know which data you received.

        The Deferred can also errback with other fatal problems, such as
        NotEnoughSharesError, NoSharesError, or BadCiphertextHashError.
        """
        lp = log.msg(format="imm Node(%(si)s).get_segment(%(segnum)d)",
                     si=base32.b2a(self._verifycap.storage_index)[:8],
                     segnum=segnum,
                     level=log.OPERATIONAL, parent=logparent, umid="UKFjDQ")
        self._download_status.add_segment_request(segnum, now())
        d = defer.Deferred()
        c = Cancel(self._cancel_request)
        self._segment_requests.append( (segnum, d, c, lp) )
        self._start_new_segment()
        return (d, c)
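
A usage sketch of the (d, c) protocol the docstring describes, with a hypothetical node object:

def fetch_first_segment_sketch(node):
    # 'node' is assumed to provide get_segment() as documented above
    d, c = node.get_segment(0)
    def _got(res):
        offset, data = res
        print("segment starts at offset %d, %d bytes" % (offset, len(data)))
    def _failed(f):
        # BadSegmentNumberError, NotEnoughSharesError, etc. arrive here
        print("segment fetch failed: %s" % (f,))
    d.addCallbacks(_got, _failed)
    return c   # call c.cancel() later to disavow interest in the segment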
Example #13
 def create_fake_client(self):
     sb = StorageFarmBroker(None, True)
     # s.get_name() (the "short description") will be "v0-00000000".
     # s.get_longname() will include the -long suffix.
     # s.get_peerid() (i.e. tubid) will be "aaa.." or "777.." or "ceir.."
     servers = [("v0-00000000-long", "\x00"*20, "peer-0"),
                ("v0-ffffffff-long", "\xff"*20, "peer-f"),
                ("v0-11111111-long", "\x11"*20, "peer-11")]
     for (key_s, peerid, nickname) in servers:
         tubid_b32 = base32.b2a(peerid)
         furl = "pb://%s@nowhere/fake" % tubid_b32
         ann = { "version": 0,
                 "service-name": "storage",
                 "anonymous-storage-FURL": furl,
                 "permutation-seed-base32": "",
                 "nickname": unicode(nickname),
                 "app-versions": {}, # need #466 and v2 introducer
                 "my-version": "ver",
                 "oldest-supported": "oldest",
                 }
         s = NativeStorageServer(key_s, ann)
         sb.test_add_server(peerid, s) # XXX: maybe use key_s?
     c = FakeClient()
     c.storage_broker = sb
     return c
Example #14
def unpack_extension_readable(data):
    unpacked = unpack_extension(data)
    unpacked["UEB_hash"] = hashutil.uri_extension_hash(data)
    for k in sorted(unpacked.keys()):
        if 'hash' in k:
            unpacked[k] = base32.b2a(unpacked[k])
    return unpacked
Example #15
    def test_static_servers(self):
        broker = StorageFarmBroker(True, lambda h: Mock())

        key_s = 'v0-1234-1'
        servers_yaml = """\
storage:
  v0-1234-1:
    ann:
      anonymous-storage-FURL: pb://ge@nowhere/fake
      permutation-seed-base32: aaaaaaaaaaaaaaaaaaaaaaaa
"""
        servers = yamlutil.safe_load(servers_yaml)
        permseed = base32.a2b("aaaaaaaaaaaaaaaaaaaaaaaa")
        broker.set_static_servers(servers["storage"])
        self.failUnlessEqual(len(broker._static_server_ids), 1)
        s = broker.servers[key_s]
        self.failUnlessEqual(s.announcement,
                             servers["storage"]["v0-1234-1"]["ann"])
        self.failUnlessEqual(s.get_serverid(), key_s)
        self.assertEqual(s.get_permutation_seed(), permseed)

        # if the Introducer announces the same thing, we're supposed to
        # ignore it

        ann2 = {
            "service-name": "storage",
            "anonymous-storage-FURL": "pb://{}@nowhere/fake2".format(base32.b2a(str(1))),
            "permutation-seed-base32": "bbbbbbbbbbbbbbbbbbbbbbbb",
        }
        broker._got_announcement(key_s, ann2)
        s2 = broker.servers[key_s]
        self.assertIdentical(s2, s)
        self.assertEqual(s2.get_permutation_seed(), permseed)
Example #16
    def add_node(self, node, path):
        dirnode.DeepStats.add_node(self, node, path)
        data = {"path": path,
                "cap": node.get_uri()}

        if IDirectoryNode.providedBy(node):
            data["type"] = "directory"
        elif IFileNode.providedBy(node):
            data["type"] = "file"
        else:
            data["type"] = "unknown"

        v = node.get_verify_cap()
        if v:
            v = v.to_string()
        data["verifycap"] = v or ""

        r = node.get_repair_cap()
        if r:
            r = r.to_string()
        data["repaircap"] = r or ""

        si = node.get_storage_index()
        if si:
            si = base32.b2a(si)
        data["storage-index"] = si or ""

        if self.repair:
            d = node.check_and_repair(self.monitor, self.verify, self.add_lease)
            d.addCallback(self.add_check_and_repair, data)
        else:
            d = node.check(self.monitor, self.verify, self.add_lease)
            d.addCallback(self.add_check, data)
        d.addCallback(self.write_line)
        return d
Example #17
    def add_node(self, node, path):
        dirnode.DeepStats.add_node(self, node, path)
        d = {"path": path,
             "cap": node.get_uri()}

        if IDirectoryNode.providedBy(node):
            d["type"] = "directory"
        elif IFileNode.providedBy(node):
            d["type"] = "file"
        else:
            d["type"] = "unknown"

        v = node.get_verify_cap()
        if v:
            v = v.to_string()
        d["verifycap"] = v or ""

        r = node.get_repair_cap()
        if r:
            r = r.to_string()
        d["repaircap"] = r or ""

        si = node.get_storage_index()
        if si:
            si = base32.b2a(si)
        d["storage-index"] = si or ""

        j = json.dumps(d, ensure_ascii=True)
        assert "\n" not in j
        self.req.write(j+"\n")
Example #18
    def render_JSON(self, req):
        req.setHeader("content-type", "text/plain")
        m = self.monitor
        s = m.get_status()

        if m.origin_si:
            origin_base32 = base32.b2a(m.origin_si)
        else:
            origin_base32 = ""
        status = { "stats": s["stats"],
                   "finished": m.is_finished(),
                   "origin": origin_base32,
                   }
        if m.is_finished():
            # don't return manifest/verifycaps/SIs unless the operation is
            # done, to save on CPU/memory (both here and in the HTTP client
            # who has to unpack the JSON). Tests show that the ManifestWalker
            # needs about 1092 bytes per item, the JSON we generate here
            # requires about 503 bytes per item, and some internal overhead
            # (perhaps transport-layer buffers in twisted.web?) requires an
            # additional 1047 bytes per item.
            status.update({ "manifest": s["manifest"],
                            "verifycaps": [i for i in s["verifycaps"]],
                            "storage-index": [i for i in s["storage-index"]],
                            })
            # simplejson doesn't know how to serialize a set. We use a
            # generator that walks the set rather than list(setofthing) to
            # save a small amount of memory (4B*len) and a moderate amount of
            # CPU.
        return json.dumps(status, indent=1)
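
On the set-serialization comment: json/simplejson raise TypeError on a set, so the code walks each set with a comprehension instead of handing the set to the encoder. A standalone illustration:

import json

caps = set(["URI:CHK:aaaa", "URI:CHK:bbbb"])
# json.dumps(caps) would raise TypeError; walk the set instead
print(json.dumps({"verifycaps": [c for c in caps]}, indent=1))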
Example #19
    def render_GET(self, ctx):
        req = IRequest(ctx)
        # This is where all of the directory-related ?t=* code goes.
        t = get_arg(req, "t", "").strip()

        # t=info contains variable ophandles, t=rename-form contains the name
        # of the child being renamed. Neither is allowed an ETag.
        FIXED_OUTPUT_TYPES =  ["", "json", "uri", "readonly-uri"]
        if not self.node.is_mutable() and t in FIXED_OUTPUT_TYPES:
            si = self.node.get_storage_index()
            if si and req.setETag('DIR:%s-%s' % (base32.b2a(si), t or "")):
                return ""

        if not t:
            # render the directory as HTML, using the docFactory and Nevow's
            # whole templating thing.
            return DirectoryAsHTML(self.node,
                                   self.client.mutable_file_default)

        if t == "json":
            return DirectoryJSONMetadata(ctx, self.node)
        if t == "info":
            return MoreInfo(self.node)
        if t == "uri":
            return DirectoryURI(ctx, self.node)
        if t == "readonly-uri":
            return DirectoryReadonlyURI(ctx, self.node)
        if t == 'rename-form':
            return RenameForm(self.node)

        raise WebError("GET directory: bad t=%s" % t)
Example #20
    def _find_overlap(self, events, start_key, end_key):
        # given a list of event dicts, return a new list in which each event
        # has an extra "row" key (an int, starting at 0), and if appropriate
        # a "serverid" key (ascii-encoded server id), replacing the "server"
        # key. This is a hint to our JS frontend about how to overlap the
        # parts of the graph it is drawing.

        # we must always make a copy, since we're going to be adding keys
        # and don't want to change the original objects. If we're
        # stringifying serverids, we'll also be changing the serverid keys.
        new_events = []
        rows = []
        for ev in events:
            ev = ev.copy()
            if ev.has_key('server'):
                ev["serverid"] = base32.b2a(ev["server"].get_serverid())
                del ev["server"]
            # find an empty slot in the rows
            free_slot = None
            for row,finished in enumerate(rows):
                if finished is not None:
                    if ev[start_key] > finished:
                        free_slot = row
                        break
            if free_slot is None:
                free_slot = len(rows)
                rows.append(ev[end_key])
            else:
                rows[free_slot] = ev[end_key]
            ev["row"] = free_slot
            new_events.append(ev)
        return new_events
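
A quick illustration of the row-packing behaviour with hypothetical event dicts: overlapping events land on different rows, and a later event reuses a freed row. (Here renderer stands for any instance of the class defining _find_overlap; the method does not touch self.)

events = [
    {"start": 0, "end": 5},
    {"start": 2, "end": 6},   # overlaps the first -> assigned a new row
    {"start": 7, "end": 9},   # starts after the first ended -> reuses row 0
]
out = renderer._find_overlap(events, "start", "end")
print([ev["row"] for ev in out])   # expected: [0, 1, 0]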
Example #21
    def _got_results_one_share(self, shnum, data, peerid, lp):
        self.log(format="_got_results: got shnum #%(shnum)d from peerid %(peerid)s",
                 shnum=shnum,
                 peerid=idlib.shortnodeid_b2a(peerid),
                 level=log.NOISY,
                 parent=lp)

        # this might raise NeedMoreDataError, if the pubkey and signature
        # live at some weird offset. That shouldn't happen, so I'm going to
        # treat it as a bad share.
        (seqnum, root_hash, IV, k, N, segsize, datalength,
         pubkey_s, signature, prefix) = unpack_prefix_and_signature(data)

        if not self._node.get_pubkey():
            fingerprint = hashutil.ssk_pubkey_fingerprint_hash(pubkey_s)
            assert len(fingerprint) == 32
            if fingerprint != self._node.get_fingerprint():
                raise CorruptShareError(peerid, shnum,
                                        "pubkey doesn't match fingerprint")
            self._node._populate_pubkey(self._deserialize_pubkey(pubkey_s))

        if self._need_privkey:
            self._try_to_extract_privkey(data, peerid, shnum, lp)

        (ig_version, ig_seqnum, ig_root_hash, ig_IV, ig_k, ig_N,
         ig_segsize, ig_datalen, offsets) = unpack_header(data)
        offsets_tuple = tuple( [(key,value) for key,value in offsets.items()] )

        verinfo = (seqnum, root_hash, IV, segsize, datalength, k, N, prefix,
                   offsets_tuple)

        if verinfo not in self._valid_versions:
            # it's a new pair. Verify the signature.
            valid = self._node.get_pubkey().verify(prefix, signature)
            if not valid:
                raise CorruptShareError(peerid, shnum, "signature is invalid")

            # ok, it's a valid verinfo. Add it to the list of validated
            # versions.
            self.log(" found valid version %d-%s from %s-sh%d: %d-%d/%d/%d"
                     % (seqnum, base32.b2a(root_hash)[:4],
                        idlib.shortnodeid_b2a(peerid), shnum,
                        k, N, segsize, datalength),
                     parent=lp)
            self._valid_versions.add(verinfo)
        # We now know that this is a valid candidate verinfo.

        if (peerid, shnum) in self._servermap.bad_shares:
            # we've been told that the rest of the data in this share is
            # unusable, so don't add it to the servermap.
            self.log("but we've been told this is a bad share",
                     parent=lp, level=log.UNUSUAL)
            return verinfo

        # Add the info to our servermap.
        timestamp = time.time()
        self._servermap.add_new_share(peerid, shnum, verinfo, timestamp)
        # and the versionmap
        self.versionmap.add(verinfo, (shnum, peerid, timestamp))
        return verinfo
Example #22
 def _got_si(si):
     if verbose: print "SI", base32.b2a(si),
     peerlist = get_permuted_peers(si)
     if peerlist[0] == target:
         # great!
         if verbose: print "  yay!"
         fn = base32.b2a(target)
         if nodes[target]:
             nickname = nodes[target].replace("/", "_")
             fn += "-" + nickname
         fn += ".txt"
         fn = os.path.join("canaries", fn)
         open(fn, "w").write(data)
         return True
     # nope, must try again
     if verbose: print "  boo"
     return False
Example #23
 def check_storageindex(self, si):
     self.read_blacklist()
     reason = self.entries.get(si, None)
     if reason is not None:
         # log this to logs/twistd.log, since web logs go there too
         twisted_log.msg("blacklist prohibited access to SI %s: %s" %
                         (base32.b2a(si), reason))
     return reason
Example #24
    def render_GET(self, ctx):
        req = IRequest(ctx)
        t = get_arg(req, "t", "").strip()

        # t=info contains variable ophandles, so is not allowed an ETag.
        FIXED_OUTPUT_TYPES = ["", "json", "uri", "readonly-uri"]
        if not self.node.is_mutable() and t in FIXED_OUTPUT_TYPES:
            # if the client already has the ETag then we can
            # short-circuit the whole process.
            si = self.node.get_storage_index()
            if si and req.setETag('%s-%s' % (base32.b2a(si), t or "")):
                return ""

        if not t:
            # just get the contents
            # the filename arrives as part of the URL or in a form input
            # element, and will be sent back in a Content-Disposition header.
            # Different browsers use various character sets for this name,
            # sometimes depending upon how language environment is
            # configured. Firefox sends the equivalent of
            # urllib.quote(name.encode("utf-8")), while IE7 sometimes does
            # latin-1. Browsers cannot agree on how to interpret the name
            # they see in the Content-Disposition header either, despite some
            # 11-year old standards (RFC2231) that explain how to do it
            # properly. So we assume that at least the browser will agree
            # with itself, and echo back the same bytes that we were given.
            filename = get_arg(req, "filename", self.name) or "unknown"
            d = self.node.get_best_readable_version()
            d.addCallback(lambda dn: FileDownloader(dn, filename))
            return d
        if t == "json":
            # We do this to make sure that fields like size and
            # mutable-type (which depend on the file on the grid and not
            # just on the cap) are filled in. The latter gets used in
            # tests, in particular.
            #
            # TODO: Make it so that the servermap knows how to update in
            # a mode specifically designed to fill in these fields, and
            # then update it in that mode.
            if self.node.is_mutable():
                d = self.node.get_servermap(MODE_READ)
            else:
                d = defer.succeed(None)
            if self.parentnode and self.name:
                d.addCallback(lambda ignored:
                    self.parentnode.get_metadata_for(self.name))
            else:
                d.addCallback(lambda ignored: None)
            d.addCallback(lambda md: FileJSONMetadata(ctx, self.node, md))
            return d
        if t == "info":
            return MoreInfo(self.node)
        if t == "uri":
            return FileURI(ctx, self.node)
        if t == "readonly-uri":
            return FileReadOnlyURI(ctx, self.node)
        raise WebError("GET file: bad t=%s" % t)
Example #25
 def json(self, req):
     req.setHeader("content-type", "text/plain")
     data = {}
     dyhb_events = []
     for serverid,requests in self.download_status.dyhb_requests.iteritems():
         for req in requests:
             dyhb_events.append( (base32.b2a(serverid),) + req )
     dyhb_events.sort(key=lambda req: req[1])
     data["dyhb"] = dyhb_events
     request_events = []
     for serverid,requests in self.download_status.requests.iteritems():
         for req in requests:
             request_events.append( (base32.b2a(serverid),) + req )
     request_events.sort(key=lambda req: (req[4],req[1]))
     data["requests"] = request_events
     data["segment"] = self.download_status.segment_events
     data["read"] = self.download_status.read_events
     return simplejson.dumps(data, indent=1) + "\n"
Example #26
 def add_node(self, node, path):
     self.manifest.append( (tuple(path), node.get_uri()) )
     si = node.get_storage_index()
     if si:
         self.storage_index_strings.add(base32.b2a(si))
     v = node.get_verify_cap()
     if v:
         self.verifycaps.add(v.to_string())
     return DeepStats.add_node(self, node, path)
Example #27
 def _render_si_link(self, ctx, storage_index):
     si_s = base32.b2a(storage_index)
     req = inevow.IRequest(ctx)
     ophandle = req.prepath[-1]
     target = "%s/operations/%s/%s" % (get_root(ctx), ophandle, si_s)
     output = get_arg(ctx, "output")
     if output:
         target = target + "?output=%s" % output
     return T.a(href=target)[si_s]
Example #28
 def add_one_server(x):
     data["anonymous-storage-FURL"] = "pb://{}@nowhere/fake".format(base32.b2a(str(x)))
     tub = Mock()
     with patch("allmydata.storage_client.Tub", side_effect=[tub]):
         got_announcement('v0-1234-{}'.format(x), data)
         self.assertEqual(tub.mock_calls[-1][0], 'connectTo')
         got_connection = tub.mock_calls[-1][1][1]
     rref = Mock()
     rref.callRemote = Mock(return_value=succeed(1234))
     got_connection(rref)
Example #29
 def add_one_server(x):
     data["anonymous-storage-FURL"] = "pb://{}@nowhere/fake".format(base32.b2a(str(x)))
     tub = Mock()
     new_tubs.append(tub)
     got_announcement('v0-1234-{}'.format(x), data)
     self.assertEqual(tub.mock_calls[-1][0], 'connectTo')
     got_connection = tub.mock_calls[-1][1][1]
     rref = Mock()
     rref.callRemote = Mock(return_value=succeed(1234))
     got_connection(rref)
Example #30
    def json(self, ctx):
        inevow.IRequest(ctx).setHeader("content-type", "text/plain")
        res = self.monitor.get_status()
        data = {}
        data["finished"] = self.monitor.is_finished()
        data["root-storage-index"] = res.get_root_storage_index_string()
        c = res.get_counters()
        data["count-objects-checked"] = c["count-objects-checked"]

        data["count-objects-healthy-pre-repair"] = c["count-objects-healthy-pre-repair"]
        data["count-objects-unhealthy-pre-repair"] = c["count-objects-unhealthy-pre-repair"]
        data["count-objects-healthy-post-repair"] = c["count-objects-healthy-post-repair"]
        data["count-objects-unhealthy-post-repair"] = c["count-objects-unhealthy-post-repair"]

        data["count-repairs-attempted"] = c["count-repairs-attempted"]
        data["count-repairs-successful"] = c["count-repairs-successful"]
        data["count-repairs-unsuccessful"] = c["count-repairs-unsuccessful"]

        data["count-corrupt-shares-pre-repair"] = c["count-corrupt-shares-pre-repair"]
        data["count-corrupt-shares-post-repair"] = c["count-corrupt-shares-pre-repair"]

        data["list-corrupt-shares"] = [ (idlib.nodeid_b2a(serverid),
                                         base32.b2a(storage_index),
                                         shnum)
                                        for (serverid, storage_index, shnum)
                                        in res.get_corrupt_shares() ]

        remaining_corrupt = [ (idlib.nodeid_b2a(serverid),
                               base32.b2a(storage_index),
                               shnum)
                              for (serverid, storage_index, shnum)
                              in res.get_remaining_corrupt_shares() ]
        data["list-remaining-corrupt-shares"] = remaining_corrupt

        unhealthy = [ (path_t,
                       json_check_results(crr.get_pre_repair_results()))
                      for (path_t, crr)
                      in res.get_all_results().items()
                      if not crr.get_pre_repair_results().is_healthy() ]
        data["list-unhealthy-files"] = unhealthy
        data["stats"] = res.get_stats()
        return simplejson.dumps(data, indent=1) + "\n"
Example #31
 def get_name(self):
     return base32.b2a(self.serverid)[:8]
Example #32
def describe_share(abs_sharefile, si_s, shnum_s, now, out):
    from allmydata import uri
    from allmydata.storage.mutable import MutableShareFile
    from allmydata.storage.immutable import ShareFile
    from allmydata.mutable.layout import unpack_share
    from allmydata.mutable.common import NeedMoreDataError
    from allmydata.immutable.layout import ReadBucketProxy
    from allmydata.util import base32
    from allmydata.util.encodingutil import quote_output
    import struct

    f = open(abs_sharefile, "rb")
    prefix = f.read(32)

    if prefix == MutableShareFile.MAGIC:
        # mutable share
        m = MutableShareFile(abs_sharefile)
        WE, nodeid = m._read_write_enabler_and_nodeid(f)
        data_length = m._read_data_length(f)
        expiration_time = min(
            [lease.expiration_time for (i, lease) in m._enumerate_leases(f)])
        expiration = max(0, expiration_time - now)

        share_type = "unknown"
        f.seek(m.DATA_OFFSET)
        version = f.read(1)
        if version == b"\x00":
            # this slot contains an SDMF share
            share_type = "SDMF"
        elif version == b"\x01":
            share_type = "MDMF"

        if share_type == "SDMF":
            f.seek(m.DATA_OFFSET)
            data = f.read(min(data_length, 2000))

            try:
                pieces = unpack_share(data)
            except NeedMoreDataError as e:
                # retry once with the larger size
                size = e.needed_bytes
                f.seek(m.DATA_OFFSET)
                data = f.read(min(data_length, size))
                pieces = unpack_share(data)
            (seqnum, root_hash, IV, k, N, segsize, datalen, pubkey, signature,
             share_hash_chain, block_hash_tree, share_data,
             enc_privkey) = pieces

            print("SDMF %s %d/%d %d #%d:%s %d %s" % \
                  (si_s, k, N, datalen,
                   seqnum, base32.b2a(root_hash),
                   expiration, quote_output(abs_sharefile)), file=out)
        elif share_type == "MDMF":
            from allmydata.mutable.layout import MDMFSlotReadProxy
            fake_shnum = 0

            # TODO: factor this out with dump_MDMF_share()
            class ShareDumper(MDMFSlotReadProxy):
                def _read(self, readvs, force_remote=False, queue=False):
                    data = []
                    for (where, length) in readvs:
                        f.seek(m.DATA_OFFSET + where)
                        data.append(f.read(length))
                    return defer.succeed({fake_shnum: data})

            p = ShareDumper(None, "fake-si", fake_shnum)

            def extract(func):
                stash = []
                # these methods return Deferreds, but we happen to know that
                # they run synchronously when not actually talking to a
                # remote server
                d = func()
                d.addCallback(stash.append)
                return stash[0]

            verinfo = extract(p.get_verinfo)
            (seqnum, root_hash, salt_to_use, segsize, datalen, k, N, prefix,
             offsets) = verinfo
            print("MDMF %s %d/%d %d #%d:%s %d %s" % \
                  (si_s, k, N, datalen,
                   seqnum, base32.b2a(root_hash),
                   expiration, quote_output(abs_sharefile)), file=out)
        else:
            print("UNKNOWN mutable %s" % quote_output(abs_sharefile), file=out)

    elif struct.unpack(">L", prefix[:4]) == (1, ):
        # immutable

        class ImmediateReadBucketProxy(ReadBucketProxy):
            def __init__(self, sf):
                self.sf = sf
                ReadBucketProxy.__init__(self, None, None, "")

            def __repr__(self):
                return "<ImmediateReadBucketProxy>"

            def _read(self, offset, size):
                return defer.succeed(sf.read_share_data(offset, size))

        # use a ReadBucketProxy to parse the bucket and find the uri extension
        sf = ShareFile(abs_sharefile)
        bp = ImmediateReadBucketProxy(sf)

        expiration_time = min(
            [lease.expiration_time for lease in sf.get_leases()])
        expiration = max(0, expiration_time - now)

        UEB_data = call(bp.get_uri_extension)
        unpacked = uri.unpack_extension_readable(UEB_data)

        k = unpacked["needed_shares"]
        N = unpacked["total_shares"]
        filesize = unpacked["size"]
        ueb_hash = unpacked["UEB_hash"]

        print("CHK %s %d/%d %d %s %d %s" %
              (si_s, k, N, filesize, ueb_hash, expiration,
               quote_output(abs_sharefile)),
              file=out)

    else:
        print("UNKNOWN really-unknown %s" % quote_output(abs_sharefile),
              file=out)

    f.close()
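
The call() helper used for bp.get_uri_extension is not shown in this snippet; it presumably unwraps a Deferred that is known to fire synchronously, just like the extract() closure defined earlier in the same function. A hedged reconstruction:

def call(func, *args, **kwargs):
    # hypothetical: synchronously unwrap a Deferred that we know fires
    # without actually waiting on the network
    stash = []
    d = func(*args, **kwargs)
    d.addCallback(stash.append)
    return stash[0]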
Example #33
 def get_storage_index_string(self):
     return base32.b2a(self.storage_index)
Example #34
    def _make_checker_results(self, smap):
        self._monitor.raise_if_cancelled()
        healthy = True
        report = []
        summary = []
        vmap = smap.make_versionmap()
        recoverable = smap.recoverable_versions()
        unrecoverable = smap.unrecoverable_versions()

        if recoverable:
            report.append("Recoverable Versions: " + "/".join([
                "%d*%s" % (len(vmap[v]), smap.summarize_version(v))
                for v in recoverable
            ]))
        if unrecoverable:
            report.append("Unrecoverable Versions: " + "/".join([
                "%d*%s" % (len(vmap[v]), smap.summarize_version(v))
                for v in unrecoverable
            ]))
        if smap.unrecoverable_versions():
            healthy = False
            summary.append("some versions are unrecoverable")
            report.append("Unhealthy: some versions are unrecoverable")
        if len(recoverable) == 0:
            healthy = False
            summary.append("no versions are recoverable")
            report.append("Unhealthy: no versions are recoverable")
        if len(recoverable) > 1:
            healthy = False
            summary.append("multiple versions are recoverable")
            report.append("Unhealthy: there are multiple recoverable versions")

        needs_rebalancing = False
        if recoverable:
            best_version = smap.best_recoverable_version()
            report.append("Best Recoverable Version: " +
                          smap.summarize_version(best_version))
            counters = self._count_shares(smap, best_version)
            s = counters["count-shares-good"]
            k = counters["count-shares-needed"]
            N = counters["count-shares-expected"]
            if s < N:
                healthy = False
                report.append("Unhealthy: best version has only %d shares "
                              "(encoding is %d-of-%d)" % (s, k, N))
                summary.append("%d shares (enc %d-of-%d)" % (s, k, N))
            hosts = smap.all_servers_for_version(best_version)
            needs_rebalancing = bool(len(hosts) < N)
        elif unrecoverable:
            healthy = False
            # find a k and N from somewhere
            first = list(unrecoverable)[0]
            # not exactly the best version, but that doesn't matter too much
            counters = self._count_shares(smap, first)
            # leave needs_rebalancing=False: the file being unrecoverable is
            # the bigger problem
        else:
            # couldn't find anything at all
            counters = {
                "count-shares-good": 0,
                "count-shares-needed": 3,  # arbitrary defaults
                "count-shares-expected": 10,
                "count-good-share-hosts": 0,
                "count-wrong-shares": 0,
            }

        corrupt_share_locators = []
        problems = []
        if self.bad_shares:
            report.append("Corrupt Shares:")
            summary.append("Corrupt Shares:")
        for (server, shnum, f) in sorted(self.bad_shares):
            serverid = server.get_serverid()
            locator = (server, self._storage_index, shnum)
            corrupt_share_locators.append(locator)
            s = "%s-sh%d" % (server.get_name(), shnum)
            if f.check(CorruptShareError):
                ft = f.value.reason
            else:
                ft = str(f)
            report.append(" %s: %s" % (s, ft))
            summary.append(s)
            p = (serverid, self._storage_index, shnum, f)
            problems.append(p)
            msg = ("CorruptShareError during mutable verify, "
                   "serverid=%(serverid)s, si=%(si)s, shnum=%(shnum)d, "
                   "where=%(where)s")
            log.msg(format=msg,
                    serverid=server.get_name(),
                    si=base32.b2a(self._storage_index),
                    shnum=shnum,
                    where=ft,
                    level=log.WEIRD,
                    umid="EkK8QA")

        sharemap = dictutil.DictOfSets()
        for verinfo in vmap:
            for (shnum, server, timestamp) in vmap[verinfo]:
                shareid = "%s-sh%d" % (smap.summarize_version(verinfo), shnum)
                sharemap.add(shareid, server)
        if healthy:
            summary = "Healthy"
        else:
            summary = "Unhealthy: " + " ".join(summary)

        cr = CheckResults(
            from_string(self._node.get_uri()),
            self._storage_index,
            healthy=healthy,
            recoverable=bool(recoverable),
            needs_rebalancing=needs_rebalancing,
            count_shares_needed=counters["count-shares-needed"],
            count_shares_expected=counters["count-shares-expected"],
            count_shares_good=counters["count-shares-good"],
            count_good_share_hosts=counters["count-good-share-hosts"],
            count_recoverable_versions=len(recoverable),
            count_unrecoverable_versions=len(unrecoverable),
            servers_responding=list(smap.get_reachable_servers()),
            sharemap=sharemap,
            count_wrong_shares=counters["count-wrong-shares"],
            list_corrupt_shares=corrupt_share_locators,
            count_corrupt_shares=len(corrupt_share_locators),
            list_incompatible_shares=[],
            count_incompatible_shares=0,
            summary=summary,
            report=report,
            share_problems=problems,
            servermap=smap.copy())
        return cr
Example #35
 def render_si(self, ctx, data):
     si = self.original.get_storage_index()
     if not si:
         return "None"
     return ctx.tag[base32.b2a(si)]
Example #36
def _make_secret():
    return base32.b2a(os.urandom(hashutil.CRYPTO_VAL_SIZE)) + "\n"
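
This example and Example #51 below differ only in str vs bytes (Python 2 vs 3). The encoding round-trips through base32.a2b, as the other examples on this page rely on; a small sketch:

import os
from allmydata.util import base32, hashutil

secret = os.urandom(hashutil.CRYPTO_VAL_SIZE)
encoded = base32.b2a(secret)
assert base32.a2b(encoded) == secret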
Example #37
 def si(self, req, tag):
     si = self.original.get_storage_index()
     if not si:
         return "None"
     return tag(base32.b2a(si))
Example #38
def dump_SDMF_share(m, length, options):
    from allmydata.mutable.layout import unpack_share, unpack_header
    from allmydata.mutable.common import NeedMoreDataError
    from allmydata.util import base32, hashutil
    from allmydata.uri import SSKVerifierURI
    from allmydata.util.encodingutil import quote_output, to_bytes

    offset = m.DATA_OFFSET

    out = options.stdout

    f = open(options['filename'], "rb")
    f.seek(offset)
    data = f.read(min(length, 2000))
    f.close()

    try:
        pieces = unpack_share(data)
    except NeedMoreDataError as e:
        # retry once with the larger size
        size = e.needed_bytes
        f = open(options['filename'], "rb")
        f.seek(offset)
        data = f.read(min(length, size))
        f.close()
        pieces = unpack_share(data)

    (seqnum, root_hash, IV, k, N, segsize, datalen, pubkey, signature,
     share_hash_chain, block_hash_tree, share_data, enc_privkey) = pieces
    (ig_version, ig_seqnum, ig_roothash, ig_IV, ig_k, ig_N, ig_segsize,
     ig_datalen, offsets) = unpack_header(data)

    print(" SDMF contents:", file=out)
    print("  seqnum: %d" % seqnum, file=out)
    print("  root_hash: %s" % base32.b2a(root_hash), file=out)
    print("  IV: %s" % base32.b2a(IV), file=out)
    print("  required_shares: %d" % k, file=out)
    print("  total_shares: %d" % N, file=out)
    print("  segsize: %d" % segsize, file=out)
    print("  datalen: %d" % datalen, file=out)
    print("  enc_privkey: %d bytes" % len(enc_privkey), file=out)
    print("  pubkey: %d bytes" % len(pubkey), file=out)
    print("  signature: %d bytes" % len(signature), file=out)
    share_hash_ids = ",".join(
        sorted([str(hid) for hid in share_hash_chain.keys()]))
    print("  share_hash_chain: %s" % share_hash_ids, file=out)
    print("  block_hash_tree: %d nodes" % len(block_hash_tree), file=out)

    # the storage index isn't stored in the share itself, so we depend upon
    # knowing the parent directory name to get it
    pieces = options['filename'].split(os.sep)
    if len(pieces) >= 2:
        piece = to_bytes(pieces[-2])
        if base32.could_be_base32_encoded(piece):
            storage_index = base32.a2b(piece)
            fingerprint = hashutil.ssk_pubkey_fingerprint_hash(pubkey)
            u = SSKVerifierURI(storage_index, fingerprint)
            verify_cap = u.to_string()
            print("  verify-cap:",
                  quote_output(verify_cap, quotemarks=False),
                  file=out)

    if options['offsets']:
        # NOTE: this offset-calculation code is fragile, and needs to be
        # merged with MutableShareFile's internals.
        print(file=out)
        print(" Section Offsets:", file=out)

        def printoffset(name, value, shift=0):
            print("%s%20s: %s   (0x%x)" % (" " * shift, name, value, value),
                  file=out)

        printoffset("first lease", m.HEADER_SIZE)
        printoffset("share data", m.DATA_OFFSET)
        o_seqnum = m.DATA_OFFSET + struct.calcsize(">B")
        printoffset("seqnum", o_seqnum, 2)
        o_root_hash = m.DATA_OFFSET + struct.calcsize(">BQ")
        printoffset("root_hash", o_root_hash, 2)
        for k in [
                "signature", "share_hash_chain", "block_hash_tree",
                "share_data", "enc_privkey", "EOF"
        ]:
            name = {
                "share_data": "block data",
                "EOF": "end of share data"
            }.get(k, k)
            offset = m.DATA_OFFSET + offsets[k]
            printoffset(name, offset, 2)
        f = open(options['filename'], "rb")
        printoffset("extra leases", m._read_extra_lease_offset(f) + 4)
        f.close()

    print(file=out)
Example #39
def dump_MDMF_share(m, length, options):
    from allmydata.mutable.layout import MDMFSlotReadProxy
    from allmydata.util import base32, hashutil
    from allmydata.uri import MDMFVerifierURI
    from allmydata.util.encodingutil import quote_output, to_bytes

    offset = m.DATA_OFFSET
    out = options.stdout

    f = open(options['filename'], "rb")
    storage_index = None
    shnum = 0

    class ShareDumper(MDMFSlotReadProxy):
        def _read(self, readvs, force_remote=False, queue=False):
            data = []
            for (where, length) in readvs:
                f.seek(offset + where)
                data.append(f.read(length))
            return defer.succeed({shnum: data})

    p = ShareDumper(None, storage_index, shnum)

    def extract(func):
        stash = []
        # these methods return Deferreds, but we happen to know that they run
        # synchronously when not actually talking to a remote server
        d = func()
        d.addCallback(stash.append)
        return stash[0]

    verinfo = extract(p.get_verinfo)
    encprivkey = extract(p.get_encprivkey)
    signature = extract(p.get_signature)
    pubkey = extract(p.get_verification_key)
    block_hash_tree = extract(p.get_blockhashes)
    share_hash_chain = extract(p.get_sharehashes)
    f.close()

    (seqnum, root_hash, salt_to_use, segsize, datalen, k, N, prefix,
     offsets) = verinfo

    print(" MDMF contents:", file=out)
    print("  seqnum: %d" % seqnum, file=out)
    print("  root_hash: %s" % base32.b2a(root_hash), file=out)
    #print("  IV: %s" % base32.b2a(IV), file=out)
    print("  required_shares: %d" % k, file=out)
    print("  total_shares: %d" % N, file=out)
    print("  segsize: %d" % segsize, file=out)
    print("  datalen: %d" % datalen, file=out)
    print("  enc_privkey: %d bytes" % len(encprivkey), file=out)
    print("  pubkey: %d bytes" % len(pubkey), file=out)
    print("  signature: %d bytes" % len(signature), file=out)
    share_hash_ids = ",".join(
        [str(hid) for hid in sorted(share_hash_chain.keys())])
    print("  share_hash_chain: %s" % share_hash_ids, file=out)
    print("  block_hash_tree: %d nodes" % len(block_hash_tree), file=out)

    # the storage index isn't stored in the share itself, so we depend upon
    # knowing the parent directory name to get it
    pieces = options['filename'].split(os.sep)
    if len(pieces) >= 2:
        piece = to_bytes(pieces[-2])
        if base32.could_be_base32_encoded(piece):
            storage_index = base32.a2b(piece)
            fingerprint = hashutil.ssk_pubkey_fingerprint_hash(pubkey)
            u = MDMFVerifierURI(storage_index, fingerprint)
            verify_cap = u.to_string()
            print("  verify-cap:",
                  quote_output(verify_cap, quotemarks=False),
                  file=out)

    if options['offsets']:
        # NOTE: this offset-calculation code is fragile, and needs to be
        # merged with MutableShareFile's internals.

        print(file=out)
        print(" Section Offsets:", file=out)

        def printoffset(name, value, shift=0):
            print("%s%.20s: %s   (0x%x)" % (" " * shift, name, value, value),
                  file=out)

        printoffset("first lease", m.HEADER_SIZE, 2)
        printoffset("share data", m.DATA_OFFSET, 2)
        o_seqnum = m.DATA_OFFSET + struct.calcsize(">B")
        printoffset("seqnum", o_seqnum, 4)
        o_root_hash = m.DATA_OFFSET + struct.calcsize(">BQ")
        printoffset("root_hash", o_root_hash, 4)
        for k in [
                "enc_privkey", "share_hash_chain", "signature",
                "verification_key", "verification_key_end", "share_data",
                "block_hash_tree", "EOF"
        ]:
            name = {
                "share_data": "block data",
                "verification_key": "pubkey",
                "verification_key_end": "end of pubkey",
                "EOF": "end of share data"
            }.get(k, k)
            offset = m.DATA_OFFSET + offsets[k]
            printoffset(name, offset, 4)
        f = open(options['filename'], "rb")
        printoffset("extra leases", m._read_extra_lease_offset(f) + 4, 2)
        f.close()

    print(file=out)
Example #40
 def to_string(self):
     assert isinstance(self.readkey, str)
     assert isinstance(self.fingerprint, str)
     ret = 'URI:MDMF-RO:%s:%s' % (base32.b2a(
         self.readkey), base32.b2a(self.fingerprint))
     return ret
Example #41
 def abbrev(self):
     return base32.b2a(self.writekey[:5])
Example #42
 def to_string(self):
     assert isinstance(self.writekey, str)
     assert isinstance(self.fingerprint, str)
     return 'URI:SSK:%s:%s' % (base32.b2a(
         self.writekey), base32.b2a(self.fingerprint))
Example #43
            if peerlist[0] == target:
                # great!
                if verbose: print "  yay!"
                fn = base32.b2a(target)
                if nodes[target]:
                    nickname = nodes[target].replace("/", "_")
                    fn += "-" + nickname
                fn += ".txt"
                fn = os.path.join("canaries", fn)
                open(fn, "w").write(data)
                return True
            # nope, must try again
            if verbose: print "  boo"
            return False

        d.addCallback(_got_si)
        # get sneaky and look inside the Deferred for the synchronous result
        if d.result:
            return attempts


os.mkdir("canaries")
attempts = []
for target in nodes:
    target_s = base32.b2a(target)
    print "working on", target_s
    attempts.append(find_share_for_target(target))
print "done"
print "%d attempts total, avg %d per target, max %d" % \
      (sum(attempts), 1.0* sum(attempts) / len(nodes), max(attempts))
Example #44
 def get_longname(self):
     return base32.b2a(self.serverid)
Example #45
 def get_long_nodeid(self):
     # this matches what IServer.get_longname() says about us elsewhere
     vk_bytes = self._node_key.get_verifying_key_bytes()
     return "v0-" + base32.b2a(vk_bytes)
Example #46
 def abbrev_si(self):
     si = self._filenode_uri.get_storage_index()
     if si is None:
         return "<LIT>"
     return base32.b2a(si)[:5]
Example #47
 def _testknown(self, hashf, expected_a, *args):
     got = hashf(*args)
     self.assertIsInstance(got, bytes)
     got_a = base32.b2a(got)
     self.failUnlessEqual(got_a, expected_a)
Example #48
    def __init__(self, rref, server, verifycap, commonshare, node,
                 download_status, shnum, dyhb_rtt, logparent):
        self._rref = rref
        self._server = server
        self._node = node  # holds share_hash_tree and UEB
        self.actual_segment_size = node.segment_size  # might still be None
        # XXX change node.guessed_segment_size to
        # node.best_guess_segment_size(), which should give us the real ones
        # if known, else its guess.
        self._guess_offsets(verifycap, node.guessed_segment_size)
        self.actual_offsets = None
        self._UEB_length = None
        self._commonshare = commonshare  # holds block_hash_tree
        self._download_status = download_status
        self._storage_index = verifycap.storage_index
        self._si_prefix = base32.b2a(verifycap.storage_index)[:8]
        self._shnum = shnum
        self._dyhb_rtt = dyhb_rtt
        # self._alive becomes False upon fatal corruption or server error
        self._alive = True
        self._loop_scheduled = False
        self._lp = log.msg(format="%(share)s created",
                           share=repr(self),
                           level=log.NOISY,
                           parent=logparent,
                           umid="P7hv2w")

        self._pending = Spans()  # request sent but no response received yet
        self._received = DataSpans()  # ACK response received, with data
        self._unavailable = Spans()  # NAK response received, no data

        # any given byte of the share can be in one of four states:
        #  in: _wanted, _requested, _received
        #      FALSE    FALSE       FALSE : don't care about it at all
        #      TRUE     FALSE       FALSE : want it, haven't yet asked for it
        #      TRUE     TRUE        FALSE : request is in-flight
        #                                   or didn't get it
        #      FALSE    TRUE        TRUE  : got it, haven't used it yet
        #      FALSE    TRUE        FALSE : got it and used it
        #      FALSE    FALSE       FALSE : block consumed, ready to ask again
        #
        # when we request data and get a NAK, we leave it in _requested
        # to remind ourself to not ask for it again. We don't explicitly
        # remove it from anything (maybe this should change).
        #
        # We retain the hashtrees in the Node, so we leave those spans in
        # _requested (and never ask for them again, as long as the Node is
        # alive). But we don't retain data blocks (too big), so when we
        # consume a data block, we remove it from _requested, so a later
        # download can re-fetch it.

        self._requested_blocks = []  # (segnum, set(observer2..))
        v = server.get_version()
        ver = v["http://allmydata.org/tahoe/protocols/storage/v1"]
        self._overrun_ok = ver["tolerates-immutable-read-overrun"]
        # If _overrun_ok and we guess the offsets correctly, we can get
        # everything in one RTT. If _overrun_ok and we guess wrong, we might
        # need two RTT (but we could get lucky and do it in one). If overrun
        # is *not* ok (tahoe-1.3.0 or earlier), we need four RTT: 1=version,
        # 2=offset table, 3=UEB_length and everything else (hashes, block),
        # 4=UEB.

        self.had_corruption = False  # for unit tests
Example #49
 def abbrev(self, storage_index_or_none):
     if storage_index_or_none:
         return base32.b2a(storage_index_or_none)[:6]
     return "LIT file"
Example #50
 def to_string(self):
     assert isinstance(self.writekey, bytes)
     assert isinstance(self.fingerprint, bytes)
     ret = b'URI:MDMF:%s:%s' % (base32.b2a(
         self.writekey), base32.b2a(self.fingerprint))
     return ret
Example #51
def _make_secret():
    """
    Returns a base32-encoded random secret of hashutil.CRYPTO_VAL_SIZE
    bytes.
    """
    return base32.b2a(os.urandom(hashutil.CRYPTO_VAL_SIZE)) + b"\n"
Example #52
 def to_string(self):
     assert isinstance(self.storage_index, bytes)
     assert isinstance(self.fingerprint, bytes)
     return b'URI:SSK-Verifier:%s:%s' % (si_b2a(
         self.storage_index), base32.b2a(self.fingerprint))
Example #53
 def abbrev_verinfo(self, verinfo):
     if verinfo is None:
         return None
     (seqnum, root_hash, IV, segsize, datalength, k, N, prefix,
      offsets_tuple) = verinfo
     return "%d-%s" % (seqnum, base32.b2a(root_hash)[:4])
Example #54
 def to_string(self):
     assert isinstance(self.readkey, bytes)
     assert isinstance(self.fingerprint, bytes)
     return b'URI:SSK-RO:%s:%s' % (base32.b2a(
         self.readkey), base32.b2a(self.fingerprint))
Example #55
 def abbrev(self):
     return base32.b2a(self.readkey[:5])
Example #56
def dump_uri_instance(u, nodeid, secret, out, show_header=True):
    from allmydata import uri
    from allmydata.storage.server import si_b2a
    from allmydata.util import base32, hashutil
    from allmydata.util.encodingutil import quote_output

    if isinstance(u, uri.CHKFileURI):
        if show_header:
            print("CHK File:", file=out)
        print(" key:", base32.b2a(u.key), file=out)
        print(" UEB hash:", base32.b2a(u.uri_extension_hash), file=out)
        print(" size:", u.size, file=out)
        print(" k/N: %d/%d" % (u.needed_shares, u.total_shares), file=out)
        print(" storage index:", si_b2a(u.get_storage_index()), file=out)
        _dump_secrets(u.get_storage_index(), secret, nodeid, out)
    elif isinstance(u, uri.CHKFileVerifierURI):
        if show_header:
            print("CHK Verifier URI:", file=out)
        print(" UEB hash:", base32.b2a(u.uri_extension_hash), file=out)
        print(" size:", u.size, file=out)
        print(" k/N: %d/%d" % (u.needed_shares, u.total_shares), file=out)
        print(" storage index:", si_b2a(u.get_storage_index()), file=out)

    elif isinstance(u, uri.LiteralFileURI):
        if show_header:
            print("Literal File URI:", file=out)
        print(" data:", quote_output(u.data), file=out)

    elif isinstance(u, uri.WriteableSSKFileURI):  # SDMF
        if show_header:
            print("SDMF Writeable URI:", file=out)
        print(" writekey:", base32.b2a(u.writekey), file=out)
        print(" readkey:", base32.b2a(u.readkey), file=out)
        print(" storage index:", si_b2a(u.get_storage_index()), file=out)
        print(" fingerprint:", base32.b2a(u.fingerprint), file=out)
        print(file=out)
        if nodeid:
            we = hashutil.ssk_write_enabler_hash(u.writekey, nodeid)
            print(" write_enabler:", base32.b2a(we), file=out)
            print(file=out)
        _dump_secrets(u.get_storage_index(), secret, nodeid, out)
    elif isinstance(u, uri.ReadonlySSKFileURI):
        if show_header:
            print("SDMF Read-only URI:", file=out)
        print(" readkey:", base32.b2a(u.readkey), file=out)
        print(" storage index:", si_b2a(u.get_storage_index()), file=out)
        print(" fingerprint:", base32.b2a(u.fingerprint), file=out)
    elif isinstance(u, uri.SSKVerifierURI):
        if show_header:
            print("SDMF Verifier URI:", file=out)
        print(" storage index:", si_b2a(u.get_storage_index()), file=out)
        print(" fingerprint:", base32.b2a(u.fingerprint), file=out)

    elif isinstance(u, uri.WriteableMDMFFileURI):  # MDMF
        if show_header:
            print("MDMF Writeable URI:", file=out)
        print(" writekey:", base32.b2a(u.writekey), file=out)
        print(" readkey:", base32.b2a(u.readkey), file=out)
        print(" storage index:", si_b2a(u.get_storage_index()), file=out)
        print(" fingerprint:", base32.b2a(u.fingerprint), file=out)
        print(file=out)
        if nodeid:
            we = hashutil.ssk_write_enabler_hash(u.writekey, nodeid)
            print(" write_enabler:", base32.b2a(we), file=out)
            print(file=out)
        _dump_secrets(u.get_storage_index(), secret, nodeid, out)
    elif isinstance(u, uri.ReadonlyMDMFFileURI):
        if show_header:
            print("MDMF Read-only URI:", file=out)
        print(" readkey:", base32.b2a(u.readkey), file=out)
        print(" storage index:", si_b2a(u.get_storage_index()), file=out)
        print(" fingerprint:", base32.b2a(u.fingerprint), file=out)
    elif isinstance(u, uri.MDMFVerifierURI):
        if show_header:
            print("MDMF Verifier URI:", file=out)
        print(" storage index:", si_b2a(u.get_storage_index()), file=out)
        print(" fingerprint:", base32.b2a(u.fingerprint), file=out)

    elif isinstance(u, uri.ImmutableDirectoryURI):  # CHK-based directory
        if show_header:
            print("CHK Directory URI:", file=out)
        dump_uri_instance(u._filenode_uri, nodeid, secret, out, False)
    elif isinstance(u, uri.ImmutableDirectoryURIVerifier):
        if show_header:
            print("CHK Directory Verifier URI:", file=out)
        dump_uri_instance(u._filenode_uri, nodeid, secret, out, False)

    elif isinstance(u, uri.DirectoryURI):  # SDMF-based directory
        if show_header:
            print("Directory Writeable URI:", file=out)
        dump_uri_instance(u._filenode_uri, nodeid, secret, out, False)
    elif isinstance(u, uri.ReadonlyDirectoryURI):
        if show_header:
            print("Directory Read-only URI:", file=out)
        dump_uri_instance(u._filenode_uri, nodeid, secret, out, False)
    elif isinstance(u, uri.DirectoryURIVerifier):
        if show_header:
            print("Directory Verifier URI:", file=out)
        dump_uri_instance(u._filenode_uri, nodeid, secret, out, False)

    elif isinstance(u, uri.MDMFDirectoryURI):  # MDMF-based directory
        if show_header:
            print("Directory Writeable URI:", file=out)
        dump_uri_instance(u._filenode_uri, nodeid, secret, out, False)
    elif isinstance(u, uri.ReadonlyMDMFDirectoryURI):
        if show_header:
            print("Directory Read-only URI:", file=out)
        dump_uri_instance(u._filenode_uri, nodeid, secret, out, False)
    elif isinstance(u, uri.MDMFDirectoryURIVerifier):
        if show_header:
            print("Directory Verifier URI:", file=out)
        dump_uri_instance(u._filenode_uri, nodeid, secret, out, False)

    else:
        print("unknown cap type", file=out)
Example #57
 def abbrev_si(self):
     return base32.b2a(self.storage_index)[:5]
Example #58
 def to_string(self):
     assert isinstance(self.storage_index, str)
     assert isinstance(self.fingerprint, str)
     ret = 'URI:MDMF-Verifier:%s:%s' % (si_b2a(
         self.storage_index), base32.b2a(self.fingerprint))
     return ret
Example #59
 def to_string(self):
     return 'URI:LIT:%s' % base32.b2a(self.data)
Example #60
 def _si_abbrev(self):
     si = self.monitor.origin_si
     if not si:
         return "<LIT>"
     return base32.b2a(si)[:6]
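
A closing observation: many examples above abbreviate identifiers the same way, encoding with base32.b2a and slicing off the first few characters (5, 6, or 8 depending on context). A small demonstration, assuming Tahoe's 16-byte storage indexes:

import os
from allmydata.util import base32

si = os.urandom(16)          # a storage index is 16 bytes
print(base32.b2a(si))        # full 26-character encoding
print(base32.b2a(si)[:6])    # the 6-char abbreviation used in #49 and #60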