def test_a2b_b2a_match_Pythons(self, input_bytes):
    # Tahoe's b2a must agree with the stdlib: lowercase base32, no padding.
    encoded = base32.b2a(input_bytes)
    x = base64.b32encode(input_bytes).rstrip(b"=").lower()
    self.failUnlessEqual(encoded, x)
    self.assertIsInstance(encoded, bytes)
    self.assertTrue(base32.could_be_base32_encoded(encoded))
    # and a2b must round-trip back to the original bytes
    self.assertEqual(base32.a2b(encoded), input_bytes)
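# A stdlib-only sketch of the encoding the test above pins down: Tahoe's
# base32.b2a is lowercase, unpadded RFC 3548 base32, so it can be emulated
# with base64.b32encode plus case/padding fixups (for illustration only;
# allmydata.util.base32 is implemented differently).
import base64

def b2a_sketch(data):
    # lowercase base32 with the trailing "=" padding stripped
    return base64.b32encode(data).rstrip(b"=").lower()

def a2b_sketch(encoded):
    # restore the padding and upper case the stdlib decoder expects
    pad = b"=" * (-len(encoded) % 8)
    return base64.b32decode(encoded.upper() + pad)

# round-trip: a2b_sketch(b2a_sketch(b"\x12\x34")) == b"\x12\x34"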
def dump_immutable_chk_share(f, out, options):
    from allmydata import uri
    from allmydata.util import base32
    from allmydata.immutable.layout import ReadBucketProxy
    from allmydata.util.encodingutil import quote_output, to_bytes
    import os, struct

    # use a ReadBucketProxy to parse the bucket and find the uri extension
    bp = ReadBucketProxy(None, None, '')
    offsets = bp._parse_offsets(f.read_share_data(0, 0x44))
    print("%20s: %d" % ("version", bp._version), file=out)
    seek = offsets['uri_extension']
    length = struct.unpack(bp._fieldstruct,
                           f.read_share_data(seek, bp._fieldsize))[0]
    seek += bp._fieldsize
    UEB_data = f.read_share_data(seek, length)
    unpacked = uri.unpack_extension_readable(UEB_data)

    keys1 = ("size", "num_segments", "segment_size",
             "needed_shares", "total_shares")
    keys2 = ("codec_name", "codec_params", "tail_codec_params")
    keys3 = ("plaintext_hash", "plaintext_root_hash",
             "crypttext_hash", "crypttext_root_hash",
             "share_root_hash", "UEB_hash")
    display_keys = {"size": "file_size"}

    def to_string(v):
        if isinstance(v, bytes):
            return str(v, "utf-8")
        else:
            return str(v)

    for k in keys1:
        if k in unpacked:
            dk = display_keys.get(k, k)
            print("%20s: %s" % (dk, to_string(unpacked[k])), file=out)
    print(file=out)
    for k in keys2:
        if k in unpacked:
            dk = display_keys.get(k, k)
            print("%20s: %s" % (dk, to_string(unpacked[k])), file=out)
    print(file=out)
    for k in keys3:
        if k in unpacked:
            dk = display_keys.get(k, k)
            print("%20s: %s" % (dk, to_string(unpacked[k])), file=out)

    leftover = set(unpacked.keys()) - set(keys1 + keys2 + keys3)
    if leftover:
        print(file=out)
        print("LEFTOVER:", file=out)
        for k in sorted(leftover):
            print("%20s: %s" % (k, to_string(unpacked[k])), file=out)

    # the storage index isn't stored in the share itself, so we depend upon
    # knowing the parent directory name to get it
    pieces = options['filename'].split(os.sep)
    if len(pieces) >= 2:
        piece = to_bytes(pieces[-2])
        if base32.could_be_base32_encoded(piece):
            storage_index = base32.a2b(piece)
            uri_extension_hash = base32.a2b(unpacked["UEB_hash"])
            u = uri.CHKFileVerifierURI(storage_index, uri_extension_hash,
                                       unpacked["needed_shares"],
                                       unpacked["total_shares"],
                                       unpacked["size"])
            verify_cap = u.to_string()
            print("%20s: %s" % ("verify-cap",
                                quote_output(verify_cap, quotemarks=False)),
                  file=out)

    sizes = {}
    sizes['data'] = (offsets['plaintext_hash_tree'] - offsets['data'])
    sizes['validation'] = (offsets['uri_extension'] -
                           offsets['plaintext_hash_tree'])
    sizes['uri-extension'] = len(UEB_data)
    print(file=out)
    print(" Size of data within the share:", file=out)
    for k in sorted(sizes):
        print("%20s: %s" % (k, sizes[k]), file=out)

    if options['offsets']:
        print(file=out)
        print(" Section Offsets:", file=out)
        print("%20s: %s" % ("share data", f._data_offset), file=out)
        for k in ["data", "plaintext_hash_tree", "crypttext_hash_tree",
                  "block_hashes", "share_hashes", "uri_extension"]:
            name = {"data": "block data"}.get(k, k)
            offset = f._data_offset + offsets[k]
            print("  %20s: %s (0x%x)" % (name, offset, offset), file=out)
        print("%20s: %s" % ("leases", f._lease_offset), file=out)
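# The parent-directory trick above, isolated as a sketch: a share file does
# not record its own storage index, so it is recovered from the on-disk
# layout, where the share's parent directory name is the base32-encoded
# storage index. storage_index_from_share_path is a hypothetical helper,
# not part of allmydata.
import os

def storage_index_from_share_path(filename):
    from allmydata.util import base32
    pieces = filename.split(os.sep)
    if len(pieces) >= 2:
        piece = pieces[-2].encode("ascii")
        if base32.could_be_base32_encoded(piece):
            return base32.a2b(piece)
    return None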
def dump_MDMF_share(m, length, options):
    from allmydata.mutable.layout import MDMFSlotReadProxy
    from allmydata.util import base32, hashutil
    from allmydata.uri import MDMFVerifierURI
    from allmydata.util.encodingutil import quote_output, to_bytes
    from twisted.internet import defer
    import os, struct

    offset = m.DATA_OFFSET
    out = options.stdout

    f = open(options['filename'], "rb")
    storage_index = None
    shnum = 0

    class ShareDumper(MDMFSlotReadProxy):
        def _read(self, readvs, force_remote=False, queue=False):
            data = []
            for (where, length) in readvs:
                f.seek(offset + where)
                data.append(f.read(length))
            return defer.succeed({shnum: data})

    p = ShareDumper(None, storage_index, shnum)

    def extract(func):
        stash = []
        # these methods return Deferreds, but we happen to know that they run
        # synchronously when not actually talking to a remote server
        d = func()
        d.addCallback(stash.append)
        return stash[0]

    verinfo = extract(p.get_verinfo)
    encprivkey = extract(p.get_encprivkey)
    signature = extract(p.get_signature)
    pubkey = extract(p.get_verification_key)
    block_hash_tree = extract(p.get_blockhashes)
    share_hash_chain = extract(p.get_sharehashes)
    f.close()

    (seqnum, root_hash, salt_to_use, segsize, datalen, k, N, prefix,
     offsets) = verinfo

    print(" MDMF contents:", file=out)
    print("  seqnum: %d" % seqnum, file=out)
    print("  root_hash: %s" % str(base32.b2a(root_hash), "utf-8"), file=out)
    #print("  IV: %s" % base32.b2a(IV), file=out)
    print("  required_shares: %d" % k, file=out)
    print("  total_shares: %d" % N, file=out)
    print("  segsize: %d" % segsize, file=out)
    print("  datalen: %d" % datalen, file=out)
    print("  enc_privkey: %d bytes" % len(encprivkey), file=out)
    print("  pubkey: %d bytes" % len(pubkey), file=out)
    print("  signature: %d bytes" % len(signature), file=out)
    share_hash_ids = ",".join(
        [str(hid) for hid in sorted(share_hash_chain.keys())])
    print("  share_hash_chain: %s" % share_hash_ids, file=out)
    print("  block_hash_tree: %d nodes" % len(block_hash_tree), file=out)

    # the storage index isn't stored in the share itself, so we depend upon
    # knowing the parent directory name to get it
    pieces = options['filename'].split(os.sep)
    if len(pieces) >= 2:
        piece = to_bytes(pieces[-2])
        if base32.could_be_base32_encoded(piece):
            storage_index = base32.a2b(piece)
            fingerprint = hashutil.ssk_pubkey_fingerprint_hash(pubkey)
            u = MDMFVerifierURI(storage_index, fingerprint)
            verify_cap = u.to_string()
            print("  verify-cap:", quote_output(verify_cap, quotemarks=False),
                  file=out)

    if options['offsets']:
        # NOTE: this offset-calculation code is fragile, and needs to be
        # merged with MutableShareFile's internals.
        print(file=out)
        print(" Section Offsets:", file=out)
        def printoffset(name, value, shift=0):
            print("%s%20s: %s (0x%x)" % (" " * shift, name, value, value),
                  file=out)
        printoffset("first lease", m.HEADER_SIZE, 2)
        printoffset("share data", m.DATA_OFFSET, 2)
        o_seqnum = m.DATA_OFFSET + struct.calcsize(">B")
        printoffset("seqnum", o_seqnum, 4)
        o_root_hash = m.DATA_OFFSET + struct.calcsize(">BQ")
        printoffset("root_hash", o_root_hash, 4)
        for k in ["enc_privkey", "share_hash_chain", "signature",
                  "verification_key", "verification_key_end",
                  "share_data", "block_hash_tree", "EOF"]:
            name = {"share_data": "block data",
                    "verification_key": "pubkey",
                    "verification_key_end": "end of pubkey",
                    "EOF": "end of share data"}.get(k, k)
            offset = m.DATA_OFFSET + offsets[k]
            printoffset(name, offset, 4)
        f = open(options['filename'], "rb")
        printoffset("extra leases", m._read_extra_lease_offset(f) + 4, 2)
        f.close()
    print(file=out)
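# The extract() helper above, isolated as a sketch: MDMFSlotReadProxy's
# getter methods return Deferreds, but with a local _read they fire
# synchronously, so the result can be stashed from a callback and returned
# immediately. extract_sync is a hypothetical name for illustration.
from twisted.internet import defer

def extract_sync(func):
    stash = []
    func().addCallback(stash.append)
    if not stash:
        raise RuntimeError("Deferred did not fire synchronously")
    return stash[0]

# e.g. extract_sync(lambda: defer.succeed(42)) == 42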
def dump_SDMF_share(m, length, options):
    from allmydata.mutable.layout import unpack_share, unpack_header
    from allmydata.mutable.common import NeedMoreDataError
    from allmydata.util import base32, hashutil
    from allmydata.uri import SSKVerifierURI
    from allmydata.util.encodingutil import quote_output, to_bytes
    import os, struct

    offset = m.DATA_OFFSET
    out = options.stdout

    f = open(options['filename'], "rb")
    f.seek(offset)
    data = f.read(min(length, 2000))
    f.close()

    try:
        pieces = unpack_share(data)
    except NeedMoreDataError as e:
        # retry once with the larger size
        size = e.needed_bytes
        f = open(options['filename'], "rb")
        f.seek(offset)
        data = f.read(min(length, size))
        f.close()
        pieces = unpack_share(data)

    (seqnum, root_hash, IV, k, N, segsize, datalen,
     pubkey, signature, share_hash_chain, block_hash_tree,
     share_data, enc_privkey) = pieces
    (ig_version, ig_seqnum, ig_roothash, ig_IV, ig_k, ig_N, ig_segsize,
     ig_datalen, offsets) = unpack_header(data)

    print(" SDMF contents:", file=out)
    print("  seqnum: %d" % seqnum, file=out)
    print("  root_hash: %s" % str(base32.b2a(root_hash), "utf-8"), file=out)
    print("  IV: %s" % str(base32.b2a(IV), "utf-8"), file=out)
    print("  required_shares: %d" % k, file=out)
    print("  total_shares: %d" % N, file=out)
    print("  segsize: %d" % segsize, file=out)
    print("  datalen: %d" % datalen, file=out)
    print("  enc_privkey: %d bytes" % len(enc_privkey), file=out)
    print("  pubkey: %d bytes" % len(pubkey), file=out)
    print("  signature: %d bytes" % len(signature), file=out)
    # sort the ids numerically, not lexicographically, so e.g. 10 follows 9
    share_hash_ids = ",".join(
        [str(hid) for hid in sorted(share_hash_chain.keys())])
    print("  share_hash_chain: %s" % share_hash_ids, file=out)
    print("  block_hash_tree: %d nodes" % len(block_hash_tree), file=out)

    # the storage index isn't stored in the share itself, so we depend upon
    # knowing the parent directory name to get it
    pieces = options['filename'].split(os.sep)
    if len(pieces) >= 2:
        piece = to_bytes(pieces[-2])
        if base32.could_be_base32_encoded(piece):
            storage_index = base32.a2b(piece)
            fingerprint = hashutil.ssk_pubkey_fingerprint_hash(pubkey)
            u = SSKVerifierURI(storage_index, fingerprint)
            verify_cap = u.to_string()
            print("  verify-cap:", quote_output(verify_cap, quotemarks=False),
                  file=out)

    if options['offsets']:
        # NOTE: this offset-calculation code is fragile, and needs to be
        # merged with MutableShareFile's internals.
        print(file=out)
        print(" Section Offsets:", file=out)
        def printoffset(name, value, shift=0):
            print("%s%20s: %s (0x%x)" % (" " * shift, name, value, value),
                  file=out)
        printoffset("first lease", m.HEADER_SIZE)
        printoffset("share data", m.DATA_OFFSET)
        o_seqnum = m.DATA_OFFSET + struct.calcsize(">B")
        printoffset("seqnum", o_seqnum, 2)
        o_root_hash = m.DATA_OFFSET + struct.calcsize(">BQ")
        printoffset("root_hash", o_root_hash, 2)
        for k in ["signature", "share_hash_chain", "block_hash_tree",
                  "share_data", "enc_privkey", "EOF"]:
            name = {"share_data": "block data",
                    "EOF": "end of share data"}.get(k, k)
            offset = m.DATA_OFFSET + offsets[k]
            printoffset(name, offset, 2)
        f = open(options['filename'], "rb")
        printoffset("extra leases", m._read_extra_lease_offset(f) + 4)
        f.close()
    print(file=out)
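# The two-pass read above, isolated as a sketch: parse an initial fixed-size
# read, and if the parser raises NeedMoreDataError, re-read exactly the
# number of bytes it reports needing. read_and_unpack is a hypothetical
# helper for illustration, not part of allmydata.
def read_and_unpack(path, offset, length, unpack, NeedMoreDataError,
                    first_guess=2000):
    with open(path, "rb") as f:
        f.seek(offset)
        data = f.read(min(length, first_guess))
    try:
        return unpack(data)
    except NeedMoreDataError as e:
        # the exception carries the size the parser actually needs
        with open(path, "rb") as f:
            f.seek(offset)
            data = f.read(min(length, e.needed_bytes))
        return unpack(data)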
def test_a2b(self):
    self.failUnlessEqual(base32.a2b(b"ci2a"), b"\x12\x34")
    # b"b0gus" contains "0", which is outside the base32 alphabet
    self.failUnlessRaises(AssertionError, base32.a2b, b"b0gus")
    self.assertFalse(base32.could_be_base32_encoded(b"b0gus"))
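# A simplified stand-in for could_be_base32_encoded (an assumption: the real
# check is stricter about valid lengths and trailing bits): every byte must
# come from the lowercase RFC 3548 alphabet a-z2-7, so b"b0gus" is rejected
# because "0" is not a base32 character.
import re

def could_be_base32_encoded_sketch(s):
    return re.match(b"^[a-z2-7]+$", s) is not None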