def _got_key(keypair):
    (pubkey, privkey) = keypair
    nm.key_generator = SameKeyGenerator(pubkey, privkey)
    pubkey_s = pubkey.serialize()
    privkey_s = privkey.serialize()
    u = uri.WriteableSSKFileURI(ssk_writekey_hash(privkey_s),
                                ssk_pubkey_fingerprint_hash(pubkey_s))
    self._storage_index = u.get_storage_index()
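# A minimal standalone sketch of the derivation the callback above performs,
# pulled out for clarity. It assumes the Tahoe-LAFS allmydata package; the
# helper name derive_storage_index is ours, not part of the original code.
# The writekey is a hash of the serialized private key, the fingerprint a
# hash of the serialized public key, and the storage index is derived from
# the resulting writecap.
from allmydata import uri
from allmydata.util.hashutil import (ssk_writekey_hash,
                                     ssk_pubkey_fingerprint_hash)

def derive_storage_index(pubkey_s, privkey_s):
    writekey = ssk_writekey_hash(privkey_s)
    fingerprint = ssk_pubkey_fingerprint_hash(pubkey_s)
    writecap = uri.WriteableSSKFileURI(writekey, fingerprint)
    return writecap.get_storage_index()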
def _got_results_one_share(self, shnum, data, peerid, lp):
    self.log(format="_got_results: got shnum #%(shnum)d from peerid %(peerid)s",
             shnum=shnum,
             peerid=idlib.shortnodeid_b2a(peerid),
             level=log.NOISY,
             parent=lp)

    # this might raise NeedMoreDataError, if the pubkey and signature
    # live at some weird offset. That shouldn't happen, so I'm going to
    # treat it as a bad share.
    (seqnum, root_hash, IV, k, N, segsize, datalength,
     pubkey_s, signature, prefix) = unpack_prefix_and_signature(data)

    if not self._node.get_pubkey():
        fingerprint = hashutil.ssk_pubkey_fingerprint_hash(pubkey_s)
        assert len(fingerprint) == 32
        if fingerprint != self._node.get_fingerprint():
            raise CorruptShareError(peerid, shnum,
                                    "pubkey doesn't match fingerprint")
        self._node._populate_pubkey(self._deserialize_pubkey(pubkey_s))

    if self._need_privkey:
        self._try_to_extract_privkey(data, peerid, shnum, lp)

    (ig_version, ig_seqnum, ig_root_hash, ig_IV, ig_k, ig_N,
     ig_segsize, ig_datalen, offsets) = unpack_header(data)
    offsets_tuple = tuple( [(key,value) for key,value in offsets.items()] )
    verinfo = (seqnum, root_hash, IV, segsize, datalength, k, N, prefix,
               offsets_tuple)

    if verinfo not in self._valid_versions:
        # it's a new pair. Verify the signature.
        valid = self._node.get_pubkey().verify(prefix, signature)
        if not valid:
            raise CorruptShareError(peerid, shnum, "signature is invalid")

        # ok, it's a valid verinfo. Add it to the list of validated
        # versions.
        self.log(" found valid version %d-%s from %s-sh%d: %d-%d/%d/%d"
                 % (seqnum, base32.b2a(root_hash)[:4],
                    idlib.shortnodeid_b2a(peerid), shnum,
                    k, N, segsize, datalength),
                 parent=lp)
        self._valid_versions.add(verinfo)
    # We now know that this is a valid candidate verinfo.

    if (peerid, shnum) in self._servermap.bad_shares:
        # we've been told that the rest of the data in this share is
        # unusable, so don't add it to the servermap.
        self.log("but we've been told this is a bad share",
                 parent=lp, level=log.UNUSUAL)
        return verinfo

    # Add the info to our servermap.
    timestamp = time.time()
    self._servermap.add_new_share(peerid, shnum, verinfo, timestamp)
    # and the versionmap
    self.versionmap.add(verinfo, (shnum, peerid, timestamp))
    return verinfo
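# Why offsets_tuple exists (a standalone, runnable illustration; toy values,
# not Tahoe code): verinfo is used as a member of the self._valid_versions
# set and as a versionmap key, so every component must be hashable. A dict
# is not, so it is flattened to a tuple of (key, value) pairs first. Sorting
# the items makes the tuple deterministic regardless of dict iteration order:
offsets = {"signature": 91, "share_hash_chain": 347, "EOF": 4096}
offsets_tuple = tuple(sorted(offsets.items()))
versions = set()
versions.add((1, b"roothash", offsets_tuple))   # hashable: OK
# versions.add((1, b"roothash", offsets))       # TypeError: unhashable type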
def create_with_keys(self, keypair, contents, version=SDMF_VERSION):
    """Call this to create a brand-new mutable file. It will create the
    shares, find homes for them, and upload the initial contents (created
    with the same rules as IClient.create_mutable_file() ). Returns a
    Deferred that fires (with the MutableFileNode instance you should use)
    when it completes.
    """
    (pubkey, privkey) = keypair
    self._pubkey, self._privkey = pubkey, privkey
    pubkey_s = self._pubkey.serialize()
    privkey_s = self._privkey.serialize()
    self._writekey = hashutil.ssk_writekey_hash(privkey_s)
    self._encprivkey = self._encrypt_privkey(self._writekey, privkey_s)
    self._fingerprint = hashutil.ssk_pubkey_fingerprint_hash(pubkey_s)
    if version == MDMF_VERSION:
        self._uri = WriteableMDMFFileURI(self._writekey, self._fingerprint)
        self._protocol_version = version
    elif version == SDMF_VERSION:
        self._uri = WriteableSSKFileURI(self._writekey, self._fingerprint)
        self._protocol_version = version
    self._readkey = self._uri.readkey
    self._storage_index = self._uri.storage_index
    initial_contents = self._get_initial_contents(contents)
    return self._upload(initial_contents, None)
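# Hedged usage sketch (comment-only; the surrounding node setup is assumed,
# not shown in the snippet above). Given an RSA keypair whose halves expose
# .serialize(), creating an MDMF mutable file might look like:
#
#   d = node.create_with_keys((pubkey, privkey), initial_data,
#                             version=MDMF_VERSION)
#   d.addCallback(lambda node: ...)  # node is the usable MutableFileNode
#
# Per the docstring, the Deferred fires only after the initial contents have
# been encoded and the shares have found homes.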
def dump_MDMF_share(m, length, options):
    from allmydata.mutable.layout import MDMFSlotReadProxy
    from allmydata.util import base32, hashutil
    from allmydata.uri import MDMFVerifierURI
    from allmydata.util.encodingutil import quote_output, to_bytes

    offset = m.DATA_OFFSET
    out = options.stdout

    f = open(options['filename'], "rb")
    storage_index = None
    shnum = 0

    class ShareDumper(MDMFSlotReadProxy):
        def _read(self, readvs, force_remote=False, queue=False):
            data = []
            for (where, length) in readvs:
                f.seek(offset + where)
                data.append(f.read(length))
            return defer.succeed({shnum: data})

    p = ShareDumper(None, storage_index, shnum)

    def extract(func):
        stash = []
        # these methods return Deferreds, but we happen to know that they run
        # synchronously when not actually talking to a remote server
        d = func()
        d.addCallback(stash.append)
        return stash[0]

    verinfo = extract(p.get_verinfo)
    encprivkey = extract(p.get_encprivkey)
    signature = extract(p.get_signature)
    pubkey = extract(p.get_verification_key)
    block_hash_tree = extract(p.get_blockhashes)
    share_hash_chain = extract(p.get_sharehashes)
    f.close()

    (seqnum, root_hash, salt_to_use, segsize, datalen, k, N, prefix,
     offsets) = verinfo

    print(" MDMF contents:", file=out)
    print(" seqnum: %d" % seqnum, file=out)
    print(" root_hash: %s" % str(base32.b2a(root_hash), "utf-8"), file=out)
    #print(" IV: %s" % base32.b2a(IV), file=out)
    print(" required_shares: %d" % k, file=out)
    print(" total_shares: %d" % N, file=out)
    print(" segsize: %d" % segsize, file=out)
    print(" datalen: %d" % datalen, file=out)
    print(" enc_privkey: %d bytes" % len(encprivkey), file=out)
    print(" pubkey: %d bytes" % len(pubkey), file=out)
    print(" signature: %d bytes" % len(signature), file=out)
    share_hash_ids = ",".join([str(hid)
                               for hid in sorted(share_hash_chain.keys())])
    print(" share_hash_chain: %s" % share_hash_ids, file=out)
    print(" block_hash_tree: %d nodes" % len(block_hash_tree), file=out)

    # the storage index isn't stored in the share itself, so we depend upon
    # knowing the parent directory name to get it
    pieces = options['filename'].split(os.sep)
    if len(pieces) >= 2:
        piece = to_bytes(pieces[-2])
        if base32.could_be_base32_encoded(piece):
            storage_index = base32.a2b(piece)
            fingerprint = hashutil.ssk_pubkey_fingerprint_hash(pubkey)
            u = MDMFVerifierURI(storage_index, fingerprint)
            verify_cap = u.to_string()
            print(" verify-cap:", quote_output(verify_cap, quotemarks=False),
                  file=out)

    if options['offsets']:
        # NOTE: this offset-calculation code is fragile, and needs to be
        # merged with MutableShareFile's internals.
        print(file=out)
        print(" Section Offsets:", file=out)

        def printoffset(name, value, shift=0):
            print("%s%.20s: %s (0x%x)" % (" " * shift, name, value, value),
                  file=out)
        printoffset("first lease", m.HEADER_SIZE, 2)
        printoffset("share data", m.DATA_OFFSET, 2)
        o_seqnum = m.DATA_OFFSET + struct.calcsize(">B")
        printoffset("seqnum", o_seqnum, 4)
        o_root_hash = m.DATA_OFFSET + struct.calcsize(">BQ")
        printoffset("root_hash", o_root_hash, 4)
        for k in ["enc_privkey", "share_hash_chain", "signature",
                  "verification_key", "verification_key_end",
                  "share_data", "block_hash_tree", "EOF"]:
            name = {"share_data": "block data",
                    "verification_key": "pubkey",
                    "verification_key_end": "end of pubkey",
                    "EOF": "end of share data"}.get(k, k)
            offset = m.DATA_OFFSET + offsets[k]
            printoffset(name, offset, 4)
        f = open(options['filename'], "rb")
        printoffset("extra leases", m._read_extra_lease_offset(f) + 4, 2)
        f.close()

    print(file=out)
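# The extract() helper above leans on a Twisted detail worth spelling out:
# a Deferred that has already fired runs newly added callbacks synchronously,
# so stash[0] is populated before extract() returns. A standalone, runnable
# illustration of the same pattern (toy function, not Tahoe code):
from twisted.internet import defer

def get_value():
    # stands in for the MDMFSlotReadProxy methods, which are ultimately
    # backed by defer.succeed() when no remote server is involved
    return defer.succeed(42)

stash = []
get_value().addCallback(stash.append)
assert stash == [42]  # the callback already ran; no reactor needed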
def dump_SDMF_share(m, length, options):
    from allmydata.mutable.layout import unpack_share, unpack_header
    from allmydata.mutable.common import NeedMoreDataError
    from allmydata.util import base32, hashutil
    from allmydata.uri import SSKVerifierURI
    from allmydata.util.encodingutil import quote_output, to_bytes

    offset = m.DATA_OFFSET
    out = options.stdout

    f = open(options['filename'], "rb")
    f.seek(offset)
    data = f.read(min(length, 2000))
    f.close()

    try:
        pieces = unpack_share(data)
    except NeedMoreDataError as e:
        # retry once with the larger size
        size = e.needed_bytes
        f = open(options['filename'], "rb")
        f.seek(offset)
        data = f.read(min(length, size))
        f.close()
        pieces = unpack_share(data)

    (seqnum, root_hash, IV, k, N, segsize, datalen,
     pubkey, signature, share_hash_chain, block_hash_tree,
     share_data, enc_privkey) = pieces
    (ig_version, ig_seqnum, ig_roothash, ig_IV, ig_k, ig_N, ig_segsize,
     ig_datalen, offsets) = unpack_header(data)

    print(" SDMF contents:", file=out)
    print(" seqnum: %d" % seqnum, file=out)
    print(" root_hash: %s" % str(base32.b2a(root_hash), "utf-8"), file=out)
    print(" IV: %s" % str(base32.b2a(IV), "utf-8"), file=out)
    print(" required_shares: %d" % k, file=out)
    print(" total_shares: %d" % N, file=out)
    print(" segsize: %d" % segsize, file=out)
    print(" datalen: %d" % datalen, file=out)
    print(" enc_privkey: %d bytes" % len(enc_privkey), file=out)
    print(" pubkey: %d bytes" % len(pubkey), file=out)
    print(" signature: %d bytes" % len(signature), file=out)
    share_hash_ids = ",".join(sorted([str(hid)
                                      for hid in share_hash_chain.keys()]))
    print(" share_hash_chain: %s" % share_hash_ids, file=out)
    print(" block_hash_tree: %d nodes" % len(block_hash_tree), file=out)

    # the storage index isn't stored in the share itself, so we depend upon
    # knowing the parent directory name to get it
    pieces = options['filename'].split(os.sep)
    if len(pieces) >= 2:
        piece = to_bytes(pieces[-2])
        if base32.could_be_base32_encoded(piece):
            storage_index = base32.a2b(piece)
            fingerprint = hashutil.ssk_pubkey_fingerprint_hash(pubkey)
            u = SSKVerifierURI(storage_index, fingerprint)
            verify_cap = u.to_string()
            print(" verify-cap:", quote_output(verify_cap, quotemarks=False),
                  file=out)

    if options['offsets']:
        # NOTE: this offset-calculation code is fragile, and needs to be
        # merged with MutableShareFile's internals.
        print(file=out)
        print(" Section Offsets:", file=out)

        def printoffset(name, value, shift=0):
            print("%s%20s: %s (0x%x)" % (" " * shift, name, value, value),
                  file=out)
        printoffset("first lease", m.HEADER_SIZE)
        printoffset("share data", m.DATA_OFFSET)
        o_seqnum = m.DATA_OFFSET + struct.calcsize(">B")
        printoffset("seqnum", o_seqnum, 2)
        o_root_hash = m.DATA_OFFSET + struct.calcsize(">BQ")
        printoffset("root_hash", o_root_hash, 2)
        for k in ["signature", "share_hash_chain", "block_hash_tree",
                  "share_data", "enc_privkey", "EOF"]:
            name = {"share_data": "block data",
                    "EOF": "end of share data"}.get(k, k)
            offset = m.DATA_OFFSET + offsets[k]
            printoffset(name, offset, 2)
        f = open(options['filename'], "rb")
        printoffset("extra leases", m._read_extra_lease_offset(f) + 4)
        f.close()

    print(file=out)
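# Hedged usage note: these dump_* functions are debug helpers driven by an
# options object exposing options['filename'], options['offsets'], and
# options.stdout; in Tahoe-LAFS they back the "tahoe debug dump-share"
# command. Inspecting a share on disk looks roughly like:
#
#   tahoe debug dump-share <sharefile>
#   tahoe debug dump-share --offsets <sharefile>   # also print Section Offsets
#
# The exact on-disk share path layout is an assumption here; note that the
# parent directory name matters because, as the comment above says, the
# storage index (and hence the verify-cap) can only be recovered from it.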
    print >>out, " enc_privkey: %d bytes" % len(enc_privkey)
    print >>out, " pubkey: %d bytes" % len(pubkey)
    print >>out, " signature: %d bytes" % len(signature)
    share_hash_ids = ",".join(sorted([str(hid)
                                      for hid in share_hash_chain.keys()]))
    print >>out, " share_hash_chain: %s" % share_hash_ids
    print >>out, " block_hash_tree: %d nodes" % len(block_hash_tree)

    # the storage index isn't stored in the share itself, so we depend upon
    # knowing the parent directory name to get it
    pieces = options['filename'].split(os.sep)
    if len(pieces) >= 2:
        piece = to_str(pieces[-2])
        if base32.could_be_base32_encoded(piece):
            storage_index = base32.a2b(piece)
            fingerprint = hashutil.ssk_pubkey_fingerprint_hash(pubkey)
            u = SSKVerifierURI(storage_index, fingerprint)
            verify_cap = u.to_string()
            print >>out, " verify-cap:", quote_output(verify_cap, quotemarks=False)

    if options['offsets']:
        # NOTE: this offset-calculation code is fragile, and needs to be
        # merged with MutableShareFile's internals.
        print >>out
        print >>out, " Section Offsets:"

        def printoffset(name, value, shift=0):
            print >>out, "%s%20s: %s (0x%x)" % (" "*shift, name, value, value)
        printoffset("first lease", m.HEADER_SIZE)
        printoffset("share data", m.DATA_OFFSET)
        o_seqnum = m.DATA_OFFSET + struct.calcsize(">B")
        printoffset("seqnum", o_seqnum, 2)
def dump_MDMF_share(m, length, options):
    from allmydata.mutable.layout import MDMFSlotReadProxy
    from allmydata.util import base32, hashutil
    from allmydata.uri import MDMFVerifierURI
    from allmydata.util.encodingutil import quote_output, to_str

    offset = m.DATA_OFFSET
    out = options.stdout

    f = open(options['filename'], "rb")
    storage_index = None
    shnum = 0

    class ShareDumper(MDMFSlotReadProxy):
        def _read(self, readvs, force_remote=False, queue=False):
            data = []
            for (where, length) in readvs:
                f.seek(offset + where)
                data.append(f.read(length))
            return defer.succeed({shnum: data})

    p = ShareDumper(None, storage_index, shnum)

    def extract(func):
        stash = []
        # these methods return Deferreds, but we happen to know that they run
        # synchronously when not actually talking to a remote server
        d = func()
        d.addCallback(stash.append)
        return stash[0]

    verinfo = extract(p.get_verinfo)
    encprivkey = extract(p.get_encprivkey)
    signature = extract(p.get_signature)
    pubkey = extract(p.get_verification_key)
    block_hash_tree = extract(p.get_blockhashes)
    share_hash_chain = extract(p.get_sharehashes)
    f.close()

    (seqnum, root_hash, salt_to_use, segsize, datalen, k, N, prefix,
     offsets) = verinfo

    print >>out, " MDMF contents:"
    print >>out, " seqnum: %d" % seqnum
    print >>out, " root_hash: %s" % base32.b2a(root_hash)
    #print >>out, " IV: %s" % base32.b2a(IV)
    print >>out, " required_shares: %d" % k
    print >>out, " total_shares: %d" % N
    print >>out, " segsize: %d" % segsize
    print >>out, " datalen: %d" % datalen
    print >>out, " enc_privkey: %d bytes" % len(encprivkey)
    print >>out, " pubkey: %d bytes" % len(pubkey)
    print >>out, " signature: %d bytes" % len(signature)
    share_hash_ids = ",".join([str(hid)
                               for hid in sorted(share_hash_chain.keys())])
    print >>out, " share_hash_chain: %s" % share_hash_ids
    print >>out, " block_hash_tree: %d nodes" % len(block_hash_tree)

    # the storage index isn't stored in the share itself, so we depend upon
    # knowing the parent directory name to get it
    pieces = options['filename'].split(os.sep)
    if len(pieces) >= 2:
        piece = to_str(pieces[-2])
        if base32.could_be_base32_encoded(piece):
            storage_index = base32.a2b(piece)
            fingerprint = hashutil.ssk_pubkey_fingerprint_hash(pubkey)
            u = MDMFVerifierURI(storage_index, fingerprint)
            verify_cap = u.to_string()
            print >>out, " verify-cap:", quote_output(verify_cap, quotemarks=False)

    if options['offsets']:
        # NOTE: this offset-calculation code is fragile, and needs to be
        # merged with MutableShareFile's internals.
        print >>out
        print >>out, " Section Offsets:"

        def printoffset(name, value, shift=0):
            print >>out, "%s%.20s: %s (0x%x)" % (" "*shift, name, value, value)
        printoffset("first lease", m.HEADER_SIZE, 2)
        printoffset("share data", m.DATA_OFFSET, 2)
        o_seqnum = m.DATA_OFFSET + struct.calcsize(">B")
        printoffset("seqnum", o_seqnum, 4)
        o_root_hash = m.DATA_OFFSET + struct.calcsize(">BQ")
        printoffset("root_hash", o_root_hash, 4)
        for k in ["enc_privkey", "share_hash_chain", "signature",
                  "verification_key", "verification_key_end",
                  "share_data", "block_hash_tree", "EOF"]:
            name = {"share_data": "block data",
                    "verification_key": "pubkey",
                    "verification_key_end": "end of pubkey",
                    "EOF": "end of share data"}.get(k, k)
            offset = m.DATA_OFFSET + offsets[k]
            printoffset(name, offset, 4)
        f = open(options['filename'], "rb")
        printoffset("extra leases", m._read_extra_lease_offset(f) + 4, 2)
        f.close()

    print >>out
class MutableFileNode:
    implements(IMutableFileNode, ICheckable)

    def __init__(self, storage_broker, secret_holder,
                 default_encoding_parameters, history):
        self._storage_broker = storage_broker
        self._secret_holder = secret_holder
        self._default_encoding_parameters = default_encoding_parameters
        self._history = history
        self._pubkey = None # filled in upon first read
        self._privkey = None # filled in if we're mutable
        # we keep track of the last encoding parameters that we use. These
        # are updated upon retrieve, and used by publish. If we publish
        # without ever reading (i.e. overwrite()), then we use these values.
        self._required_shares = default_encoding_parameters["k"]
        self._total_shares = default_encoding_parameters["n"]
        self._sharemap = {} # known shares, shnum-to-[nodeids]
        self._cache = ResponseCache()
        self._most_recent_size = None

        # all users of this MutableFileNode go through the serializer. This
        # takes advantage of the fact that Deferreds discard the callbacks
        # that they're done with, so we can keep using the same Deferred
        # forever without consuming more and more memory.
        self._serializer = defer.succeed(None)

    def __repr__(self):
        if hasattr(self, '_uri'):
            return "<%s %x %s %s>" % (self.__class__.__name__, id(self),
                                      self.is_readonly() and 'RO' or 'RW',
                                      self._uri.abbrev())
        else:
            return "<%s %x %s %s>" % (self.__class__.__name__, id(self),
                                      None, None)

    def init_from_cap(self, filecap):
        # we have the URI, but we have not yet retrieved the public
        # verification key, nor things like 'k' or 'N'. If and when someone
        # wants to get our contents, we'll pull from shares and fill those
        # in.
        assert isinstance(filecap, (ReadonlySSKFileURI, WriteableSSKFileURI))
        self._uri = filecap
        self._writekey = None
        if isinstance(filecap, WriteableSSKFileURI):
            self._writekey = self._uri.writekey
        self._readkey = self._uri.readkey
        self._storage_index = self._uri.storage_index
        self._fingerprint = self._uri.fingerprint
        # the following values are learned during Retrieval
        #  self._pubkey
        #  self._required_shares
        #  self._total_shares
        # and these are needed for Publish. They are filled in by Retrieval
        # if possible, otherwise by the first peer that Publish talks to.
        self._privkey = None
        self._encprivkey = None
        return self

    def create_with_keys(self, (pubkey, privkey), contents):
        """Call this to create a brand-new mutable file. It will create the
        shares, find homes for them, and upload the initial contents (created
        with the same rules as IClient.create_mutable_file() ). Returns a
        Deferred that fires (with the MutableFileNode instance you should use)
        when it completes.
        """
        self._pubkey, self._privkey = pubkey, privkey
        pubkey_s = self._pubkey.serialize()
        privkey_s = self._privkey.serialize()
        self._writekey = hashutil.ssk_writekey_hash(privkey_s)
        self._encprivkey = self._encrypt_privkey(self._writekey, privkey_s)
        self._fingerprint = hashutil.ssk_pubkey_fingerprint_hash(pubkey_s)
        self._uri = WriteableSSKFileURI(self._writekey, self._fingerprint)
        self._readkey = self._uri.readkey
        self._storage_index = self._uri.storage_index
        initial_contents = self._get_initial_contents(contents)
        return self._upload(initial_contents, None)
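# The _serializer comment in __init__ above describes a common Twisted idiom:
# keep one long-lived Deferred and chain each new operation onto its tail, so
# operations run strictly one at a time while fired callbacks are released.
# A minimal, runnable sketch of that idiom (toy class, not the Tahoe helper;
# the real MutableFileNode wraps this with extra reentrancy and logging care):
from twisted.internet import defer

class SerializedOps(object):
    """Run submitted operations strictly one after another."""
    def __init__(self):
        # one long-lived Deferred; callbacks are discarded as they fire,
        # so the chain does not grow without bound
        self._serializer = defer.succeed(None)

    def submit(self, op, *args):
        results = defer.Deferred()
        # each op (which may itself return a Deferred) starts only after
        # its predecessor has completely finished
        self._serializer.addCallback(lambda _ignored: op(*args))
        def _capture(res):
            # hand the result (or Failure) to this caller, then restore
            # None so one failure does not poison every later op
            results.callback(res)
            return None
        self._serializer.addBoth(_capture)
        return results

# usage: both lambdas run in order, synchronously here since nothing blocks
s = SerializedOps()
s.submit(lambda: 1).addCallback(print)
s.submit(lambda: 2).addCallback(print)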
def dump_SDMF_share(m, length, options):
    from allmydata.mutable.layout import unpack_share, unpack_header
    from allmydata.mutable.common import NeedMoreDataError
    from allmydata.util import base32, hashutil
    from allmydata.uri import SSKVerifierURI
    from allmydata.util.encodingutil import quote_output, to_str

    offset = m.DATA_OFFSET
    out = options.stdout

    f = open(options['filename'], "rb")
    f.seek(offset)
    data = f.read(min(length, 2000))
    f.close()

    try:
        pieces = unpack_share(data)
    except NeedMoreDataError as e:
        # retry once with the larger size
        size = e.needed_bytes
        f = open(options['filename'], "rb")
        f.seek(offset)
        data = f.read(min(length, size))
        f.close()
        pieces = unpack_share(data)

    (seqnum, root_hash, IV, k, N, segsize, datalen,
     pubkey, signature, share_hash_chain, block_hash_tree,
     share_data, enc_privkey) = pieces
    (ig_version, ig_seqnum, ig_roothash, ig_IV, ig_k, ig_N, ig_segsize,
     ig_datalen, offsets) = unpack_header(data)

    print(" SDMF contents:", file=out)
    print(" seqnum: %d" % seqnum, file=out)
    print(" root_hash: %s" % base32.b2a(root_hash), file=out)
    print(" IV: %s" % base32.b2a(IV), file=out)
    print(" required_shares: %d" % k, file=out)
    print(" total_shares: %d" % N, file=out)
    print(" segsize: %d" % segsize, file=out)
    print(" datalen: %d" % datalen, file=out)
    print(" enc_privkey: %d bytes" % len(enc_privkey), file=out)
    print(" pubkey: %d bytes" % len(pubkey), file=out)
    print(" signature: %d bytes" % len(signature), file=out)
    share_hash_ids = ",".join(sorted([str(hid)
                                      for hid in share_hash_chain.keys()]))
    print(" share_hash_chain: %s" % share_hash_ids, file=out)
    print(" block_hash_tree: %d nodes" % len(block_hash_tree), file=out)

    # the storage index isn't stored in the share itself, so we depend upon
    # knowing the parent directory name to get it
    pieces = options['filename'].split(os.sep)
    if len(pieces) >= 2:
        piece = to_str(pieces[-2])
        if base32.could_be_base32_encoded(piece):
            storage_index = base32.a2b(piece)
            fingerprint = hashutil.ssk_pubkey_fingerprint_hash(pubkey)
            u = SSKVerifierURI(storage_index, fingerprint)
            verify_cap = u.to_string()
            print(" verify-cap:", quote_output(verify_cap, quotemarks=False),
                  file=out)

    if options['offsets']:
        # NOTE: this offset-calculation code is fragile, and needs to be
        # merged with MutableShareFile's internals.
        print(file=out)
        print(" Section Offsets:", file=out)

        def printoffset(name, value, shift=0):
            print("%s%20s: %s (0x%x)" % (" "*shift, name, value, value),
                  file=out)
        printoffset("first lease", m.HEADER_SIZE)
        printoffset("share data", m.DATA_OFFSET)
        o_seqnum = m.DATA_OFFSET + struct.calcsize(">B")
        printoffset("seqnum", o_seqnum, 2)
        o_root_hash = m.DATA_OFFSET + struct.calcsize(">BQ")
        printoffset("root_hash", o_root_hash, 2)
        for k in ["signature", "share_hash_chain", "block_hash_tree",
                  "share_data", "enc_privkey", "EOF"]:
            name = {"share_data": "block data",
                    "EOF": "end of share data"}.get(k, k)
            offset = m.DATA_OFFSET + offsets[k]
            printoffset(name, offset, 2)
        f = open(options['filename'], "rb")
        printoffset("extra leases", m._read_extra_lease_offset(f) + 4)
        f.close()

    print(file=out)