def write_good_share(storage_rref, storage_index, ueb, sharenums):
    """
    Write a valid share with the given URI extension block.
    """
    # Build a write proxy only to learn the on-disk layout offsets;
    # see allmydata/immutable/layout.py for the share format.
    proxy = make_write_bucket_proxy(
        storage_rref,
        None,
        1024,
        ueb["segment_size"],
        1,
        1,
        ueb["size"],
    )
    ueb_offset = proxy._offsets["uri_extension"]
    # Zero-fill the gap between the header data and the UEB slot.
    padding = b"\0" * (ueb_offset - len(proxy._offset_data))
    packed_ueb = uri.pack_extension(ueb)
    share_data = b"".join([
        proxy._offset_data,
        padding,
        pack(proxy.fieldstruct, len(packed_ueb)),
        packed_ueb,
    ])
    return write_share(storage_rref, storage_index, sharenums, share_data)
def _guess_offsets(self, verifycap, guessed_segment_size):
    """
    Estimate the share layout offsets from *verifycap* and a guessed
    segment size, storing the results on self before real offsets are
    available.
    """
    self.guessed_segment_size = guessed_segment_size
    size = verifycap.size
    k = verifycap.needed_shares
    N = verifycap.total_shares
    # sizes carries: num_segments, block_size/tail_block_size,
    # guessed_segment_size/tail_segment_size/tail_segment_padded
    sizes = self._node._calculate_sizes(guessed_segment_size)
    # Total block data placed in each share summed over all segments;
    # excludes hashes, the UEB, and other overhead.
    share_size = mathutil.div_ceil(size, k)
    # Reuse the upload-side proxy so the guess matches the writer's
    # layout as accurately as possible.
    hash_tree = IncompleteHashTree(N)
    num_share_hashes = len(hash_tree.needed_hashes(0, include_leaf=True))
    proxy = make_write_bucket_proxy(None, None, share_size,
                                    sizes["block_size"],
                                    sizes["num_segments"],
                                    num_share_hashes, 0)
    self._fieldsize = proxy.fieldsize
    self._fieldstruct = proxy.fieldstruct
    self.guessed_offsets = proxy._offsets