def __init__(self, rref, server, data_size, block_size, num_segments,
             num_share_hashes, uri_extension_size_max, pipeline_size=50000):
    self._rref = rref
    self._server = server
    self._data_size = data_size
    self._block_size = block_size
    self._num_segments = num_segments

    effective_segments = mathutil.next_power_of_k(num_segments, 2)
    self._segment_hash_size = (2 * effective_segments - 1) * HASH_SIZE
    # how many share hashes are included in each share? This will be
    # about ln2(num_shares).
    self._share_hashtree_size = num_share_hashes * (2 + HASH_SIZE)
    # we commit to not sending a uri extension larger than this
    self._uri_extension_size_max = uri_extension_size_max

    self._create_offsets(block_size, data_size)

    # k=3, max_segment_size=128KiB gives us a typical segment of 43691
    # bytes. Setting the default pipeline_size to 50KB lets us get two
    # segments onto the wire but not a third, which would keep the pipe
    # filled.
    self._pipeline = pipeline.Pipeline(pipeline_size)
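
# The two sizing lines above encode Merkle-tree arithmetic: the leaf count is
# padded up to the next power of two, and a complete binary tree with L leaves
# has 2*L - 1 nodes in total. A minimal standalone sketch of that arithmetic
# (HASH_SIZE = 32 is an assumption here; the real constant is defined
# elsewhere in the codebase):

def _sketch_segment_hash_size(num_segments, hash_size=32):
    effective = 1
    while effective < num_segments:   # pad leaf count to a power of two
        effective *= 2
    # 2*L - 1 nodes in a complete binary tree, hash_size bytes per node
    return (2 * effective - 1) * hash_size

# e.g. 3 segments pad to 4 leaves -> 7 tree nodes -> 224 bytes of hashes
assert _sketch_segment_hash_size(3) == 224

# The pipeline comment's numbers also check out: with k=3 and 128 KiB
# segments, each block is ceil(131072 / 3) = 43691 bytes, so a 50000-byte
# pipeline admits a second block while one is outstanding (43691 < 50000)
# but makes a third wait, which keeps the pipe filled.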
def which_bucket(self, size):
    # return (min,max) such that min <= size <= max
    # values are from the set (0,0), (1,3), (4,10), (11,31), (32,100),
    # (101,316), (317, 1000), etc: two per decade
    assert size >= 0
    i = 0
    while True:
        if i >= len(self.buckets):
            # extend the list
            new_lower = self.buckets[i - 1][1] + 1
            new_upper = int(mathutil.next_power_of_k(new_lower, self.root))
            self.buckets.append((new_lower, new_upper))
        maybe = self.buckets[i]
        if maybe[0] <= size <= maybe[1]:
            return maybe
        i += 1
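
# which_bucket grows self.buckets lazily: each new bucket starts just past
# the previous upper bound and ends at the next power of self.root, truncated
# by int(). "Two per decade" implies self.root = sqrt(10). A standalone
# sketch under those assumptions (the seed buckets and the local
# _next_power_of_k helper are reconstructions, not the real class):

import math

class _SketchSizeBuckets:
    def __init__(self):
        self.root = math.sqrt(10)        # two buckets per decade
        self.buckets = [(0, 0), (1, 3)]  # assumed seed values

    def _next_power_of_k(self, n, k):
        # smallest power of k that is >= n, in the spirit of
        # mathutil.next_power_of_k
        x = 0 if n == 0 else int(math.log(n, k) + 0.5)
        return k ** x if k ** x >= n else k ** (x + 1)

    def which_bucket(self, size):
        assert size >= 0
        i = 0
        while True:
            if i >= len(self.buckets):
                new_lower = self.buckets[i - 1][1] + 1
                new_upper = int(self._next_power_of_k(new_lower, self.root))
                self.buckets.append((new_lower, new_upper))
            if self.buckets[i][0] <= size <= self.buckets[i][1]:
                return self.buckets[i]
            i += 1

_b = _SketchSizeBuckets()
assert _b.which_bucket(5) == (4, 10)        # second bucket of the first decade
assert _b.which_bucket(500) == (317, 1000)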
def calc(filesize, params=(3, 7, 10), segsize=DEFAULT_MAX_SEGMENT_SIZE):
    num_shares = params[2]
    if filesize <= upload.Uploader.URI_LIT_SIZE_THRESHOLD:
        urisize = len(uri.LiteralFileURI("A" * filesize).to_string())
        sharesize = 0
        sharespace = 0
    else:
        u = upload.FileUploader(None)  # XXX changed
        u.set_params(params)
        # unfortunately, Encoder doesn't currently lend itself to answering
        # this question without measuring a filesize, so we have to give it a
        # fake one
        data = BigFakeString(filesize)
        u.set_filehandle(data)
        u.set_encryption_key("a" * 16)
        sharesize, blocksize = u.setup_encoder()
        # how much overhead?
        #  0x20 bytes of offsets
        #  0x04 bytes of extension length
        #  0x1ad bytes of extension (=429)
        # total is 465 bytes
        num_segments = mathutil.div_ceil(filesize, segsize)
        num_share_hashes = int(math.log(mathutil.next_power_of_k(num_shares,
                                                                 2), 2)) + 1
        sharesize = storage.allocated_size(sharesize, num_segments,
                                           num_share_hashes, 429)
        sharespace = num_shares * roundup(sharesize)
        urisize = len(uri.pack_uri(storage_index="a" * 32,
                                   key="a" * 16,
                                   uri_extension_hash="a" * 32,
                                   needed_shares=params[0],
                                   total_shares=params[2],
                                   size=filesize))
    return urisize, sharesize, sharespace
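
# The magic numbers in calc() can be checked by hand for the default
# params=(3, 7, 10). A worked example, independent of the Tahoe modules
# used above (the value 10 mirrors params[2], the total share count):

# fixed per-share overhead quoted in the comment:
# 0x20 (offsets) + 0x04 (extension length) + 0x1ad (extension) = 465
assert 0x20 + 0x04 + 0x1ad == 465

# share-hash count: log2 of the share count padded up to a power of two,
# plus one. For 10 shares: padded count is 16, log2(16) = 4, so 5 hashes.
import math
_effective = 2 ** int(math.ceil(math.log(10, 2)))   # 16
assert int(math.log(_effective, 2)) + 1 == 5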