def test_setup(self):
    """Set up a storage file and verify its on-disk size and every
    public property, both on the setup object and on a reopened
    (top-cached) device."""
    # unique filename derived from the test id so tests do not collide
    fname = ".".join(self.id().split(".")[1:])
    fname += ".bin"
    fname = os.path.join(thisdir, fname)
    if os.path.exists(fname):
        os.remove(fname)                           # pragma: no cover
    bsize = 10
    blocks_per_bucket = 3
    fsetup = EncryptedHeapStorage.setup(
        fname,
        bsize,
        self._heap_height,
        heap_base=self._heap_base,
        storage_type=self._storage_type,
        blocks_per_bucket=blocks_per_bucket)
    fsetup.close()
    self.assertEqual(type(fsetup.raw_storage),
                     BlockStorageTypeFactory(self._storage_type))
    with open(fname, 'rb') as f:
        flen = len(f.read())
    # the file size matches the computed size exactly, and is strictly
    # larger than the computed size with the header excluded
    self.assertEqual(
        flen,
        TopCachedEncryptedHeapStorage.compute_storage_size(
            bsize,
            self._heap_height,
            heap_base=self._heap_base,
            blocks_per_bucket=blocks_per_bucket))
    self.assertEqual(
        flen >
        TopCachedEncryptedHeapStorage.compute_storage_size(
            bsize,
            self._heap_height,
            heap_base=self._heap_base,
            blocks_per_bucket=blocks_per_bucket,
            ignore_header=True),
        True)
    with TopCachedEncryptedHeapStorage(
            EncryptedHeapStorage(
                fname,
                key=fsetup.key,
                storage_type=self._storage_type),
            **self._init_kwds) as f:
        self.assertEqual(f.header_data, bytes())
        self.assertEqual(fsetup.header_data, bytes())
        self.assertEqual(f.key, fsetup.key)
        self.assertEqual(f.blocks_per_bucket,
                         blocks_per_bucket)
        self.assertEqual(fsetup.blocks_per_bucket,
                         blocks_per_bucket)
        # bucket count of a complete heap: (base**(height+1)-1)/(base-1)
        self.assertEqual(f.bucket_count,
                         (self._heap_base**(self._heap_height+1) - 1)
                         //(self._heap_base-1))
        self.assertEqual(fsetup.bucket_count,
                         (self._heap_base**(self._heap_height+1) - 1)
                         //(self._heap_base-1))
        self.assertEqual(f.bucket_size,
                         bsize * blocks_per_bucket)
        self.assertEqual(fsetup.bucket_size,
                         bsize * blocks_per_bucket)
        self.assertEqual(f.storage_name, fname)
        self.assertEqual(fsetup.storage_name, fname)
    os.remove(fname)
def compute_storage_size(cls,
                         block_size,
                         block_count,
                         bucket_capacity=4,
                         heap_base=2,
                         ignore_header=False,
                         **kwds):
    """Return the number of storage bytes required for a Path ORAM
    holding ``block_count`` blocks of ``block_size`` bytes.

    Args:
        block_size: payload bytes per block (positive integer).
        block_count: number of blocks to store (positive integer).
        bucket_capacity: blocks stored per heap bucket.
        heap_base: arity of the storage heap (>= 2).
        ignore_header: when True, exclude this class's header bytes.
        **kwds: forwarded to EncryptedHeapStorage.compute_storage_size;
            must not contain 'heap_height' (it is derived here).
    """
    assert (block_size > 0) and (block_size == int(block_size))
    assert (block_count > 0) and (block_count == int(block_count))
    assert bucket_capacity >= 1
    assert heap_base >= 2
    assert 'heap_height' not in kwds
    heap_height = calculate_necessary_heap_height(heap_base,
                                                  block_count)
    # each stored block carries ORAM bookkeeping bytes on top of the
    # user payload
    padded_block_size = block_size + \
        TreeORAMStorageManagerExplicitAddressing.\
        block_info_storage_size
    raw_size = EncryptedHeapStorage.compute_storage_size(
        padded_block_size,
        heap_height,
        blocks_per_bucket=bucket_capacity,
        heap_base=heap_base,
        ignore_header=ignore_header,
        **kwds)
    if ignore_header:
        return raw_size
    return cls._header_offset + raw_size
def test_setup(self):
    """Set up a storage file and verify its on-disk size and every
    public property, both on the setup object and on a reopened
    device."""
    # unique filename derived from the test id
    fname = ".".join(self.id().split(".")[1:])
    fname += ".bin"
    fname = os.path.join(thisdir, fname)
    if os.path.exists(fname):
        os.remove(fname)                           # pragma: no cover
    bsize = 10
    heap_height = 2
    blocks_per_bucket = 3
    fsetup = EncryptedHeapStorage.setup(
        fname,
        bsize,
        heap_height,
        key_size=AES.key_sizes[0],
        blocks_per_bucket=blocks_per_bucket)
    fsetup.close()
    self.assertEqual(type(fsetup.raw_storage),
                     BlockStorageTypeFactory(self._type_name))
    with open(fname, 'rb') as f:
        flen = len(f.read())
    # file size matches the computed size exactly, and is strictly
    # larger than the size with the header excluded
    self.assertEqual(
        flen,
        EncryptedHeapStorage.compute_storage_size(
            bsize,
            heap_height,
            blocks_per_bucket=blocks_per_bucket))
    self.assertEqual(
        flen >
        EncryptedHeapStorage.compute_storage_size(
            bsize,
            heap_height,
            blocks_per_bucket=blocks_per_bucket,
            ignore_header=True),
        True)
    with EncryptedHeapStorage(fname,
                              key=fsetup.key,
                              storage_type=self._type_name) as f:
        self.assertEqual(f.header_data, bytes())
        self.assertEqual(fsetup.header_data, bytes())
        self.assertEqual(f.key, fsetup.key)
        self.assertEqual(f.blocks_per_bucket,
                         blocks_per_bucket)
        self.assertEqual(fsetup.blocks_per_bucket,
                         blocks_per_bucket)
        # complete binary heap of the given height
        self.assertEqual(f.bucket_count,
                         2**(heap_height+1) - 1)
        self.assertEqual(fsetup.bucket_count,
                         2**(heap_height+1) - 1)
        self.assertEqual(f.bucket_size,
                         bsize * blocks_per_bucket)
        self.assertEqual(fsetup.bucket_size,
                         bsize * blocks_per_bucket)
        self.assertEqual(f.storage_name, fname)
        self.assertEqual(fsetup.storage_name, fname)
    os.remove(fname)
def test_read_path_cloned(self):
    """read_path on a cloned device returns the expected bucket data,
    and all traffic is counted on the clone, never on the original."""
    with EncryptedHeapStorage(self._testfname,
                              key=self._key,
                              storage_type=self._type_name) as forig:
        self.assertEqual(forig.bytes_sent, 0)
        self.assertEqual(forig.bytes_received, 0)
        with forig.clone_device() as f:
            # cloning itself must not generate any traffic
            self.assertEqual(forig.bytes_sent, 0)
            self.assertEqual(forig.bytes_received, 0)
            self.assertEqual(f.bytes_sent, 0)
            self.assertEqual(f.bytes_received, 0)
            self.assertEqual(f.virtual_heap.first_bucket_at_level(0), 0)
            self.assertNotEqual(f.virtual_heap.last_leaf_bucket(), 0)
            total_buckets = 0
            # read the root-to-node path of every bucket in the heap
            for b in range(f.virtual_heap.first_bucket_at_level(0),
                           f.virtual_heap.last_leaf_bucket() + 1):
                data = f.read_path(b)
                bucket_path = f.virtual_heap.Node(b).\
                    bucket_path_from_root()
                total_buckets += len(bucket_path)
                self.assertEqual(
                    f.virtual_heap.Node(b).level + 1,
                    len(bucket_path))
                for i, bucket in zip(bucket_path, data):
                    self.assertEqual(list(bytearray(bucket)),
                                     list(self._buckets[i]))
            # reads generate received bytes only, and only on the clone
            self.assertEqual(f.bytes_sent, 0)
            self.assertEqual(
                f.bytes_received,
                total_buckets * f.bucket_storage._storage.block_size)
        self.assertEqual(forig.bytes_sent, 0)
        self.assertEqual(forig.bytes_received, 0)
def test_init_noexists(self):
    """Opening a storage file that does not exist must raise IOError."""
    # sanity check: the dummy path really is absent
    self.assertEqual(os.path.exists(self._dummy_name), False)
    with self.assertRaises(IOError):
        with EncryptedHeapStorage(self._dummy_name,
                                  key=self._key,
                                  storage_type=self._type_name) as f:
            pass                                   # pragma: no cover
def test_write_path_cloned(self):
    """write_path/read_path round-trip on a cloned device; all traffic
    is counted on the clone, never on the original device."""
    # dummy bucket payload distinguishable from the original contents
    data = [bytearray([self._bucket_count]) * \
            self._block_size * \
            self._blocks_per_bucket
            for i in xrange(self._block_count)]
    with EncryptedHeapStorage(self._testfname,
                              key=self._key,
                              storage_type=self._type_name) as forig:
        self.assertEqual(forig.bytes_sent, 0)
        self.assertEqual(forig.bytes_received, 0)
        with forig.clone_device() as f:
            # cloning itself must not generate any traffic
            self.assertEqual(forig.bytes_sent, 0)
            self.assertEqual(forig.bytes_received, 0)
            self.assertEqual(f.bytes_sent, 0)
            self.assertEqual(f.bytes_received, 0)
            self.assertEqual(f.virtual_heap.first_bucket_at_level(0), 0)
            self.assertNotEqual(f.virtual_heap.last_leaf_bucket(), 0)
            total_buckets = 0
            for b in range(f.virtual_heap.first_bucket_at_level(0),
                           f.virtual_heap.last_leaf_bucket() + 1):
                orig = f.read_path(b)
                bucket_path = f.virtual_heap.Node(b).\
                    bucket_path_from_root()
                total_buckets += len(bucket_path)
                self.assertNotEqual(len(bucket_path), 0)
                self.assertEqual(
                    f.virtual_heap.Node(b).level + 1,
                    len(bucket_path))
                self.assertEqual(len(orig), len(bucket_path))
                for i, bucket in zip(bucket_path, orig):
                    self.assertEqual(list(bytearray(bucket)),
                                     list(self._buckets[i]))
                # overwrite the path with dummy data and read it back
                f.write_path(b, [bytes(data[i])
                                 for i in bucket_path])
                new = f.read_path(b)
                self.assertEqual(len(new), len(bucket_path))
                for i, bucket in zip(bucket_path, new):
                    self.assertEqual(list(bytearray(bucket)),
                                     list(data[i]))
                # restore the original contents and verify
                f.write_path(
                    b,
                    [bytes(self._buckets[i])
                     for i in bucket_path])
                orig = f.read_path(b)
                self.assertEqual(len(orig), len(bucket_path))
                for i, bucket in zip(bucket_path, orig):
                    self.assertEqual(list(bytearray(bucket)),
                                     list(self._buckets[i]))
            # each path was written twice and read three times
            self.assertEqual(
                f.bytes_sent,
                total_buckets * f.bucket_storage._storage.block_size * 2)
            self.assertEqual(
                f.bytes_received,
                total_buckets * f.bucket_storage._storage.block_size * 3)
        self.assertEqual(forig.bytes_sent, 0)
        self.assertEqual(forig.bytes_received, 0)
def test_factory(self):
    """With cached_levels=0 the TopCachedEncryptedHeapStorage
    constructor hands back the wrapped device itself."""
    kwds = dict(self._init_kwds)
    kwds['cached_levels'] = 0
    with EncryptedHeapStorage(
            self._testfname,
            key=self._key,
            storage_type=self._storage_type) as inner:
        with TopCachedEncryptedHeapStorage(inner, **kwds) as outer:
            # identity, not just equality: no wrapper was created
            self.assertTrue(inner is outer)
def main():
    """Example: create an encrypted heap storage file, read a random
    path, reopen it by key alone, and read another random path."""
    # create a unique scratch filename in the current directory
    fid, tmpname = tempfile.mkstemp(dir=os.getcwd())
    os.close(fid)
    os.remove(tmpname)
    print("Storage Name: %s" % (tmpname))

    key_size = 32
    header_data = b'a message'
    heap_base = 3
    heap_height = 2
    block_size = 8
    blocks_per_bucket = 4

    def initialize(i):
        # bucket i is filled entirely with the byte value i
        return bytes(bytearray([i] * block_size * blocks_per_bucket))

    vheap = SizedVirtualHeap(heap_base,
                             heap_height,
                             blocks_per_bucket=blocks_per_bucket)

    with EncryptedHeapStorage.setup(tmpname,
                                    block_size,
                                    heap_height,
                                    key_size=key_size,
                                    header_data=header_data,
                                    heap_base=heap_base,
                                    blocks_per_bucket=blocks_per_bucket,
                                    initialize=initialize) as f:
        assert tmpname == f.storage_name
        assert f.header_data == header_data
        print(f.read_path(vheap.random_bucket()))
        key = f.key
    assert os.path.exists(tmpname)

    # reopen using only the encryption key
    with EncryptedHeapStorage(tmpname, key=key) as f:
        assert tmpname == f.storage_name
        assert f.header_data == header_data
        print(f.read_path(vheap.random_bucket()))

    # cleanup
    os.remove(tmpname)
def test_locked_flag(self):
    """While a device holds the storage lock, reopening raises IOError
    unless ignore_lock=True is passed; the lock is released on close."""
    with TopCachedEncryptedHeapStorage(
            EncryptedHeapStorage(self._testfname,
                                 key=self._key,
                                 storage_type=self._storage_type),
            **self._init_kwds) as f:
        # a second open while locked must fail
        with self.assertRaises(IOError):
            with TopCachedEncryptedHeapStorage(
                    EncryptedHeapStorage(self._testfname,
                                         key=self._key,
                                         storage_type=self._storage_type),
                    **self._init_kwds) as f1:
                pass                               # pragma: no cover
        # the failed open must not have cleared the lock
        with self.assertRaises(IOError):
            with TopCachedEncryptedHeapStorage(
                    EncryptedHeapStorage(self._testfname,
                                         key=self._key,
                                         storage_type=self._storage_type),
                    **self._init_kwds) as f1:
                pass                               # pragma: no cover
        # ignore_lock bypasses the check
        with TopCachedEncryptedHeapStorage(
                EncryptedHeapStorage(self._testfname,
                                     key=self._key,
                                     storage_type=self._storage_type,
                                     ignore_lock=True),
                **self._init_kwds) as f1:
            pass
        # the lock is still held by the outer device
        with self.assertRaises(IOError):
            with TopCachedEncryptedHeapStorage(
                    EncryptedHeapStorage(self._testfname,
                                         key=self._key,
                                         storage_type=self._storage_type),
                    **self._init_kwds) as f1:
                pass                               # pragma: no cover
        with TopCachedEncryptedHeapStorage(
                EncryptedHeapStorage(self._testfname,
                                     key=self._key,
                                     storage_type=self._storage_type,
                                     ignore_lock=True),
                **self._init_kwds) as f1:
            pass
    # after the outer device closes, ignore_lock still works ...
    with TopCachedEncryptedHeapStorage(
            EncryptedHeapStorage(self._testfname,
                                 key=self._key,
                                 storage_type=self._storage_type,
                                 ignore_lock=True),
            **self._init_kwds) as f1:
        pass
    # ... and a normal open succeeds because the lock was released
    with TopCachedEncryptedHeapStorage(
            EncryptedHeapStorage(self._testfname,
                                 key=self._key,
                                 storage_type=self._storage_type),
            **self._init_kwds) as f:
        pass
def test_init_exists(self):
    """Opening an existing storage file preserves its contents and
    exposes the expected properties."""
    self.assertEqual(os.path.exists(self._testfname), True)
    # snapshot the raw blocks before any heap-device activity
    with EncryptedBlockStorage(self._testfname,
                               key=self._key,
                               storage_type=self._storage_type) as f:
        databefore = f.read_blocks(list(range(f.block_count)))
    # NOTE(review): presumably rejected because a key is supplied
    # together with an already-open storage device — confirm
    with self.assertRaises(ValueError):
        with EncryptedBlockStorage(self._testfname,
                                   key=self._key,
                                   storage_type=self._storage_type) as fb:
            with TopCachedEncryptedHeapStorage(
                    EncryptedHeapStorage(fb, key=self._key),
                    **self._init_kwds) as f:
                pass                               # pragma: no cover
    with TopCachedEncryptedHeapStorage(
            EncryptedHeapStorage(
                self._testfname,
                key=self._key,
                storage_type=self._storage_type),
            **self._init_kwds) as f:
        self.assertEqual(f.key, self._key)
        self.assertEqual(f.bucket_size,
                         self._block_size * \
                         self._blocks_per_bucket)
        self.assertEqual(f.bucket_count,
                         self._bucket_count)
        self.assertEqual(f.storage_name, self._testfname)
        self.assertEqual(f.header_data, bytes())
    self.assertEqual(os.path.exists(self._testfname), True)
    # reopening must not have modified any stored buckets
    with TopCachedEncryptedHeapStorage(
            EncryptedHeapStorage(
                self._testfname,
                key=self._key,
                storage_type=self._storage_type),
            **self._init_kwds) as f:
        dataafter = f.bucket_storage.read_blocks(
            list(range(f.bucket_storage.block_count)))
    self.assertEqual(databefore, dataafter)
def test_update_header_data(self):
    """update_header_data replaces the header, persists across reopen,
    and rejects replacement data of a different length."""
    fname = ".".join(self.id().split(".")[1:])
    fname += ".bin"
    fname = os.path.join(thisdir, fname)
    if os.path.exists(fname):
        os.remove(fname)                           # pragma: no cover
    bsize = 10
    blocks_per_bucket = 1
    header_data = bytes(bytearray([0,1,2]))
    fsetup = EncryptedHeapStorage.setup(
        fname,
        bsize,
        self._heap_height,
        heap_base=self._heap_base,
        blocks_per_bucket=blocks_per_bucket,
        header_data=header_data)
    fsetup.close()
    new_header_data = bytes(bytearray([1,1,1]))
    with TopCachedEncryptedHeapStorage(
            EncryptedHeapStorage(
                fname,
                key=fsetup.key,
                storage_type=self._storage_type),
            **self._init_kwds) as f:
        self.assertEqual(f.header_data, header_data)
        f.update_header_data(new_header_data)
        self.assertEqual(f.header_data, new_header_data)
    # the new header must persist after closing
    with TopCachedEncryptedHeapStorage(
            EncryptedHeapStorage(
                fname,
                key=fsetup.key,
                storage_type=self._storage_type),
            **self._init_kwds) as f:
        self.assertEqual(f.header_data, new_header_data)
    # a shorter replacement header is rejected
    with self.assertRaises(ValueError):
        with TopCachedEncryptedHeapStorage(
                EncryptedHeapStorage(
                    fname,
                    key=fsetup.key,
                    storage_type=self._storage_type),
                **self._init_kwds) as f:
            f.update_header_data(bytes(bytearray([1,1])))
    # a longer replacement header is rejected
    with self.assertRaises(ValueError):
        with TopCachedEncryptedHeapStorage(
                EncryptedHeapStorage(
                    fname,
                    key=fsetup.key,
                    storage_type=self._storage_type),
                **self._init_kwds) as f:
            f.update_header_data(bytes(bytearray([1,1,1,1])))
    # the failed updates must not have changed the stored header
    with TopCachedEncryptedHeapStorage(
            EncryptedHeapStorage(
                fname,
                key=fsetup.key,
                storage_type=self._storage_type),
            **self._init_kwds) as f:
        self.assertEqual(f.header_data, new_header_data)
    os.remove(fname)
def test_setup_withdata(self):
    """Setup with user header data: the file size accounts for the
    header, and the header round-trips through a reopened device."""
    fname = ".".join(self.id().split(".")[1:])
    fname += ".bin"
    fname = os.path.join(thisdir, fname)
    if os.path.exists(fname):
        os.remove(fname)                           # pragma: no cover
    bsize = 10
    heap_height = 2
    blocks_per_bucket = 1
    header_data = bytes(bytearray([0, 1, 2]))
    fsetup = EncryptedHeapStorage.setup(
        fname,
        bsize,
        heap_height,
        key_size=AES.key_sizes[0],
        blocks_per_bucket=blocks_per_bucket,
        header_data=header_data)
    fsetup.close()
    self.assertEqual(type(fsetup.raw_storage),
                     BlockStorageTypeFactory(self._type_name))
    with open(fname, 'rb') as f:
        flen = len(f.read())
    self.assertEqual(
        flen,
        EncryptedHeapStorage.compute_storage_size(
            bsize,
            heap_height,
            header_data=header_data))
    self.assertTrue(len(header_data) > 0)
    # adding header data strictly increases the computed size
    self.assertEqual(
        EncryptedHeapStorage.compute_storage_size(
            bsize,
            heap_height,
            storage_type=self._type_name) <
        EncryptedHeapStorage.compute_storage_size(
            bsize,
            heap_height,
            storage_type=self._type_name,
            header_data=header_data),
        True)
    self.assertEqual(
        flen >
        EncryptedHeapStorage.compute_storage_size(
            bsize,
            heap_height,
            storage_type=self._type_name,
            header_data=header_data,
            ignore_header=True),
        True)
    with EncryptedHeapStorage(fname,
                              key=fsetup.key,
                              storage_type=self._type_name) as f:
        self.assertEqual(f.header_data, header_data)
        self.assertEqual(fsetup.header_data, header_data)
        self.assertEqual(f.key, fsetup.key)
        self.assertEqual(f.blocks_per_bucket,
                         blocks_per_bucket)
        self.assertEqual(fsetup.blocks_per_bucket,
                         blocks_per_bucket)
        # complete binary heap of the given height
        self.assertEqual(f.bucket_count,
                         2**(heap_height + 1) - 1)
        self.assertEqual(fsetup.bucket_count,
                         2**(heap_height + 1) - 1)
        self.assertEqual(f.bucket_size,
                         bsize * blocks_per_bucket)
        self.assertEqual(fsetup.bucket_size,
                         bsize * blocks_per_bucket)
        self.assertEqual(f.storage_name, fname)
        self.assertEqual(fsetup.storage_name, fname)
    os.remove(fname)
def main():
    """Example: create an encrypted heap storage file, read a random
    path, reopen it by key alone, and read another random path."""
    #
    # get a unique filename in the current directory
    #
    fid, tmpname = tempfile.mkstemp(dir=os.getcwd())
    os.close(fid)
    os.remove(tmpname)
    print("Storage Name: %s" % (tmpname))

    key_size = 32
    header_data = b'a message'
    heap_base = 3
    heap_height = 2
    block_size = 8
    blocks_per_bucket = 4
    # bucket i is filled entirely with the byte value i
    initialize = lambda i: \
        bytes(bytearray([i] * block_size * blocks_per_bucket))
    vheap = SizedVirtualHeap(
        heap_base,
        heap_height,
        blocks_per_bucket=blocks_per_bucket)

    with EncryptedHeapStorage.setup(
            tmpname,
            block_size,
            heap_height,
            key_size=key_size,
            header_data=header_data,
            heap_base=heap_base,
            blocks_per_bucket=blocks_per_bucket,
            initialize=initialize) as f:
        assert tmpname == f.storage_name
        assert f.header_data == header_data
        print(f.read_path(vheap.random_bucket()))
        key = f.key
    assert os.path.exists(tmpname)

    # reopen using only the encryption key
    with EncryptedHeapStorage(tmpname, key=key) as f:
        assert tmpname == f.storage_name
        assert f.header_data == header_data
        print(f.read_path(vheap.random_bucket()))

    #
    # cleanup
    #
    os.remove(tmpname)
def test_locked_flag(self):
    """While a device holds the storage lock, reopening raises IOError
    unless ignore_lock=True is passed; the lock is released on close."""
    with EncryptedHeapStorage(self._testfname,
                              key=self._key,
                              storage_type=self._type_name) as f:
        # a second open while locked must fail
        with self.assertRaises(IOError):
            with EncryptedHeapStorage(self._testfname,
                                      key=self._key,
                                      storage_type=self._type_name) as f1:
                pass                               # pragma: no cover
        # the failed open must not have cleared the lock
        with self.assertRaises(IOError):
            with EncryptedHeapStorage(self._testfname,
                                      key=self._key,
                                      storage_type=self._type_name) as f1:
                pass                               # pragma: no cover
        # ignore_lock bypasses the check
        with EncryptedHeapStorage(self._testfname,
                                  key=self._key,
                                  storage_type=self._type_name,
                                  ignore_lock=True) as f1:
            pass
        # the lock is still held by the outer device
        with self.assertRaises(IOError):
            with EncryptedHeapStorage(self._testfname,
                                      key=self._key,
                                      storage_type=self._type_name) as f1:
                pass                               # pragma: no cover
        with EncryptedHeapStorage(self._testfname,
                                  key=self._key,
                                  storage_type=self._type_name,
                                  ignore_lock=True) as f1:
            pass
    # after the outer device closes, ignore_lock still works ...
    with EncryptedHeapStorage(self._testfname,
                              key=self._key,
                              storage_type=self._type_name,
                              ignore_lock=True) as f1:
        pass
    # ... and a normal open succeeds because the lock was released
    with EncryptedHeapStorage(self._testfname,
                              key=self._key,
                              storage_type=self._type_name) as f:
        pass
def test_read_path(self):
    """read_path with every possible level_start; only buckets at or
    below the first non-cached (external) level generate traffic."""
    with TopCachedEncryptedHeapStorage(
            EncryptedHeapStorage(
                self._testfname,
                key=self._key,
                storage_type=self._storage_type),
            **self._init_kwds) as f:
        self.assertEqual(f.bytes_sent, 0)
        self.assertEqual(f.bytes_received, 0)
        self.assertEqual(
            f.virtual_heap.first_bucket_at_level(0), 0)
        self.assertNotEqual(
            f.virtual_heap.last_leaf_bucket(), 0)
        total_buckets = 0
        for b in range(f.virtual_heap.first_bucket_at_level(0),
                       f.virtual_heap.last_leaf_bucket()+1):
            full_bucket_path = f.virtual_heap.Node(b).\
                bucket_path_from_root()
            all_level_starts = list(range(len(full_bucket_path)+1))
            for level_start in all_level_starts:
                data = f.read_path(b, level_start=level_start)
                bucket_path = full_bucket_path[level_start:]
                # tally only buckets actually fetched from the
                # external (non-cached) heap levels
                if len(full_bucket_path) <= f._external_level:
                    # path lies entirely within the cache
                    pass
                elif level_start >= f._external_level:
                    total_buckets += len(bucket_path)
                else:
                    total_buckets += len(full_bucket_path[f._external_level:])
                self.assertEqual(f.virtual_heap.Node(b).level+1-level_start,
                                 len(bucket_path))
                for i, bucket in zip(bucket_path, data):
                    self.assertEqual(list(bytearray(bucket)),
                                     list(self._buckets[i]))
        # reads generate received bytes only
        self.assertEqual(f.bytes_sent, 0)
        self.assertEqual(f.bytes_received,
                         total_buckets*f.bucket_storage._storage.block_size)
def test_cache_size(self):
    """The in-memory cache holds exactly the buckets of the cached
    levels, and filling it is the only traffic on the root device."""
    with TopCachedEncryptedHeapStorage(
            EncryptedHeapStorage(self._testfname,
                                 key=self._key,
                                 storage_type=self._storage_type),
            **self._init_kwds) as f:
        num_cached_levels = self._init_kwds.get('cached_levels', 1)
        if num_cached_levels < 0:
            # negative means cache every level of the heap
            num_cached_levels = f.virtual_heap.levels
        cache_bucket_count = 0
        for l in xrange(num_cached_levels):
            if l <= f.virtual_heap.last_level:
                cache_bucket_count += f.virtual_heap.bucket_count_at_level(l)
        self.assertEqual(cache_bucket_count > 0, True)
        self.assertEqual(len(f.cached_bucket_data),
                         cache_bucket_count * f.bucket_size)
        # the wrapper itself reports no traffic; the root device only
        # received the bytes needed to populate the cache
        self.assertEqual(f.bytes_sent, 0)
        self.assertEqual(f.bytes_received, 0)
        self.assertEqual(f._root_device.bytes_sent, 0)
        self.assertEqual(
            f._root_device.bytes_received,
            cache_bucket_count*f._root_device.bucket_storage._storage.block_size)
def setUpClass(cls):
    """Create the shared on-disk test heap used by every test in this
    class: one bucket per heap node, bucket i filled with byte i."""
    assert cls._init_kwds is not None
    assert cls._storage_type is not None
    assert cls._heap_base is not None
    assert cls._heap_height is not None
    # reserve (then delete) a path guaranteed not to exist
    fd, cls._dummy_name = tempfile.mkstemp()
    os.close(fd)
    try:
        os.remove(cls._dummy_name)
    except OSError:                                # pragma: no cover
        pass                                       # pragma: no cover
    cls._block_size = 50
    cls._blocks_per_bucket = 3
    # bucket count of a complete heap: (base**(height+1)-1)/(base-1)
    cls._bucket_count = \
        ((cls._heap_base**(cls._heap_height+1)) - 1)//(cls._heap_base-1)
    cls._block_count = cls._bucket_count * \
                       cls._blocks_per_bucket
    cls._testfname = cls.__name__ + "_testfile.bin"
    cls._buckets = []
    f = EncryptedHeapStorage.setup(
        cls._testfname,
        cls._block_size,
        cls._heap_height,
        heap_base=cls._heap_base,
        blocks_per_bucket=cls._blocks_per_bucket,
        storage_type=cls._storage_type,
        initialize=lambda i: bytes(bytearray([i]) * \
                                   cls._block_size * \
                                   cls._blocks_per_bucket),
        ignore_existing=True)
    f.close()
    cls._key = f.key
    # keep a plaintext copy of every bucket for later comparisons
    for i in range(cls._bucket_count):
        data = bytearray([i]) * \
               cls._block_size * \
               cls._blocks_per_bucket
        cls._buckets.append(data)
def test_setup_withdata(self):
    """Setup with user header data through the top-cached wrapper: the
    file size accounts for the header, which round-trips on reopen."""
    fname = ".".join(self.id().split(".")[1:])
    fname += ".bin"
    fname = os.path.join(thisdir, fname)
    if os.path.exists(fname):
        os.remove(fname)                           # pragma: no cover
    bsize = 10
    blocks_per_bucket = 1
    header_data = bytes(bytearray([0,1,2]))
    fsetup = EncryptedHeapStorage.setup(
        fname,
        bsize,
        self._heap_height,
        heap_base=self._heap_base,
        storage_type=self._storage_type,
        blocks_per_bucket=blocks_per_bucket,
        header_data=header_data)
    fsetup.close()
    self.assertEqual(type(fsetup.raw_storage),
                     BlockStorageTypeFactory(self._storage_type))
    with open(fname, 'rb') as f:
        flen = len(f.read())
    self.assertEqual(
        flen,
        TopCachedEncryptedHeapStorage.compute_storage_size(
            bsize,
            self._heap_height,
            heap_base=self._heap_base,
            header_data=header_data))
    self.assertTrue(len(header_data) > 0)
    # adding header data strictly increases the computed size
    self.assertEqual(
        TopCachedEncryptedHeapStorage.compute_storage_size(
            bsize,
            self._heap_height,
            heap_base=self._heap_base,
            storage_type=self._storage_type) <
        TopCachedEncryptedHeapStorage.compute_storage_size(
            bsize,
            self._heap_height,
            heap_base=self._heap_base,
            storage_type=self._storage_type,
            header_data=header_data),
        True)
    self.assertEqual(
        flen >
        TopCachedEncryptedHeapStorage.compute_storage_size(
            bsize,
            self._heap_height,
            heap_base=self._heap_base,
            storage_type=self._storage_type,
            header_data=header_data,
            ignore_header=True),
        True)
    with TopCachedEncryptedHeapStorage(
            EncryptedHeapStorage(
                fname,
                key=fsetup.key,
                storage_type=self._storage_type),
            **self._init_kwds) as f:
        self.assertEqual(f.header_data, header_data)
        self.assertEqual(fsetup.header_data, header_data)
        self.assertEqual(f.key, fsetup.key)
        self.assertEqual(f.blocks_per_bucket,
                         blocks_per_bucket)
        self.assertEqual(fsetup.blocks_per_bucket,
                         blocks_per_bucket)
        # bucket count of a complete heap: (base**(height+1)-1)/(base-1)
        self.assertEqual(f.bucket_count,
                         (self._heap_base**(self._heap_height+1) - 1)
                         //(self._heap_base-1))
        self.assertEqual(fsetup.bucket_count,
                         (self._heap_base**(self._heap_height+1) - 1)
                         //(self._heap_base-1))
        self.assertEqual(f.bucket_size,
                         bsize * blocks_per_bucket)
        self.assertEqual(fsetup.bucket_size,
                         bsize * blocks_per_bucket)
        self.assertEqual(f.storage_name, fname)
        self.assertEqual(fsetup.storage_name, fname)
    os.remove(fname)
def __init__(self, storage, stash, position_map, **kwds):
    """Open an existing Path ORAM from an already-open storage device
    or a storage name, verifying the stash and position-map HMACs
    saved in the heap's header data.

    Args:
        storage: an EncryptedHeapStorageInterface instance (no extra
            keywords allowed in that case) or a storage name/handle
            forwarded to EncryptedHeapStorage.
        stash: the caller-provided stash mapping to verify and adopt.
        position_map: the caller-provided position map to verify.
        **kwds: 'cached_levels' (default 3), 'concurrency_level', and
            any remaining keywords for EncryptedHeapStorage.

    Raises:
        ValueError: if keywords accompany an open storage device, or
            if either HMAC check fails.
    """
    self._oram = None
    self._block_count = None
    if isinstance(storage, EncryptedHeapStorageInterface):
        # adopt the caller's device; do not close it on failure
        storage_heap = storage
        close_storage_heap = False
        if len(kwds):
            raise ValueError("Keywords not used when initializing "
                             "with a storage device: %s"
                             % (str(kwds)))
    else:
        cached_levels = kwds.pop('cached_levels', 3)
        concurrency_level = kwds.pop('concurrency_level', None)
        close_storage_heap = True
        storage_heap = TopCachedEncryptedHeapStorage(
            EncryptedHeapStorage(storage, **kwds),
            cached_levels=cached_levels,
            concurrency_level=concurrency_level)
    # NOTE(review): the block count and the two digests are sliced
    # from overlapping prefixes of header_data — this only works if
    # _header_struct_string pads for the digest bytes; confirm against
    # the class attribute definitions
    (self._block_count, ) = struct.unpack(
        self._header_struct_string,
        storage_heap.header_data[:self._header_offset])
    stashdigest = storage_heap.\
        header_data[:hashlib.sha384().digest_size]
    positiondigest = storage_heap.\
        header_data[hashlib.sha384().digest_size:\
                    (2*hashlib.sha384().digest_size)]
    try:
        if stashdigest != \
           PathORAM.stash_digest(
               stash,
               digestmod=hmac.HMAC(key=storage_heap.key,
                                   digestmod=hashlib.sha384)):
            raise ValueError("Stash HMAC does not match that saved with "
                             "storage heap %s"
                             % (storage_heap.storage_name))
    except:
        # only close the heap if this constructor opened it
        if close_storage_heap:
            storage_heap.close()
        raise
    try:
        if positiondigest != \
           PathORAM.position_map_digest(
               position_map,
               digestmod=hmac.HMAC(key=storage_heap.key,
                                   digestmod=hashlib.sha384)):
            raise ValueError(
                "Position map HMAC does not match that saved with "
                "storage heap %s" % (storage_heap.storage_name))
    except:
        if close_storage_heap:
            storage_heap.close()
        raise
    self._oram = TreeORAMStorageManagerExplicitAddressing(
        storage_heap,
        stash,
        position_map)
    assert self._block_count <= \
        self._oram.storage_heap.bucket_count
def test_setup_fails(self):
    """setup() must reject existing files and invalid arguments, and
    must never leave a partial file behind."""
    self.assertEqual(os.path.exists(self._dummy_name), False)
    # refuses to overwrite an existing file by default
    with self.assertRaises(IOError):
        EncryptedHeapStorage.setup(
            os.path.join(thisdir,
                         "baselines",
                         "exists.empty"),
            block_size=10,
            heap_height=1,
            key_size=AES.key_sizes[0],
            blocks_per_bucket=1,
            storage_type=self._type_name)
    self.assertEqual(os.path.exists(self._dummy_name), False)
    # same, with ignore_existing explicitly False
    with self.assertRaises(IOError):
        EncryptedHeapStorage.setup(
            os.path.join(thisdir,
                         "baselines",
                         "exists.empty"),
            block_size=10,
            heap_height=1,
            key_size=AES.key_sizes[0],
            blocks_per_bucket=1,
            storage_type=self._type_name,
            ignore_existing=False)
    self.assertEqual(os.path.exists(self._dummy_name), False)
    # bad block_size
    with self.assertRaises(ValueError):
        EncryptedHeapStorage.setup(self._dummy_name,
                                   block_size=0,
                                   heap_height=1,
                                   key_size=AES.key_sizes[0],
                                   blocks_per_bucket=1,
                                   storage_type=self._type_name)
    self.assertEqual(os.path.exists(self._dummy_name), False)
    # bad heap_height
    with self.assertRaises(ValueError):
        EncryptedHeapStorage.setup(self._dummy_name,
                                   block_size=1,
                                   heap_height=-1,
                                   blocks_per_bucket=1,
                                   storage_type=self._type_name)
    self.assertEqual(os.path.exists(self._dummy_name), False)
    # bad blocks_per_bucket
    with self.assertRaises(ValueError):
        EncryptedHeapStorage.setup(self._dummy_name,
                                   block_size=1,
                                   heap_height=1,
                                   key_size=AES.key_sizes[0],
                                   blocks_per_bucket=0,
                                   storage_type=self._type_name)
    self.assertEqual(os.path.exists(self._dummy_name), False)
    # bad heap_base
    with self.assertRaises(ValueError):
        EncryptedHeapStorage.setup(self._dummy_name,
                                   block_size=1,
                                   heap_height=1,
                                   key_size=AES.key_sizes[0],
                                   blocks_per_bucket=1,
                                   heap_base=1,
                                   storage_type=self._type_name)
    self.assertEqual(os.path.exists(self._dummy_name), False)
    # bad header_data
    with self.assertRaises(TypeError):
        EncryptedHeapStorage.setup(self._dummy_name,
                                   block_size=1,
                                   heap_height=1,
                                   key_size=AES.key_sizes[0],
                                   blocks_per_bucket=1,
                                   storage_type=self._type_name,
                                   header_data=2)
    self.assertEqual(os.path.exists(self._dummy_name), False)
    # uses block_count
    with self.assertRaises(ValueError):
        EncryptedHeapStorage.setup(self._dummy_name,
                                   block_size=1,
                                   heap_height=1,
                                   key_size=AES.key_sizes[0],
                                   blocks_per_bucket=1,
                                   block_count=1,
                                   storage_type=self._type_name)
    self.assertEqual(os.path.exists(self._dummy_name), False)
def test_write_path(self):
    """write_path/read_path round-trip at every level_start for every
    bucket, tracking exactly how many external (non-cached) buckets
    should be transferred in each direction."""
    # dummy bucket payload distinguishable from the original contents
    data = [bytearray([self._bucket_count]) * \
            self._block_size * \
            self._blocks_per_bucket
            for i in xrange(self._block_count)]
    with TopCachedEncryptedHeapStorage(
            EncryptedHeapStorage(
                self._testfname,
                key=self._key,
                storage_type=self._storage_type),
            **self._init_kwds) as f:
        self.assertEqual(f.bytes_sent, 0)
        self.assertEqual(f.bytes_received, 0)
        self.assertEqual(
            f.virtual_heap.first_bucket_at_level(0), 0)
        self.assertNotEqual(
            f.virtual_heap.last_leaf_bucket(), 0)
        all_buckets = list(range(f.virtual_heap.first_bucket_at_level(0),
                                 f.virtual_heap.last_leaf_bucket()+1))
        random.shuffle(all_buckets)
        total_read_buckets = 0
        total_write_buckets = 0
        for b in all_buckets:
            full_bucket_path = f.virtual_heap.Node(b).\
                bucket_path_from_root()
            all_level_starts = list(range(len(full_bucket_path)+1))
            random.shuffle(all_level_starts)
            for level_start in all_level_starts:
                orig = f.read_path(b, level_start=level_start)
                bucket_path = full_bucket_path[level_start:]
                # only buckets at or past the first non-cached
                # (external) level touch the backing store
                if len(full_bucket_path) <= f._external_level:
                    pass
                elif level_start >= f._external_level:
                    total_read_buckets += len(bucket_path)
                else:
                    total_read_buckets += len(full_bucket_path[f._external_level:])
                if level_start != len(full_bucket_path):
                    self.assertNotEqual(len(bucket_path), 0)
                self.assertEqual(f.virtual_heap.Node(b).level+1-level_start,
                                 len(bucket_path))
                self.assertEqual(len(orig), len(bucket_path))
                for i, bucket in zip(bucket_path, orig):
                    self.assertEqual(list(bytearray(bucket)),
                                     list(self._buckets[i]))
                # overwrite the path suffix with dummy data
                f.write_path(b,
                             [bytes(data[i])
                              for i in bucket_path],
                             level_start=level_start)
                if len(full_bucket_path) <= f._external_level:
                    pass
                elif level_start >= f._external_level:
                    total_write_buckets += len(bucket_path)
                else:
                    total_write_buckets += len(full_bucket_path[f._external_level:])
                new = f.read_path(b, level_start=level_start)
                if len(full_bucket_path) <= f._external_level:
                    pass
                elif level_start >= f._external_level:
                    total_read_buckets += len(bucket_path)
                else:
                    total_read_buckets += len(full_bucket_path[f._external_level:])
                self.assertEqual(len(new), len(bucket_path))
                for i, bucket in zip(bucket_path, new):
                    self.assertEqual(list(bytearray(bucket)),
                                     list(data[i]))
                # restore the original contents
                f.write_path(b,
                             [bytes(self._buckets[i])
                              for i in bucket_path],
                             level_start=level_start)
                if len(full_bucket_path) <= f._external_level:
                    pass
                elif level_start >= f._external_level:
                    total_write_buckets += len(bucket_path)
                else:
                    total_write_buckets += len(full_bucket_path[f._external_level:])
                orig = f.read_path(b, level_start=level_start)
                if len(full_bucket_path) <= f._external_level:
                    pass
                elif level_start >= f._external_level:
                    total_read_buckets += len(bucket_path)
                else:
                    total_read_buckets += len(full_bucket_path[f._external_level:])
                self.assertEqual(len(orig), len(bucket_path))
                for i, bucket in zip(bucket_path, orig):
                    self.assertEqual(list(bytearray(bucket)),
                                     list(self._buckets[i]))
            # full-path read after all level_start round-trips
            full_orig = f.read_path(b)
            if len(full_bucket_path) <= f._external_level:
                pass
            else:
                total_read_buckets += len(full_bucket_path[f._external_level:])
            for i, bucket in zip(full_bucket_path, full_orig):
                self.assertEqual(list(bytearray(bucket)),
                                 list(self._buckets[i]))
            # also verify each existing child bucket's full path
            for c in xrange(self._heap_base):
                cn = f.virtual_heap.Node(b).child_node(c)
                if not f.virtual_heap.is_nil_node(cn):
                    cb = cn.bucket
                    bucket_path = f.virtual_heap.Node(cb).\
                        bucket_path_from_root()
                    orig = f.read_path(cb)
                    if len(bucket_path) <= f._external_level:
                        pass
                    else:
                        total_read_buckets += len(bucket_path[f._external_level:])
                    self.assertEqual(len(orig), len(bucket_path))
                    for i, bucket in zip(bucket_path, orig):
                        self.assertEqual(list(bytearray(bucket)),
                                         list(self._buckets[i]))
        # the device's traffic counters must match the tallies exactly
        self.assertEqual(f.bytes_sent,
                         total_write_buckets*f.bucket_storage._storage.block_size)
        self.assertEqual(f.bytes_received,
                         total_read_buckets*f.bucket_storage._storage.block_size)
def setup(cls,
          storage_name,
          block_size,
          block_count,
          bucket_capacity=4,
          heap_base=2,
          cached_levels=3,
          concurrency_level=None,
          **kwds):
    """Create and initialize a new Path ORAM storage file.

    Every bucket starts out holding only empty-tagged blocks; each
    user block is then inserted through a load/push-down/evict cycle,
    and HMACs of the final stash and position map are written into the
    header.

    Args:
        storage_name: name/path of the storage to create.
        block_size: user payload bytes per block (positive integer).
        block_count: number of blocks to store (positive integer).
        bucket_capacity: blocks per heap bucket.
        heap_base: arity of the storage heap (>= 2).
        cached_levels: heap levels held in memory (0 disables caching).
        concurrency_level: forwarded to TopCachedEncryptedHeapStorage;
            only valid when caching is enabled.
        **kwds: 'header_data' (bytes), 'initialize' (callable i -> block
            payload), and remaining keywords for
            EncryptedHeapStorage.setup. 'heap_height' is not accepted.

    Returns:
        A PathORAM instance wrapping the new storage.

    Raises:
        ValueError / TypeError: on invalid arguments.
    """
    if 'heap_height' in kwds:
        raise ValueError("'heap_height' keyword is not accepted")
    if (bucket_capacity <= 0) or \
       (bucket_capacity != int(bucket_capacity)):
        raise ValueError("Bucket capacity must be a positive integer: %s"
                         % (bucket_capacity))
    if (block_size <= 0) or (block_size != int(block_size)):
        raise ValueError(
            "Block size (bytes) must be a positive integer: %s"
            % (block_size))
    if (block_count <= 0) or (block_count != int(block_count)):
        raise ValueError("Block count must be a positive integer: %s"
                         % (block_count))
    if heap_base < 2:
        raise ValueError(
            "heap base must be 2 or greater. Invalid value: %s"
            % (heap_base))
    heap_height = calculate_necessary_heap_height(heap_base,
                                                  block_count)
    stash = {}
    vheap = SizedVirtualHeap(
        heap_base,
        heap_height,
        blocks_per_bucket=bucket_capacity)
    position_map = cls._init_position_map(vheap, block_count)
    # each stored block carries ORAM bookkeeping bytes as a prefix
    oram_block_size = block_size + \
        TreeORAMStorageManagerExplicitAddressing.\
        block_info_storage_size
    user_header_data = kwds.pop('header_data', bytes())
    if type(user_header_data) is not bytes:
        raise TypeError("'header_data' must be of type bytes. "
                        "Invalid type: %s" % (type(user_header_data)))
    initialize = kwds.pop('initialize', None)
    header_data = struct.pack(cls._header_struct_string,
                              block_count)
    kwds['header_data'] = bytes(header_data) + user_header_data
    # build one template bucket of empty-tagged blocks, used to
    # initialize every bucket in the heap
    empty_bucket = bytearray(oram_block_size * bucket_capacity)
    empty_bucket_view = memoryview(empty_bucket)
    for i in xrange(bucket_capacity):
        TreeORAMStorageManagerExplicitAddressing.tag_block_as_empty(
            empty_bucket_view[(i*oram_block_size):\
                              ((i+1)*oram_block_size)])
    empty_bucket = bytes(empty_bucket)
    kwds['initialize'] = lambda i: empty_bucket
    f = None
    try:
        log.info("%s: setting up encrypted heap storage"
                 % (cls.__name__))
        f = EncryptedHeapStorage.setup(storage_name,
                                       oram_block_size,
                                       heap_height,
                                       heap_base=heap_base,
                                       blocks_per_bucket=bucket_capacity,
                                       **kwds)
        if cached_levels != 0:
            f = TopCachedEncryptedHeapStorage(
                f,
                cached_levels=cached_levels,
                concurrency_level=concurrency_level)
        elif concurrency_level is not None:
            raise ValueError(                      # pragma: no cover
                "'concurrency_level' keyword is "  # pragma: no cover
                "not used when no heap levels "    # pragma: no cover
                "are cached")                      # pragma: no cover
        oram = TreeORAMStorageManagerExplicitAddressing(
            f, stash, position_map)
        if initialize is None:
            # default payload: all-zero blocks
            zeros = bytes(bytearray(block_size))
            initialize = lambda i: zeros
        initial_oram_block = bytearray(oram_block_size)
        for i in tqdm.tqdm(xrange(block_count),
                           desc=("Initializing %s Blocks" % (cls.__name__)),
                           total=block_count,
                           disable=not pyoram.config.SHOW_PROGRESS_BAR):
            oram.tag_block_with_id(initial_oram_block, i)
            initial_oram_block[oram.block_info_storage_size:] = \
                initialize(i)[:]
            bucket = oram.position_map[i]
            bucket_level = vheap.Node(bucket).level
            # re-randomize the block's assigned bucket at the same level
            oram.position_map[i] = \
                oram.storage_heap.virtual_heap.\
                random_bucket_at_level(bucket_level)
            oram.load_path(bucket)
            oram.push_down_path()
            # place a copy in the stash
            oram.stash[i] = bytearray(initial_oram_block)
            oram.fill_path_from_stash()
            oram.evict_path()
        # overwrite the header's digest area with HMACs of the final
        # stash and position map, then append the user header data
        header_data = bytearray(header_data)
        stash_digest = cls.stash_digest(
            oram.stash,
            digestmod=hmac.HMAC(key=oram.storage_heap.key,
                                digestmod=hashlib.sha384))
        position_map_digest = cls.position_map_digest(
            oram.position_map,
            digestmod=hmac.HMAC(key=oram.storage_heap.key,
                                digestmod=hashlib.sha384))
        header_data[:len(stash_digest)] = stash_digest[:]
        header_data[len(stash_digest):\
                    (len(stash_digest)+len(position_map_digest))] = \
            position_map_digest[:]
        f.update_header_data(bytes(header_data) + user_header_data)
        return PathORAM(f, stash, position_map=position_map)
    except:
        # never leak an open storage device on failure
        if f is not None:
            f.close()                              # pragma: no cover
        raise
def main():
    """Demo: set up a small encrypted heap storage and walk the
    pointer-addressing tree-ORAM primitives (load / push-down / evict)
    on one random path, printing the path state after each step.

    The final assertion checks that a second push-down over the same,
    already-evicted path produces no further block reordering.
    """
    storage_name = "heap.bin"
    print("Storage Name: %s" % (storage_name))

    key_size = 32
    heap_base = 2
    heap_height = 2
    # each block is a (used?, id, target-bucket) triple
    block_size = struct.calcsize("!?LL")
    blocks_per_bucket = 2
    vheap = SizedVirtualHeap(
        heap_base,
        heap_height,
        blocks_per_bucket=blocks_per_bucket)
    print("Block Size: %s" % (block_size))
    print("Blocks Per Bucket: %s" % (blocks_per_bucket))

    position_map = {}

    def initialize(bucket_id):
        """Build the initial packed contents of bucket `bucket_id`.

        Roughly one in three slots is marked unused; the rest receive a
        fresh block id whose target bucket is drawn by walking a random
        root-to-leaf path below the bucket's node.
        """
        payload = bytes()
        for slot in range(blocks_per_bucket):
            if (bucket_id * slot) % 3:
                # unused slot
                payload += struct.pack(
                    "!?LL", False, 0, 0)
            else:
                node = vheap.Node(bucket_id)
                while not vheap.is_nil_node(node):
                    node = node.child_node(random.randint(0, heap_base - 1))
                node = node.parent_node()
                payload += struct.pack(
                    "!?LL", True, initialize.id_, node.bucket)
                position_map[initialize.id_] = node.bucket
                initialize.id_ += 1
        return payload
    # function attribute acts as the running block-id counter
    initialize.id_ = 1

    def show_path(oram, with_reordering=False):
        """Print the currently-loaded path state of `oram`."""
        print("")
        print(repr(vheap.Node(oram.path_stop_bucket)))
        print(oram.path_block_ids)
        print(oram.path_block_eviction_levels)
        if with_reordering:
            print(oram.path_block_reordering)

    with EncryptedHeapStorage.setup(
            storage_name,
            block_size,
            heap_height,
            heap_base=heap_base,
            key_size=key_size,
            blocks_per_bucket=blocks_per_bucket,
            initialize=initialize,
            ignore_existing=True) as f:
        assert storage_name == f.storage_name

        stash = {}
        oram = TreeORAMStorageManagerPointerAddressing(f, stash)

        start_bucket = vheap.random_bucket()
        oram.load_path(start_bucket)
        show_path(oram)

        oram.push_down_path()
        show_path(oram, with_reordering=True)

        oram.evict_path()
        oram.load_path(start_bucket)
        show_path(oram)

        oram.push_down_path()
        show_path(oram, with_reordering=True)
        # after one evict/reload cycle the path is already packed,
        # so no block should need to move
        assert all(move is None for move in oram.path_block_reordering)

    os.remove(storage_name)
def compute_storage_size(cls, *args, **kwds):
    """Return the total storage size (in bytes) for these parameters.

    The top-level cache keeps data only in memory, so the on-disk
    size is exactly that of the wrapped :class:`EncryptedHeapStorage`;
    all arguments are forwarded to its ``compute_storage_size``.
    """
    total_bytes = EncryptedHeapStorage.compute_storage_size(*args, **kwds)
    return total_bytes
def setup(cls,
          storage_name,
          block_size,
          block_count,
          bucket_capacity=4,
          heap_base=2,
          cached_levels=3,
          concurrency_level=None,
          **kwds):
    """Create and initialize a new Path ORAM storage file.

    Args:
        storage_name: name/path of the storage to create (passed
            through to ``EncryptedHeapStorage.setup``).
        block_size (int): user-visible block size in bytes; must be
            a positive integer.
        block_count (int): number of user blocks; must be a positive
            integer.  The heap height is derived from this value, so
            a ``heap_height`` keyword is rejected.
        bucket_capacity (int): blocks stored per heap bucket; must be
            a positive integer.
        heap_base (int): arity of the storage heap; must be >= 2.
        cached_levels (int): number of top heap levels to cache in
            memory (0 disables the ``TopCachedEncryptedHeapStorage``
            wrapper).
        concurrency_level: forwarded to the cache wrapper; only valid
            when some levels are cached.
        **kwds: remaining keywords (e.g. ``header_data``,
            ``initialize``, ``key_size``) forwarded to
            ``EncryptedHeapStorage.setup``.

    Returns:
        A new class instance wrapping the initialized storage, the
        (empty) stash, and the generated position map.

    Raises:
        ValueError: for invalid sizes/counts/base, a ``heap_height``
            keyword, or ``concurrency_level`` without caching.
        TypeError: if ``header_data`` is not ``bytes``.
    """
    # heap_height is computed below from block_count; callers may
    # not override it directly
    if 'heap_height' in kwds:
        raise ValueError("'heap_height' keyword is not accepted")
    if (bucket_capacity <= 0) or \
       (bucket_capacity != int(bucket_capacity)):
        raise ValueError(
            "Bucket capacity must be a positive integer: %s"
            % (bucket_capacity))
    if (block_size <= 0) or (block_size != int(block_size)):
        raise ValueError(
            "Block size (bytes) must be a positive integer: %s"
            % (block_size))
    if (block_count <= 0) or (block_count != int(block_count)):
        raise ValueError(
            "Block count must be a positive integer: %s"
            % (block_count))
    if heap_base < 2:
        raise ValueError(
            "heap base must be 2 or greater. Invalid value: %s"
            % (heap_base))

    # smallest heap (of this base) that can hold block_count blocks
    heap_height = calculate_necessary_heap_height(heap_base,
                                                  block_count)
    stash = {}
    vheap = SizedVirtualHeap(
        heap_base,
        heap_height,
        blocks_per_bucket=bucket_capacity)
    # assign every block id a random starting bucket
    position_map = cls._init_position_map(vheap, block_count)

    # each stored block carries per-block ORAM bookkeeping in front
    # of the user payload
    oram_block_size = block_size + \
        TreeORAMStorageManagerExplicitAddressing.\
        block_info_storage_size

    user_header_data = kwds.pop('header_data', bytes())
    if type(user_header_data) is not bytes:
        raise TypeError(
            "'header_data' must be of type bytes. "
            "Invalid type: %s" % (type(user_header_data)))
    initialize = kwds.pop('initialize', None)

    # our own header (currently just the block count) precedes any
    # user-supplied header data; the digest fields inside it are
    # filled in after initialization below
    header_data = struct.pack(
        cls._header_struct_string,
        block_count)
    kwds['header_data'] = bytes(header_data) + user_header_data

    # pre-build one all-empty bucket image and hand it to the heap
    # setup as the initializer for every bucket
    empty_bucket = bytearray(oram_block_size * bucket_capacity)
    empty_bucket_view = memoryview(empty_bucket)
    for i in xrange(bucket_capacity):
        TreeORAMStorageManagerExplicitAddressing.tag_block_as_empty(
            empty_bucket_view[(i*oram_block_size):\
                              ((i+1)*oram_block_size)])
    empty_bucket = bytes(empty_bucket)
    kwds['initialize'] = lambda i: empty_bucket

    f = None
    try:
        log.info("%s: setting up encrypted heap storage"
                 % (cls.__name__))
        f = EncryptedHeapStorage.setup(storage_name,
                                       oram_block_size,
                                       heap_height,
                                       heap_base=heap_base,
                                       blocks_per_bucket=bucket_capacity,
                                       **kwds)
        if cached_levels != 0:
            f = TopCachedEncryptedHeapStorage(
                f,
                cached_levels=cached_levels,
                concurrency_level=concurrency_level)
        elif concurrency_level is not None:
            raise ValueError(                      # pragma: no cover
                "'concurrency_level' keyword is "  # pragma: no cover
                "not used when no heap levels "    # pragma: no cover
                "are cached")                      # pragma: no cover
        oram = TreeORAMStorageManagerExplicitAddressing(
            f, stash, position_map)

        # default initial payload is all zeros
        if initialize is None:
            zeros = bytes(bytearray(block_size))
            initialize = lambda i: zeros

        # write each block into the tree by loading the path to its
        # assigned bucket, pushing existing blocks down, stashing the
        # new block, and refilling/evicting the path
        initial_oram_block = bytearray(oram_block_size)
        for i in tqdm.tqdm(xrange(block_count),
                           desc=("Initializing %s Blocks" % (cls.__name__)),
                           total=block_count,
                           disable=not pyoram.config.SHOW_PROGRESS_BAR):
            oram.tag_block_with_id(initial_oram_block, i)
            initial_oram_block[oram.block_info_storage_size:] = \
                initialize(i)[:]
            bucket = oram.position_map[i]
            bucket_level = vheap.Node(bucket).level
            # immediately remap the block to a fresh random bucket at
            # the same level so the written position is not revealed
            oram.position_map[i] = \
                oram.storage_heap.virtual_heap.\
                random_bucket_at_level(bucket_level)
            oram.load_path(bucket)
            oram.push_down_path()
            # place a copy in the stash
            oram.stash[i] = bytearray(initial_oram_block)
            oram.fill_path_from_stash()
            oram.evict_path()

        # authenticate the initial stash and position map with HMACs
        # keyed by the storage key, and embed them in the header
        header_data = bytearray(header_data)
        stash_digest = cls.stash_digest(
            oram.stash,
            digestmod=hmac.HMAC(key=oram.storage_heap.key,
                                digestmod=hashlib.sha384))
        position_map_digest = cls.position_map_digest(
            oram.position_map,
            digestmod=hmac.HMAC(key=oram.storage_heap.key,
                                digestmod=hashlib.sha384))
        header_data[:len(stash_digest)] = stash_digest[:]
        header_data[len(stash_digest):\
                    (len(stash_digest)+len(position_map_digest))] = \
            position_map_digest[:]
        f.update_header_data(bytes(header_data) + user_header_data)
        return PathORAM(f, stash, position_map=position_map)
    except:
        # close the storage on any failure before re-raising
        if f is not None:
            f.close()                              # pragma: no cover
        raise
def test_setup_fails(self):
    """``EncryptedHeapStorage.setup`` must reject existing files and
    invalid arguments, and must never leave a file behind on failure."""
    dummy = self._dummy_name
    existing = os.path.join(thisdir, "baselines", "exists.empty")
    self.assertEqual(os.path.exists(dummy), False)

    # (expected exception, target name, setup keywords)
    cases = [
        # file already exists (default ignore_existing)
        (IOError, existing, dict(block_size=10,
                                 heap_height=1,
                                 key_size=AES.key_sizes[0],
                                 blocks_per_bucket=1)),
        # file already exists (ignore_existing explicitly False)
        (IOError, existing, dict(block_size=10,
                                 heap_height=1,
                                 key_size=AES.key_sizes[0],
                                 blocks_per_bucket=1,
                                 ignore_existing=False)),
        # bad block_size
        (ValueError, dummy, dict(block_size=0,
                                 heap_height=1,
                                 key_size=AES.key_sizes[0],
                                 blocks_per_bucket=1)),
        # bad heap_height (no key_size here, matching original case)
        (ValueError, dummy, dict(block_size=1,
                                 heap_height=-1,
                                 blocks_per_bucket=1)),
        # bad blocks_per_bucket
        (ValueError, dummy, dict(block_size=1,
                                 heap_height=1,
                                 key_size=AES.key_sizes[0],
                                 blocks_per_bucket=0)),
        # bad heap_base
        (ValueError, dummy, dict(block_size=1,
                                 heap_height=1,
                                 key_size=AES.key_sizes[0],
                                 blocks_per_bucket=1,
                                 heap_base=1)),
        # bad header_data type
        (TypeError, dummy, dict(block_size=1,
                                 heap_height=1,
                                 key_size=AES.key_sizes[0],
                                 blocks_per_bucket=1,
                                 header_data=2)),
        # uses block_count (not accepted by heap storage setup)
        (ValueError, dummy, dict(block_size=1,
                                 heap_height=1,
                                 key_size=AES.key_sizes[0],
                                 blocks_per_bucket=1,
                                 block_count=1)),
    ]
    for exc_type, target, setup_kwds in cases:
        with self.assertRaises(exc_type):
            EncryptedHeapStorage.setup(
                target,
                storage_type=self._type_name,
                **setup_kwds)
        # failed setup must not create the dummy file
        self.assertEqual(os.path.exists(dummy), False)
def test_init_exists(self):
    """Opening an existing storage: missing key or digest mismatches
    raise ValueError; a correct open round-trips the stored blocks."""
    self.assertEqual(os.path.exists(self._testfname), True)
    with open(self._testfname, 'rb') as fin:
        databefore = fin.read()

    # (stash, position_map, extra keywords) combinations that must fail
    failing_cases = [
        # no key
        (self._stash, self._position_map, dict()),
        # stash does not match digest
        ({1: bytes()}, self._position_map, dict(key=self._key)),
        # stash hash invalid key (negative)
        ({-1: bytes()}, self._position_map, dict(key=self._key)),
        # position map has invalid item (negative)
        (self._stash, [-1], dict(key=self._key)),
        # position map does not match digest
        (self._stash, [1], dict(key=self._key)),
    ]
    for stash, position_map, extra_kwds in failing_cases:
        with self.assertRaises(ValueError):
            with PathORAM(self._testfname,
                          stash,
                          position_map,
                          storage_type=self._type_name,
                          **dict(extra_kwds, **self._kwds)) as f:
                pass                               # pragma: no cover

    # passing an already-open heap storage together with a
    # storage_type keyword is also rejected
    with self.assertRaises(ValueError):
        with EncryptedHeapStorage(self._testfname,
                                  key=self._key,
                                  storage_type=self._type_name) as fb:
            with PathORAM(fb,
                          self._stash,
                          self._position_map,
                          key=self._key,
                          storage_type=self._type_name,
                          **self._kwds) as f:
                self.assertIs(f.heap_storage, fb)
                pass                               # pragma: no cover

    # correct open: attributes reflect the setup parameters
    with PathORAM(self._testfname,
                  self._stash,
                  self._position_map,
                  key=self._key,
                  storage_type=self._type_name,
                  **self._kwds) as f:
        self.assertEqual(f.key, self._key)
        self.assertEqual(f.block_size, self._block_size)
        self.assertEqual(f.block_count, self._block_count)
        self.assertEqual(f.storage_name, self._testfname)
        self.assertEqual(f.header_data, bytes())
    self.assertEqual(os.path.exists(self._testfname), True)

    # the trailing block region must be untouched by a read-only open
    with open(self._testfname, 'rb') as fin:
        dataafter = fin.read()
    tail = self._block_count * self._block_size
    self.assertEqual(databefore[-tail:], dataafter[-tail:])