def test_sequences_reloaded(self):
  # Test that all the sequences created get restored
  # Create storage and a container
  self.reset_storage_params("a")
  storage1 = Storage.DirectoryStorage(self.storage_params)
  storage1.configure(self.CONFIGURATION, None)
  seq_id1 = storage1.create_sequence()
  for i in range(4):
    container = storage1.create_container()
    container.add_block(Digest.dataDigest("m"), Container.CODE_DATA, "m")
    container.finish_dump()
    container.upload()
  storage1.close()
  # Create a new db to simulate a different machine
  self.reset_storage_params("b")
  storage2 = Storage.DirectoryStorage(self.storage_params)
  class Handler:
    def __init__(self):
      self.sequences = []
    def is_requested(self, sequence_id, container_idx, digest, code):
      self.sequences.append((sequence_id, container_idx, digest, code))
      return False
  handler = Handler()
  storage2.configure(self.CONFIGURATION, handler)
  self.assert_((seq_id1, 0, Digest.dataDigest("m"), Container.CODE_DATA)
      in handler.sequences)
  storage2.close()

def test_epoch(self):
  # Test that when we increment the epoch, some blocks are gone
  bm = BlockManager.BlockManager(self.env, None)
  # There are 4 blocks: aaa, bbb, ccc, ddd.
  # - aaa is CODE_DATA, but is not used, so it must go away after a certain
  #   number of epochs.
  # - bbb is CODE_DATA, and is used once, so it will not go away for a long
  #   time.
  # - ccc is CODE_DATA, and is used 10 times, so it will not go away for a
  #   very long time.
  # - ddd is CODE_DIR, so it is never supposed to go away, although it's
  #   never used.
  aaa_d = Digest.dataDigest("aaa")
  bbb_d = Digest.dataDigest("bbb")
  ccc_d = Digest.dataDigest("ccc")
  ddd_d = Digest.dataDigest("ddd")
  bm.handle_block(aaa_d, Container.CODE_DATA, "aaa")
  bm.handle_block(bbb_d, Container.CODE_DATA, "bbb")
  bm.handle_block(ccc_d, Container.CODE_DATA, "ccc")
  bm.handle_block(ddd_d, Container.CODE_DIR, "ddd")
  self.assert_(bm.has_block(aaa_d))
  self.assert_(bm.has_block(bbb_d))
  self.assert_(bm.has_block(ccc_d))
  self.assert_(bm.has_block(ddd_d))
  bm.load_block(bbb_d)
  for i in range(10):
    bm.load_block(ccc_d)
  for i in range(5):
    bm.increment_epoch()
  self.failIf(bm.has_block(aaa_d))
  self.assert_(bm.has_block(bbb_d))
  self.assert_(bm.has_block(ccc_d))
  self.assert_(bm.has_block(ddd_d))
  for i in range(100):
    bm.increment_epoch()
  self.failIf(bm.has_block(aaa_d))
  self.failIf(bm.has_block(bbb_d))
  self.assert_(bm.has_block(ccc_d))
  self.assert_(bm.has_block(ddd_d))
  for i in range(1000):
    bm.increment_epoch()
  self.failIf(bm.has_block(aaa_d))
  self.failIf(bm.has_block(bbb_d))
  self.failIf(bm.has_block(ccc_d))
  self.assert_(bm.has_block(ddd_d))

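# The following is a minimal, hypothetical sketch of the eviction policy that
# test_epoch() above exercises. BlockManager's real bookkeeping is not shown
# in this file, so all names and constants here (EpochCacheSketch, TTL,
# USE_BONUS) are assumptions chosen to satisfy the assertions above, not the
# actual implementation.
class EpochCacheSketch(object):
  TTL = 5          # assumed: an unused CODE_DATA block survives a few epochs
  USE_BONUS = 100  # assumed: every load extends the block's life substantially
  def __init__(self):
    self.epoch = 0
    self.expiry = {}     # digest -> epoch after which the block is dropped
    self.pinned = set()  # non-data blocks (e.g. CODE_DIR) never expire
  def handle_block(self, digest, code, data):
    if code == Container.CODE_DATA:
      self.expiry[digest] = self.epoch + self.TTL
    else:
      self.pinned.add(digest)
  def load_block(self, digest):
    # Each use pushes the expiry further out, so a block loaded 10 times
    # outlives one loaded once, matching the assertions in test_epoch().
    if digest in self.expiry:
      self.expiry[digest] += self.USE_BONUS
  def increment_epoch(self):
    self.epoch += 1
  def has_block(self, digest):
    return digest in self.pinned or self.expiry.get(digest, 0) > self.epoch
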
def test_new_containers_visible(self):
  # Test that the new containers appearing in all the sequences are visible
  # Create two storages at the same place
  self.reset_storage_params("a")
  storage1 = Storage.DirectoryStorage(self.storage_params)
  storage1.configure(self.CONFIGURATION, None)
  seq_id1 = storage1.create_sequence()
  self.reset_storage_params("b")
  storage2 = Storage.DirectoryStorage(self.storage_params)
  storage2.configure(self.CONFIGURATION, None)
  seq_id2 = storage2.create_sequence()
  self.assert_(seq_id1 != seq_id2)
  # Create 4 containers in each storage, make sure the containers are
  # mutually visible
  c1s = []
  c2s = []
  for i in range(4):
    c1 = storage1.create_container()
    c1.add_block(Digest.dataDigest("c1block%d" % i),
        Container.CODE_DATA, "c1block%d" % i)
    c1.finish_dump()
    c1.upload()
    c1s.append((seq_id1, c1.index))
    c2 = storage2.create_container()
    c2.add_block(Digest.dataDigest("c2block%d" % i),
        Container.CODE_DATA, "c2block%d" % i)
    c2.finish_dump()
    c2.upload()
    c2s.append((seq_id2, c2.index))
  # Reload the storages
  class Handler:
    def __init__(self):
      self.containers = []
    def is_requested(self, sequence_id, container_idx, digest, code):
      if not (sequence_id, container_idx) in self.containers:
        # Since the last container is a summary, it will ask for all the
        # piggybacking containers too. Must make sure we register every new
        # container exactly once.
        self.containers.append((sequence_id, container_idx))
      return False
    def loaded(self, digest, code, data):
      pass
  handler1 = Handler()
  storage1.load_sequences(handler1)
  self.assertEqual(c2s, sorted(handler1.containers))
  handler2 = Handler()
  storage2.load_sequences(handler2)
  self.assertEqual(c1s, sorted(handler2.containers))
  storage1.close()
  storage2.close()

def finish_dump(self):
  if self.compression_active:
    self.body_dumper.stop_compression()
    self.compression_active = False
  if self.encryption_active:
    self.body_dumper.stop_encryption()
  #
  # Serialize the body block table
  #
  body_table_io = StringIO.StringIO()
  body_blocks = self.body_dumper.get_blocks()
  serialize_blocks(body_table_io, body_blocks)
  body_table_str = body_table_io.getvalue()
  #
  # Serialize the header table
  #
  message = "Manent container %d of sequence '%s'" % (
      self.index, base64.urlsafe_b64encode(self.sequence_id))
  self.header_dumper.add_block(Digest.dataDigest(message),
      CODE_CONTAINER_DESCRIPTOR, message)
  self.header_dumper.add_block(Digest.dataDigest(body_table_str),
      CODE_BLOCK_TABLE, body_table_str)
  if self.encryption_active:
    self.header_dumper.stop_encryption()
    self.encryption_active = False
  header_blocks = self.header_dumper.get_blocks()
  header_table_io = StringIO.StringIO()
  serialize_blocks(header_table_io, header_blocks)
  header_table_str = header_table_io.getvalue()
  #
  # Write the header
  #
  assert self.header_file.tell() == 0
  self.header_file.write(MAGIC)
  Format.write_int(self.header_file, VERSION)
  Format.write_int(self.header_file, self.index)
  Format.write_int(self.header_file, len(header_table_str))
  self.header_file.write(Digest.dataDigest(header_table_str))
  self.header_file.write(header_table_str)
  header_dump_str = self.header_dump_os.getvalue()
  Format.write_int(self.header_file, len(header_dump_str))
  self.header_file.write(header_dump_str)
  logging.debug("Container %d has header of size %d" %
      (self.index, self.header_file.tell()))

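# For orientation, the on-disk header layout that finish_dump() produces
# (and that _load_header() below parses back), as read off the two functions
# in this file:
#
#   MAGIC
#   VERSION                       (Format.write_int)
#   container index               (Format.write_int)
#   len(header_table_str)         (Format.write_int)
#   dataDigest(header_table_str)  (fixed-size digest)
#   header_table_str              (serialized header block table)
#   len(header_dump_str)          (Format.write_int)
#   header_dump_str               (dumped header blocks, incl. the body table)
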
def test_new_containers_in_active_sequence_caught(self):
  # Test that if new containers appear unexpectedly in the active sequence,
  # they are actually discovered.
  self.reset_storage_params("a")
  storage1 = Storage.DirectoryStorage(self.storage_params)
  storage1.configure(self.CONFIGURATION, None)
  seq_id1 = storage1.create_sequence()
  self.reset_storage_params("b")
  storage2 = Storage.DirectoryStorage(self.storage_params)
  storage2.configure(self.CONFIGURATION, None)
  storage2.create_sequence(test_override_sequence_id=seq_id1)
  # We need to create 4 containers, since the first 3 are non-summary and
  # will thus not be discovered.
  for i in range(4):
    c = storage2.create_container()
    c.add_block(Digest.dataDigest("aaa%d" % i), Container.CODE_DATA,
        "aaa%d" % i)
    c.finish_dump()
    c.upload()
  try:
    storage1.load_sequences(None)
  except:
    logging.info("Got an expected exception:\n" + traceback.format_exc())
  else:
    self.fail("Expected load_sequences to discover the unexpected container")
  try:
    storage1.close()
    storage2.close()
  except:
    # OK, we can't close them; just move on.
    print "----------------------- Can't close storages!!!"

def test_container(self):
  # Test that a container is created correctly.
  # See that the container is created, stored and reloaded back,
  # and that all blocks get restored.
  self.storage.set_piggybacking_headers(False)
  handler = MockHandler()
  container = self.storage.create_container()
  for d in DATA:
    container.add_block(Digest.dataDigest(d), Container.CODE_DATA, d)
    handler.add_expected(Digest.dataDigest(d), Container.CODE_DATA, d)
  self.storage.finalize_container(container)
  index = container.index
  container = self.storage.get_container(index)
  container.load_blocks(handler)
  self.failUnless(handler.check())

def test_piggybacking_block(self):
  # Check that piggybacking blocks are created when necessary.
  self.storage_manager.storage.max_container_size = 1000 * 1024
  bs = BlockSequencer.BlockSequencer(
      self.env, self.txn, self.storage_manager, self.block_manager)
  for i in range(20000):
    # We need to make sure the block doesn't compress well, otherwise
    # the container will never fill up.
    block = os.urandom(25000) + str(i)
    digest = Digest.dataDigest(block)
    bs.add_block(digest, Container.CODE_DATA, block)
    logging.debug("Added block %d. Number of containers %d" %
        (i, bs.num_containers_created))
    if bs.num_containers_created == 4:
      # Container with index 3 must contain piggybacking headers.
      break
  container = self.storage_manager.get_container(3)
  class CheckHandler:
    def __init__(self):
      self.num_piggyback_headers = 0
    def is_requested(self, digest, code):
      if code == Container.CODE_HEADER:
        self.num_piggyback_headers += 1
      return False
  ch = CheckHandler()
  container.load_blocks(ch)
  self.assertEquals(3, ch.num_piggyback_headers)

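# A hypothetical sketch of the compute_num_piggyback_headers() policy that
# start_dump() consults, written only to be consistent with the test above
# (container index 3 carries the headers of containers 0..2); the actual
# function in Container.py may use a more elaborate hierarchical scheme.
def compute_num_piggyback_headers(index):
  # Assumed policy: every fourth container is a "summary" container that
  # piggybacks the headers of the three preceding non-summary containers.
  if index % 4 == 3:
    return 3
  return 0
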
def test_add_block(self):
  # Test that if blocks are added, they are available for loading back.
  storage_manager = StorageManager.StorageManager(self.env, self.txn)
  storage_manager.load_storages()
  storage_index = storage_manager.add_storage(
      {'type': '__mock__', 'encryption_key': 'kuku', 'key': ''})
  storage_manager.make_active_storage(storage_index)
  block = "some strange text"
  block_digest = Digest.dataDigest(block)
  storage_manager.add_block(block_digest, Container.CODE_DATA, block)
  storage_manager.flush()
  storage_manager.close()
  # Recreate the storage and read the block back
  storage_manager = StorageManager.StorageManager(self.env, self.txn)
  storage_manager.load_storages()
  class Handler:
    def __init__(self):
      self.blocks = {}
    def is_requested(self, digest, code):
      return True
    def loaded(self, digest, code, data):
      self.blocks[(digest, code)] = data
  handler = Handler()
  storage_manager.load_blocks_for(block_digest, handler)
  self.assertEqual({(block_digest, Container.CODE_DATA): block},
      handler.blocks)
  storage_manager.close()

def test_container_created(self):
  # Test that containers are created and restored correctly.
  # Create storage and a container
  self.reset_storage_params("a")
  storage = Storage.DirectoryStorage(self.storage_params)
  storage.configure(self.CONFIGURATION, None)
  seq_id = storage.create_sequence()
  container = storage.create_container()
  block = "some strange text"
  block_digest = Digest.dataDigest(block)
  container.add_block(block_digest, Container.CODE_DATA, block)
  container.finish_dump()
  container.upload()
  self.assertEqual(0, container.get_index())
  self.assertEqual(seq_id, container.get_sequence_id())
  storage.close()
  # Reload the storage and read the container
  self.reset_storage_params("a")
  storage = Storage.DirectoryStorage(self.storage_params)
  storage.load_configuration(None)
  container = storage.get_container(seq_id, 0)
  class Handler:
    def __init__(self):
      self.blocks = []
    def is_requested(self, digest, code):
      return True
    def loaded(self, digest, code, data):
      self.blocks.append((digest, code, data))
  handler = Handler()
  container.load_blocks(handler)
  logging.debug("Blocks: " + str(handler.blocks))
  data_blocks = [b for b in handler.blocks if b[1] == Container.CODE_DATA]
  self.assertEqual(block_digest, data_blocks[0][0])
  storage.close()

def test_container_with_piggybacking(self):
  # Test that a container is created correctly.
  # See that the container is created, stored and reloaded back,
  # and that all blocks get restored.
  # In this test, we ask the storage to provide separate headers, as if they
  # were found through piggybacking.
  self.storage.set_piggybacking_headers(True)
  handler = MockHandler()
  container = self.storage.create_container()
  for d in DATA:
    container.add_block(Digest.dataDigest(d), Container.CODE_DATA, d)
    handler.add_expected(Digest.dataDigest(d), Container.CODE_DATA, d)
  self.storage.finalize_container(container)
  index = container.index
  container = self.storage.get_container(index)
  container.load_blocks(handler)
  self.failUnless(handler.check())

def _load_header(self, header_file):
  logging.debug("****************************** loading header")
  magic = header_file.read(len(MAGIC))
  if MAGIC != magic:
    raise Exception("Manent: magic number not found")
  version = Format.read_int(header_file)
  if version != VERSION:
    raise Exception("Container %d has unsupported version" % self.index)
  index = Format.read_int(header_file)
  if index != self.index:
    raise Exception(
        "Manent: wrong container file index. Expected %s, found %s" %
        (str(self.index), str(index)))
  header_table_size = Format.read_int(header_file)
  header_table_digest = header_file.read(Digest.dataDigestSize())
  header_table_str = header_file.read(header_table_size)
  if Digest.dataDigest(header_table_str) != header_table_digest:
    raise Exception("Manent: header of container file corrupted")
  header_dump_len = Format.read_int(header_file)
  header_dump_str = header_file.read(header_dump_len)
  header_table_io = StringIO.StringIO(header_table_str)
  header_blocks = unserialize_blocks(header_table_io)
  class BlockTableListener:
    def __init__(self):
      self.body_table_str = None
    def is_requested(self, digest, code):
      return code == CODE_BLOCK_TABLE
    def loaded(self, digest, code, data):
      assert code == CODE_BLOCK_TABLE
      self.body_table_str = data
  listener = BlockTableListener()
  header_dump_str_io = StringIO.StringIO(header_dump_str)
  header_dump_loader = DataDumpLoader(header_dump_str_io, header_blocks,
      password=self.storage.get_encryption_key())
  header_dump_loader.load_blocks(listener)
  body_table_io = StringIO.StringIO(listener.body_table_str)
  blocks = unserialize_blocks(body_table_io)
  return blocks

def unserialize_blocks(file):
  blocks = []
  while True:
    digest = file.read(Digest.dataDigestSize())
    if digest == "":
      break
    size = Format.read_int(file)
    code = Format.read_int(file)
    blocks.append((digest, size, code))
  return blocks

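# For reference, a minimal sketch of the serialize_blocks() counterpart that
# finish_dump() above calls; it writes each (digest, size, code) entry in the
# fixed-width format that unserialize_blocks() reads back. The real
# implementation in Container.py may differ in details.
def serialize_blocks(file, blocks):
  for digest, size, code in blocks:
    file.write(digest)  # fixed length: Digest.dataDigestSize() bytes
    Format.write_int(file, size)
    Format.write_int(file, code)
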
def test_container_created(self):
  # Check that if blocks are added sufficiently many times, a new container
  # will be created.
  bs = BlockSequencer.BlockSequencer(
      self.env, self.txn, self.storage_manager, self.block_manager)
  self.assertEquals(0, bs.num_containers_created)
  for i in range(5000):
    block = os.urandom(500) + str(i)
    logging.debug("Adding block %d: size=%d" % (i, len(block)))
    bs.add_block(Digest.dataDigest(block), Container.CODE_DATA, block)
  # First container is created on the first block, so we need at least
  # another one to be created to know that the first one was closed.
  self.assert_(1 < bs.num_containers_created)

def test_flush(self):
  # Check that if flush() is called, all the current aside blocks are written
  # out (but not piggybacking blocks!)
  bs = BlockSequencer.BlockSequencer(
      self.env, self.txn, self.storage_manager, self.block_manager)
  block = "d" * 500
  digest = Digest.dataDigest(block)
  self.block_manager.add_block(digest, Container.CODE_DIR, block)
  bs.add_block(digest, Container.CODE_DIR, block)
  bs.flush()
  # Block sequencer must create two containers: one to contain all the blocks
  # created above, and one to piggyback the header for the first container.
  self.assertEquals(2, bs.num_containers_created)

def start_encryption(self, algorithm_code, seed, password):
  """Encryption can be started only when compression is inactive."""
  assert self.encryptor is None
  assert self.compressor is None
  self.blocks.append((seed, 0, algorithm_code))
  if algorithm_code == CODE_ENCRYPTION_ARC4:
    key = Digest.dataDigest(seed + password)
    self.encryptor = Crypto.Cipher.ARC4.new(key)
  self.encrypted_data_size = 0
  self.encrypted_data_digest = Digest.DataDigestAccumulator()

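# A hypothetical sketch of the private __encrypt() helper that
# stop_compression() below relies on, consistent with the state initialized in
# start_encryption(); the accumulator's update() method is an assumption. It
# tracks the size and digest of the data passing through the ARC4 encryptor.
def __encrypt(self, data):
  self.encrypted_data_size += len(data)
  self.encrypted_data_digest.update(data)
  return self.encryptor.encrypt(data)
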
def test_handle_block(self):
  # Test that blocks that have been added by handle_block() are later found.
  bm = BlockManager.BlockManager(self.env, None)
  bm.handle_block(Digest.dataDigest("aaa"), Container.CODE_DATA, "aaa")
  bm.handle_block(Digest.dataDigest("bbb"), Container.CODE_DATA, "bbb")
  bm.handle_block(Digest.dataDigest("ccc"), Container.CODE_DIR, "ccc")
  bm.handle_block(Digest.dataDigest("ddd"), Container.CODE_DATA_PACKER, "ddd")
  # Load the blocks repeatedly.
  for i in range(5):
    self.assertEqual(bm.load_block(Digest.dataDigest("aaa")), "aaa")
    self.assertEqual(bm.load_block(Digest.dataDigest("bbb")), "bbb")
    self.assertEqual(bm.load_block(Digest.dataDigest("ccc")), "ccc")
    self.assertEqual(bm.load_block(Digest.dataDigest("ddd")), "ddd")

def test_add_many_aside_blocks(self):
  # Check that if aside blocks are added sufficiently many times, they will
  # eventually be written to a container.
  self.storage_manager.storage.max_container_size = 512 * 1024
  bs = BlockSequencer.BlockSequencer(
      self.env, self.txn, self.storage_manager, self.block_manager)
  # All these blocks sit aside, and are not inserted into a container until
  # a normal block is inserted.
  for i in range(2000):
    block = os.urandom(1024)
    digest = Digest.dataDigest(block)
    logging.debug("Adding aside block %d: %d" % (i, len(block)))
    self.block_manager.add_block(digest, Container.CODE_DIR, block)
    bs.add_block(digest, Container.CODE_DIR, block)
  self.assertEquals(0, bs.num_containers_created)
  # Now insert a DATA block and observe that at least two containers have
  # been created - this is because the aside blocks have been pushed.
  # The dummy block must be larger than the aside blocks, otherwise it might
  # fit in the container which refused the aside block.
  block = os.urandom(2048)
  digest = Digest.dataDigest(block)
  bs.add_block(digest, Container.CODE_DATA, block)
  self.assert_(1 < bs.num_containers_created)

def test_add_block(self):
  # Test that blocks of different types can be added and restored.
  bm = BlockManager.BlockManager(self.env, None)
  bm.add_block(Digest.dataDigest("aaa"), Container.CODE_DATA, "aaa")
  bm.add_block(Digest.dataDigest("bbb"), Container.CODE_DATA_PACKER, "bbb")
  bm.add_block(Digest.dataDigest("ccc"), Container.CODE_DIR, "ccc")
  bm.add_block(Digest.dataDigest("ddd"), Container.CODE_DIR_PACKER, "ddd")
  # aaa is a data block. It is not cached.
  self.failIf(bm.has_block(Digest.dataDigest("aaa")))
  # self.assertEqual(bm.load_block(Digest.dataDigest("aaa")), "aaa")
  # bbb is not a data block, so it should always be available.
  self.assertEqual(bm.load_block(Digest.dataDigest("bbb")), "bbb")
  # ccc and ddd are non-data blocks, so they should be cached.
  self.assertEqual(bm.load_block(Digest.dataDigest("ccc")), "ccc")
  self.assertEqual(bm.load_block(Digest.dataDigest("ddd")), "ddd")

def test_data_dumper_encrypt(self):
  # Test the data dumper when encryption is enabled
  handler = MockHandler()
  outfile = StringIO.StringIO()
  dumper = Container.DataDumper(outfile)
  seed = Digest.dataDigest("1")
  dumper.start_encryption(Container.CODE_ENCRYPTION_ARC4, seed, "kakamaika")
  for d in DATA:
    digest = Digest.dataDigest(d)
    dumper.add_block(digest, Container.CODE_DATA, d)
    handler.add_expected(digest, Container.CODE_DATA, d)
  dumper.stop_encryption()
  infile = StringIO.StringIO(outfile.getvalue())
  blocks = dumper.get_blocks()
  # The loader must be given the same password that was used for encryption.
  undumper = Container.DataDumpLoader(infile, blocks, password="kakamaika")
  undumper.load_blocks(handler)
  self.failUnless(handler.check())

def test_add_storage(self):
  # Test that adding a storage creates (and recreates) it correctly
  storage_manager = StorageManager.StorageManager(self.env, self.txn)
  storage_manager.load_storages()
  storage_index = storage_manager.add_storage(
      {'type': '__mock__', 'encryption_key': 'kuku', 'key': ''})
  storage_manager.make_active_storage(storage_index)
  block = "some strange text"
  block_digest = Digest.dataDigest(block)
  storage_manager.add_block(block_digest, Container.CODE_DATA, block)
  storage_manager.flush()
  seq_id1 = storage_manager.get_active_sequence_id()
  storage_manager.close()
  # Recreate the storage_manager and add another block to it
  storage_manager = StorageManager.StorageManager(self.env, self.txn)
  storage_manager.load_storages()
  block = "some other strange text"
  block_digest = Digest.dataDigest(block)
  storage_manager.add_block(block_digest, Container.CODE_DATA, block)
  storage_manager.flush()
  seq_id2 = storage_manager.get_active_sequence_id()
  self.assertEqual(seq_id1, seq_id2)
  storage_manager.close()

def start_dump(self, sequence_id, index):
  assert self.mode is None
  self.mode = "DUMP"
  self.sequence_id = sequence_id
  self.index = index
  self.header_file = self.storage.open_header_file(
      self.sequence_id, self.index)
  assert self.header_file.tell() == 0
  self.body_file = self.storage.open_body_file(
      self.sequence_id, self.index)
  assert self.body_file.tell() == 0
  self.piggyback_headers_num = 0
  self.piggyback_headers_size = 0
  self.max_num_piggyback_headers = compute_num_piggyback_headers(self.index)
  logging.debug("Container %d can add %d piggyback headers" %
      (self.index, self.max_num_piggyback_headers))
  self.body_dumper = DataDumper(self.body_file)
  self.header_dump_os = StringIO.StringIO()
  self.header_dumper = DataDumper(self.header_dump_os)
  if self.storage.get_encryption_key() != "":
    self.encryption_active = True
    self.body_dumper.start_encryption(
        CODE_ENCRYPTION_ARC4,
        os.urandom(Digest.dataDigestSize()),
        self.storage.get_encryption_key())
    self.header_dumper.start_encryption(
        CODE_ENCRYPTION_ARC4,
        os.urandom(Digest.dataDigestSize()),
        self.storage.get_encryption_key())
  else:
    self.encryption_active = False
  self.body_dumper.start_compression(CODE_COMPRESSION_BZ2)
  self.compression_active = True
  self.compressed_data = 0

def test_load(self):
  # Test that an increment saves to the db and loads back
  repository = Mock.MockRepository()
  blockDB = Mock.MockBlockDatabase(repository)
  db = self.env.get_database_btree("a", None, None)
  increment1 = Increment.Increment(blockDB, db)
  increment2 = Increment.Increment(blockDB, db)
  increment1.start(0, 1, "backup1", "test increment 1")
  increment1.finalize(Digest.dataDigest("aaaaaa"), 0, Nodes.NULL_STAT, 10)
  increment2.load(0, 1)
  for attr in ["comment", "fs_digest", "ctime", "ftime", "index",
      "storage_index"]:
    self.assertEqual(increment1.get_attribute(attr),
        increment2.get_attribute(attr))

def start_compression(self, algorithm_code):
  """Compression can be started under encryption."""
  assert self.compressor is None
  digest = Digest.dataDigest(str(len(self.blocks)))
  self.pending_compression_start_block = (digest, 0, algorithm_code)
  if algorithm_code == CODE_COMPRESSION_BZ2:
    self.compressor = bz2.BZ2Compressor(9)
  elif algorithm_code == CODE_COMPRESSION_GZIP:
    self.compressor = zlib.compressobj()
  else:
    raise Exception("Unsupported compression algorithm")
  self.compressor_algorithm = algorithm_code
  self.uncompressed_size = 0
  self.compressed_size = 0

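# Sketch of how the deferred start marker set above is presumably consumed by
# add_block() on the first block written after start_compression() (the helper
# name below is hypothetical). Deferring the append is what lets
# stop_compression() drop an empty compression session without a trace.
def __flush_pending_compression_start(self):
  if self.pending_compression_start_block is not None:
    self.blocks.append(self.pending_compression_start_block)
    self.pending_compression_start_block = None
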
def test_start(self):
  # Test that the increment database starts increments correctly
  #
  # Create one increment and see that it produces the correct basis
  #
  class MockStorageManager:
    def __init__(self):
      self.blocks = {}
      self.listeners = []
    def get_active_storage_index(self):
      return 0
    def add_block(self, digest, code, data):
      self.blocks[digest] = (code, data)
    def add_block_listener(self, listener):
      self.listeners.append(listener)
  msm = MockStorageManager()
  idb = IncrementManager.IncrementManager(self.env, self.txn, "backup1", msm)
  bases1, level1, num_f1 = idb.start_increment("test increment 1")
  self.assertEqual(bases1, None)
  self.assertEqual(level1, None)
  fs1_digest = Digest.dataDigest("data1")
  fs1_level = 0
  idb.finalize_increment(fs1_digest, fs1_level, Nodes.NULL_STAT, 1)
  bases2, level2, num_f2 = idb.start_increment("test increment 2")
  # The finalized increment 1 is returned as the basis; an unfinalized
  # increment would not be.
  self.assertEqual(bases2, fs1_digest)
  self.assertEqual(level2, fs1_level)
  idb.close()
  #
  # Emulate restart of the program: the IncrementManager is recreated from
  # the databases
  #
  idb = IncrementManager.IncrementManager(self.env, self.txn, "backup1", msm)
  bases3, level3, num_f3 = idb.start_increment("test increment 3")
  self.assertEqual(bases3, fs1_digest)
  self.assertEqual(level3, fs1_level)
  idb.close()
  idb = IncrementManager.IncrementManager(self.env, self.txn, "backup1", msm)
  bases4, level4, num_f4 = idb.start_increment("test increment 4")
  self.assertEqual(bases4, fs1_digest)
  self.assertEqual(level4, fs1_level)
  idb.close()

def test_data_dumper(self):
  # Basic test of the data dumper: data in, data out
  handler = MockHandler()
  outfile = StringIO.StringIO()
  dumper = Container.DataDumper(outfile)
  for d in DATA:
    digest = Digest.dataDigest(d)
    dumper.add_block(digest, Container.CODE_DATA, d)
    handler.add_expected(digest, Container.CODE_DATA, d)
  infile = StringIO.StringIO(outfile.getvalue())
  blocks = dumper.get_blocks()
  undumper = Container.DataDumpLoader(infile, blocks, password=None)
  undumper.load_blocks(handler)
  self.failUnless(handler.check())

def stop_compression(self):
  if self.pending_compression_start_block is not None:
    # No block was added between start_compression and stop_compression;
    # in this case, the start block wasn't added, and the stop block
    # shouldn't be added either.
    self.compressor = None
    self.pending_compression_start_block = None
    return
  assert self.compressor is not None
  tail = self.compressor.flush()
  self.compressed_size += len(tail)
  if self.encryptor is not None:
    tail = self.__encrypt(tail)
  self.file.write(tail)
  self.total_size += len(tail)
  self.blocks.append((Digest.dataDigest(""), self.compressed_size,
      CODE_COMPRESSION_END))
  self.compressor = None

def test_base_storage(self):
  # Test that base storage works
  # First storage manager. This will be the base.
  logging.debug("creating first storage manager")
  storage_manager = StorageManager.StorageManager(self.env, self.txn)
  storage_manager.load_storages()
  storage_index = storage_manager.add_storage(
      {'type': '__mock__', 'encryption_key': 'kuku', 'key': 'a'})
  storage_manager.make_active_storage(storage_index)
  block = "some strange text"
  block_digest = Digest.dataDigest(block)
  storage_manager.add_block(block_digest, Container.CODE_DATA, block)
  storage_manager.flush()
  storage_manager.close()
  self.txn.commit()
  self.txn = None
  self.env.close()
  self.env = None
  Config.paths.clean_temp_area()
  # Create a second storage manager with a different db, on a different
  # storage, and see that it sees the block from the base one.
  logging.debug("creating second storage manager")
  class Handler:
    def __init__(self):
      self.blocks = {}
    def is_requested(self, digest, code):
      return True
    def loaded(self, digest, code, data):
      self.blocks[(digest, code)] = data
  handler = Handler()
  self.env = Database.PrivateDatabaseManager()
  self.txn = Database.TransactionHandler(self.env)
  storage_manager = StorageManager.StorageManager(self.env, self.txn)
  storage_manager.load_storages()
  storage_index = storage_manager.add_storage(
      {'type': '__mock__', 'encryption_key': 'kuku', 'key': 'a'})
  storage_manager.make_active_storage(storage_index)
  storage_manager.load_blocks_for(block_digest, handler)
  self.assertEqual({(block_digest, Container.CODE_DATA): block},
      handler.blocks)
  storage_manager.close()

def test_data_dumper_compress(self):
  # Test the data dumper when compression is enabled
  handler = MockHandler()
  outfile = StringIO.StringIO()
  dumper = Container.DataDumper(outfile)
  dumper.start_compression(Container.CODE_COMPRESSION_BZ2)
  for d in DATA:
    digest = Digest.dataDigest(d)
    dumper.add_block(digest, Container.CODE_DATA, d)
    handler.add_expected(digest, Container.CODE_DATA, d)
  dumper.stop_compression()
  infile = StringIO.StringIO(outfile.getvalue())
  blocks = dumper.get_blocks()
  undumper = Container.DataDumpLoader(infile, blocks, password=None)
  undumper.load_blocks(handler)
  self.failUnless(handler.check())

def test_rescan_storage(self):
  # Test that new sequences appearing from outside are discovered
  storage_manager = StorageManager.StorageManager(self.env, self.txn)
  storage_manager.load_storages()
  storage_index = storage_manager.add_storage(
      {'type': '__mock__', 'encryption_key': 'kuku', 'key': ''})
  storage_manager.make_active_storage(storage_index)
  block = "some strange text"
  block_digest = Digest.dataDigest(block)
  storage_manager.add_block(block_digest, Container.CODE_DATA, block)
  storage_manager.flush()
  storage_manager.close()
  self.txn.commit()
  self.txn = None
  self.env.close()
  self.env = None
  Config.paths.clean_temp_area()
  # Create a second storage manager with a different db, but on the same
  # storage (mock shares all the files), and see that it sees the block from
  # the first one.
  class Handler:
    def __init__(self):
      self.blocks = {}
    def is_requested(self, digest, code):
      return True
    def loaded(self, digest, code, data):
      self.blocks[(digest, code)] = data
  handler = Handler()
  self.env = Database.PrivateDatabaseManager()
  self.txn = Database.TransactionHandler(self.env)
  storage_manager = StorageManager.StorageManager(self.env, self.txn)
  storage_manager.load_storages()
  storage_index = storage_manager.add_storage(
      {'type': '__mock__', 'encryption_key': 'kuku', 'key': ''})
  storage_manager.make_active_storage(storage_index)
  storage_manager.load_blocks_for(block_digest, handler)
  self.assertEqual({(block_digest, Container.CODE_DATA): block},
      handler.blocks)
  storage_manager.close()

def test_clean_start(self):
  # Check that if BlockSequencer is started cleanly, it is initialized
  # correctly.
  bs = BlockSequencer.BlockSequencer(
      self.env, self.txn, self.storage_manager, self.block_manager)
  self.assertEquals(0, bs.get_aside_blocks_num())
  self.assertEquals(0, bs.get_aside_blocks_size())
  self.assertEquals(0, bs.get_piggyback_headers_num())
  block = "kukumuku"
  bs.add_block(Digest.dataDigest(block), Container.CODE_DIR, block)
  self.assertEquals(1, bs.get_aside_blocks_num())
  self.assertEquals(len(block), bs.get_aside_blocks_size())
  bs.close()
  # Check that if BlockSequencer is started a second time, all the state is
  # preserved.
  bs = BlockSequencer.BlockSequencer(
      self.env, self.txn, self.storage_manager, self.block_manager)
  self.assertEquals(1, bs.get_aside_blocks_num())
  self.assertEquals(len(block), bs.get_aside_blocks_size())
  bs.close()