def test_builder_build(self):
    """build() creates every seeded shard on disk; cleanup=True removes
    the shards again after building."""
    # generate shards for testing
    bucket = Builder(addresses["beta"], my_shard_size, my_max_size,
                     my_min_free_size)
    bucket.build(self.store_path)

    # see if the shards exist
    # (fix: dropped leftover debug print of each shard path)
    seeds = bucket.build_seeds(height)
    for seed in seeds:
        path = os.path.join(self.store_path, seed)
        self.assertTrue(os.path.exists(path))
    bucket.clean(self.store_path)

    # generate shards for testing, this time asking build() to clean up
    bucket = Builder(addresses["gamma"], my_shard_size, my_max_size,
                     my_min_free_size)
    bucket.build(self.store_path, cleanup=True)

    # see if the shards are deleted
    seeds = bucket.build_seeds(height)
    for seed in seeds:
        path = os.path.join(self.store_path, seed)
        self.assertFalse(os.path.exists(path))
def test_builder_build(self):
    """build() creates every seeded shard on disk; cleanup=True removes
    the shards again after building.

    NOTE(review): this method is defined twice in the file; the later
    definition shadows the earlier one — confirm and drop one copy.
    """
    # generate shards for testing
    bucket = Builder(addresses["beta"], my_shard_size, my_max_size,
                     my_min_free_size)
    bucket.build(self.store_path)

    # see if the shards exist
    # (fix: dropped leftover debug print of each shard path)
    seeds = bucket.build_seeds(height)
    for seed in seeds:
        path = os.path.join(self.store_path, seed)
        self.assertTrue(os.path.exists(path))
    bucket.clean(self.store_path)

    # generate shards for testing, this time asking build() to clean up
    bucket = Builder(addresses["gamma"], my_shard_size, my_max_size,
                     my_min_free_size)
    bucket.build(self.store_path, cleanup=True)

    # see if the shards are deleted
    seeds = bucket.build_seeds(height)
    for seed in seeds:
        path = os.path.join(self.store_path, seed)
        self.assertFalse(os.path.exists(path))
def test_builder_clean(self):
    """clean() deletes every shard that a prior build() produced."""
    # build a full set of shards first
    builder = Builder(addresses["delta"], my_shard_size, my_max_size,
                      my_min_free_size)
    builder.build(self.store_path)

    # every generated shard must be present on disk
    for shard_seed in builder.build_seeds(height):
        self.assertTrue(
            os.path.exists(os.path.join(self.store_path, shard_seed)))

    # clean command
    builder.clean(self.store_path)

    # every shard must now be gone
    for shard_seed in builder.build_seeds(height):
        self.assertFalse(
            os.path.exists(os.path.join(self.store_path, shard_seed)))
def test_builder_clean(self):
    """clean() deletes every shard that a prior build() produced.

    NOTE(review): this method is defined twice in the file; the later
    definition shadows the earlier one — confirm and drop one copy.
    """
    # generate shards for testing
    bucket = Builder(addresses["delta"], my_shard_size, my_max_size,
                     my_min_free_size)
    bucket.build(self.store_path)

    # resolve every shard seed to its on-disk location once
    shard_paths = [os.path.join(self.store_path, s)
                   for s in bucket.build_seeds(height)]

    # all shards exist after the build
    for shard_path in shard_paths:
        self.assertTrue(os.path.exists(shard_path))

    # clean command
    bucket.clean(self.store_path)

    # all shards are deleted after the clean
    for shard_path in shard_paths:
        self.assertFalse(os.path.exists(shard_path))
def test_builder_rebuilds(self):
    """build(rebuild=True) runs over pre-existing placeholder shard files."""
    builder = Builder(addresses["epsilon"], my_shard_size, my_max_size,
                      my_min_free_size)

    # generate empty placeholder files to be rebuilt
    for seed in builder.build_seeds(height):
        placeholder = os.path.join(self.store_path, seed)
        with open(placeholder, 'a'):
            os.utime(placeholder, None)  # touch: ensure file exists, bump mtime

    # rebuild all files
    builder.build(self.store_path, rebuild=True)
def test_builder_rebuilds(self):
    """build(rebuild=True) runs over pre-existing placeholder shard files.

    NOTE(review): this method is defined twice in the file; the later
    definition shadows the earlier one — confirm and drop one copy.
    """
    bucket = Builder(addresses["epsilon"], my_shard_size, my_max_size,
                     my_min_free_size)

    # touch an empty file for every seed so the rebuild has something to replace
    seeds = bucket.build_seeds(height)
    for seed in seeds:
        target = os.path.join(self.store_path, seed)
        with open(target, 'a'):
            os.utime(target, None)

    # rebuild all files
    bucket.build(self.store_path, rebuild=True)
def test_build_cont(self):
    """Re-running build() skips existing shards (faster than a cold
    build); repair=True regenerates only the deleted shards (slower than
    skip-all, faster than a cold build)."""
    max_size1 = 1024 * 1024 * 384
    max_size2 = 1024 * 1024 * 128

    # generate shards for testing (cold build, baseline timing)
    start_time = datetime.utcnow()
    bucket = Builder(addresses["epsilon"], my_shard_size, max_size1,
                     my_min_free_size)
    bucket.build(self.store_path)
    end_delta = datetime.utcnow() - start_time

    # should skip all shards and be faster
    start_time2 = datetime.utcnow()
    bucket = Builder(addresses["epsilon"], my_shard_size, max_size2,
                     my_min_free_size)
    bucket.build(self.store_path)
    end_delta2 = datetime.utcnow() - start_time2
    self.assertTrue(end_delta2 < end_delta)

    # delete ~10% of the shards at random
    # (fix: removed unused local `my_height = int(max_size2 / my_shard_size)`;
    # NOTE(review): it may have been intended as the build_seeds() argument
    # instead of the module-level `height` — confirm against Builder.)
    seeds = bucket.build_seeds(height)
    for seed in seeds:
        path = os.path.join(self.store_path, seed)
        if randint(0, 9) == 0:
            os.remove(path)

    # should rebuild missing shards and be slower than skip-all
    # but faster than a new build
    start_time3 = datetime.utcnow()
    bucket = Builder(addresses["epsilon"], my_shard_size, max_size2,
                     my_min_free_size)
    bucket.build(self.store_path, repair=True)
    end_delta3 = datetime.utcnow() - start_time3
    self.assertTrue(end_delta3 < end_delta)   # faster than new build
    self.assertTrue(end_delta3 > end_delta2)  # slower than skip all
def test_build_cont(self):
    """Re-running build() skips existing shards (faster than a cold
    build); repair=True regenerates only the deleted shards (slower than
    skip-all, faster than a cold build).

    NOTE(review): this method is defined twice in the file; the later
    definition shadows the earlier one — confirm and drop one copy.
    """
    max_size1 = 1024 * 1024 * 384
    max_size2 = 1024 * 1024 * 128

    # generate shards for testing (cold build, baseline timing)
    start_time = datetime.utcnow()
    bucket = Builder(addresses["epsilon"], my_shard_size, max_size1,
                     my_min_free_size)
    bucket.build(self.store_path)
    end_delta = datetime.utcnow() - start_time

    # should skip all shards and be faster
    start_time2 = datetime.utcnow()
    bucket = Builder(addresses["epsilon"], my_shard_size, max_size2,
                     my_min_free_size)
    bucket.build(self.store_path)
    end_delta2 = datetime.utcnow() - start_time2
    self.assertTrue(end_delta2 < end_delta)

    # delete ~10% of the shards at random
    # (fix: removed unused local `my_height = int(max_size2 / my_shard_size)`;
    # NOTE(review): it may have been intended as the build_seeds() argument
    # instead of the module-level `height` — confirm against Builder.)
    seeds = bucket.build_seeds(height)
    for seed in seeds:
        path = os.path.join(self.store_path, seed)
        if randint(0, 9) == 0:
            os.remove(path)

    # should rebuild missing shards and be slower than skip-all
    # but faster than a new build
    start_time3 = datetime.utcnow()
    bucket = Builder(addresses["epsilon"], my_shard_size, max_size2,
                     my_min_free_size)
    bucket.build(self.store_path, repair=True)
    end_delta3 = datetime.utcnow() - start_time3
    self.assertTrue(end_delta3 < end_delta)   # faster than new build
    self.assertTrue(end_delta3 > end_delta2)  # slower than skip all
def test_builder_audit(self):
    """audit() returns a truthy hash for intact shards and a falsy result
    when any shard in the audited block is corrupt or missing."""
    bucket = Builder(addresses["epsilon"], my_shard_size, 0,
                     my_min_free_size)

    # check last confirmed bitcoin hash
    # (fix: use assertGreaterEqual/assertFalse for informative failures
    # instead of assertTrue(x >= y) / assertTrue(x == False))
    btc_block = bucket.btc_last_confirmed_block()
    self.assertGreaterEqual(btc_block['confirmations'],
                            common.DEFAULT_MIN_CONFIRMATIONS)
    self.assertFalse(btc_block['is_orphan'])
    index = btc_block['block_no']
    block_pos = index % common.DEFAULT_FULL_AUDIT
    block_size = common.DEFAULT_BLOCK_SIZE

    # create empty files to skip to btc_index
    seeds = bucket.build_seeds(block_pos * block_size)
    for seed in seeds:
        path = os.path.join(self.store_path, seed)
        open(path, 'w').close()

    # generate shards for audit
    shard_size = my_shard_size * (block_pos + 1) * block_size
    bucket = Builder(addresses["epsilon"], my_shard_size, shard_size,
                     my_min_free_size)
    bucket.build(self.store_path)

    # audit possible
    good_hash = bucket.audit(self.store_path, btc_block['block_no'],
                             btc_block['blockhash'])
    self.assertTrue(good_hash)

    seeds = bucket.build_seeds((block_pos + 1) * block_size)

    # copy a bad file over the last shard for a bad audit
    path1 = os.path.join(self.store_path, seeds[-2])
    path2 = os.path.join(self.store_path, seeds[-1])
    shutil.copyfile(path1, path2)
    bad_hash = bucket.audit(self.store_path, btc_block['block_no'],
                            btc_block['blockhash'])
    self.assertNotEqual(good_hash, bad_hash)

    # write some bad data
    with open(path2, "a") as f:
        f.write("bad data is bad\n")

    # audit failed because last shard has bad data
    self.assertFalse(bucket.audit(self.store_path, btc_block['block_no'],
                                  btc_block['blockhash']))

    # remove last shard
    os.remove(path2)

    # audit failed because last shard missing
    self.assertFalse(bucket.audit(self.store_path, btc_block['block_no'],
                                  btc_block['blockhash']))

    # build last shard again
    bucket = Builder(addresses["epsilon"], my_shard_size, shard_size,
                     my_min_free_size)
    bucket.build(self.store_path)

    # audit possible
    good_hash = bucket.audit(self.store_path, btc_block['block_no'],
                             btc_block['blockhash'])
    self.assertTrue(good_hash)

    # remove first shard of that block
    # (assumes the audited block spans 80 seeds — confirm DEFAULT_BLOCK_SIZE)
    path1 = os.path.join(self.store_path, seeds[-80])
    os.remove(path1)

    # audit failed because first shard missing
    self.assertFalse(bucket.audit(self.store_path, btc_block['block_no'],
                                  btc_block['blockhash']))
def test_builder_audit(self):
    """audit() returns a truthy hash for intact shards and a falsy result
    when any shard in the audited block is corrupt or missing.

    NOTE(review): this method is defined twice in the file; the later
    definition shadows the earlier one — confirm and drop one copy.
    """
    bucket = Builder(addresses["epsilon"], my_shard_size, 0,
                     my_min_free_size)

    # check last confirmed bitcoin hash
    # (fix: use assertGreaterEqual/assertFalse for informative failures
    # instead of assertTrue(x >= y) / assertTrue(x == False))
    btc_block = bucket.btc_last_confirmed_block()
    self.assertGreaterEqual(btc_block['confirmations'],
                            common.DEFAULT_MIN_CONFIRMATIONS)
    self.assertFalse(btc_block['is_orphan'])
    index = btc_block['block_no']
    block_pos = index % common.DEFAULT_FULL_AUDIT
    block_size = common.DEFAULT_BLOCK_SIZE

    # create empty files to skip to btc_index
    seeds = bucket.build_seeds(block_pos * block_size)
    for seed in seeds:
        path = os.path.join(self.store_path, seed)
        open(path, 'w').close()

    # generate shards for audit
    shard_size = my_shard_size * (block_pos + 1) * block_size
    bucket = Builder(addresses["epsilon"], my_shard_size, shard_size,
                     my_min_free_size)
    bucket.build(self.store_path)

    # audit possible
    good_hash = bucket.audit(self.store_path, btc_block['block_no'],
                             btc_block['blockhash'])
    self.assertTrue(good_hash)

    seeds = bucket.build_seeds((block_pos + 1) * block_size)

    # copy a bad file over the last shard for a bad audit
    path1 = os.path.join(self.store_path, seeds[-2])
    path2 = os.path.join(self.store_path, seeds[-1])
    shutil.copyfile(path1, path2)
    bad_hash = bucket.audit(self.store_path, btc_block['block_no'],
                            btc_block['blockhash'])
    self.assertNotEqual(good_hash, bad_hash)

    # write some bad data
    with open(path2, "a") as f:
        f.write("bad data is bad\n")

    # audit failed because last shard has bad data
    self.assertFalse(bucket.audit(self.store_path, btc_block['block_no'],
                                  btc_block['blockhash']))

    # remove last shard
    os.remove(path2)

    # audit failed because last shard missing
    self.assertFalse(bucket.audit(self.store_path, btc_block['block_no'],
                                  btc_block['blockhash']))

    # build last shard again
    bucket = Builder(addresses["epsilon"], my_shard_size, shard_size,
                     my_min_free_size)
    bucket.build(self.store_path)

    # audit possible
    good_hash = bucket.audit(self.store_path, btc_block['block_no'],
                             btc_block['blockhash'])
    self.assertTrue(good_hash)

    # remove first shard of that block
    # (assumes the audited block spans 80 seeds — confirm DEFAULT_BLOCK_SIZE)
    path1 = os.path.join(self.store_path, seeds[-80])
    os.remove(path1)

    # audit failed because first shard missing
    self.assertFalse(bucket.audit(self.store_path, btc_block['block_no'],
                                  btc_block['blockhash']))