class BlobComponent(Component):
    """Component wrapper that owns the daemon's DiskBlobManager lifecycle."""

    component_name = BLOB_COMPONENT
    depends_on = [DATABASE_COMPONENT, DHT_COMPONENT]

    def __init__(self, component_manager):
        super().__init__(component_manager)
        # Created lazily in start(); None until then.
        self.blob_manager = None

    @property
    def component(self):
        """The managed DiskBlobManager instance (None before start())."""
        return self.blob_manager

    def start(self):
        """Build the blob manager from the database and DHT components and set it up."""
        get = self.component_manager.get_component
        storage = get(DATABASE_COMPONENT)
        dht_node = get(DHT_COMPONENT)
        # NOTE(review): reaches into the DHT node's private _dataStore — confirm
        # no public accessor exists.
        self.blob_manager = DiskBlobManager(
            CS.get_blobfiles_dir(), storage, dht_node._dataStore)
        return self.blob_manager.setup()

    def stop(self):
        """Tear down the blob manager; returns its stop deferred."""
        return self.blob_manager.stop()

    @defer.inlineCallbacks
    def get_status(self):
        """Report the number of finished blobs (0 if not started yet)."""
        if not self.blob_manager:
            defer.returnValue({'finished_blobs': 0})
        count = yield self.blob_manager.storage.count_finished_blobs()
        defer.returnValue({'finished_blobs': count})
def download_temp_blob_from_peer(self, peer, timeout, blob_hash):
    """Download a blob into a throwaway directory, always cleaning up after.

    Returns the result of download_blob_from_peer; the temporary blob
    manager and its directory are removed even on failure.
    """
    scratch_dir = yield threads.deferToThread(tempfile.mkdtemp)
    # NOTE(review): scratch_dir is passed twice — looks like the
    # (blob_dir, db_dir) signature of the older DiskBlobManager; confirm
    # against the constructor actually in scope here.
    scratch_blob_manager = DiskBlobManager(scratch_dir, scratch_dir)
    try:
        download_result = yield self.download_blob_from_peer(
            peer, timeout, blob_hash, scratch_blob_manager)
    finally:
        # Cleanup runs regardless of download outcome.
        yield scratch_blob_manager.stop()
        yield threads.deferToThread(shutil.rmtree, scratch_dir)
    defer.returnValue(download_result)
class TestTransfer(unittest.TestCase):
    """End-to-end transfer test: upload a file via LbryUploader, download it
    back through the full stream pipeline, and verify the file contents."""

    @defer.inlineCallbacks
    def setUp(self):
        # Build the full downloader-side stack against a fake wallet/peer finder.
        mocks.mock_conf_settings(self)
        self.db_dir, self.blob_dir = mk_db_and_blob_dir()
        self.wallet = FakeWallet()
        self.peer_manager = PeerManager()
        self.peer_finder = FakePeerFinder(5553, self.peer_manager, 1)
        self.rate_limiter = RateLimiter()
        self.prm = OnlyFreePaymentsManager()
        self.storage = SQLiteStorage(self.db_dir)
        self.blob_manager = DiskBlobManager(self.blob_dir, self.storage)
        self.sd_identifier = StreamDescriptorIdentifier()
        self.lbry_file_manager = EncryptedFileManager(
            self.peer_finder, self.rate_limiter, self.blob_manager, self.wallet,
            self.prm, self.storage, self.sd_identifier)
        # The uploader serves a generated file of this fixed size; its sd hash
        # is what the test downloads.
        self.uploader = LbryUploader(5209343)
        self.sd_hash = yield self.uploader.setup()
        yield self.storage.setup()
        yield self.blob_manager.setup()
        yield self.lbry_file_manager.setup()
        yield add_lbry_file_to_sd_identifier(self.sd_identifier)

    @defer.inlineCallbacks
    def tearDown(self):
        # Stop services in reverse dependency order, then remove temp state.
        yield self.uploader.stop()
        lbry_files = self.lbry_file_manager.lbry_files
        for lbry_file in lbry_files:
            yield self.lbry_file_manager.delete_lbry_file(lbry_file)
        yield self.lbry_file_manager.stop()
        yield self.blob_manager.stop()
        yield self.storage.stop()
        rm_db_and_blob_dir(self.db_dir, self.blob_dir)
        if os.path.exists("test_file"):
            os.remove("test_file")

    @defer.inlineCallbacks
    def test_lbry_transfer(self):
        # Fetch the stream descriptor, build a downloader from its metadata,
        # run the download, then md5 the result against a known digest.
        sd_blob = yield download_sd_blob(
            self.sd_hash, self.blob_manager, self.peer_finder,
            self.rate_limiter, self.prm, self.wallet)
        metadata = yield self.sd_identifier.get_metadata_for_sd_blob(sd_blob)
        downloader = yield metadata.factories[0].make_downloader(
            metadata, self.prm.min_blob_data_payment_rate, self.prm,
            self.db_dir, download_mirrors=None)
        yield downloader.start()
        with open(os.path.join(self.db_dir, 'test_file'), 'rb') as f:
            hashsum = md5()
            hashsum.update(f.read())
        # Digest of the deterministic generated upload content.
        self.assertEqual(hashsum.hexdigest(), "4ca2aafb4101c1e42235aad24fbb83be")
def download_it(peer, timeout, blob_hash):
    """Download a blob from a single peer into a temp dir; if it turns out to
    be a stream descriptor, recursively download every content blob it lists.

    Always tears down the temporary blob manager and directory; returns True.
    """
    tmp_dir = yield threads.deferToThread(tempfile.mkdtemp)
    storage = SQLiteStorage(tmp_dir, reactor)
    yield storage.setup()
    tmp_blob_manager = DiskBlobManager(tmp_dir, storage)
    # Wallet config assembled from the global lbrynet settings; no servers,
    # so presumably this wallet is only used for local setup — TODO confirm.
    config = {'auto_connect': True}
    if conf.settings['lbryum_wallet_dir']:
        config['lbryum_wallet_dir'] = conf.settings['lbryum_wallet_dir']
    config['use_keyring'] = False
    config['blockchain_name'] = conf.settings['blockchain_name']
    config['lbryum_servers'] = []
    wallet = LbryWalletManager.from_lbrynet_config(config, storage)
    downloader = SinglePeerDownloader()
    downloader.setup(wallet)
    try:
        blob_downloaded = yield downloader.download_blob_from_peer(
            peer, timeout, blob_hash, tmp_blob_manager)
        if blob_downloaded:
            log.info("SUCCESS!")
            blob = yield tmp_blob_manager.get_blob(blob_hash)
            pprint(blob)
            if not blob.verified:
                log.error("except that its not verified....")
            else:
                # Retry reading the descriptor a few times: the blob may not
                # have hit disk yet (see the sleep below).
                reader = BlobStreamDescriptorReader(blob)
                info = None
                for x in range(0, 3):
                    try:
                        info = yield reader.get_info()
                    except ValueError:
                        pass
                    if info:
                        break
                    # there's some kind of race condition where it sometimes
                    # doesnt write the blob to disk in time
                    # NOTE(review): blocking sleep on the reactor thread.
                    time.sleep(0.1)
                if info is not None:
                    pprint(info)
                    for content_blob in info['blobs']:
                        # The terminating stream blob has no 'blob_hash' key.
                        if 'blob_hash' in content_blob:
                            yield download_it(peer, timeout, content_blob['blob_hash'])
        else:
            log.error("Download failed")
    finally:
        yield tmp_blob_manager.stop()
        yield threads.deferToThread(shutil.rmtree, tmp_dir)
    defer.returnValue(True)
def download_it(peer, timeout, blob_hash):
    """Older variant of download_it using the announcer-based DiskBlobManager
    and an in-memory LBRYum wallet; recursively fetches stream content blobs.

    Always tears down the temporary blob manager and directory; returns True.
    """
    tmp_dir = yield threads.deferToThread(tempfile.mkdtemp)
    announcer = DummyHashAnnouncer()
    # Older (announcer, blob_dir, db_dir) constructor: tmp_dir serves as both.
    tmp_blob_manager = DiskBlobManager(announcer, tmp_dir, tmp_dir)
    config = {'auto_connect': True}
    if conf.settings['lbryum_wallet_dir']:
        config['lbryum_path'] = conf.settings['lbryum_wallet_dir']
    storage = Wallet.InMemoryStorage()
    wallet = Wallet.LBRYumWallet(storage, config)
    downloader = SinglePeerDownloader()
    downloader.setup(wallet)
    try:
        blob_downloaded = yield downloader.download_blob_from_peer(
            peer, timeout, blob_hash, tmp_blob_manager)
        if blob_downloaded:
            log.info("SUCCESS!")
            blob = yield tmp_blob_manager.get_blob(blob_hash)
            pprint(blob)
            if not blob.verified:
                log.error("except that its not verified....")
            else:
                # Retry a few times: the blob may not be on disk yet.
                reader = BlobStreamDescriptorReader(blob)
                info = None
                for x in range(0, 3):
                    try:
                        info = yield reader.get_info()
                    except ValueError:
                        pass
                    if info:
                        break
                    # there's some kind of race condition where it sometimes
                    # doesnt write the blob to disk in time
                    time.sleep(0.1)
                if info is not None:
                    pprint(info)
                    for content_blob in info['blobs']:
                        # The terminating stream blob has no 'blob_hash' key.
                        if 'blob_hash' in content_blob:
                            yield download_it(peer, timeout, content_blob['blob_hash'])
        else:
            log.error("Download failed")
    finally:
        yield tmp_blob_manager.stop()
        yield threads.deferToThread(shutil.rmtree, tmp_dir)
    defer.returnValue(True)
class BlobManagerTest(unittest.TestCase):
    """Unit tests for DiskBlobManager: creation, deletion, open-blob
    protection, and should-announce bookkeeping."""

    @defer.inlineCallbacks
    def setUp(self):
        conf.initialize_settings(False)
        self.blob_dir = tempfile.mkdtemp()
        self.db_dir = tempfile.mkdtemp()
        self.bm = DiskBlobManager(self.blob_dir, SQLiteStorage(self.db_dir))
        self.peer = Peer('somehost', 22)
        yield self.bm.storage.setup()

    @defer.inlineCallbacks
    def tearDown(self):
        yield self.bm.stop()
        yield self.bm.storage.stop()
        shutil.rmtree(self.blob_dir)
        shutil.rmtree(self.db_dir)

    @defer.inlineCallbacks
    def _create_and_add_blob(self, should_announce=False):
        """Create a random blob, write it through the manager, verify it
        landed on disk and in the verified set; return its hash."""
        # create and add blob to blob manager
        data_len = random.randint(1, 1000)
        data = b''.join(
            random.choice(string.ascii_lowercase).encode()
            for _ in range(data_len))
        hashobj = get_lbry_hash_obj()
        hashobj.update(data)
        out = hashobj.hexdigest()
        blob_hash = out
        # create new blob
        yield self.bm.setup()
        blob = yield self.bm.get_blob(blob_hash, len(data))
        writer, finished_d = yield blob.open_for_writing(self.peer)
        yield writer.write(data)
        yield self.bm.blob_completed(blob, should_announce)
        # check to see if blob is there
        self.assertTrue(os.path.isfile(os.path.join(self.blob_dir, blob_hash)))
        blobs = yield self.bm.get_all_verified_blobs()
        self.assertTrue(blob_hash in blobs)
        defer.returnValue(blob_hash)

    @defer.inlineCallbacks
    def test_create_blob(self):
        blob_hashes = []
        # create a bunch of blobs
        for i in range(0, 10):
            blob_hash = yield self._create_and_add_blob()
            blob_hashes.append(blob_hash)
        blobs = yield self.bm.get_all_verified_blobs()
        self.assertEqual(10, len(blobs))

    @defer.inlineCallbacks
    def test_delete_blob(self):
        # create blob
        blob_hash = yield self._create_and_add_blob()
        blobs = yield self.bm.get_all_verified_blobs()
        self.assertEqual(len(blobs), 1)
        # delete blob: it must vanish from disk, the verified set,
        # storage, and the in-memory blob cache
        yield self.bm.delete_blobs([blob_hash])
        self.assertFalse(os.path.isfile(os.path.join(self.blob_dir, blob_hash)))
        blobs = yield self.bm.get_all_verified_blobs()
        self.assertEqual(len(blobs), 0)
        blobs = yield self.bm.storage.get_all_blob_hashes()
        self.assertEqual(len(blobs), 0)
        self.assertFalse(blob_hash in self.bm.blobs)
        # delete blob that was already deleted once
        yield self.bm.delete_blobs([blob_hash])
        # delete blob that does not exist, nothing will
        # happen
        blob_hash = random_lbry_hash()
        yield self.bm.delete_blobs([blob_hash])

    @defer.inlineCallbacks
    def test_delete_open_blob(self):
        # Test that a blob that is opened for writing will not be deleted
        # create blobs
        blob_hashes = []
        for i in range(0, 10):
            blob_hash = yield self._create_and_add_blob()
            blob_hashes.append(blob_hash)
        blobs = yield self.bm.get_all_verified_blobs()
        self.assertEqual(len(blobs), 10)
        # open the last blob
        blob = yield self.bm.get_blob(blob_hashes[-1])
        w, finished_d = yield blob.open_for_writing(self.peer)
        # schedule a close, just to leave the reactor clean
        finished_d.addBoth(lambda x: None)
        self.addCleanup(w.close)
        # delete the last blob and check if it still exists
        # (blob_hash still holds the last hash from the loop above)
        yield self.bm.delete_blobs([blob_hash])
        blobs = yield self.bm.get_all_verified_blobs()
        self.assertEqual(len(blobs), 10)
        self.assertTrue(blob_hashes[-1] in blobs)
        self.assertTrue(
            os.path.isfile(os.path.join(self.blob_dir, blob_hashes[-1])))

    @defer.inlineCallbacks
    def test_should_announce(self):
        # create blob with should announce
        blob_hash = yield self._create_and_add_blob(should_announce=True)
        out = yield self.bm.get_should_announce(blob_hash)
        self.assertTrue(out)
        count = yield self.bm.count_should_announce_blobs()
        self.assertEqual(1, count)
        # set should announce to False
        yield self.bm.set_should_announce(blob_hash, should_announce=False)
        out = yield self.bm.get_should_announce(blob_hash)
        self.assertFalse(out)
        count = yield self.bm.count_should_announce_blobs()
        self.assertEqual(0, count)
class BlobManagerTest(unittest.TestCase):
    """Tests for the announcer-era DiskBlobManager: blob creation, deletion,
    and protection of blobs that are open for writing."""

    def setUp(self):
        conf.initialize_settings()
        self.blob_dir = tempfile.mkdtemp()
        self.db_dir = tempfile.mkdtemp()
        hash_announcer = DummyHashAnnouncer()
        self.bm = DiskBlobManager(hash_announcer, self.blob_dir, self.db_dir)
        self.peer = Peer('somehost', 22)

    def tearDown(self):
        self.bm.stop()
        # BlobFile will try to delete itself in _close_writer
        # thus when calling rmtree we may get a FileNotFoundError
        # for the blob file
        shutil.rmtree(self.blob_dir, ignore_errors=True)
        shutil.rmtree(self.db_dir)

    @defer.inlineCallbacks
    def _create_and_add_blob(self):
        """Create a random blob, push it through the manager, verify it
        exists on disk and in the verified set; return its hash."""
        # create and add blob to blob manager
        data_len = random.randint(1, 1000)
        # FIX: string.lowercase is Python 2 only and hashing requires bytes
        # on Python 3; also the loop variable previously shadowed data_len.
        data = b''.join(
            random.choice(string.ascii_lowercase).encode()
            for _ in range(data_len))
        hashobj = get_lbry_hash_obj()
        hashobj.update(data)
        blob_hash = hashobj.hexdigest()
        # create new blob
        yield self.bm.setup()
        blob = yield self.bm.get_blob(blob_hash, len(data))
        writer, finished_d = yield blob.open_for_writing(self.peer)
        yield writer.write(data)
        yield self.bm.blob_completed(blob)
        yield self.bm.add_blob_to_upload_history(blob_hash, 'test', len(data))
        # check to see if blob is there
        self.assertTrue(os.path.isfile(os.path.join(self.blob_dir, blob_hash)))
        blobs = yield self.bm.get_all_verified_blobs()
        self.assertTrue(blob_hash in blobs)
        defer.returnValue(blob_hash)

    @defer.inlineCallbacks
    def test_create_blob(self):
        blob_hashes = []
        # create a bunch of blobs
        for i in range(0, 10):
            blob_hash = yield self._create_and_add_blob()
            blob_hashes.append(blob_hash)
        blobs = yield self.bm.get_all_verified_blobs()
        self.assertEqual(10, len(blobs))

    @defer.inlineCallbacks
    def test_delete_blob(self):
        # create blob
        blob_hash = yield self._create_and_add_blob()
        blobs = yield self.bm.get_all_verified_blobs()
        self.assertEqual(len(blobs), 1)
        # delete blob: gone from disk, verified set, and database
        yield self.bm.delete_blobs([blob_hash])
        self.assertFalse(os.path.isfile(os.path.join(self.blob_dir, blob_hash)))
        blobs = yield self.bm.get_all_verified_blobs()
        self.assertEqual(len(blobs), 0)
        blobs = yield self.bm._get_all_blob_hashes()
        self.assertEqual(len(blobs), 0)
        # delete blob that does not exist, nothing will
        # happen
        blob_hash = random_lbry_hash()
        yield self.bm.delete_blobs([blob_hash])

    @defer.inlineCallbacks
    def test_delete_open_blob(self):
        # Test that a blob that is opened for writing will not be deleted
        # create blobs
        blob_hashes = []
        for i in range(0, 10):
            blob_hash = yield self._create_and_add_blob()
            blob_hashes.append(blob_hash)
        blobs = yield self.bm.get_all_verified_blobs()
        self.assertEqual(len(blobs), 10)
        # open the last blob
        blob = yield self.bm.get_blob(blob_hashes[-1])
        writer, finished_d = yield blob.open_for_writing(self.peer)
        # delete the last blob and check if it still exists
        # (blob_hash still holds the last hash from the loop above)
        yield self.bm.delete_blobs([blob_hash])
        blobs = yield self.bm.get_all_verified_blobs()
        self.assertEqual(len(blobs), 10)
        self.assertTrue(blob_hashes[-1] in blobs)
        self.assertTrue(
            os.path.isfile(os.path.join(self.blob_dir, blob_hashes[-1])))
class CreateEncryptedFileTest(unittest.TestCase):
    """Tests stream creation: the produced sd blob must match the database's
    stream descriptor and known stream/sd hashes."""

    timeout = 5

    def setUp(self):
        mocks.mock_conf_settings(self)
        self.tmp_db_dir, self.tmp_blob_dir = mk_db_and_blob_dir()
        self.wallet = FakeWallet()
        self.peer_manager = PeerManager()
        self.peer_finder = FakePeerFinder(5553, self.peer_manager, 2)
        self.rate_limiter = DummyRateLimiter()
        self.sd_identifier = StreamDescriptorIdentifier()
        self.storage = SQLiteStorage(self.tmp_db_dir)
        self.blob_manager = DiskBlobManager(self.tmp_blob_dir, self.storage)
        self.prm = OnlyFreePaymentsManager()
        self.lbry_file_manager = EncryptedFileManager(
            self.peer_finder, self.rate_limiter, self.blob_manager, self.wallet,
            self.prm, self.storage, self.sd_identifier)
        d = self.storage.setup()
        d.addCallback(lambda _: self.lbry_file_manager.setup())
        return d

    @defer.inlineCallbacks
    def tearDown(self):
        yield self.lbry_file_manager.stop()
        yield self.blob_manager.stop()
        yield self.storage.stop()
        rm_db_and_blob_dir(self.tmp_db_dir, self.tmp_blob_dir)

    @defer.inlineCallbacks
    def create_file(self, filename):
        """Create a 3 MB lbry file with a fixed key; return the lbry file."""
        handle = mocks.GenFile(3 * MB, '1')
        # FIX: use floor division — under Python 3, `/` yields a float and
        # `'2' * 16.0` raises TypeError; `//` is identical under Python 2.
        key = '2' * (AES.block_size // 8)
        out = yield EncryptedFileCreator.create_lbry_file(
            self.blob_manager, self.storage, self.prm, self.lbry_file_manager,
            filename, handle, key, iv_generator())
        defer.returnValue(out)

    @defer.inlineCallbacks
    def test_can_create_file(self):
        # Known-good hashes for the deterministic generated content/key/iv.
        expected_stream_hash = "41e6b247d923d191b154fb6f1b8529d6ddd6a73d65c35" \
                               "7b1acb742dd83151fb66393a7709e9f346260a4f4db6de10c25"
        expected_sd_hash = "db043b44384c149126685990f6bb6563aa565ae331303d522" \
                           "c8728fe0534dd06fbcacae92b0891787ad9b68ffc8d20c1"
        filename = 'test.file'
        lbry_file = yield self.create_file(filename)
        sd_hash = yield self.storage.get_sd_blob_hash_for_stream(
            lbry_file.stream_hash)
        # read the sd blob file
        sd_blob = self.blob_manager.blobs[sd_hash]
        sd_reader = BlobStreamDescriptorReader(sd_blob)
        sd_file_info = yield sd_reader.get_info()
        # this comes from the database, the blobs returned are sorted
        sd_info = yield get_sd_info(self.storage, lbry_file.stream_hash,
                                    include_blobs=True)
        self.assertDictEqual(sd_info, sd_file_info)
        self.assertListEqual(sd_info['blobs'], sd_file_info['blobs'])
        self.assertEqual(sd_info['stream_hash'], expected_stream_hash)
        self.assertEqual(len(sd_info['blobs']), 3)
        self.assertNotEqual(sd_info['blobs'][0]['length'], 0)
        self.assertNotEqual(sd_info['blobs'][1]['length'], 0)
        # the stream terminator blob has zero length
        self.assertEqual(sd_info['blobs'][2]['length'], 0)
        self.assertEqual(expected_stream_hash, lbry_file.stream_hash)
        self.assertEqual(sd_hash, lbry_file.sd_hash)
        self.assertEqual(sd_hash, expected_sd_hash)
        blobs = yield self.blob_manager.get_all_verified_blobs()
        self.assertEqual(3, len(blobs))
        num_should_announce_blobs = yield self.blob_manager.count_should_announce_blobs()
        # head blob + sd blob should both be announced
        self.assertEqual(2, num_should_announce_blobs)

    @defer.inlineCallbacks
    def test_can_create_file_with_unicode_filename(self):
        expected_stream_hash = (
            'd1da4258f3ce12edb91d7e8e160d091d3ab1432c2e55a6352dce0'
            '2fd5adb86fe144e93e110075b5865fff8617776c6c0')
        filename = u'☃.file'
        lbry_file = yield self.create_file(filename)
        self.assertEqual(expected_stream_hash, lbry_file.stream_hash)
class CreateEncryptedFileTest(unittest.TestCase):
    """Python-3 variant of the stream-creation tests: the produced sd blob
    must match the database's stream descriptor and known hashes."""

    timeout = 5

    def setUp(self):
        mocks.mock_conf_settings(self)
        self.tmp_db_dir, self.tmp_blob_dir = mk_db_and_blob_dir()
        self.wallet = FakeWallet()
        self.peer_manager = PeerManager()
        self.peer_finder = FakePeerFinder(5553, self.peer_manager, 2)
        self.rate_limiter = DummyRateLimiter()
        self.sd_identifier = StreamDescriptorIdentifier()
        self.storage = SQLiteStorage(self.tmp_db_dir)
        self.blob_manager = DiskBlobManager(self.tmp_blob_dir, self.storage)
        self.prm = OnlyFreePaymentsManager()
        self.lbry_file_manager = EncryptedFileManager(
            self.peer_finder, self.rate_limiter, self.blob_manager, self.wallet,
            self.prm, self.storage, self.sd_identifier)
        d = self.storage.setup()
        d.addCallback(lambda _: self.lbry_file_manager.setup())
        return d

    @defer.inlineCallbacks
    def tearDown(self):
        yield self.lbry_file_manager.stop()
        yield self.blob_manager.stop()
        yield self.storage.stop()
        rm_db_and_blob_dir(self.tmp_db_dir, self.tmp_blob_dir)

    @defer.inlineCallbacks
    def create_file(self, filename):
        """Create a 3 MB lbry file with a fixed bytes key; return the file."""
        handle = mocks.GenFile(3 * MB, b'1')
        key = b'2' * (AES.block_size // 8)
        out = yield EncryptedFileCreator.create_lbry_file(
            self.blob_manager, self.storage, self.prm, self.lbry_file_manager,
            filename, handle, key, iv_generator())
        defer.returnValue(out)

    @defer.inlineCallbacks
    def test_can_create_file(self):
        # Known-good hashes for the deterministic generated content/key/iv.
        expected_stream_hash = "41e6b247d923d191b154fb6f1b8529d6ddd6a73d65c35" \
                               "7b1acb742dd83151fb66393a7709e9f346260a4f4db6de10c25"
        expected_sd_hash = "40c485432daec586c1a2d247e6c08d137640a5af6e81f3f652" \
                           "3e62e81a2e8945b0db7c94f1852e70e371d917b994352c"
        filename = 'test.file'
        lbry_file = yield self.create_file(filename)
        sd_hash = yield self.storage.get_sd_blob_hash_for_stream(
            lbry_file.stream_hash)
        # read the sd blob file
        sd_blob = self.blob_manager.blobs[sd_hash]
        sd_reader = BlobStreamDescriptorReader(sd_blob)
        sd_file_info = yield sd_reader.get_info()
        # this comes from the database, the blobs returned are sorted
        sd_info = yield get_sd_info(self.storage, lbry_file.stream_hash,
                                    include_blobs=True)
        self.maxDiff = None
        # Round-trip through JSON so bytes fields compare as their unicode form.
        unicode_sd_info = json.loads(
            json.dumps(sd_info, sort_keys=True, cls=JSONBytesEncoder))
        self.assertDictEqual(unicode_sd_info, sd_file_info)
        self.assertEqual(sd_info['stream_hash'], expected_stream_hash)
        self.assertEqual(len(sd_info['blobs']), 3)
        self.assertNotEqual(sd_info['blobs'][0]['length'], 0)
        self.assertNotEqual(sd_info['blobs'][1]['length'], 0)
        # the stream terminator blob has zero length
        self.assertEqual(sd_info['blobs'][2]['length'], 0)
        self.assertEqual(expected_stream_hash, lbry_file.stream_hash)
        self.assertEqual(sd_hash, lbry_file.sd_hash)
        self.assertEqual(sd_hash, expected_sd_hash)
        blobs = yield self.blob_manager.get_all_verified_blobs()
        self.assertEqual(3, len(blobs))
        num_should_announce_blobs = yield self.blob_manager.count_should_announce_blobs()
        # head blob + sd blob should both be announced
        self.assertEqual(2, num_should_announce_blobs)

    @defer.inlineCallbacks
    def test_can_create_file_with_unicode_filename(self):
        expected_stream_hash = (
            'd1da4258f3ce12edb91d7e8e160d091d3ab1432c2e55a6352dce0'
            '2fd5adb86fe144e93e110075b5865fff8617776c6c0')
        filename = '☃.file'
        lbry_file = yield self.create_file(filename)
        self.assertEqual(expected_stream_hash, lbry_file.stream_hash)
class LbryUploader(object):
    """Test helper that serves a generated file of a given size over a local
    TCP blob server so transfer tests can download it.

    setup() returns the sd hash of the served stream; stop() tears everything
    down and removes temp state.
    """

    def __init__(self, file_size, ul_rate_limit=None):
        # file_size: bytes of generated content to serve
        # ul_rate_limit: optional upload rate cap applied to the RateLimiter
        self.file_size = file_size
        self.ul_rate_limit = ul_rate_limit
        self.kill_check = None
        # these attributes get defined in `start`
        self.db_dir = None
        self.blob_dir = None
        self.wallet = None
        self.peer_manager = None
        self.rate_limiter = None
        self.prm = None
        self.storage = None
        self.blob_manager = None
        self.lbry_file_manager = None
        self.server_port = None

    @defer.inlineCallbacks
    def setup(self):
        """Build the uploader stack, start listening on port 5553, create the
        lbry file from generated content, and return its sd hash."""
        init_conf_windows()
        self.db_dir, self.blob_dir = mk_db_and_blob_dir()
        self.wallet = FakeWallet()
        self.peer_manager = PeerManager()
        self.rate_limiter = RateLimiter()
        if self.ul_rate_limit is not None:
            self.rate_limiter.set_ul_limit(self.ul_rate_limit)
        self.prm = OnlyFreePaymentsManager()
        self.storage = SQLiteStorage(self.db_dir)
        self.blob_manager = DiskBlobManager(self.blob_dir, self.storage)
        self.lbry_file_manager = EncryptedFileManager(
            FakePeerFinder(5553, self.peer_manager, 1), self.rate_limiter,
            self.blob_manager, self.wallet, self.prm, self.storage,
            StreamDescriptorIdentifier())
        yield self.storage.setup()
        yield self.blob_manager.setup()
        yield self.lbry_file_manager.setup()
        # Wire up the query handlers the server protocol dispatches to.
        query_handler_factories = {
            1: BlobAvailabilityHandlerFactory(self.blob_manager),
            2: BlobRequestHandlerFactory(self.blob_manager, self.wallet,
                                         self.prm, None),
            3: self.wallet.get_wallet_info_query_handler_factory(),
        }
        server_factory = ServerProtocolFactory(self.rate_limiter,
                                               query_handler_factories,
                                               self.peer_manager)
        self.server_port = reactor.listenTCP(5553, server_factory,
                                             interface="localhost")
        # Deterministic content: repeating 0,6,12,... byte pattern.
        test_file = GenFile(self.file_size, bytes(i for i in range(0, 64, 6)))
        lbry_file = yield create_lbry_file(self.blob_manager, self.storage,
                                           self.prm, self.lbry_file_manager,
                                           "test_file", test_file)
        defer.returnValue(lbry_file.sd_hash)

    @defer.inlineCallbacks
    def stop(self):
        """Delete served files, stop all services, close the port, and
        remove temporary directories/files."""
        lbry_files = self.lbry_file_manager.lbry_files
        for lbry_file in lbry_files:
            yield self.lbry_file_manager.delete_lbry_file(lbry_file)
        yield self.lbry_file_manager.stop()
        yield self.blob_manager.stop()
        yield self.storage.stop()
        self.server_port.stopListening()
        rm_db_and_blob_dir(self.db_dir, self.blob_dir)
        if os.path.exists("test_file"):
            os.remove("test_file")
class BlobManagerTest(unittest.TestCase):
    """Tests for the announcer-era DiskBlobManager, including the
    should-announce bookkeeping."""

    def setUp(self):
        conf.initialize_settings()
        self.blob_dir = tempfile.mkdtemp()
        self.db_dir = tempfile.mkdtemp()
        hash_announcer = DummyHashAnnouncer()
        self.bm = DiskBlobManager(hash_announcer, self.blob_dir, self.db_dir)
        self.peer = Peer('somehost', 22)

    def tearDown(self):
        self.bm.stop()
        # BlobFile will try to delete itself in _close_writer
        # thus when calling rmtree we may get a FileNotFoundError
        # for the blob file
        shutil.rmtree(self.blob_dir, ignore_errors=True)
        shutil.rmtree(self.db_dir)

    @defer.inlineCallbacks
    def _create_and_add_blob(self, should_announce=False):
        """Create a random blob, push it through the manager, verify it
        exists on disk and in the verified set; return its hash."""
        # create and add blob to blob manager
        data_len = random.randint(1, 1000)
        # FIX: string.lowercase is Python 2 only and hashing requires bytes
        # on Python 3; also the loop variable previously shadowed data_len.
        data = b''.join(
            random.choice(string.ascii_lowercase).encode()
            for _ in range(data_len))
        hashobj = get_lbry_hash_obj()
        hashobj.update(data)
        blob_hash = hashobj.hexdigest()
        # create new blob
        yield self.bm.setup()
        blob = yield self.bm.get_blob(blob_hash, len(data))
        writer, finished_d = yield blob.open_for_writing(self.peer)
        yield writer.write(data)
        yield self.bm.blob_completed(blob, should_announce)
        yield self.bm.add_blob_to_upload_history(blob_hash, 'test', len(data))
        # check to see if blob is there
        self.assertTrue(os.path.isfile(os.path.join(self.blob_dir, blob_hash)))
        blobs = yield self.bm.get_all_verified_blobs()
        self.assertTrue(blob_hash in blobs)
        defer.returnValue(blob_hash)

    @defer.inlineCallbacks
    def test_create_blob(self):
        blob_hashes = []
        # create a bunch of blobs
        for i in range(0, 10):
            blob_hash = yield self._create_and_add_blob()
            blob_hashes.append(blob_hash)
        blobs = yield self.bm.get_all_verified_blobs()
        self.assertEqual(10, len(blobs))

    @defer.inlineCallbacks
    def test_delete_blob(self):
        # create blob
        blob_hash = yield self._create_and_add_blob()
        blobs = yield self.bm.get_all_verified_blobs()
        self.assertEqual(len(blobs), 1)
        # delete blob: gone from disk, verified set, database, and cache
        yield self.bm.delete_blobs([blob_hash])
        self.assertFalse(os.path.isfile(os.path.join(self.blob_dir, blob_hash)))
        blobs = yield self.bm.get_all_verified_blobs()
        self.assertEqual(len(blobs), 0)
        blobs = yield self.bm._get_all_blob_hashes()
        self.assertEqual(len(blobs), 0)
        self.assertFalse(blob_hash in self.bm.blobs)
        # delete blob that was already deleted once
        yield self.bm.delete_blobs([blob_hash])
        # delete blob that does not exist, nothing will
        # happen
        blob_hash = random_lbry_hash()
        yield self.bm.delete_blobs([blob_hash])

    @defer.inlineCallbacks
    def test_delete_open_blob(self):
        # Test that a blob that is opened for writing will not be deleted
        # create blobs
        blob_hashes = []
        for i in range(0, 10):
            blob_hash = yield self._create_and_add_blob()
            blob_hashes.append(blob_hash)
        blobs = yield self.bm.get_all_verified_blobs()
        self.assertEqual(len(blobs), 10)
        # open the last blob
        blob = yield self.bm.get_blob(blob_hashes[-1])
        writer, finished_d = yield blob.open_for_writing(self.peer)
        # delete the last blob and check if it still exists
        # (blob_hash still holds the last hash from the loop above)
        yield self.bm.delete_blobs([blob_hash])
        blobs = yield self.bm.get_all_verified_blobs()
        self.assertEqual(len(blobs), 10)
        self.assertTrue(blob_hashes[-1] in blobs)
        self.assertTrue(
            os.path.isfile(os.path.join(self.blob_dir, blob_hashes[-1])))

    @defer.inlineCallbacks
    def test_should_announce(self):
        # create blob with should announce
        blob_hash = yield self._create_and_add_blob(should_announce=True)
        out = yield self.bm.get_should_announce(blob_hash)
        self.assertTrue(out)
        count = yield self.bm.count_should_announce_blobs()
        self.assertEqual(1, count)
        # set should announce to False
        yield self.bm.set_should_announce(blob_hash, should_announce=False)
        out = yield self.bm.get_should_announce(blob_hash)
        self.assertFalse(out)
        count = yield self.bm.count_should_announce_blobs()
        self.assertEqual(0, count)