def save_sd_info(blob_manager, sd_hash, sd_info):
    """Ensure the stream descriptor blob and its stream records exist locally.

    If the sd blob is missing or unverified, (re)create it from ``sd_info`` and
    verify that the resulting hash matches ``sd_hash``. Then, if the stream is
    not yet known to storage, persist its metadata and blob list.

    Returns (via ``defer.returnValue``) the stream hash.
    Raises InvalidStreamDescriptorError when the recreated descriptor's hash
    does not match the expected ``sd_hash``.

    NOTE(review): presumably decorated with @defer.inlineCallbacks at the call
    site (it yields deferreds) — confirm against the full file.
    """
    if not blob_manager.blobs.get(sd_hash) or not blob_manager.blobs[sd_hash].get_is_verified():
        # sd blob absent or unverified: write a fresh descriptor from sd_info
        # and make sure it hashes to what the caller expects.
        descriptor_writer = BlobStreamDescriptorWriter(blob_manager)
        calculated_sd_hash = yield descriptor_writer.create_descriptor(sd_info)
        if calculated_sd_hash != sd_hash:
            raise InvalidStreamDescriptorError(
                "%s does not match calculated %s" % (sd_hash, calculated_sd_hash))
    stream_hash = yield f2d(blob_manager.storage.get_stream_hash_for_sd_hash(sd_hash))
    if not stream_hash:
        # Stream not in the database yet: store its blobs and metadata.
        log.debug("Saving info for %s", unhexlify(sd_info['stream_name']))
        stream_name = sd_info['stream_name']
        key = sd_info['key']
        stream_hash = sd_info['stream_hash']
        stream_blobs = sd_info['blobs']
        suggested_file_name = sd_info['suggested_file_name']
        yield f2d(blob_manager.storage.add_known_blobs(stream_blobs))
        yield f2d(blob_manager.storage.store_stream(stream_hash, sd_hash, stream_name,
                                                    key, suggested_file_name, stream_blobs))
    defer.returnValue(stream_hash)
def verify_stream_on_reflector():
    # The reflector server should now know about exactly our one stream.
    stored_streams = yield f2d(self.server_storage.get_all_streams())
    self.assertEqual(1, len(stored_streams))
    self.assertEqual(self.stream_hash, stored_streams[0])
    # Its stored blob hashes should match what we uploaded (the trailing
    # terminator entry in expected_blobs has no hash and is excluded).
    stream_blobs = yield f2d(self.server_storage.get_blobs_for_stream(self.stream_hash))
    actual_hashes = [blob.blob_hash for blob in stream_blobs if blob.blob_hash is not None]
    expected_hashes = [entry[0] for entry in self.expected_blobs[:-1] if entry[0] is not None]
    self.assertEqual(expected_hashes, actual_hashes)
    stored_sd_hash = yield f2d(
        self.server_storage.get_sd_blob_hash_for_stream(self.stream_hash))
    self.assertEqual(self.sd_hash, stored_sd_hash)
    # The sd blob and the stream's head blob should be marked for announcement.
    announce_list = yield f2d(self.server_storage.get_all_should_announce_blobs())
    self.assertSetEqual(set(announce_list), {self.sd_hash, expected_hashes[0]})
def test_delete_blob(self):
    # Insert one fake blob, confirm it is listed, delete it, confirm it is gone.
    fake_hash = random_lbry_hash()
    yield self.store_fake_blob(fake_hash)
    stored = yield f2d(self.storage.get_all_blob_hashes())
    self.assertEqual(stored, [fake_hash])
    yield f2d(self.storage.delete_blobs_from_db(stored))
    remaining = yield f2d(self.storage.get_all_blob_hashes())
    self.assertEqual(remaining, [])
def delete_data(self):
    # Collect every content blob hash in the stream plus its sd blob,
    # then hand the whole batch to the blob manager for deletion.
    crypt_blob_infos = yield f2d(self.storage.get_blobs_for_stream(self.stream_hash))
    hashes_to_delete = [info.blob_hash for info in crypt_blob_infos if info.blob_hash]
    descriptor_hash = yield f2d(self.storage.get_sd_blob_hash_for_stream(self.stream_hash))
    hashes_to_delete.append(descriptor_hash)
    yield self.blob_manager.delete_blobs(hashes_to_delete)
def verify_stream_on_reflector():
    # this protocol should not have any impact on stream info manager
    stored_streams = yield f2d(self.server_storage.get_all_streams())
    self.assertEqual(0, len(stored_streams))
    # there should be no should announce blobs here
    announce_hashes = yield f2d(self.server_storage.get_all_should_announce_blobs())
    self.assertEqual(0, len(announce_hashes))
def test_invalid_sort_produces_meaningful_errors(self):
    # Each bad sort key should surface an error naming the first missing field.
    cases = [
        (['meta.author'],
         "Failed to get 'meta.author', key 'meta' was not found."),
        (['metadata.foo.bar'],
         "Failed to get 'metadata.foo.bar', key 'foo' was not found."),
    ]
    for sort_options, expected_message in cases:
        with self.assertRaisesRegex(Exception, expected_message):
            yield f2d(self.test_daemon.jsonrpc_file_list(sort=sort_options))
def test_delete_stream(self):
    # Store a full fake stream, delete it, then verify nothing is left behind.
    target_hash = random_lbry_hash()
    yield self.test_store_stream(target_hash)
    yield f2d(self.storage.delete_stream(target_hash))
    remaining_streams = yield f2d(self.storage.get_all_streams())
    self.assertListEqual(remaining_streams, [])
    remaining_stream_blobs = yield f2d(self.storage.get_blobs_for_stream(target_hash))
    self.assertListEqual(remaining_stream_blobs, [])
    remaining_blob_hashes = yield f2d(self.storage.get_all_blob_hashes())
    self.assertListEqual(remaining_blob_hashes, [])
def stop(self):
    """Tear down the node: delete all managed files, stop services in
    dependency order, stop listening, and remove scratch dirs/files."""
    # Iterate over a snapshot: delete_lbry_file() removes entries from
    # lbry_file_manager.lbry_files, and removing from a list while iterating
    # it skips every other element.
    for lbry_file in list(self.lbry_file_manager.lbry_files):
        yield self.lbry_file_manager.delete_lbry_file(lbry_file)
    yield self.lbry_file_manager.stop()
    yield f2d(self.blob_manager.stop())
    yield f2d(self.storage.close())
    self.server_port.stopListening()
    rm_db_and_blob_dir(self.db_dir, self.blob_dir)
    if os.path.exists("test_file"):
        os.remove("test_file")
def test_store_stream(self, stream_hash=None):
    # Build a fake stream (sd blob + two content blobs) and verify that
    # storage reports it back consistently.
    stream_hash = stream_hash or random_lbry_hash()
    sd_hash = random_lbry_hash()
    content_blobs = [random_lbry_hash(), random_lbry_hash()]
    yield self.store_fake_blob(sd_hash)
    for content_hash in content_blobs:
        yield self.store_fake_blob(content_hash)
    yield self.store_fake_stream(stream_hash, sd_hash)
    for position, content_hash in enumerate(content_blobs, start=1):
        yield self.store_fake_stream_blob(stream_hash, content_hash, position)
    # The stream's blobs come back in order.
    stream_blobs = yield f2d(self.storage.get_blobs_for_stream(stream_hash))
    self.assertListEqual([b.blob_hash for b in stream_blobs], content_blobs)
    all_hashes = yield f2d(self.storage.get_all_blob_hashes())
    self.assertSetEqual(set(all_hashes), {sd_hash, *content_blobs})
    # Reading a second time yields the same ordered list.
    stream_blobs = yield f2d(self.storage.get_blobs_for_stream(stream_hash))
    self.assertListEqual([b.blob_hash for b in stream_blobs], content_blobs)
    # Mark the sd blob and the first content blob for announcement.
    yield f2d(self.storage.set_should_announce(sd_hash, 1, 1))
    yield f2d(self.storage.set_should_announce(content_blobs[0], 1, 1))
    announce_count = yield f2d(self.storage.count_should_announce_blobs())
    self.assertEqual(announce_count, 2)
    announce_hashes = yield f2d(self.storage.get_blobs_to_announce())
    self.assertSetEqual(set(announce_hashes), {sd_hash, content_blobs[0]})
    all_streams = yield f2d(self.storage.get_all_streams())
    self.assertListEqual(all_streams, [stream_hash])
def store_fake_stream_blob(self, stream_hash, blob_hash, blob_num, length=100, iv="DEADBEEF"):
    # Assemble the blob-info record; 'length' is only included when truthy.
    blob_info = dict(blob_hash=blob_hash, blob_num=blob_num, iv=iv)
    if length:
        blob_info['length'] = length
    yield f2d(self.storage.add_blobs_to_stream(stream_hash, [blob_info]))
def setUp(self):
    conf.initialize_settings(False)
    # Database and blob files each live in their own temp directory.
    self.db_dir = tempfile.mkdtemp()
    self.blob_dir = tempfile.mkdtemp()
    self.bm = DiskBlobManager(self.blob_dir, SQLiteStorage(':memory:'))
    self.peer = Peer('somehost', 22)
    # Open the in-memory database before any test body runs.
    yield f2d(self.bm.storage.open())
def test_ungenerous_data_and_no_fee(self):
    # The estimate for a free stream is just size (in MB) times the data rate.
    stream_size = 10000000
    data_rate = conf.ADJUSTABLE_SETTINGS['data_rate'][1]
    expected_cost = stream_size / 10**6 * data_rate
    daemon = get_test_daemon(generous=False)
    estimate = yield f2d(daemon.get_est_cost("test", stream_size))
    self.assertEqual(estimate, round(expected_cost, 1))
def delete_lbry_file(self, lbry_file, delete_file=False):
    """Remove a managed LBRY file: stop it, delete its blob data and stream
    records, and optionally remove the downloaded file from disk.

    :param lbry_file: a file currently tracked in ``self.lbry_files``
    :param delete_file: when True, also remove the file from the download dir
    :raises ValueError: if ``lbry_file`` is not managed by this manager
    Returns True via ``defer.returnValue``.
    """
    if lbry_file not in self.lbry_files:
        raise ValueError("Could not find that LBRY file")

    def wait_for_finished(count=2):
        # Poll up to `count` times, one second apart, until the file is no
        # longer mid-save; resolves True either way once polling ends.
        if count <= 0 or lbry_file.saving_status is False:
            return True
        else:
            return task.deferLater(reactor, 1, wait_for_finished, count=count - 1)

    # Capture the on-disk path before teardown mutates the file object.
    full_path = os.path.join(lbry_file.download_directory, lbry_file.file_name)
    try:
        yield lbry_file.stop()
    except (AlreadyStoppedError, CurrentlyStoppingError):
        # Already stopping/stopped elsewhere; give any in-flight save a
        # moment to settle before deleting data underneath it.
        yield wait_for_finished()
    self.lbry_files.remove(lbry_file)
    # Drop the claim-update callback registered for this stream, if any.
    if lbry_file.stream_hash in self.storage.content_claim_callbacks:
        del self.storage.content_claim_callbacks[lbry_file.stream_hash]
    yield lbry_file.delete_data()
    yield f2d(self.storage.delete_stream(lbry_file.stream_hash))
    if delete_file and os.path.isfile(full_path):
        os.remove(full_path)
    defer.returnValue(True)
def setUp(self):
    # Wire together the client-side stack around an in-memory database.
    mocks.mock_conf_settings(self)
    self.tmp_db_dir, self.tmp_blob_dir = mk_db_and_blob_dir()
    self.wallet = FakeWallet()
    self.peer_manager = PeerManager()
    self.peer_finder = FakePeerFinder(5553, self.peer_manager, 2)
    self.rate_limiter = DummyRateLimiter()
    self.sd_identifier = StreamDescriptorIdentifier()
    self.storage = SQLiteStorage(':memory:')
    self.blob_manager = DiskBlobManager(self.tmp_blob_dir, self.storage)
    self.prm = OnlyFreePaymentsManager()
    self.lbry_file_manager = EncryptedFileManager(
        self.peer_finder, self.rate_limiter, self.blob_manager, self.wallet,
        self.prm, self.storage, self.sd_identifier)
    # Open storage first, then let the file manager initialize from it.
    opening = f2d(self.storage.open())
    opening.addCallback(lambda _: f2d(self.lbry_file_manager.setup()))
    return opening
def reflect_lbry_files(self):
    # Re-reflect every local file whose stream the database flags for it,
    # bounding concurrency with a deferred semaphore.
    semaphore = defer.DeferredSemaphore(self.CONCURRENT_REFLECTS)
    sd_hashes_to_reflect = yield f2d(self.storage.get_streams_to_re_reflect())
    pending = [
        semaphore.run(reflect_file, lbry_file)
        for lbry_file in self.lbry_files
        if lbry_file.sd_hash in sd_hashes_to_reflect
    ]
    yield defer.DeferredList(pending)
def test_fee_and_ungenerous_data(self):
    # A paid stream's estimate is the data cost plus the stream's fee.
    stream_size = 10000000
    fake_fee_amount = 4.5
    data_rate = conf.ADJUSTABLE_SETTINGS['data_rate'][1]
    expected_cost = stream_size / 10**6 * data_rate + fake_fee_amount
    daemon = get_test_daemon(generous=False, with_fee=True)
    estimate = yield f2d(daemon.get_est_cost("test", stream_size))
    self.assertEqual(estimate, round(expected_cost, 1))
def tearDown(self):
    """Delete all managed files, stop the manager, close storage, clean up."""
    # Iterate over a snapshot: delete_lbry_file() removes entries from
    # lbry_file_manager.lbry_files, and removing from a list while iterating
    # it skips every other element.
    for lbry_file in list(self.lbry_file_manager.lbry_files):
        yield self.lbry_file_manager.delete_lbry_file(lbry_file)
    yield self.lbry_file_manager.stop()
    yield f2d(self.storage.close())
    shutil.rmtree(self.db_dir, ignore_errors=True)
    if os.path.exists("test_file"):
        os.remove("test_file")
def manage(self):
    # Without DHT contacts an announce would go nowhere, so bail out early.
    if not self.dht_node.contacts:
        log.info("Not ready to start announcing hashes")
        return
    blobs_to_announce = yield f2d(self.storage.get_blobs_to_announce())
    if blobs_to_announce:
        yield self.immediate_announce(blobs_to_announce)
    else:
        log.debug("Nothing to announce")
def _pay_key_fee(self, address, fee_lbc, name):
    # Reserve the points first; a None reservation means the wallet cannot
    # cover the fee, which is a hard failure for the download.
    log.info("Pay key fee %s --> %s", dewies_to_lbc(fee_lbc), address)
    reservation = self.wallet.reserve_points(address, fee_lbc)
    if reservation is None:
        raise InsufficientFundsError(
            'Unable to pay the key fee of {} for {}'.format(dewies_to_lbc(fee_lbc), name))
    return f2d(self.wallet.send_points_to_address(reservation, fee_lbc))
def test_supports_storage(self):
    # Save two supports per claim, then verify single- and multi-claim reads.
    claim_ids = [random_lbry_hash() for _ in range(10)]
    random_supports = [{
        "txid": random_lbry_hash(),
        "nout": i,
        "address": f"addr{i}",
        "amount": f"{i}.0"
    } for i in range(20)]
    expected_supports = {}
    for idx, claim_id in enumerate(claim_ids):
        chunk = random_supports[idx * 2:idx * 2 + 2]
        yield f2d(self.storage.save_supports(claim_id, chunk))
        for support in chunk:
            # Reads come back annotated with the owning claim_id.
            support['claim_id'] = claim_id
            expected_supports.setdefault(claim_id, []).append(support)
    single = yield f2d(self.storage.get_supports(claim_ids[0]))
    self.assertEqual(single, expected_supports[claim_ids[0]])
    combined = yield f2d(self.storage.get_supports(*claim_ids))
    for support in combined:
        self.assertIn(support, expected_supports[support['claim_id']])
def tearDown(self):
    """Stop the uploader, delete all managed files, and clean up scratch state."""
    yield self.uploader.stop()
    # Iterate over a snapshot: delete_lbry_file() removes entries from
    # lbry_file_manager.lbry_files, and removing from a list while iterating
    # it skips every other element.
    for lbry_file in list(self.lbry_file_manager.lbry_files):
        yield self.lbry_file_manager.delete_lbry_file(lbry_file)
    yield self.lbry_file_manager.stop()
    yield self.blob_manager.stop()
    yield f2d(self.storage.close())
    rm_db_and_blob_dir(self.db_dir, self.blob_dir)
    if os.path.exists("test_file"):
        os.remove("test_file")
def test_sort_by_nested_field(self):
    """Sorting by a dotted path reaches into nested metadata dicts."""
    # PEP 8 (E731): use a def, not a name-bound lambda, for a local helper.
    def extract_authors(file_list):
        # Pull the nested author field out of every result row.
        return [f['metadata']['author'] for f in file_list]

    sort_options = ['metadata.author']
    file_list = yield f2d(self.test_daemon.jsonrpc_file_list(sort=sort_options))
    self.assertEqual(self.test_authors, extract_authors(file_list))
    # Check that the list matches the expected in reverse when sorting in descending order.
    sort_options = ['metadata.author,desc']
    file_list = yield f2d(self.test_daemon.jsonrpc_file_list(sort=sort_options))
    self.assertEqual(list(reversed(self.test_authors)), extract_authors(file_list))
    # Check that the list is not sorted as expected when not sorted at all.
    file_list = yield f2d(self.test_daemon.jsonrpc_file_list())
    self.assertNotEqual(self.test_authors, extract_authors(file_list))
def setup(self):
    # Build a complete standalone upload node: storage, blob manager, file
    # manager, a TCP server exposing the blob/wallet query handlers, and one
    # generated file to serve. Resolves to the created file's sd hash.
    init_conf_windows()
    self.db_dir, self.blob_dir = mk_db_and_blob_dir()
    self.wallet = FakeWallet()
    self.peer_manager = PeerManager()
    self.rate_limiter = RateLimiter()
    # Optional upload throttle, configured by the test that built this uploader.
    if self.ul_rate_limit is not None:
        self.rate_limiter.set_ul_limit(self.ul_rate_limit)
    self.prm = OnlyFreePaymentsManager()
    self.storage = SQLiteStorage(':memory:')
    self.blob_manager = DiskBlobManager(self.blob_dir, self.storage)
    self.lbry_file_manager = EncryptedFileManager(
        FakePeerFinder(5553, self.peer_manager, 1), self.rate_limiter,
        self.blob_manager, self.wallet, self.prm, self.storage,
        StreamDescriptorIdentifier())
    # Strict bring-up order: storage, then blob manager, then file manager.
    yield f2d(self.storage.open())
    yield f2d(self.blob_manager.setup())
    yield f2d(self.lbry_file_manager.setup())
    # Query-handler ids 1-3: blob availability, blob requests, wallet info.
    query_handler_factories = {
        1: BlobAvailabilityHandlerFactory(self.blob_manager),
        2: BlobRequestHandlerFactory(self.blob_manager, self.wallet, self.prm, None),
        3: self.wallet.get_wallet_info_query_handler_factory(),
    }
    server_factory = ServerProtocolFactory(self.rate_limiter, query_handler_factories,
                                           self.peer_manager)
    self.server_port = reactor.listenTCP(5553, server_factory, interface="localhost")
    # Deterministic generated file content so the peer can verify blobs.
    test_file = GenFile(self.file_size, bytes(i for i in range(0, 64, 6)))
    lbry_file = yield create_lbry_file(self.blob_manager, self.storage, self.prm,
                                       self.lbry_file_manager, "test_file", test_file)
    defer.returnValue(lbry_file.sd_hash)
def _download(self, sd_blob, name, key_fee, txid, nout, file_name=None):
    # Create the downloader from the stream descriptor, pay the key fee,
    # persist the claim outpoint ("txid:nout"), then start the download.
    self.downloader = yield self._create_downloader(sd_blob, file_name=file_name)
    yield self.pay_key_fee(key_fee, name)
    yield f2d(self.storage.save_content_claim(
        self.downloader.stream_hash, "%s:%i" % (txid, nout)))
    self.finished_deferred = self.downloader.start()
    # Fire data_downloading_deferred as soon as the first bytes are written.
    self.downloader.download_manager.progress_manager.wrote_first_data.addCallback(
        self.data_downloading_deferred.callback)
    # Route overall completion/failure of the download to finish()/fail().
    self.finished_deferred.addCallbacks(
        lambda result: self.finish(result, name), self.fail)
def handle_queries(self, queries):
    # The first query from a peer must request a payment address; subsequent
    # queries are no-ops once an address has been handed out.
    if self.query_identifiers[0] in queries:
        unused_address = yield f2d(self.wallet.get_unused_address_for_peer(self.peer))
        self.address = unused_address
        return {'lbrycrd_address': unused_address}
    if self.address is None:
        raise Exception("Expected a request for an address, but did not receive one")
    return {}
def setUp(self):
    # Client-side stack plus a live uploader serving a 5209343-byte file;
    # the uploader's sd hash is what the test will download.
    mocks.mock_conf_settings(self)
    self.db_dir, self.blob_dir = mk_db_and_blob_dir()
    self.wallet = FakeWallet()
    self.peer_manager = PeerManager()
    self.peer_finder = FakePeerFinder(5553, self.peer_manager, 1)
    self.rate_limiter = RateLimiter()
    self.prm = OnlyFreePaymentsManager()
    self.storage = SQLiteStorage(':memory:')
    self.blob_manager = DiskBlobManager(self.blob_dir, self.storage)
    self.sd_identifier = StreamDescriptorIdentifier()
    self.lbry_file_manager = EncryptedFileManager(
        self.peer_finder, self.rate_limiter, self.blob_manager, self.wallet,
        self.prm, self.storage, self.sd_identifier)
    self.uploader = LbryUploader(5209343)
    self.sd_hash = yield self.uploader.setup()
    # Bring up local components in dependency order.
    yield f2d(self.storage.open())
    yield f2d(self.blob_manager.setup())
    yield f2d(self.lbry_file_manager.setup())
    yield add_lbry_file_to_sd_identifier(self.sd_identifier)
def download_stream(self, stream_hash, sd_hash):
    # Queue every blob in the stream (plus its sd blob and head blob) for
    # download, skipping hashes already queued, then kick off the downloader.
    crypt_blobs = yield f2d(self.blob_manager.storage.get_blobs_for_stream(stream_hash))
    new_hashes = [blob.blob_hash for blob in crypt_blobs
                  if blob.blob_hash and blob.blob_hash not in self.blob_hashes]
    self.blob_hashes.extend(new_hashes)
    if sd_hash not in self.sd_hashes:
        self.sd_hashes.append(sd_hash)
    head_blob_hash = crypt_blobs[0].blob_hash
    if head_blob_hash not in self.head_blob_hashes:
        self.head_blob_hashes.append(head_blob_hash)
    yield self.start()
def tearDown(self):
    """Tear down both the client and server stacks and remove scratch state."""
    # Iterate over snapshots: delete_lbry_file() removes entries from the
    # manager's lbry_files list, and removing while iterating skips elements.
    for lbry_file in list(self.client_lbry_file_manager.lbry_files):
        yield self.client_lbry_file_manager.delete_lbry_file(lbry_file)
    yield self.client_lbry_file_manager.stop()
    yield f2d(self.client_storage.close())
    self.reflector_port.stopListening()
    for lbry_file in list(self.server_lbry_file_manager.lbry_files):
        yield self.server_lbry_file_manager.delete_lbry_file(lbry_file)
    yield self.server_lbry_file_manager.stop()
    yield f2d(self.server_storage.close())
    # Directory removal can fail (e.g. files still held open on Windows);
    # skip the test rather than fail teardown. The exception value was
    # unused, so don't bind it.
    try:
        rm_db_and_blob_dir(self.client_db_dir, self.client_blob_dir)
    except Exception:
        raise unittest.SkipTest("TODO: fix this for windows")
    try:
        rm_db_and_blob_dir(self.server_db_dir, self.server_blob_dir)
    except Exception:
        raise unittest.SkipTest("TODO: fix this for windows")
    if os.path.exists("test_file"):
        os.remove("test_file")
def store_result(self, result):
    # A stream counts as fully reflected when every needed blob was sent
    # (or when nothing was needed at all).
    fully_reflected = (not self.needed_blobs
                       or len(self.reflected_blobs) == len(self.needed_blobs))
    recorded = f2d(self.blob_manager.storage.update_reflected_stream(
        self.sd_hash, self.transport.getPeer().host, fully_reflected))
    # Pass the original result through once the update is recorded.
    recorded.addCallback(lambda _: result)
    return recorded
def do_store(self, blob_hash):
    # Announce the blob to the DHT; persist the announce time only when at
    # least one node stored it. Always dequeue the hash afterwards.
    storing_node_ids = yield self.dht_node.announceHaveBlob(
        binascii.unhexlify(blob_hash))
    announce_time = self.clock.seconds()
    if storing_node_ids:
        result = (announce_time, storing_node_ids)
        yield f2d(self.storage.update_last_announced_blob(blob_hash, announce_time))
        log.debug("Stored %s to %i peers", blob_hash[:16], len(storing_node_ids))
    else:
        result = (None, [])
    self.hash_queue.remove(blob_hash)
    defer.returnValue(result)