def test_delete_gatekeeper_appointments(gatekeeper):
    # delete_gatekeeper_appointments should delete the appointments from the users as long as both exist
    kept = {}
    doomed = {}

    # Populate the Gatekeeper with a handful of users, each holding a random number of appointments
    for _ in range(10):
        user_id = get_random_value_hex(16)
        # The exact UserInfo parameters are irrelevant for this test
        gatekeeper.registered_users[user_id] = UserInfo(available_slots=100, subscription_expiry=0)

        for _ in range(random.randint(0, 10)):
            uuid = get_random_value_hex(16)
            gatekeeper.registered_users[user_id].appointments[uuid] = 1

            # Randomly split the appointments between the delete / keep buckets
            if random.randint(0, 1) % 2:
                doomed[uuid] = user_id
            else:
                kept[uuid] = user_id

    # Wipe the selected subset
    Cleaner.delete_gatekeeper_appointments(gatekeeper, doomed)

    # Collect every appointment uuid the Gatekeeper still knows about
    remaining = [uuid for user in gatekeeper.registered_users.values() for uuid in user.appointments]

    # The deleted appointments must be gone while the kept ones must still be present
    assert not set(doomed).issubset(remaining)
    assert set(kept).issubset(remaining)
def test_delete_outdated_users(gatekeeper):
    # Tests the deletion of users whose subscription has outdated (subscription expires now)

    # Build some users, each with a single appointment, and register them in the gatekeeper
    users = {}
    current_height = gatekeeper.block_processor.get_block_count()

    for _ in range(10):
        appointments = {get_random_value_hex(32): Appointment(get_random_value_hex(32), None, None)}
        user_id = get_random_value_hex(16)
        user_info = UserInfo(available_slots=100, subscription_expiry=current_height, appointments=appointments)

        users[user_id] = user_info
        gatekeeper.registered_users[user_id] = user_info

    # The gatekeeper must report exactly the freshly created users as outdated at this height
    outdated_ids = gatekeeper.get_outdated_user_ids(current_height + gatekeeper.expiry_delta)
    assert outdated_ids == list(users.keys())

    # Delete them
    Cleaner.delete_outdated_users(outdated_ids, gatekeeper.registered_users, gatekeeper.user_db)

    # The deleted users must be gone from both memory and the db
    for user_id in outdated_ids:
        assert user_id not in gatekeeper.registered_users
        assert not gatekeeper.user_db.load_user(user_id)
def test_delete_appointments(db_manager):
    # Deleting appointments must wipe the data from memory as well as from the database
    for _ in range(ITERATIONS):
        appointments, locator_uuid_map = set_up_appointments(db_manager, MAX_ITEMS)
        targets = random.sample(list(appointments.keys()), k=ITEMS)
        target_set = set(targets)

        # Sanity check: everything we are about to delete exists beforehand
        uuids_in_map = list(flatten(locator_uuid_map.values()))
        assert target_set.issubset(appointments.keys())
        assert target_set.issubset(uuids_in_map)
        assert target_set.issubset(db_manager.load_watcher_appointments().keys())

        # Perform the deletion
        Cleaner.delete_appointments(targets, appointments, locator_uuid_map, db_manager)

        # The deleted uuids must be gone from the in-memory structures
        uuids_in_map = list(flatten(locator_uuid_map.values()))
        assert not target_set.issubset(appointments.keys())
        assert not target_set.issubset(uuids_in_map)

        # ... and from the database too
        assert not target_set.issubset(db_manager.load_watcher_appointments().keys())
def test_delete_trackers(db_manager):
    # Tests the deletion of trackers.
    # Completed and outdated trackers are deleted via the same method; only the logging message differs.
    height = 0

    for _ in range(ITERATIONS):
        trackers, tx_tracker_map = set_up_trackers(db_manager, MAX_ITEMS)
        picked = random.sample(list(trackers.keys()), k=ITEMS)

        # Delete the picked trackers ({uuid: confirmation_count})
        completed_trackers = dict.fromkeys(picked, 6)
        Cleaner.delete_trackers(completed_trackers, height, trackers, tx_tracker_map, db_manager)

        # The data must be gone from memory
        remaining_uuids = list(flatten(tx_tracker_map.values()))
        assert not set(completed_trackers).issubset(trackers)
        assert not set(completed_trackers).issubset(remaining_uuids)

        # ... and from the responder db
        assert not set(completed_trackers).issubset(db_manager.load_responder_trackers())

        # The watcher's data (appointment and triggered flag) must have been removed as well
        watcher_appointments = db_manager.load_watcher_appointments(include_triggered=True)
        triggered_flags = db_manager.load_all_triggered_flags()
        assert not set(completed_trackers).issubset(watcher_appointments)
        assert not set(completed_trackers).issubset(triggered_flags)
def test_delete_appointment_from_db(db_manager):
    # delete_appointment_from_db only touches the database: the in-memory structures are untouched.
    appointments, locator_uuid_map = set_up_appointments(db_manager, MAX_ITEMS)

    for uuid in list(appointments.keys()):
        Cleaner.delete_appointment_from_db(uuid, db_manager)

        # The appointment should have been deleted from the db, but not from memory
        # (the original comment stated the opposite of what the assertions check)
        assert uuid in appointments
        assert db_manager.load_watcher_appointment(uuid) is None
def do_watch(self):
    """
    Monitors the blockchain whilst there are pending trackers.

    This is the main method of the :obj:`Responder` and triggers tracker cleaning, rebroadcasting, reorg
    managing, etc.
    """

    # Distinguish fresh bootstraps from bootstraps from db
    if self.last_known_block is None:
        self.last_known_block = self.block_processor.get_best_block_hash()

    while True:
        block_hash = self.block_queue.get()
        block = self.block_processor.get_block(block_hash)
        logger.info("New block received", block_hash=block_hash, prev_block_hash=block.get("previousblockhash"))

        if len(self.trackers) > 0 and block is not None:
            txids = block.get("tx")

            if self.last_known_block == block.get("previousblockhash"):
                self.check_confirmations(txids)

                height = block.get("height")
                completed_trackers = self.get_completed_trackers(height)
                Cleaner.delete_completed_trackers(
                    completed_trackers, height, self.trackers, self.tx_tracker_map, self.db_manager
                )

                txs_to_rebroadcast = self.get_txs_to_rebroadcast()
                self.rebroadcast(txs_to_rebroadcast)  # NOTCOVERED

            else:
                logger.warning(
                    "Reorg found",
                    local_prev_block_hash=self.last_known_block,
                    remote_prev_block_hash=block.get("previousblockhash"),
                )

                # ToDo: #24-properly-handle-reorgs
                self.handle_reorgs(block_hash)

            # Clear the receipts issued in this block
            self.carrier.issued_receipts = {}

            # FIX: was `len(self.trackers) is 0` — identity comparison with an int literal is
            # implementation-dependent and raises a SyntaxWarning in modern CPython; use equality.
            if len(self.trackers) == 0:
                logger.info("No more pending trackers")

        # Register the last processed block for the responder
        self.db_manager.store_last_block_hash_responder(block_hash)
        self.last_known_block = block.get("hash")
        self.block_queue.task_done()
def test_delete_expired_appointment(db_manager):
    # Expired appointments must be removed from the in-memory structures
    for _ in range(ITERATIONS):
        appointments, locator_uuid_map = set_up_appointments(db_manager, MAX_ITEMS)
        expired = random.sample(list(appointments.keys()), k=ITEMS)

        Cleaner.delete_expired_appointments(expired, appointments, locator_uuid_map, db_manager)

        # None of the expired uuids should remain in memory
        assert not set(expired).issubset(appointments.keys())
def delete_appointments(self, appointments):
    """
    Proxy function to clean completed / outdated data from the gatekeeper. It uses the
    :obj:`Cleaner <teos.cleaner.Cleaner>`, but allows the :obj:`Watcher <teos.watcher.Watcher>` and the
    :obj:`Responder <teos.responder.Responder>` to call it without having to handle internal stuff from the
    :obj:`Gatekeeper`.

    Args:
        appointments (:obj:`dict`): A collection of appointments to be deleted.
    """

    # Take the write lock so registered_users is not read or modified mid-deletion
    with self.rw_lock.gen_wlock():
        Cleaner.delete_gatekeeper_appointments(appointments, self.registered_users, self.user_db)
def test_delete_completed_trackers_db_match(db_manager):
    # Completed trackers that exist in the db must be removed from memory
    height = 0

    for _ in range(ITERATIONS):
        trackers, tx_tracker_map = set_up_trackers(db_manager, MAX_ITEMS)
        chosen = random.sample(list(trackers.keys()), k=ITEMS)
        completed_trackers = dict.fromkeys(chosen, 6)

        Cleaner.delete_completed_trackers(completed_trackers, height, trackers, tx_tracker_map, db_manager)

        assert not set(completed_trackers).issubset(trackers.keys())
def test_delete_trackers_db_match(db_manager):
    # Completed and expired trackers are deleted using the same method. The only difference is the logging message
    height = 0

    for _ in range(ITERATIONS):
        trackers, tx_tracker_map = set_up_trackers(db_manager, MAX_ITEMS)
        chosen = random.sample(list(trackers.keys()), k=ITEMS)
        completed_trackers = dict.fromkeys(chosen, 6)

        Cleaner.delete_trackers(completed_trackers, height, trackers, tx_tracker_map, db_manager)

        assert not set(completed_trackers).issubset(trackers.keys())
def test_update_delete_db_locator_map(db_manager):
    # Removing a uuid from a locator map either shrinks the map or deletes it entirely
    appointments, locator_uuid_map = set_up_appointments(db_manager, MAX_ITEMS)

    for uuid, appointment in appointments.items():
        locator = appointment.get("locator")

        before = db_manager.load_locator_map(locator)
        Cleaner.update_delete_db_locator_map([uuid], locator, db_manager)
        after = db_manager.load_locator_map(locator)

        if after is None:
            # The whole map was removed: it must have existed beforehand
            assert before is not None
        else:
            # Otherwise only the uuid was dropped from the map
            assert uuid in before and uuid not in after
def test_delete_outdated_users(users_db_manager):
    # Tests the deletion of users whose subscription has outdated (subscription expires now)

    # Mock some users with appointments in the Gatekeeper (memory and db)
    registered_users = setup_users(users_db_manager, MAX_ITEMS)
    doomed = list(registered_users.keys())

    # Delete all of them
    Cleaner.delete_outdated_users(doomed, registered_users, users_db_manager)

    # None of the deleted users should remain, neither in memory nor in the db
    for user_id in doomed:
        assert user_id not in registered_users
        assert not users_db_manager.load_user(user_id)
def test_flag_triggered_appointments(db_manager):
    # Flagging appointments as triggered removes them from memory and records the flag in the db
    for _ in range(ITERATIONS):
        appointments, locator_uuid_map = set_up_appointments(db_manager, MAX_ITEMS)
        flagged = random.sample(list(appointments.keys()), k=ITEMS)
        size_before = len(appointments)

        Cleaner.flag_triggered_appointments(flagged, appointments, locator_uuid_map, db_manager)

        # Exactly ITEMS appointments must have been dropped from memory
        assert len(appointments) == size_before - ITEMS

        # Every flagged appointment must be marked as triggered in the db
        assert set(flagged).issubset(db_manager.load_all_triggered_flags())
def test_delete_completed_appointments(db_manager):
    # Completed appointments must be removed from memory and the db
    for _ in range(ITERATIONS):
        appointments, locator_uuid_map = set_up_appointments(db_manager, MAX_ITEMS)
        completed = random.sample(list(appointments.keys()), k=ITEMS)
        size_before = len(appointments)

        Cleaner.delete_completed_appointments(completed, appointments, locator_uuid_map, db_manager)

        # Exactly ITEMS appointments must be gone from memory
        assert len(appointments) == size_before - ITEMS

        # ... and none of them should remain in the db
        db_appointments = db_manager.load_watcher_appointments(include_triggered=True)
        assert not set(completed).issubset(db_appointments)
def test_flag_triggered_appointments(db_manager):
    # Test that when an appointment is flagged and triggered it is deleted from memory and the flags are added to the db
    for _ in range(ITERATIONS):
        appointments, locator_uuid_map = set_up_appointments(db_manager, MAX_ITEMS)
        triggered = random.sample(list(appointments.keys()), k=ITEMS)

        # Flag them
        Cleaner.flag_triggered_appointments(triggered, appointments, locator_uuid_map, db_manager)

        # Gone from memory
        assert not set(triggered).issubset(appointments)

        # ... and flagged as triggered in the database
        assert set(triggered).issubset(db_manager.load_all_triggered_flags())
def manage_subscription_expiry(self):
    """
    Manages the subscription expiry of the registered users. Subscriptions are not deleted straightaway for two
    purposes:

    - First, it gives time to the ``Watcher`` and the ``Responder`` to query the necessary data for housekeeping,
      and gives some reorg protection.
    - Second, it gives a grace time to the user to renew their subscription before it is irrevocably deleted.
    """

    # Consume block hashes until the ChainMonitor signals shutdown
    while True:
        block_hash = self.block_queue.get()

        # When the ChainMonitor is stopped, a final ChainMonitor.END_MESSAGE message is sent
        if block_hash == ChainMonitor.END_MESSAGE:
            break

        # Expired user deletion is delayed. Users are deleted when their subscription is outdated, not expired.
        block_height = self.block_processor.get_block(block_hash, blocking=True).get("height")
        self.update_outdated_users_cache(block_height)
        Cleaner.delete_outdated_users(self.get_outdated_user_ids(block_height), self.registered_users, self.user_db)
def test_delete_completed_trackers_no_db_match(db_manager):
    # Deletion must cope with trackers that have no db counterpart
    height = 0

    for _ in range(ITERATIONS):
        trackers, tx_tracker_map = set_up_trackers(db_manager, MAX_ITEMS)
        selected_trackers = random.sample(list(trackers.keys()), k=ITEMS)

        # Swap in fresh trackers that are not stored in the db but share a penalty_txid with one that is
        for uuid in selected_trackers[: ITEMS // 2]:
            penalty_txid = trackers[uuid].get("penalty_txid")
            dispute_txid = get_random_value_hex(32)
            locator = dispute_txid[:LOCATOR_LEN_HEX]
            new_uuid = uuid4().hex

            trackers[new_uuid] = {"locator": locator, "penalty_txid": penalty_txid}
            tx_tracker_map[penalty_txid].append(new_uuid)
            selected_trackers.append(new_uuid)

        # Add some completely random trackers as well
        for _ in range(ITEMS // 2):
            uuid = uuid4().hex
            penalty_txid = get_random_value_hex(32)
            dispute_txid = get_random_value_hex(32)
            locator = dispute_txid[:LOCATOR_LEN_HEX]

            trackers[uuid] = {"locator": locator, "penalty_txid": penalty_txid}
            tx_tracker_map[penalty_txid] = [uuid]
            selected_trackers.append(uuid)

        completed_trackers = dict.fromkeys(selected_trackers, 6)

        # The matching trackers must be deleted and the rest must not make the call fail
        Cleaner.delete_completed_trackers(completed_trackers, height, trackers, tx_tracker_map, db_manager)

        assert not set(completed_trackers).issubset(trackers.keys())
def test_delete_gatekeeper_appointments(users_db_manager):
    # The Cleaner must drop appointment data from the Gatekeeper structures, both in memory and in the db
    to_keep = {}
    to_delete = {}

    # Mock some users with appointments in the Gatekeeper (memory and db)
    registered_users = setup_users(users_db_manager, MAX_ITEMS)

    # Randomly split every appointment between the delete / keep buckets
    for user_id, user_info in registered_users.items():
        for uuid in user_info.appointments.keys():
            if random.randint(0, 1) % 2:
                to_delete[uuid] = user_id
            else:
                to_keep[uuid] = user_id

    # Delete the selected subset
    Cleaner.delete_gatekeeper_appointments(to_delete, registered_users, users_db_manager)

    # Gather every appointment the Gatekeeper still holds in memory
    in_memory = list(flatten(user.appointments for user in registered_users.values()))

    # Deleted appointments must be gone while the kept ones must remain
    assert not set(to_delete).issubset(in_memory)
    assert set(to_keep).issubset(in_memory)

    # Same check against the database
    stored_users = users_db_manager.load_all_users()
    in_db = list(flatten(user_data.get("appointments") for user_data in stored_users.values()))
    assert not set(to_delete).issubset(in_db)
    assert set(to_keep).issubset(in_db)
def do_watch(self):
    """
    Monitors the blockchain for reorgs and appointment ends.

    This is the main method of the :obj:`Responder` and triggers tracker cleaning, rebroadcasting, reorg
    managing, etc.
    """

    # Distinguish fresh bootstraps from bootstraps from db
    if self.last_known_block is None:
        self.last_known_block = self.block_processor.get_best_block_hash(blocking=True)
        self.db_manager.store_last_block_hash_responder(self.last_known_block)

    while True:
        block_hash = self.block_queue.get()

        # When the ChainMonitor is stopped, a final ChainMonitor.END_MESSAGE is sent
        if block_hash == ChainMonitor.END_MESSAGE:
            break

        block = self.block_processor.get_block(block_hash, blocking=True)
        self.logger.info("New block received", block_hash=block_hash, prev_block_hash=block.get("previousblockhash"))

        # Only do work when there are pending trackers and the block was actually fetched
        if len(self.trackers) > 0 and block is not None:
            txids = block.get("tx")

            # A matching previous block hash means the chain advanced normally (no reorg)
            if self.last_known_block == block.get("previousblockhash"):
                # All tracker mutations happen under the write lock
                with self.rw_lock.gen_wlock():
                    completed_trackers = self.get_completed_trackers()
                    outdated_trackers = self.get_outdated_trackers(block.get("height"))
                    # Snapshot the user ids before the trackers are deleted below
                    trackers_to_delete_gatekeeper = {
                        uuid: self.trackers[uuid].get("user_id") for uuid in completed_trackers
                    }

                    self.check_confirmations(txids)
                    Cleaner.delete_trackers(
                        completed_trackers, block.get("height"), self.trackers, self.tx_tracker_map, self.db_manager
                    )
                    # Outdated trackers go through the same deletion path, only the logging differs
                    Cleaner.delete_trackers(
                        outdated_trackers,
                        block.get("height"),
                        self.trackers,
                        self.tx_tracker_map,
                        self.db_manager,
                        outdated=True,
                    )

                    # Remove completed trackers from the Gatekeeper
                    self.gatekeeper.delete_appointments(trackers_to_delete_gatekeeper)

                    self.rebroadcast(self.get_txs_to_rebroadcast())  # NOTCOVERED

            else:
                self.logger.warning(
                    "Reorg found",
                    local_prev_block_hash=self.last_known_block,
                    remote_prev_block_hash=block.get("previousblockhash"),
                )

                # ToDo: #24-properly-handle-reorgs
                self.handle_reorgs(block_hash)

            # Clear the receipts issued in this block
            self.carrier.issued_receipts = {}

            if len(self.trackers) == 0:
                self.logger.info("No more pending trackers")

        # Register the last processed block for the responder
        self.db_manager.store_last_block_hash_responder(block_hash)
        self.last_known_block = block.get("hash")
        self.block_queue.task_done()
def do_watch(self):
    """
    Monitors the blockchain for channel breaches.

    This is the main method of the :obj:`Watcher` and the one in charge to pass appointments to the
    :obj:`Responder <teos.responder.Responder>` upon detecting a breach.
    """

    # Distinguish fresh bootstraps from bootstraps from db
    if self.last_known_block is None:
        self.last_known_block = self.block_processor.get_best_block_hash()
        self.db_manager.store_last_block_hash_watcher(self.last_known_block)

    # Initialise the locator cache with the last ``cache_size`` blocks.
    self.locator_cache.init(self.last_known_block, self.block_processor)

    while True:
        block_hash = self.block_queue.get()

        # When the ChainMonitor is stopped, a final ChainMonitor.END_MESSAGE message is sent
        if block_hash == ChainMonitor.END_MESSAGE:
            break

        block = self.block_processor.get_block(block_hash)
        self.logger.info("New block received", block_hash=block_hash, prev_block_hash=block.get("previousblockhash"))

        # If a reorg is detected, the cache is fixed to cover the last `cache_size` blocks of the new chain
        if self.last_known_block != block.get("previousblockhash"):
            self.locator_cache.fix(block_hash, self.block_processor)

        txids = block.get("tx")
        # Compute the locator for every transaction in the block and add them to the cache
        locator_txid_map = {compute_locator(txid): txid for txid in txids}
        self.locator_cache.update(block_hash, locator_txid_map)

        # All appointment mutations happen under the write lock
        with self.rw_lock.gen_wlock():
            if len(self.appointments) > 0 and locator_txid_map:
                outdated_appointments = self.gatekeeper.get_outdated_appointments(block["height"])
                # Make sure we only try to delete what is on the Watcher (some appointments may have been triggered)
                outdated_appointments = list(set(outdated_appointments).intersection(self.appointments.keys()))

                Cleaner.delete_outdated_appointments(
                    outdated_appointments, self.appointments, self.locator_uuid_map, self.db_manager
                )

                valid_breaches, invalid_breaches = self.filter_breaches(self.get_breaches(locator_txid_map))

                triggered_flags = []
                appointments_to_delete = []

                for uuid, breach in valid_breaches.items():
                    self.logger.info(
                        "Notifying responder and deleting appointment",
                        penalty_txid=breach["penalty_txid"],
                        locator=breach["locator"],
                        uuid=uuid,
                    )

                    receipt = self.responder.handle_breach(
                        uuid,
                        breach["locator"],
                        breach["dispute_txid"],
                        breach["penalty_txid"],
                        breach["penalty_rawtx"],
                        self.appointments[uuid].get("user_id"),
                        block_hash,
                    )

                    # FIXME: Only necessary because of the triggered appointment approach. Fix if it changes.
                    if receipt.delivered:
                        Cleaner.delete_appointment_from_memory(uuid, self.appointments, self.locator_uuid_map)
                        triggered_flags.append(uuid)
                    else:
                        appointments_to_delete.append(uuid)

                # Appointments are only flagged as triggered if they are delivered, otherwise they are just deleted.
                appointments_to_delete.extend(invalid_breaches)
                # Snapshot user ids before the appointments are deleted below
                appointments_to_delete_gatekeeper = {
                    uuid: self.appointments[uuid].get("user_id") for uuid in appointments_to_delete
                }

                self.db_manager.batch_create_triggered_appointment_flag(triggered_flags)

                Cleaner.delete_completed_appointments(
                    appointments_to_delete, self.appointments, self.locator_uuid_map, self.db_manager
                )

                # Remove invalid appointments from the Gatekeeper
                self.gatekeeper.delete_appointments(appointments_to_delete_gatekeeper)

                if not self.appointments:
                    self.logger.info("No more pending appointments")

        # Register the last processed block for the Watcher
        self.db_manager.store_last_block_hash_watcher(block_hash)
        self.last_known_block = block.get("hash")
        self.block_queue.task_done()
def do_watch(self):
    """
    Monitors the blockchain whilst there are pending appointments.

    This is the main method of the :obj:`Watcher` and the one in charge to pass appointments to the
    :obj:`Responder <teos.responder.Responder>` upon detecting a breach.
    """

    while True:
        block_hash = self.block_queue.get()
        block = self.block_processor.get_block(block_hash)
        logger.info("New block received", block_hash=block_hash, prev_block_hash=block.get("previousblockhash"))

        if len(self.appointments) > 0 and block is not None:
            txids = block.get("tx")

            # Appointments past their end_time plus the expiry delta are considered expired
            expired_appointments = [
                uuid
                for uuid, appointment_data in self.appointments.items()
                if block["height"] > appointment_data.get("end_time") + self.expiry_delta
            ]

            Cleaner.delete_expired_appointments(
                expired_appointments, self.appointments, self.locator_uuid_map, self.db_manager
            )

            valid_breaches, invalid_breaches = self.filter_valid_breaches(self.get_breaches(txids))

            triggered_flags = []
            appointments_to_delete = []

            for uuid, breach in valid_breaches.items():
                logger.info(
                    "Notifying responder and deleting appointment",
                    penalty_txid=breach["penalty_txid"],
                    locator=breach["locator"],
                    uuid=uuid,
                )

                receipt = self.responder.handle_breach(
                    uuid,
                    breach["locator"],
                    breach["dispute_txid"],
                    breach["penalty_txid"],
                    breach["penalty_rawtx"],
                    self.appointments[uuid].get("end_time"),
                    block_hash,
                )

                # FIXME: Only necessary because of the triggered appointment approach. Fix if it changes.
                if receipt.delivered:
                    Cleaner.delete_appointment_from_memory(uuid, self.appointments, self.locator_uuid_map)
                    triggered_flags.append(uuid)
                else:
                    appointments_to_delete.append(uuid)

            # Appointments are only flagged as triggered if they are delivered, otherwise they are just deleted.
            appointments_to_delete.extend(invalid_breaches)

            self.db_manager.batch_create_triggered_appointment_flag(triggered_flags)

            Cleaner.delete_completed_appointments(
                appointments_to_delete, self.appointments, self.locator_uuid_map, self.db_manager
            )

            # FIX: was `len(self.appointments) is 0` — identity comparison with an int literal is
            # implementation-dependent and raises a SyntaxWarning in modern CPython; use equality.
            if len(self.appointments) == 0:
                logger.info("No more pending appointments")

        # Register the last processed block for the watcher
        self.db_manager.store_last_block_hash_watcher(block_hash)
        self.block_queue.task_done()