def test_delete_gatekeeper_appointments(gatekeeper):
    # delete_gatekeeper_appointments should delete the appointments from the users as long as both the user and the appointment exist

    appointments_not_to_delete = {}
    appointments_to_delete = {}
    # Let's add some users and appointments to the Gatekeeper
    for _ in range(10):
        user_id = get_random_value_hex(16)
        # The UserInfo params do not matter much here
        gatekeeper.registered_users[user_id] = UserInfo(available_slots=100,
                                                        subscription_expiry=0)
        for _ in range(random.randint(0, 10)):
            # Add some appointments
            uuid = get_random_value_hex(16)
            gatekeeper.registered_users[user_id].appointments[uuid] = 1

            if random.randint(0, 1) % 2:
                appointments_to_delete[uuid] = user_id
            else:
                appointments_not_to_delete[uuid] = user_id

    # Now let's delete roughly half of them
    Cleaner.delete_gatekeeper_appointments(gatekeeper, appointments_to_delete)

    all_appointments_gatekeeper = []
    # Let's get all the appointments in the Gatekeeper
    for user_id, user in gatekeeper.registered_users.items():
        all_appointments_gatekeeper.extend(user.appointments)

    # Check that the deleted appointments are no longer in the Gatekeeper, but the rest still are
    assert not set(appointments_to_delete).issubset(
        all_appointments_gatekeeper)
    assert set(appointments_not_to_delete).issubset(
        all_appointments_gatekeeper)
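An illustrative sketch (not the teos implementation) of the behaviour the assertions above rely on, under the (gatekeeper, appointments) calling convention used in this example:

# Illustrative sketch only: remove each appointment from its owning user, provided both the
# user and the appointment exist, which is what the test above asserts.
def delete_gatekeeper_appointments_sketch(gatekeeper, appointments):
    for uuid, user_id in appointments.items():
        user = gatekeeper.registered_users.get(user_id)
        if user is not None and uuid in user.appointments:
            user.appointments.pop(uuid)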
Example 2
    def delete_appointments(self, appointments):
        """
        Proxy function to clean completed / outdated data from the gatekeeper. It uses the
        :obj:`Cleaner <teos.cleaner.Cleaner>`, but allows the :obj:`Watcher <teos.watcher.Watcher>` and the
        :obj:`Responder <teos.responder.Responder>` to call it without having to deal with the internals of the
        :obj:`Gatekeeper`.

        Args:
            appointments (:obj:`dict`): A dict of appointments to be deleted, given as ``uuid:user_id`` pairs.
        """
        with self.rw_lock.gen_wlock():
            Cleaner.delete_gatekeeper_appointments(appointments, self.registered_users, self.user_db)
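A minimal, self-contained sketch of the proxy pattern above, assuming rw_lock comes from the readerwriterlock package (as the gen_wlock() call suggests) and keeping only an in-memory map; TinyGatekeeper and its fields are hypothetical and for illustration only:

from readerwriterlock import rwlock


class TinyGatekeeper:
    # Hypothetical stand-in, not the teos Gatekeeper: a write lock guards the in-memory
    # user map while appointments are removed.
    def __init__(self):
        self.registered_users = {}  # user_id -> set of appointment uuids
        self.rw_lock = rwlock.RWLockWrite()

    def delete_appointments(self, appointments):
        # appointments: dict of uuid -> user_id, as built by the Watcher/Responder examples below
        with self.rw_lock.gen_wlock():
            for uuid, user_id in appointments.items():
                self.registered_users.get(user_id, set()).discard(uuid)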
Example 3
def test_delete_gatekeeper_appointments(users_db_manager):
    # Tests that the Cleaner properly deletes the appointment data from the Gatekeeper structures (both memory and db)
    appointments_not_to_delete = {}
    appointments_to_delete = {}

    # Let's mock adding some users and appointments to the Gatekeeper (memory and db)
    registered_users = setup_users(users_db_manager, MAX_ITEMS)

    for user_id, user_info in registered_users.items():
        for uuid in user_info.appointments.keys():
            if random.randint(0, 1) % 2:
                appointments_to_delete[uuid] = user_id
            else:
                appointments_not_to_delete[uuid] = user_id

    # Now let's delete roughly half of them
    Cleaner.delete_gatekeeper_appointments(appointments_to_delete,
                                           registered_users, users_db_manager)

    # Let's get all the appointments in the Gatekeeper
    all_appointments_gatekeeper = list(
        flatten(user.appointments for _, user in registered_users.items()))

    # Check that the deleted appointments are no longer in the Gatekeeper, but the rest still are
    assert not set(appointments_to_delete).issubset(
        all_appointments_gatekeeper)
    assert set(appointments_not_to_delete).issubset(
        all_appointments_gatekeeper)

    # Also check in the database
    db_user_data = users_db_manager.load_all_users()
    all_appointments_db = [
        user_data.get("appointments") for user_data in db_user_data.values()
    ]
    all_appointments_db = list(flatten(all_appointments_db))
    assert not set(appointments_to_delete).issubset(all_appointments_db)
    assert set(appointments_not_to_delete).issubset(all_appointments_db)
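setup_users is a test helper not shown in this excerpt. A hypothetical stand-in, assuming it mirrors the manual setup from the first test and assuming store_user and to_dict exist on the db manager and UserInfo respectively (both are assumptions):

import random

def setup_users(users_db_manager, total_users):
    # Hypothetical helper for illustration: register `total_users` users, each with a few
    # appointments, both in memory and in the users db. get_random_value_hex and UserInfo
    # are the same test helpers used in the first example; store_user/to_dict are assumed APIs.
    registered_users = {}
    for _ in range(total_users):
        user_id = get_random_value_hex(16)
        user_info = UserInfo(available_slots=100, subscription_expiry=0)
        for _ in range(random.randint(1, 10)):
            user_info.appointments[get_random_value_hex(16)] = 1
        registered_users[user_id] = user_info
        users_db_manager.store_user(user_id, user_info.to_dict())
    return registered_users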
Example 4
    def do_watch(self):
        """
        Monitors the blockchain for channel breaches.

        This is the main method of the :obj:`Watcher` and the one in charge to pass appointments to the
        :obj:`Responder <teos.responder.Responder>` upon detecting a breach.
        """

        # Distinguish fresh bootstraps from bootstraps from db
        if self.last_known_block is None:
            self.last_known_block = self.block_processor.get_best_block_hash()
            self.db_manager.store_last_block_hash_watcher(
                self.last_known_block)

        # Initialise the locator cache with the last ``cache_size`` blocks.
        self.locator_cache.init(self.last_known_block, self.block_processor)

        while True:
            block_hash = self.block_queue.get()
            block = self.block_processor.get_block(block_hash)
            logger.info("New block received",
                        block_hash=block_hash,
                        prev_block_hash=block.get("previousblockhash"))

            # If a reorg is detected, the cache is fixed to cover the last `cache_size` blocks of the new chain
            if self.last_known_block != block.get("previousblockhash"):
                self.locator_cache.fix(block_hash, self.block_processor)

            txids = block.get("tx")
            # Compute the locator for every transaction in the block and add them to the cache
            locator_txid_map = {compute_locator(txid): txid for txid in txids}
            self.locator_cache.update(block_hash, locator_txid_map)

            if len(self.appointments) > 0 and locator_txid_map:
                expired_appointments = self.gatekeeper.get_expired_appointments(
                    block["height"])
                # Make sure we only try to delete what is on the Watcher (some appointments may have been triggered)
                expired_appointments = list(
                    set(expired_appointments).intersection(
                        self.appointments.keys()))

                # Keep track of the expired appointments before deleting them from memory
                appointments_to_delete_gatekeeper = {
                    uuid: self.appointments[uuid].get("user_id")
                    for uuid in expired_appointments
                }

                Cleaner.delete_expired_appointments(expired_appointments,
                                                    self.appointments,
                                                    self.locator_uuid_map,
                                                    self.db_manager)

                valid_breaches, invalid_breaches = self.filter_breaches(
                    self.get_breaches(locator_txid_map))

                triggered_flags = []
                appointments_to_delete = []

                for uuid, breach in valid_breaches.items():
                    logger.info(
                        "Notifying responder and deleting appointment",
                        penalty_txid=breach["penalty_txid"],
                        locator=breach["locator"],
                        uuid=uuid,
                    )

                    receipt = self.responder.handle_breach(
                        uuid,
                        breach["locator"],
                        breach["dispute_txid"],
                        breach["penalty_txid"],
                        breach["penalty_rawtx"],
                        self.appointments[uuid].get("user_id"),
                        block_hash,
                    )

                    # FIXME: Only necessary because of the triggered appointment approach. Fix if it changes.

                    if receipt.delivered:
                        Cleaner.delete_appointment_from_memory(
                            uuid, self.appointments, self.locator_uuid_map)
                        triggered_flags.append(uuid)
                    else:
                        appointments_to_delete.append(uuid)

                # Appointments are only flagged as triggered if they are delivered, otherwise they are just deleted.
                appointments_to_delete.extend(invalid_breaches)
                self.db_manager.batch_create_triggered_appointment_flag(
                    triggered_flags)

                # Update the dictionary with the completed appointments
                appointments_to_delete_gatekeeper.update({
                    uuid: self.appointments[uuid].get("user_id")
                    for uuid in appointments_to_delete
                })

                Cleaner.delete_completed_appointments(appointments_to_delete,
                                                      self.appointments,
                                                      self.locator_uuid_map,
                                                      self.db_manager)

                # Remove expired and completed appointments from the Gatekeeper
                Cleaner.delete_gatekeeper_appointments(
                    self.gatekeeper, appointments_to_delete_gatekeeper)

                if len(self.appointments) == 0:
                    logger.info("No more pending appointments")

            # Register the last processed block for the Watcher
            self.db_manager.store_last_block_hash_watcher(block_hash)
            self.last_known_block = block.get("hash")
            self.block_queue.task_done()
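get_breaches and filter_breaches are not shown in this excerpt. Conceptually, breach detection reduces to intersecting the locators computed for the block with the locators the Watcher tracks in locator_uuid_map; a rough sketch, illustrative only and not the teos code:

def get_breaches_sketch(locator_txid_map, locator_uuid_map):
    # Illustrative only: any locator seen in the block that the Watcher is tracking is a breach.
    breach_locators = set(locator_txid_map).intersection(locator_uuid_map)
    return {locator: locator_txid_map[locator] for locator in breach_locators}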
Example 5
    def do_watch(self):
        """
        Monitors the blockchain for reorgs and appointment ends.

        This is the main method of the :obj:`Responder` and triggers tracker cleaning, rebroadcasting, reorg managing,
        etc.
        """

        # Distinguish fresh bootstraps from bootstraps from db
        if self.last_known_block is None:
            self.last_known_block = self.block_processor.get_best_block_hash()
            self.db_manager.store_last_block_hash_responder(self.last_known_block)

        while True:
            block_hash = self.block_queue.get()
            block = self.block_processor.get_block(block_hash)
            logger.info("New block received", block_hash=block_hash, prev_block_hash=block.get("previousblockhash"))

            if len(self.trackers) > 0 and block is not None:
                txids = block.get("tx")

                completed_trackers = self.get_completed_trackers()
                expired_trackers = self.get_expired_trackers(block.get("height"))
                trackers_to_delete_gatekeeper = {
                    uuid: self.trackers[uuid].get("user_id") for uuid in completed_trackers + expired_trackers
                }

                if self.last_known_block == block.get("previousblockhash"):
                    self.check_confirmations(txids)
                    Cleaner.delete_trackers(
                        completed_trackers, block.get("height"), self.trackers, self.tx_tracker_map, self.db_manager
                    )
                    Cleaner.delete_trackers(
                        expired_trackers,
                        block.get("height"),
                        self.trackers,
                        self.tx_tracker_map,
                        self.db_manager,
                        expired=True,
                    )
                    Cleaner.delete_gatekeeper_appointments(self.gatekeeper, trackers_to_delete_gatekeeper)

                    self.rebroadcast(self.get_txs_to_rebroadcast())

                # NOTCOVERED
                else:
                    logger.warning(
                        "Reorg found",
                        local_prev_block_hash=self.last_known_block,
                        remote_prev_block_hash=block.get("previousblockhash"),
                    )

                    # ToDo: #24-properly-handle-reorgs
                    self.handle_reorgs(block_hash)

                # Clear the receipts issued in this block
                self.carrier.issued_receipts = {}

                if len(self.trackers) == 0:
                    logger.info("No more pending trackers")

            # Register the last processed block for the responder
            self.db_manager.store_last_block_hash_responder(block_hash)
            self.last_known_block = block.get("hash")
            self.block_queue.task_done()
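check_confirmations and get_txs_to_rebroadcast are not shown either. The idea behind them, sketched roughly (the counter dict, function name and retry threshold are assumptions for illustration, not teos constants):

MISSED_BLOCKS_BEFORE_RETRY = 6  # assumed threshold, for illustration only

def track_missed_confirmations(block_txids, missed_confirmations):
    # Illustrative sketch: a tracked txid seen in the new block resets its miss counter, while
    # one that keeps missing for too many consecutive blocks is flagged for rebroadcast.
    to_rebroadcast = []
    for txid in missed_confirmations:
        if txid in block_txids:
            missed_confirmations[txid] = 0
        else:
            missed_confirmations[txid] += 1
            if missed_confirmations[txid] >= MISSED_BLOCKS_BEFORE_RETRY:
                to_rebroadcast.append(txid)
    return to_rebroadcast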