def test_notify_subscribers(block_processor):
    queue1 = Queue()
    queue2 = Queue()
    chain_monitor = ChainMonitor([queue1, queue2], block_processor, bitcoind_feed_params)

    # Queues should be empty to start with
    assert queue1.qsize() == 0
    assert queue2.qsize() == 0

    block1 = get_random_value_hex(32)
    block2 = get_random_value_hex(32)
    block3 = get_random_value_hex(32)

    # We add two elements to the internal queue before the thread is started
    chain_monitor.queue.put(block1)
    chain_monitor.queue.put(block2)
    assert queue1.qsize() == 0
    assert queue2.qsize() == 0

    notifying_thread = Thread(target=chain_monitor.notify_subscribers, daemon=True)
    notifying_thread.start()

    # The existing elements should be processed soon and in order for all queues
    for q in [queue1, queue2]:
        assert q.get(timeout=0.1) == block1
        assert q.get(timeout=0.1) == block2

    # New elements added while the thread is running should also be delivered to every subscriber
    chain_monitor.queue.put(block3)
    assert queue1.get(timeout=0.1) == block3
    assert queue2.get(timeout=0.1) == block3

    chain_monitor.terminate()
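# Note: `bitcoind_feed_params` and the `block_processor` fixture are provided by the shared test setup and are not
# defined in this file. Below is a minimal sketch of what `bitcoind_feed_params` is assumed to contain (the ZMQ feed
# settings that ChainMonitor subscribes to). The key names and values are an assumption based on the BTC_FEED prefix
# used when the dict is built in teosd, and may differ from the actual configuration.
example_bitcoind_feed_params = {
    "BTC_FEED_PROTOCOL": "tcp",  # assumed key/value
    "BTC_FEED_CONNECT": "127.0.0.1",  # assumed key/value
    "BTC_FEED_PORT": 28332,  # assumed key/value
}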
def test_monitor_chain_polling(block_processor_mock, monkeypatch):
    # Monkeypatch the BlockProcessor so the best tip remains unchanged
    fixed_tip = get_random_value_hex(32)
    monkeypatch.setattr(block_processor_mock, "get_best_block_hash", lambda blocking: fixed_tip)

    chain_monitor = ChainMonitor([Queue(), Queue()], block_processor_mock, bitcoind_feed_params)
    chain_monitor.last_tips = [fixed_tip]
    chain_monitor.polling_delta = 0.1

    # monitor_chain_polling runs until the ChainMonitor is terminated
    polling_thread = Thread(target=chain_monitor.monitor_chain_polling, daemon=True)
    polling_thread.start()

    # Check that nothing changes as long as a block is not generated
    for _ in range(5):
        assert chain_monitor.queue.empty()
        time.sleep(0.1)

    # And that it does if we generate a block
    monkeypatch.setattr(block_processor_mock, "get_best_block_hash", lambda blocking: get_random_value_hex(32))
    time.sleep(0.1)
    chain_monitor.queue.get()
    assert chain_monitor.queue.empty()

    # Check that the bitcoind_reachable event is cleared if the connection is lost, and set once it's recovered
    monkeypatch.setattr(block_processor_mock, "get_best_block_hash", mock_connection_refused_return)
    time.sleep(0.5)
    assert not chain_monitor.bitcoind_reachable.is_set()
    monkeypatch.delattr(block_processor_mock, "get_best_block_hash")
    time.sleep(0.5)
    assert chain_monitor.bitcoind_reachable.is_set()

    chain_monitor.terminate()
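# `mock_connection_refused_return` is defined in the shared test helpers, not in this file. A minimal sketch of the
# behaviour assumed by the test above: any call simulates bitcoind being unreachable, which should make the
# ChainMonitor clear its `bitcoind_reachable` event until a working `get_best_block_hash` is restored.
def mock_connection_refused_return(*args, **kwargs):
    # Assumption: raising ConnectionRefusedError is how the suite simulates a lost bitcoind connection
    raise ConnectionRefusedError()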
def test_monitor_chain_single_update(block_processor):
    # Tests that if both threads try to add the same block to the queue, only the first one will make it
    chain_monitor = ChainMonitor([Queue(), Queue()], block_processor, bitcoind_feed_params)
    chain_monitor.polling_delta = 2

    # We will create a block and wait for the polling thread. Then check the queues to see that the block hash has
    # only been added once.
    chain_monitor.monitor_chain()
    chain_monitor.activate()
    generate_blocks(1)

    assert len(chain_monitor.receiving_queues) == 2

    queue0_block = chain_monitor.receiving_queues[0].get()
    queue1_block = chain_monitor.receiving_queues[1].get()
    assert queue0_block == queue1_block
    assert chain_monitor.receiving_queues[0].empty()
    assert chain_monitor.receiving_queues[1].empty()

    # The delta for polling is 2 secs, so let's wait and see
    time.sleep(2)
    assert chain_monitor.receiving_queues[0].empty()
    assert chain_monitor.receiving_queues[1].empty()

    # We can also force an update and see that it won't go through
    assert chain_monitor.enqueue(queue0_block) is False

    chain_monitor.terminate()
    # The zmq thread needs a block generation to release from the recv method.
    generate_blocks(1)
def test_threads_stop_when_terminated(block_processor):
    # When the status is "terminated", the methods running the threads should stop immediately
    chain_monitor = ChainMonitor([Queue(), Queue()], block_processor, bitcoind_feed_params)
    chain_monitor.terminate()

    # If any of the functions does not exit immediately, the test will time out
    chain_monitor.monitor_chain_polling()
    chain_monitor.monitor_chain_zmq()
    chain_monitor.notify_subscribers()
def responder(db_manager, gatekeeper, carrier, block_processor):
    responder = Responder(db_manager, gatekeeper, carrier, block_processor)
    chain_monitor = ChainMonitor([Queue(), responder.block_queue], block_processor, bitcoind_feed_params)
    chain_monitor.monitor_chain()
    responder_thread = responder.awake()
    chain_monitor.activate()

    yield responder

    chain_monitor.terminate()
    responder_thread.join()
def test_monitor_chain_polling(db_manager, block_processor):
    # Try polling with the Watcher
    watcher_queue = Queue()
    chain_monitor = ChainMonitor(watcher_queue, Queue(), block_processor, bitcoind_feed_params)
    chain_monitor.best_tip = block_processor.get_best_block_hash()
    chain_monitor.polling_delta = 0.1

    # monitor_chain_polling runs until terminate is set
    polling_thread = Thread(target=chain_monitor.monitor_chain_polling, daemon=True)
    polling_thread.start()

    # Check that nothing changes as long as a block is not generated
    for _ in range(5):
        assert chain_monitor.watcher_queue.empty()
        time.sleep(0.1)

    # And that it does if we generate a block
    generate_block()
    chain_monitor.watcher_queue.get()
    assert chain_monitor.watcher_queue.empty()

    chain_monitor.terminate = True
    polling_thread.join()
def test_terminate(block_processor):
    queue = Queue()
    chain_monitor = ChainMonitor([queue, Queue()], block_processor, bitcoind_feed_params)
    chain_monitor.polling_delta = 0.1

    chain_monitor.monitor_chain()
    chain_monitor.activate()

    chain_monitor.terminate()
    assert chain_monitor.status == ChainMonitorStatus.TERMINATED

    # generate a new block
    generate_blocks(1)
    time.sleep(0.11)  # wait longer than the polling_delta

    # there should be only the ChainMonitor.END_MESSAGE message in the receiving queue, as the new block was generated
    # after terminating
    assert queue.qsize() == 1
    assert queue.get() == ChainMonitor.END_MESSAGE
def test_monitor_chain_polling(block_processor):
    chain_monitor = ChainMonitor([Queue(), Queue()], block_processor, bitcoind_feed_params)
    chain_monitor.last_tips = [block_processor.get_best_block_hash()]
    chain_monitor.polling_delta = 0.1

    # monitor_chain_polling runs until the ChainMonitor is terminated
    polling_thread = Thread(target=chain_monitor.monitor_chain_polling, daemon=True)
    polling_thread.start()

    # Check that nothing changes as long as a block is not generated
    for _ in range(5):
        assert chain_monitor.queue.empty()
        time.sleep(0.1)

    # And that it does if we generate a block
    generate_blocks(1)
    chain_monitor.queue.get()
    assert chain_monitor.queue.empty()

    chain_monitor.terminate()
def test_monitor_chain_zmq(block_processor):
    responder_queue = Queue()
    chain_monitor = ChainMonitor([Queue(), responder_queue], block_processor, bitcoind_feed_params)
    chain_monitor.last_tips = [block_processor.get_best_block_hash()]

    zmq_thread = Thread(target=chain_monitor.monitor_chain_zmq, daemon=True)
    zmq_thread.start()

    # The internal queue should start empty
    assert chain_monitor.queue.empty()

    # And have a new block every time we generate one
    for _ in range(3):
        generate_blocks(1)
        chain_monitor.queue.get()
        assert chain_monitor.queue.empty()

    chain_monitor.terminate()
    # The zmq thread needs a block generation to release from the recv method.
    generate_blocks(1)
def test_monitor_chain_and_activate(block_processor):
    # In this test, we generate some blocks after `monitor_chain`, then `activate` and generate a few more blocks.
    # We verify that all the generated blocks are indeed sent to the queues in the right order.
    queue1 = Queue()
    queue2 = Queue()

    # We add some initial blocks to the receiving queues, to simulate a bootstrap with previous information
    pre_blocks = [get_random_value_hex(32) for _ in range(5)]
    for block in pre_blocks:
        queue1.put(block)
        queue2.put(block)

    # We don't activate the ChainMonitor but we start listening; therefore received blocks should accumulate in the
    # internal queue
    chain_monitor = ChainMonitor([queue1, queue2], block_processor, bitcoind_feed_params)
    chain_monitor.polling_delta = 0.1
    chain_monitor.monitor_chain()
    assert chain_monitor.status == ChainMonitorStatus.LISTENING

    # We generate some blocks while the monitor is listening but not active
    init_blocks = generate_blocks_with_delay(3, 0.15)

    time.sleep(0.11)  # higher than the polling interval

    chain_monitor.activate()

    # Generate some more blocks after activating
    after_blocks = generate_blocks_with_delay(3, 0.15)

    # We now check that all the blocks are in the receiving queues in the correct order
    all_blocks = pre_blocks + init_blocks + after_blocks
    for block in all_blocks:
        assert queue1.get(timeout=0.1) == block
        assert queue2.get(timeout=0.1) == block

    chain_monitor.terminate()
    # The zmq thread needs a block generation to release from the recv method.
    generate_blocks(1)
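# `generate_blocks_with_delay` comes from the shared test utilities and is not defined in this file. A minimal sketch
# of the behaviour assumed by the tests above: mine blocks one at a time, pausing between them so the polling/zmq
# threads have time to pick each one up, and return the generated block hashes in order. The default delay value is
# an assumption.
def generate_blocks_with_delay(n, delay=0.5):
    block_ids = []
    for _ in range(n):
        block_ids.extend(generate_blocks(1))  # assumes generate_blocks returns the list of mined block hashes
        time.sleep(delay)
    return block_ids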
def test_activate(block_processor):
    # Not much to test here, this should launch two threads (one per monitor approach) and finish on terminate
    chain_monitor = ChainMonitor([Queue(), Queue()], block_processor, bitcoind_feed_params)
    chain_monitor.monitor_chain()
    chain_monitor.activate()
    assert chain_monitor.status == ChainMonitorStatus.ACTIVE

    # last_tips is updated before starting the threads, so it should not be empty now.
    assert len(chain_monitor.last_tips) > 0

    # Blocks should be received
    for _ in range(5):
        generate_blocks(1)
        watcher_block = chain_monitor.receiving_queues[0].get()
        responder_block = chain_monitor.receiving_queues[1].get()
        assert watcher_block == responder_block
        assert chain_monitor.receiving_queues[0].empty()
        assert chain_monitor.receiving_queues[1].empty()

    chain_monitor.terminate()
    # The zmq thread needs a block generation to release from the recv method.
    generate_blocks(1)
def test_terminate(block_processor_mock, monkeypatch):
    # Test that the ChainMonitor is stopped on a terminate signal
    queue = Queue()
    chain_monitor = ChainMonitor([queue, Queue()], block_processor_mock, bitcoind_feed_params)
    chain_monitor.polling_delta = 0.1

    # Activate the monitor
    chain_monitor.monitor_chain()
    chain_monitor.activate()

    # Ask it to terminate
    chain_monitor.terminate()
    assert chain_monitor.status == ChainMonitorStatus.TERMINATED

    # Mock the generation of a new block
    monkeypatch.setattr(block_processor_mock, "get_best_block_hash", lambda blocking: get_random_value_hex(32))
    time.sleep(0.11)  # wait longer than the polling_delta

    # There should be only the ChainMonitor.END_MESSAGE message in the receiving queue, as the new block was
    # generated after terminating
    assert queue.qsize() == 1
    assert queue.get() == ChainMonitor.END_MESSAGE
def test_monitor_chain(block_processor):
    # We don't activate it but we start listening; therefore received blocks should accumulate in the internal queue
    chain_monitor = ChainMonitor([Queue(), Queue()], block_processor, bitcoind_feed_params)
    chain_monitor.polling_delta = 0.1
    chain_monitor.monitor_chain()
    assert chain_monitor.status == ChainMonitorStatus.LISTENING

    # The tip is updated before starting the threads, so it should have been added to last_tips.
    assert len(chain_monitor.last_tips) > 0

    # Blocks should be received and added to the queue
    count = 0
    for _ in range(5):
        generate_blocks(1)
        count += 1
        time.sleep(0.11)  # higher than the polling interval
        assert chain_monitor.receiving_queues[0].empty()
        assert chain_monitor.receiving_queues[1].empty()
        assert chain_monitor.queue.qsize() == count

    chain_monitor.terminate()
    # The zmq thread needs a block generation to release from the recv method.
    generate_blocks(1)
def test_monitor_chain(db_manager, block_processor):
    # Not much to test here, this should launch two threads (one per monitor approach) and finish on terminate
    chain_monitor = ChainMonitor(Queue(), Queue(), block_processor, bitcoind_feed_params)
    chain_monitor.best_tip = None
    chain_monitor.monitor_chain()

    # The tip is updated before starting the threads, so it should have changed.
    assert chain_monitor.best_tip is not None

    # Blocks should be received
    for _ in range(5):
        generate_block()
        watcher_block = chain_monitor.watcher_queue.get()
        responder_block = chain_monitor.responder_queue.get()
        assert watcher_block == responder_block
        assert chain_monitor.watcher_queue.empty()
        assert chain_monitor.responder_queue.empty()

    # And the threads should be terminated on terminate
    chain_monitor.terminate = True

    # The zmq thread needs a block generation to release from the recv method.
    generate_block()
class TeosDaemon:
    """
    The :class:`TeosDaemon` organizes the code to initialize all the components of teos, start the service, stop and
    teardown.

    Args:
        config (:obj:`dict`): the configuration object.
        sk (:obj:`PrivateKey`): the :obj:`PrivateKey` of the tower.
        logger (:obj:`Logger <teos.logger.Logger>`): the logger instance.
        logging_port (:obj:`int`): the port where the logging server can be reached (localhost:logging_port).
        stop_log_event (:obj:`multiprocessing.Event`): the event to signal a stop to the logging server.
        logging_process (:obj:`multiprocessing.Process`): the logging server process.

    Attributes:
        stop_command_event (:obj:`threading.Event`): The event that will be set to initiate a graceful shutdown.
        stop_event (:obj:`multiprocessing.Event`): The event that services running on different processes will
            monitor in order to be informed that they should shutdown.
        block_processor (:obj:`teos.block_processor.BlockProcessor`): The block processor instance.
        db_manager (:obj:`teos.appointments_dbm.AppointmentsDBM`): The db manager for appointments.
        watcher (:obj:`teos.watcher.Watcher`): The watcher instance.
        watcher_thread (:obj:`threading.Thread`): After ``bootstrap_components``, the thread that runs the Watcher
            monitoring (set to :obj:`None` beforehand).
        responder_thread (:obj:`threading.Thread`): After ``bootstrap_components``, the thread that runs the
            Responder monitoring (set to :obj:`None` beforehand).
        chain_monitor (:obj:`teos.chain_monitor.ChainMonitor`): The ``ChainMonitor`` instance.
        internal_api_endpoint (:obj:`str`): The full host name and port of the internal api.
        internal_api (:obj:`teos.internal_api.InternalAPI`): The InternalAPI instance.
        api_proc (:obj:`subprocess.Popen` or :obj:`multiprocessing.Process`): Once the API process is created, the
            instance of either ``Popen`` or ``Process`` that is serving the public API (set to :obj:`None`
            beforehand).
        rpc_process (:obj:`multiprocessing.Process`): The instance of the internal RPC server; only set if running.
    """

    def __init__(self, config, sk, logger, logging_port, stop_log_event, logging_process):
        self.config = config
        self.logger = logger
        self.logging_port = logging_port
        self.stop_log_event = stop_log_event
        self.logging_process = logging_process

        # event triggered when a ``stop`` command is issued
        # Using multiprocessing.Event seems to cause a deadlock if event.set() is called in a signal handler that
        # interrupted event.wait(). This does not happen with threading.Event.
        # See https://bugs.python.org/issue41606
        self.stop_command_event = threading.Event()

        # event triggered when the public API is halted, hence teosd is ready to stop
        self.stop_event = multiprocessing.Event()

        bitcoind_connect_params = {k: v for k, v in config.items() if k.startswith("BTC_RPC")}
        bitcoind_feed_params = {k: v for k, v in config.items() if k.startswith("BTC_FEED")}
        bitcoind_reachable = threading.Event()

        if not can_connect_to_bitcoind(bitcoind_connect_params):
            raise RuntimeError("Cannot connect to bitcoind")
        elif not in_correct_network(bitcoind_connect_params, config.get("BTC_NETWORK")):
            raise RuntimeError("bitcoind is running on a different network, check teos.conf and bitcoin.conf")
        else:
            bitcoind_reachable.set()

        self.logger.info("tower_id = {}".format(Cryptographer.get_compressed_pk(sk.public_key)))

        self.block_processor = BlockProcessor(bitcoind_connect_params, bitcoind_reachable)
        carrier = Carrier(bitcoind_connect_params, bitcoind_reachable)

        gatekeeper = Gatekeeper(
            UsersDBM(self.config.get("USERS_DB_PATH")),
            self.block_processor,
            self.config.get("SUBSCRIPTION_SLOTS"),
            self.config.get("SUBSCRIPTION_DURATION"),
            self.config.get("EXPIRY_DELTA"),
        )
        self.db_manager = AppointmentsDBM(self.config.get("APPOINTMENTS_DB_PATH"))

        responder = Responder(self.db_manager, gatekeeper, carrier, self.block_processor)
        self.watcher = Watcher(
            self.db_manager,
            gatekeeper,
            self.block_processor,
            responder,
            sk,
            self.config.get("MAX_APPOINTMENTS"),
            self.config.get("LOCATOR_CACHE_SIZE"),
        )

        self.watcher_thread = None
        self.responder_thread = None

        # Create the chain monitor
        self.chain_monitor = ChainMonitor(
            [self.watcher.block_queue, responder.block_queue, gatekeeper.block_queue],
            self.block_processor,
            bitcoind_feed_params,
        )

        # Set up the internal API
        self.internal_api_endpoint = f'{self.config.get("INTERNAL_API_HOST")}:{self.config.get("INTERNAL_API_PORT")}'
        self.internal_api = InternalAPI(
            self.watcher, self.internal_api_endpoint, self.config.get("INTERNAL_API_WORKERS"), self.stop_command_event
        )

        # Create the rpc, without starting it
        self.rpc_process = multiprocessing.Process(
            target=rpc.serve,
            args=(
                self.config.get("RPC_BIND"),
                self.config.get("RPC_PORT"),
                self.internal_api_endpoint,
                self.logging_port,
                self.stop_event,
            ),
            daemon=True,
        )

        # This variable will contain the handle of the process running the API, when the service is started.
        # It will be an instance of either Popen or Process, depending on the WSGI config setting.
        self.api_proc = None

    def bootstrap_components(self):
        """
        Performs the initial setup of the components. It loads the appointments and tracker for the watcher and the
        responder (if any), and awakes the components. It also populates the block queues with any missing data, in
        case the tower has been offline for some time. Finally, it starts the chain monitor.
        """
        # Make sure that the ChainMonitor starts listening to new blocks while we bootstrap
        self.chain_monitor.monitor_chain()

        watcher_appointments_data = self.db_manager.load_watcher_appointments()
        responder_trackers_data = self.db_manager.load_responder_trackers()

        if len(watcher_appointments_data) == 0 and len(responder_trackers_data) == 0:
            self.logger.info("Fresh bootstrap")

            self.watcher_thread = self.watcher.awake()
            self.responder_thread = self.watcher.responder.awake()

        else:
            self.logger.info("Bootstrapping from backed up data")

            # Update the Watcher backed up data if found.
            if len(watcher_appointments_data) != 0:
                self.watcher.appointments, self.watcher.locator_uuid_map = Builder.build_appointments(
                    watcher_appointments_data
                )

            # Update the Responder with backed up data if found.
            if len(responder_trackers_data) != 0:
                self.watcher.responder.trackers, self.watcher.responder.tx_tracker_map = Builder.build_trackers(
                    responder_trackers_data
                )

            # Awaking components so the states can be updated.
            self.watcher_thread = self.watcher.awake()
            self.responder_thread = self.watcher.responder.awake()

            last_block_watcher = self.db_manager.load_last_block_hash_watcher()
            last_block_responder = self.db_manager.load_last_block_hash_responder()

            # Populate the block queues with data if they've missed some while offline. If the blocks of both match
            # we don't perform the search twice.

            # FIXME: 32-reorgs-offline dropped txs are not used at this point.
            last_common_ancestor_watcher, dropped_txs_watcher = self.block_processor.find_last_common_ancestor(
                last_block_watcher
            )
            missed_blocks_watcher = self.block_processor.get_missed_blocks(last_common_ancestor_watcher)

            if last_block_watcher == last_block_responder:
                dropped_txs_responder = dropped_txs_watcher
                missed_blocks_responder = missed_blocks_watcher

            else:
                last_common_ancestor_responder, dropped_txs_responder = self.block_processor.find_last_common_ancestor(
                    last_block_responder
                )
                missed_blocks_responder = self.block_processor.get_missed_blocks(last_common_ancestor_responder)

            # If only one of the instances needs to be updated, it can be done separately.
            if len(missed_blocks_watcher) == 0 and len(missed_blocks_responder) != 0:
                Builder.populate_block_queue(self.watcher.responder.block_queue, missed_blocks_responder)
                self.watcher.responder.block_queue.join()

            elif len(missed_blocks_responder) == 0 and len(missed_blocks_watcher) != 0:
                Builder.populate_block_queue(self.watcher.block_queue, missed_blocks_watcher)
                self.watcher.block_queue.join()

            # Otherwise they need to be updated at the same time, block by block
            elif len(missed_blocks_responder) != 0 and len(missed_blocks_watcher) != 0:
                Builder.update_states(
                    self.watcher.block_queue,
                    self.watcher.responder.block_queue,
                    missed_blocks_watcher,
                    missed_blocks_responder,
                )

        # Activate ChainMonitor
        self.chain_monitor.activate()

    def start_services(self, logging_port):
        """
        Readies the tower by setting up signal handling, and starting all the services.

        Args:
            logging_port (:obj:`int`): the port where the logging server can be reached (localhost:logging_port)
        """
        signal(SIGINT, self.handle_signals)
        signal(SIGTERM, self.handle_signals)
        signal(SIGQUIT, self.handle_signals)

        # Start the rpc process
        self.rpc_process.start()

        # Start the internal API
        # This MUST be done after rpc_process.start to avoid the issue that was solved in
        # https://github.com/talaia-labs/python-teos/pull/198
        self.internal_api.rpc_server.start()
        self.logger.info(f"Internal API initialized. Serving at {self.internal_api_endpoint}")

        # Start the public API server
        api_endpoint = f"{self.config.get('API_BIND')}:{self.config.get('API_PORT')}"
        if self.config.get("WSGI") == "gunicorn":
            # FIXME: We may like to add workers depending on a config value
            teos_folder = os.path.dirname(os.path.realpath(__file__))
            self.api_proc = subprocess.Popen(
                [
                    "gunicorn",
                    f"--config={os.path.join(teos_folder, 'gunicorn_config.py')}",
                    f"--bind={api_endpoint}",
                    f"teos.api:serve(internal_api_endpoint='{self.internal_api_endpoint}', "
                    f"endpoint='{api_endpoint}', logging_port='{logging_port}', "
                    f"min_to_self_delay='{self.config.get('MIN_TO_SELF_DELAY')}')",
                ],
                env={**os.environ, **{"LOG_SERVER_PORT": str(logging_port)}},
            )
        else:
            self.api_proc = multiprocessing.Process(
                target=api.serve,
                kwargs={
                    "internal_api_endpoint": self.internal_api_endpoint,
                    "endpoint": api_endpoint,
                    "logging_port": logging_port,
                    "min_to_self_delay": self.config.get("MIN_TO_SELF_DELAY"),
                    "auto_run": True,
                },
            )
            self.api_proc.start()

    def handle_signals(self, signum, frame):
        """Handles signals by initiating a graceful shutdown."""
        self.logger.debug(f"Signal {signum} received. Stopping")

        self.stop_command_event.set()

    def teardown(self):
        """Shuts down all services and closes the DB, then exits. This method does not return."""
        self.logger.info("Terminating public API")

        # Stop the public API first
        if isinstance(self.api_proc, subprocess.Popen):
            self.api_proc.terminate()
            self.api_proc.wait()
        elif isinstance(self.api_proc, multiprocessing.Process):
            # FIXME: using SIGKILL for now, adapt it to use SIGTERM so the shutdown can be graceful
            self.api_proc.kill()
            self.api_proc.join()

        self.logger.info("Public API terminated")

        # Signals readiness to shutdown to the other processes
        self.stop_event.set()

        # wait for RPC process to shutdown
        self.rpc_process.join()

        # Stops the internal API, after waiting for some grace time
        self.logger.info("Stopping internal API")
        self.internal_api.rpc_server.stop(SHUTDOWN_GRACE_TIME).wait()
        self.logger.info("Internal API stopped")

        # terminate the ChainMonitor
        self.chain_monitor.terminate()

        # wait for watcher and responder to finish processing their queues
        self.watcher_thread.join()
        self.responder_thread.join()

        self.logger.info("Closing connection with appointments db")
        self.db_manager.close()

        self.logger.info("Closing connection with users db")
        self.watcher.gatekeeper.user_db.close()

        self.logger.info("Shutting down TEOS")
        self.stop_log_event.set()
        self.logging_process.join()
        exit(0)

    def start(self):
        """This method implements the whole lifetime cycle of the TEOS tower. This method does not return."""
        self.logger.info("Starting TEOS")

        self.bootstrap_components()
        self.start_services(self.logging_port)

        self.stop_command_event.wait()

        self.teardown()
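# A minimal sketch of how a caller might drive the TeosDaemon lifecycle above. The names `get_config`,
# `generate_keys` and `setup_logging` are hypothetical placeholders for whatever the real entry point uses to build
# the config dict, the tower key and the logging server; only the TeosDaemon constructor and start() call mirror the
# class defined above.
def run_tower():
    config = get_config()  # hypothetical helper returning the config dict
    sk = generate_keys(config)  # hypothetical helper returning the tower's PrivateKey
    logger, logging_port, stop_log_event, logging_process = setup_logging(config)  # hypothetical logging bootstrap

    # bootstrap_components() and start_services() are called internally; start() blocks until a stop command arrives
    TeosDaemon(config, sk, logger, logging_port, stop_log_event, logging_process).start()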
def test_do_watch(temp_db_manager, gatekeeper, carrier, block_processor, generate_dummy_tracker):
    commitment_txs = [create_commitment_tx() for _ in range(20)]
    trackers = [generate_dummy_tracker(commitment_tx) for commitment_tx in commitment_txs]
    subscription_expiry = block_processor.get_block_count() + 110

    # Broadcast all commitment transactions
    generate_block_with_transactions(commitment_txs)

    # Create a fresh responder to simplify the test
    responder = Responder(temp_db_manager, gatekeeper, carrier, block_processor)
    chain_monitor = ChainMonitor([Queue(), responder.block_queue], block_processor, bitcoind_feed_params)
    chain_monitor.monitor_chain()
    chain_monitor.activate()

    # Let's set up the trackers first
    for tracker in trackers:
        uuid = uuid4().hex

        # Simulate user registration so trackers can properly expire
        responder.gatekeeper.registered_users[tracker.user_id] = UserInfo(
            available_slots=10, subscription_expiry=subscription_expiry
        )

        # Add data to the Responder
        responder.trackers[uuid] = tracker.get_summary()
        responder.tx_tracker_map[tracker.penalty_txid] = [uuid]
        responder.missed_confirmations[tracker.penalty_txid] = 0
        responder.unconfirmed_txs.append(tracker.penalty_txid)
        # Assuming the appointment only took a single slot
        responder.gatekeeper.registered_users[tracker.user_id].appointments[uuid] = 1

        # We also need to store the info in the db
        responder.db_manager.create_triggered_appointment_flag(uuid)
        responder.db_manager.store_responder_tracker(uuid, tracker.to_dict())

    # Let's start to watch
    do_watch_thread = Thread(target=responder.do_watch, daemon=True)
    do_watch_thread.start()

    # And broadcast some of the penalties
    broadcast_txs = []
    for tracker in trackers[:5]:
        bitcoin_cli.sendrawtransaction(tracker.penalty_rawtx)
        broadcast_txs.append(tracker.penalty_txid)

    # Mine a block
    generate_blocks_with_delay(1)

    # The transactions we sent shouldn't be in the unconfirmed transaction list anymore
    assert not set(broadcast_txs).issubset(responder.unconfirmed_txs)

    # CONFIRMATIONS_BEFORE_RETRY + 1 blocks after, the responder should rebroadcast the unconfirmed txs (15 remaining)
    generate_blocks_with_delay(CONFIRMATIONS_BEFORE_RETRY + 1)
    assert len(responder.unconfirmed_txs) == 0
    assert len(responder.trackers) == 20

    # Generating 100 - CONFIRMATIONS_BEFORE_RETRY - 2 additional blocks should complete the first 5 trackers
    generate_blocks_with_delay(100 - CONFIRMATIONS_BEFORE_RETRY - 2)
    assert len(responder.unconfirmed_txs) == 0
    assert len(responder.trackers) == 15

    # Check they are not in the Gatekeeper either
    for tracker in trackers[:5]:
        assert len(responder.gatekeeper.registered_users[tracker.user_id].appointments) == 0

    # CONFIRMATIONS_BEFORE_RETRY additional blocks should complete the rest
    generate_blocks_with_delay(CONFIRMATIONS_BEFORE_RETRY)
    assert len(responder.unconfirmed_txs) == 0
    assert len(responder.trackers) == 0

    # Check they are not in the Gatekeeper either
    for tracker in trackers[5:]:
        assert len(responder.gatekeeper.registered_users[tracker.user_id].appointments) == 0

    chain_monitor.terminate()
    do_watch_thread.join()