def test_update_states_responder_misses_more(run_bitcoind, db_manager, gatekeeper, carrier, block_processor):
    # Test the case where both components have missed blocks, but the Responder has missed more.
    w = Watcher(
        db_manager=db_manager,
        gatekeeper=gatekeeper,
        block_processor=block_processor,
        responder=Responder(db_manager, gatekeeper, carrier, block_processor),
        sk_der=generate_keypair()[0].to_der(),
        max_appointments=config.get("MAX_APPOINTMENTS"),
        blocks_in_cache=config.get("LOCATOR_CACHE_SIZE"),
    )

    blocks = []
    for _ in range(5):
        generate_block()
        blocks.append(bitcoin_cli(bitcoind_connect_params).getbestblockhash())

    # Updating the states should bring both to the same last known block.
    w.awake()
    w.responder.awake()
    # update_states takes (watcher, missed_blocks_watcher, missed_blocks_responder), so the Responder
    # must be handed the longer list to actually exercise the "Responder misses more" branch.
    Builder.update_states(w, blocks[1:], blocks)

    assert db_manager.load_last_block_hash_watcher() == blocks[-1]
    assert w.responder.last_known_block == blocks[-1]
def test_update_states_watcher_misses_more(monkeypatch):
    # Test the case where both components have data that need to be updated, but the Watcher has more.
    blocks = [get_random_value_hex(32) for _ in range(5)]
    watcher_queue = Queue()
    responder_queue = Queue()

    # Monkeypatch so there's no join, since the queues are not tied to a Watcher and a Responder for the test
    monkeypatch.setattr(watcher_queue, "join", lambda: None)
    monkeypatch.setattr(responder_queue, "join", lambda: None)

    # The Watcher must receive the longer list of missed blocks to match the case under test
    # (update_states signature: watcher_queue, responder_queue, missed_blocks_watcher, missed_blocks_responder).
    Builder.update_states(watcher_queue, responder_queue, blocks, blocks[1:])

    # Both queues must end on the same, most recent, block hash
    assert responder_queue.queue.pop() == blocks[-1]
    assert watcher_queue.queue.pop() == blocks[-1]
def test_populate_block_queue():
    # populate_block_queue simply feeds a list of block hashes into a Queue.
    hashes = [get_random_value_hex(32) for _ in range(10)]
    block_queue = Queue()
    Builder.populate_block_queue(block_queue, hashes)

    # Drain the queue: every item must be one of our hashes, and no hash may be left unqueued
    pending = hashes
    while not block_queue.empty():
        item = block_queue.get()
        assert item in pending
        pending.remove(item)

    assert len(pending) == 0
def test_build_trackers(generate_dummy_tracker):
    # build_trackers builds two dictionaries: trackers (uuid: TransactionTracker) and tx_tracker_map (txid:uuid)
    # These are populated with data pulled from the database and used as initial state by the Responder during bootstrap
    trackers_data = {}

    # Generate tracker data; every other iteration reuses the penalty_txid so the builder's
    # shared-txid path is exercised as well
    for i in range(10):
        dummy = generate_dummy_tracker()
        trackers_data[uuid4().hex] = dummy.to_dict()

        if i % 2 == 0:
            shared_txid = dummy.penalty_txid
            dummy = generate_dummy_tracker()
            dummy.penalty_txid = shared_txid
            trackers_data[uuid4().hex] = dummy.to_dict()

    trackers, tx_tracker_map = Builder.build_trackers(trackers_data)

    # Every built tracker must mirror its source data and be reachable through its penalty_txid
    for uuid, tracker in trackers.items():
        assert uuid in trackers_data.keys()
        expected = trackers_data[uuid]
        assert tracker.get("penalty_txid") == expected.get("penalty_txid")
        assert tracker.get("locator") == expected.get("locator")
        assert tracker.get("user_id") == expected.get("user_id")
        assert uuid in tx_tracker_map[tracker.get("penalty_txid")]
def test_build_appointments(generate_dummy_appointment):
    # build_appointments builds two dictionaries: appointments (uuid:ExtendedAppointment) and locator_uuid_map
    # (locator:uuid). These are populated with data pulled from the database and used as initial state by the Watcher
    # during bootstrap
    appointments_data = {}

    # Generate appointment data; every other iteration reuses the locator so the builder's
    # shared-locator path is exercised as well
    for i in range(10):
        dummy = generate_dummy_appointment()
        appointments_data[uuid4().hex] = dummy.to_dict()

        if i % 2 == 0:
            shared_locator = dummy.locator
            dummy = generate_dummy_appointment()
            dummy.locator = shared_locator
            appointments_data[uuid4().hex] = dummy.to_dict()

    # Use the builder to create the data structures
    appointments, locator_uuid_map = Builder.build_appointments(appointments_data)

    # Every built appointment must mirror its source data and be reachable through its locator
    for uuid, appointment in appointments.items():
        assert uuid in appointments_data.keys()
        expected = appointments_data[uuid]
        assert expected.get("locator") == appointment.get("locator")
        assert expected.get("user_id") == appointment.get("user_id")
        assert uuid in locator_uuid_map[appointment.get("locator")]
def test_build_trackers():
    # build_trackers recreates the Responder's in-memory state (trackers and tx_tracker_map) from db-style data.
    trackers_data = {}

    # Create tracker data; every other iteration reuses the penalty_txid so the builder's
    # shared-txid path is exercised as well
    for i in range(10):
        dummy = generate_dummy_tracker()
        trackers_data[uuid4().hex] = dummy.to_dict()

        if i % 2 == 0:
            shared_txid = dummy.penalty_txid
            dummy = generate_dummy_tracker()
            dummy.penalty_txid = shared_txid
            trackers_data[uuid4().hex] = dummy.to_dict()

    trackers, tx_tracker_map = Builder.build_trackers(trackers_data)

    # Every built tracker must mirror its source data and be reachable through its penalty_txid
    for uuid, tracker in trackers.items():
        assert uuid in trackers_data.keys()
        expected = trackers_data[uuid]
        assert tracker.get("penalty_txid") == expected.get("penalty_txid")
        assert tracker.get("locator") == expected.get("locator")
        assert tracker.get("appointment_end") == expected.get("appointment_end")
        assert uuid in tx_tracker_map[tracker.get("penalty_txid")]
def test_build_appointments():
    # build_appointments recreates the Watcher's in-memory state (appointments and locator_uuid_map) from db-style data.
    appointments_data = {}

    # Create appointment data; every other iteration reuses the locator so the builder's
    # shared-locator path is exercised as well
    for i in range(10):
        dummy, _ = generate_dummy_appointment(real_height=False)
        appointments_data[uuid4().hex] = dummy.to_dict()

        if i % 2 == 0:
            shared_locator = dummy.locator
            dummy, _ = generate_dummy_appointment(real_height=False)
            dummy.locator = shared_locator
            appointments_data[uuid4().hex] = dummy.to_dict()

    # Use the builder to create the data structures
    appointments, locator_uuid_map = Builder.build_appointments(appointments_data)

    # Every built appointment must mirror its source data and be reachable through its locator
    for uuid, appointment in appointments.items():
        assert uuid in appointments_data.keys()
        expected = appointments_data[uuid]
        assert expected.get("locator") == appointment.get("locator")
        assert expected.get("end_time") == appointment.get("end_time")
        assert uuid in locator_uuid_map[appointment.get("locator")]
def test_update_states_empty_list():
    # update_states feeds data to both the Watcher and the Responder block queues and waits until it is
    # processed. It is used to bring both components up to date during bootstrap, and only applies if BOTH
    # have missed blocks; otherwise populate_block_queue must be used.
    # Here we test that a missing list on either side is rejected.
    watcher_queue = Queue()
    responder_queue = Queue()
    empty = []
    non_empty = [get_random_value_hex(32)]

    # Any combination involving an empty list must raise a ValueError
    with pytest.raises(ValueError):
        Builder.update_states(watcher_queue, responder_queue, empty, non_empty)

    with pytest.raises(ValueError):
        Builder.update_states(watcher_queue, responder_queue, non_empty, empty)
def test_update_states_empty_list(db_manager, carrier, block_processor):
    # update_states only applies when both components have missed blocks; an empty list on either side must fail.
    w = Watcher(
        db_manager=db_manager,
        block_processor=block_processor,
        responder=Responder(db_manager, carrier, block_processor),
        sk_der=None,
        max_appointments=config.get("MAX_APPOINTMENTS"),
        expiry_delta=config.get("EXPIRY_DELTA"),
    )

    empty = []
    non_empty = [get_random_value_hex(32)]

    # Any combination involving an empty list must raise a ValueError
    with pytest.raises(ValueError):
        Builder.update_states(w, empty, non_empty)

    with pytest.raises(ValueError):
        Builder.update_states(w, non_empty, empty)
def test_update_states_watcher_misses_more(db_manager, carrier, block_processor):
    # Same as before, but now it is the Watcher that has missed more blocks.
    w = Watcher(
        db_manager=db_manager,
        block_processor=block_processor,
        responder=Responder(db_manager, carrier, block_processor),
        sk_der=None,
        max_appointments=config.get("MAX_APPOINTMENTS"),
        expiry_delta=config.get("EXPIRY_DELTA"),
    )

    blocks = []
    for _ in range(5):
        generate_block()
        blocks.append(bitcoin_cli(bitcoind_connect_params).getbestblockhash())

    w.awake()
    w.responder.awake()
    # update_states takes (watcher, missed_blocks_watcher, missed_blocks_responder), so the Watcher
    # must be handed the longer list to actually exercise the "Watcher misses more" branch.
    Builder.update_states(w, blocks, blocks[1:])

    assert db_manager.load_last_block_hash_watcher() == blocks[-1]
    assert db_manager.load_last_block_hash_responder() == blocks[-1]
def test_update_states_responder_misses_more(run_bitcoind, db_manager, carrier, block_processor):
    # Test the case where both components have missed blocks, but the Responder has missed more.
    w = Watcher(
        db_manager=db_manager,
        block_processor=block_processor,
        responder=Responder(db_manager, carrier, block_processor),
        sk_der=None,
        max_appointments=config.get("MAX_APPOINTMENTS"),
        expiry_delta=config.get("EXPIRY_DELTA"),
    )

    blocks = []
    for _ in range(5):
        generate_block()
        blocks.append(bitcoin_cli(bitcoind_connect_params).getbestblockhash())

    # Updating the states should bring both to the same last known block.
    w.awake()
    w.responder.awake()
    # update_states takes (watcher, missed_blocks_watcher, missed_blocks_responder), so the Responder
    # must be handed the longer list to actually exercise the "Responder misses more" branch.
    Builder.update_states(w, blocks[1:], blocks)

    assert db_manager.load_last_block_hash_watcher() == blocks[-1]
    assert w.responder.last_known_block == blocks[-1]
def test_update_states_empty_list(db_manager, gatekeeper, carrier, block_processor):
    # update_states only applies when both components have missed blocks; an empty list on either side must fail.
    w = Watcher(
        db_manager=db_manager,
        gatekeeper=gatekeeper,
        block_processor=block_processor,
        responder=Responder(db_manager, gatekeeper, carrier, block_processor),
        sk_der=generate_keypair()[0].to_der(),
        max_appointments=config.get("MAX_APPOINTMENTS"),
        blocks_in_cache=config.get("LOCATOR_CACHE_SIZE"),
    )

    empty = []
    non_empty = [get_random_value_hex(32)]

    # Any combination involving an empty list must raise a ValueError
    with pytest.raises(ValueError):
        Builder.update_states(w, empty, non_empty)

    with pytest.raises(ValueError):
        Builder.update_states(w, non_empty, empty)
def test_update_states_watcher_misses_more(db_manager, gatekeeper, carrier, block_processor):
    # Same as before, but now it is the Watcher that has missed more blocks.
    w = Watcher(
        db_manager=db_manager,
        gatekeeper=gatekeeper,
        block_processor=block_processor,
        responder=Responder(db_manager, gatekeeper, carrier, block_processor),
        sk_der=generate_keypair()[0].to_der(),
        max_appointments=config.get("MAX_APPOINTMENTS"),
        blocks_in_cache=config.get("LOCATOR_CACHE_SIZE"),
    )

    blocks = []
    for _ in range(5):
        generate_block()
        blocks.append(bitcoin_cli(bitcoind_connect_params).getbestblockhash())

    w.awake()
    w.responder.awake()
    # update_states takes (watcher, missed_blocks_watcher, missed_blocks_responder), so the Watcher
    # must be handed the longer list to actually exercise the "Watcher misses more" branch.
    Builder.update_states(w, blocks, blocks[1:])

    assert db_manager.load_last_block_hash_watcher() == blocks[-1]
    assert db_manager.load_last_block_hash_responder() == blocks[-1]
def main(command_line_conf):
    """
    Entry point of the tower daemon.

    Loads the configuration, opens the database, checks connectivity to bitcoind, builds the core
    components (BlockProcessor, Carrier, Responder, Watcher, ChainMonitor), restores any backed up
    state from the database, catches up on blocks missed while offline, and finally starts the
    ChainMonitor and the API.

    Args:
        command_line_conf (dict): configuration overrides parsed from the command line, merged over
            the defaults and the config file by ConfigLoader.
    """

    # db_manager and chain_monitor are module globals so handle_signals can tear them down on shutdown
    global db_manager, chain_monitor

    signal(SIGINT, handle_signals)
    signal(SIGTERM, handle_signals)
    signal(SIGQUIT, handle_signals)

    # Loads config and sets up the data folder and log file
    config_loader = ConfigLoader(DATA_DIR, CONF_FILE_NAME, DEFAULT_CONF, command_line_conf)
    config = config_loader.build_config()
    setup_data_folder(DATA_DIR)
    setup_logging(config.get("LOG_FILE"), LOG_PREFIX)

    logger.info("Starting TEOS")
    db_manager = DBManager(config.get("DB_PATH"))

    # Split the config into the two parameter groups the bitcoind interfaces expect
    # (BTC_* for the RPC connection, FEED_* for the zmq/block feed)
    bitcoind_connect_params = {k: v for k, v in config.items() if k.startswith("BTC")}
    bitcoind_feed_params = {k: v for k, v in config.items() if k.startswith("FEED")}

    if not can_connect_to_bitcoind(bitcoind_connect_params):
        # NOTE(review): only logs; the function then returns normally (exit code 0) — confirm intended
        logger.error("Can't connect to bitcoind. Shutting down")

    elif not in_correct_network(bitcoind_connect_params, config.get("BTC_NETWORK")):
        logger.error("bitcoind is running on a different network, check conf.py and bitcoin.conf. Shutting down")

    else:
        try:
            secret_key_der = Cryptographer.load_key_file(config.get("TEOS_SECRET_KEY"))
            if not secret_key_der:
                raise IOError("TEOS private key can't be loaded")

            block_processor = BlockProcessor(bitcoind_connect_params)
            carrier = Carrier(bitcoind_connect_params)

            responder = Responder(db_manager, carrier, block_processor)
            watcher = Watcher(
                db_manager,
                block_processor,
                responder,
                secret_key_der,
                config.get("MAX_APPOINTMENTS"),
                config.get("EXPIRY_DELTA"),
            )

            # Create the chain monitor and start monitoring the chain
            chain_monitor = ChainMonitor(
                watcher.block_queue, watcher.responder.block_queue, block_processor, bitcoind_feed_params
            )

            # Restore any state persisted by a previous run
            watcher_appointments_data = db_manager.load_watcher_appointments()
            responder_trackers_data = db_manager.load_responder_trackers()

            if len(watcher_appointments_data) == 0 and len(responder_trackers_data) == 0:
                # Nothing to restore: both components start from scratch
                logger.info("Fresh bootstrap")

                watcher.awake()
                watcher.responder.awake()

            else:
                logger.info("Bootstrapping from backed up data")

                # Update the Watcher backed up data if found.
                if len(watcher_appointments_data) != 0:
                    watcher.appointments, watcher.locator_uuid_map = Builder.build_appointments(
                        watcher_appointments_data
                    )

                # Update the Responder with backed up data if found.
                if len(responder_trackers_data) != 0:
                    watcher.responder.trackers, watcher.responder.tx_tracker_map = Builder.build_trackers(
                        responder_trackers_data
                    )

                # Awaking components so the states can be updated.
                watcher.awake()
                watcher.responder.awake()

                last_block_watcher = db_manager.load_last_block_hash_watcher()
                last_block_responder = db_manager.load_last_block_hash_responder()

                # Populate the block queues with data if they've missed some while offline. If the blocks of both match
                # we don't perform the search twice.
                # FIXME: 32-reorgs-offline dropped txs are not used at this point.
                last_common_ancestor_watcher, dropped_txs_watcher = block_processor.find_last_common_ancestor(
                    last_block_watcher
                )
                missed_blocks_watcher = block_processor.get_missed_blocks(last_common_ancestor_watcher)

                if last_block_watcher == last_block_responder:
                    # Both components stopped at the same block; reuse the Watcher's search results
                    dropped_txs_responder = dropped_txs_watcher
                    missed_blocks_responder = missed_blocks_watcher

                else:
                    last_common_ancestor_responder, dropped_txs_responder = block_processor.find_last_common_ancestor(
                        last_block_responder
                    )
                    missed_blocks_responder = block_processor.get_missed_blocks(last_common_ancestor_responder)

                # If only one of the instances needs to be updated, it can be done separately.
                if len(missed_blocks_watcher) == 0 and len(missed_blocks_responder) != 0:
                    Builder.populate_block_queue(watcher.responder.block_queue, missed_blocks_responder)
                    watcher.responder.block_queue.join()

                elif len(missed_blocks_responder) == 0 and len(missed_blocks_watcher) != 0:
                    Builder.populate_block_queue(watcher.block_queue, missed_blocks_watcher)
                    watcher.block_queue.join()

                # Otherwise they need to be updated at the same time, block by block
                elif len(missed_blocks_responder) != 0 and len(missed_blocks_watcher) != 0:
                    Builder.update_states(watcher, missed_blocks_watcher, missed_blocks_responder)

            # Fire the API and the ChainMonitor
            # FIXME: 92-block-data-during-bootstrap-db
            chain_monitor.monitor_chain()
            API(Inspector(block_processor, config.get("MIN_TO_SELF_DELAY")), watcher).start()

        except Exception as e:
            logger.error("An error occurred: {}. Shutting down".format(e))
            exit(1)
def bootstrap_components(self):
    """
    Performs the initial setup of the components.

    It loads the appointments and tracker for the watcher and the responder (if any), and awakes the
    components. It also populates the block queues with any missing data, in case the tower has been
    offline for some time. Finally, it starts the chain monitor.
    """

    # Make sure that the ChainMonitor starts listening to new blocks while we bootstrap
    self.chain_monitor.monitor_chain()

    # State persisted by a previous run, if any
    watcher_appointments_data = self.db_manager.load_watcher_appointments()
    responder_trackers_data = self.db_manager.load_responder_trackers()

    if len(watcher_appointments_data) == 0 and len(responder_trackers_data) == 0:
        # Nothing to restore: both components start from scratch.
        # awake() returns the component's worker thread; the handles are kept for teardown.
        self.logger.info("Fresh bootstrap")

        self.watcher_thread = self.watcher.awake()
        self.responder_thread = self.watcher.responder.awake()

    else:
        self.logger.info("Bootstrapping from backed up data")

        # Update the Watcher backed up data if found.
        if len(watcher_appointments_data) != 0:
            self.watcher.appointments, self.watcher.locator_uuid_map = Builder.build_appointments(
                watcher_appointments_data
            )

        # Update the Responder with backed up data if found.
        if len(responder_trackers_data) != 0:
            self.watcher.responder.trackers, self.watcher.responder.tx_tracker_map = Builder.build_trackers(
                responder_trackers_data
            )

        # Awaking components so the states can be updated.
        self.watcher_thread = self.watcher.awake()
        self.responder_thread = self.watcher.responder.awake()

        last_block_watcher = self.db_manager.load_last_block_hash_watcher()
        last_block_responder = self.db_manager.load_last_block_hash_responder()

        # Populate the block queues with data if they've missed some while offline. If the blocks of both match
        # we don't perform the search twice.
        # FIXME: 32-reorgs-offline dropped txs are not used at this point.
        last_common_ancestor_watcher, dropped_txs_watcher = self.block_processor.find_last_common_ancestor(
            last_block_watcher
        )
        missed_blocks_watcher = self.block_processor.get_missed_blocks(last_common_ancestor_watcher)

        if last_block_watcher == last_block_responder:
            # Both components stopped at the same block; reuse the Watcher's search results
            dropped_txs_responder = dropped_txs_watcher
            missed_blocks_responder = missed_blocks_watcher

        else:
            last_common_ancestor_responder, dropped_txs_responder = self.block_processor.find_last_common_ancestor(
                last_block_responder
            )
            missed_blocks_responder = self.block_processor.get_missed_blocks(last_common_ancestor_responder)

        # If only one of the instances needs to be updated, it can be done separately.
        if len(missed_blocks_watcher) == 0 and len(missed_blocks_responder) != 0:
            Builder.populate_block_queue(self.watcher.responder.block_queue, missed_blocks_responder)
            self.watcher.responder.block_queue.join()

        elif len(missed_blocks_responder) == 0 and len(missed_blocks_watcher) != 0:
            Builder.populate_block_queue(self.watcher.block_queue, missed_blocks_watcher)
            self.watcher.block_queue.join()

        # Otherwise they need to be updated at the same time, block by block
        elif len(missed_blocks_responder) != 0 and len(missed_blocks_watcher) != 0:
            Builder.update_states(
                self.watcher.block_queue,
                self.watcher.responder.block_queue,
                missed_blocks_watcher,
                missed_blocks_responder,
            )

    # Activate ChainMonitor
    self.chain_monitor.activate()