def watcher(db_manager):
    """Build a fully wired Watcher (presumably a pytest fixture — the
    dependencies come from module-level test globals; verify against conftest).

    Wires BlockProcessor -> Carrier -> Responder -> Watcher, then starts a
    ChainMonitor feeding both the Watcher's and the Responder's block queues.
    """
    processor = BlockProcessor(bitcoind_connect_params)
    # The Carrier must be built after the BlockProcessor (same connect params).
    resp = Responder(db_manager, Carrier(bitcoind_connect_params), processor)
    built_watcher = Watcher(
        db_manager,
        processor,
        resp,
        signing_key.to_der(),
        config.get("MAX_APPOINTMENTS"),
        config.get("EXPIRY_DELTA"),
    )
    monitor = ChainMonitor(
        built_watcher.block_queue,
        built_watcher.responder.block_queue,
        processor,
        bitcoind_feed_params,
    )
    monitor.monitor_chain()
    return built_watcher
def watcher(db_manager, gatekeeper):
    """Build a Watcher with a Gatekeeper (presumably a pytest fixture).

    Same wiring as the gatekeeper-less variant, but the Responder and Watcher
    both take the gatekeeper, and the Watcher is configured with the module
    constant MAX_APPOINTMENTS plus the LOCATOR_CACHE_SIZE config value.
    """
    processor = BlockProcessor(bitcoind_connect_params)
    # A fresh Carrier per build; reuse would share delivery state across tests.
    resp = Responder(db_manager, gatekeeper, Carrier(bitcoind_connect_params), processor)
    built_watcher = Watcher(
        db_manager,
        gatekeeper,
        processor,
        resp,
        signing_key.to_der(),
        MAX_APPOINTMENTS,
        config.get("LOCATOR_CACHE_SIZE"),
    )
    monitor = ChainMonitor(
        built_watcher.block_queue,
        built_watcher.responder.block_queue,
        processor,
        bitcoind_feed_params,
    )
    monitor.monitor_chain()
    return built_watcher
def test_handle_breach_bad_response(db_manager, block_processor):
    """handle_breach must report delivered=False when the penalty tx is rejected."""
    # A brand-new Carrier is required: a reused one would remember the
    # transaction as previously sent and flag receipt.delivered as True.
    fresh_responder = Responder(db_manager, Carrier(bitcoind_connect_params), block_processor)
    uid = uuid4().hex
    dummy_tracker = create_dummy_tracker()

    # With the bitcoind mock a txid stands in for the raw tx for unit tests;
    # better tests are needed though.
    dummy_tracker.penalty_rawtx = dummy_tracker.penalty_txid

    # The block_hash passed to add_response does not matter much now.
    # It will in the future to deal with errors.
    receipt = fresh_responder.handle_breach(
        dummy_tracker.locator,
        uid,
        dummy_tracker.dispute_txid,
        dummy_tracker.penalty_txid,
        dummy_tracker.penalty_rawtx,
        dummy_tracker.appointment_end,
        block_hash=get_random_value_hex(32),
    )

    assert receipt.delivered is False
def watcher(run_bitcoind, db_manager, gatekeeper):
    """Build a Watcher bound to a running bitcoind (presumably a pytest
    fixture — `run_bitcoind` looks like a setup fixture dependency; verify).

    Unlike the older variants, this one seeds `last_known_block`, hands the
    ChainMonitor a list of queues, and activates the monitor after starting it.
    """
    processor = BlockProcessor(bitcoind_connect_params)
    # A fresh Carrier per build; reuse would share delivery state across tests.
    resp = Responder(db_manager, gatekeeper, Carrier(bitcoind_connect_params), processor)
    built_watcher = Watcher(
        db_manager,
        gatekeeper,
        processor,
        resp,
        signing_key,
        MAX_APPOINTMENTS,
        config.get("LOCATOR_CACHE_SIZE"),
    )
    built_watcher.last_known_block = processor.get_best_block_hash()

    monitor = ChainMonitor(
        [built_watcher.block_queue, built_watcher.responder.block_queue],
        processor,
        bitcoind_feed_params,
    )
    monitor.monitor_chain()
    monitor.activate()

    return built_watcher
def main(command_line_conf):
    """Boot the tower: load config, connect to bitcoind, restore any backed-up
    state, then start the ChainMonitor and the public API.

    :param command_line_conf: command-line overrides merged into the on-disk
        config by ConfigLoader (exact shape defined by ConfigLoader; not
        visible here).
    """
    global db_manager, chain_monitor

    # Make SIGINT/SIGTERM/SIGQUIT all go through the same shutdown handler.
    signal(SIGINT, handle_signals)
    signal(SIGTERM, handle_signals)
    signal(SIGQUIT, handle_signals)

    # Loads config and sets up the data folder and log file
    config_loader = ConfigLoader(DATA_DIR, CONF_FILE_NAME, DEFAULT_CONF, command_line_conf)
    config = config_loader.build_config()

    setup_data_folder(DATA_DIR)
    setup_logging(config.get("LOG_FILE"), LOG_PREFIX)

    logger.info("Starting TEOS")
    db_manager = DBManager(config.get("DB_PATH"))

    # Split the config by key prefix: BTC* for RPC connections, FEED* for the
    # block feed (e.g. zmq) parameters.
    bitcoind_connect_params = {k: v for k, v in config.items() if k.startswith("BTC")}
    bitcoind_feed_params = {k: v for k, v in config.items() if k.startswith("FEED")}

    if not can_connect_to_bitcoind(bitcoind_connect_params):
        # NOTE(review): logs "Shutting down" but falls through to a normal
        # return (exit code 0) — confirm that is intended.
        logger.error("Can't connect to bitcoind. Shutting down")

    elif not in_correct_network(bitcoind_connect_params, config.get("BTC_NETWORK")):
        logger.error("bitcoind is running on a different network, check conf.py and bitcoin.conf. Shutting down")

    else:
        try:
            secret_key_der = Cryptographer.load_key_file(config.get("TEOS_SECRET_KEY"))
            if not secret_key_der:
                raise IOError("TEOS private key can't be loaded")

            # Wire the core components: BlockProcessor -> Carrier -> Responder
            # -> Watcher.
            block_processor = BlockProcessor(bitcoind_connect_params)
            carrier = Carrier(bitcoind_connect_params)

            responder = Responder(db_manager, carrier, block_processor)
            watcher = Watcher(
                db_manager,
                block_processor,
                responder,
                secret_key_der,
                config.get("MAX_APPOINTMENTS"),
                config.get("EXPIRY_DELTA"),
            )

            # Create the chain monitor and start monitoring the chain
            chain_monitor = ChainMonitor(
                watcher.block_queue, watcher.responder.block_queue, block_processor, bitcoind_feed_params
            )

            watcher_appointments_data = db_manager.load_watcher_appointments()
            responder_trackers_data = db_manager.load_responder_trackers()

            if len(watcher_appointments_data) == 0 and len(responder_trackers_data) == 0:
                # No persisted state: start clean.
                logger.info("Fresh bootstrap")

                watcher.awake()
                watcher.responder.awake()

            else:
                logger.info("Bootstrapping from backed up data")

                # Update the Watcher backed up data if found.
                if len(watcher_appointments_data) != 0:
                    watcher.appointments, watcher.locator_uuid_map = Builder.build_appointments(
                        watcher_appointments_data
                    )

                # Update the Responder with backed up data if found.
                if len(responder_trackers_data) != 0:
                    watcher.responder.trackers, watcher.responder.tx_tracker_map = Builder.build_trackers(
                        responder_trackers_data
                    )

                # Awaking components so the states can be updated.
                watcher.awake()
                watcher.responder.awake()

                last_block_watcher = db_manager.load_last_block_hash_watcher()
                last_block_responder = db_manager.load_last_block_hash_responder()

                # Populate the block queues with data if they've missed some while offline. If the blocks of both match
                # we don't perform the search twice.

                # FIXME: 32-reorgs-offline dropped txs are not used at this point.
                last_common_ancestor_watcher, dropped_txs_watcher = block_processor.find_last_common_ancestor(
                    last_block_watcher
                )
                missed_blocks_watcher = block_processor.get_missed_blocks(last_common_ancestor_watcher)

                if last_block_watcher == last_block_responder:
                    # Both components stopped at the same block: reuse the
                    # Watcher's search results for the Responder.
                    dropped_txs_responder = dropped_txs_watcher
                    missed_blocks_responder = missed_blocks_watcher

                else:
                    last_common_ancestor_responder, dropped_txs_responder = block_processor.find_last_common_ancestor(
                        last_block_responder
                    )
                    missed_blocks_responder = block_processor.get_missed_blocks(last_common_ancestor_responder)

                # If only one of the instances needs to be updated, it can be done separately.
                if len(missed_blocks_watcher) == 0 and len(missed_blocks_responder) != 0:
                    Builder.populate_block_queue(watcher.responder.block_queue, missed_blocks_responder)
                    watcher.responder.block_queue.join()

                elif len(missed_blocks_responder) == 0 and len(missed_blocks_watcher) != 0:
                    Builder.populate_block_queue(watcher.block_queue, missed_blocks_watcher)
                    watcher.block_queue.join()

                # Otherwise they need to be updated at the same time, block by block
                elif len(missed_blocks_responder) != 0 and len(missed_blocks_watcher) != 0:
                    Builder.update_states(watcher, missed_blocks_watcher, missed_blocks_responder)

            # Fire the API and the ChainMonitor
            # FIXME: 92-block-data-during-bootstrap-db
            chain_monitor.monitor_chain()
            API(Inspector(block_processor, config.get("MIN_TO_SELF_DELAY")), watcher).start()

        except Exception as e:
            # Top-level boundary: log and exit non-zero on any startup failure.
            logger.error("An error occurred: {}. Shutting down".format(e))
            exit(1)
def __init__(self, config, sk, logger, logging_port, stop_log_event, logging_process):
    """Construct the tower daemon: verify bitcoind is reachable, then wire
    Gatekeeper, Responder, Watcher, ChainMonitor, internal API and RPC process
    (nothing is started here).

    :param config: dict-like config (accessed via .get/.items).
    :param sk: tower secret key; exposes .public_key (exact type not visible
        here — presumably the project's crypto key object; verify).
    :param logger: logger instance used by this component.
    :param logging_port: port forwarded to the RPC subprocess for logging.
    :param stop_log_event: event used to stop the logging machinery (stored only).
    :param logging_process: handle of the logging process (stored only).
    :raises RuntimeError: if bitcoind is unreachable or on the wrong network.
    """
    self.config = config
    self.logger = logger
    self.logging_port = logging_port
    self.stop_log_event = stop_log_event
    self.logging_process = logging_process

    # event triggered when a ``stop`` command is issued
    # Using multiprocessing.Event seems to cause a deadlock if event.set() is called in a signal handler that
    # interrupted event.wait(). This does not happen with threading.Event.
    # See https://bugs.python.org/issue41606
    self.stop_command_event = threading.Event()
    # event triggered when the public API is halted, hence teosd is ready to stop
    self.stop_event = multiprocessing.Event()

    # Split the config by key prefix: BTC_RPC* for RPC, BTC_FEED* for the feed.
    bitcoind_connect_params = {k: v for k, v in config.items() if k.startswith("BTC_RPC")}
    bitcoind_feed_params = {k: v for k, v in config.items() if k.startswith("BTC_FEED")}
    # Set only once connectivity and network are confirmed; shared with
    # BlockProcessor and Carrier so they can block while bitcoind is down.
    bitcoind_reachable = threading.Event()

    if not can_connect_to_bitcoind(bitcoind_connect_params):
        raise RuntimeError("Cannot connect to bitcoind")
    elif not in_correct_network(bitcoind_connect_params, config.get("BTC_NETWORK")):
        raise RuntimeError("bitcoind is running on a different network, check teos.conf and bitcoin.conf")
    else:
        bitcoind_reachable.set()

    self.logger.info("tower_id = {}".format(Cryptographer.get_compressed_pk(sk.public_key)))
    self.block_processor = BlockProcessor(bitcoind_connect_params, bitcoind_reachable)
    carrier = Carrier(bitcoind_connect_params, bitcoind_reachable)

    gatekeeper = Gatekeeper(
        UsersDBM(self.config.get("USERS_DB_PATH")),
        self.block_processor,
        self.config.get("SUBSCRIPTION_SLOTS"),
        self.config.get("SUBSCRIPTION_DURATION"),
        self.config.get("EXPIRY_DELTA"),
    )
    self.db_manager = AppointmentsDBM(self.config.get("APPOINTMENTS_DB_PATH"))
    responder = Responder(self.db_manager, gatekeeper, carrier, self.block_processor)
    self.watcher = Watcher(
        self.db_manager,
        gatekeeper,
        self.block_processor,
        responder,
        sk,
        self.config.get("MAX_APPOINTMENTS"),
        self.config.get("LOCATOR_CACHE_SIZE"),
    )

    # Thread handles for the Watcher/Responder workers; populated on start.
    self.watcher_thread = None
    self.responder_thread = None

    # Create the chain monitor
    self.chain_monitor = ChainMonitor(
        [self.watcher.block_queue, responder.block_queue, gatekeeper.block_queue],
        self.block_processor,
        bitcoind_feed_params,
    )

    # Set up the internal API
    self.internal_api_endpoint = f'{self.config.get("INTERNAL_API_HOST")}:{self.config.get("INTERNAL_API_PORT")}'
    self.internal_api = InternalAPI(
        self.watcher, self.internal_api_endpoint, self.config.get("INTERNAL_API_WORKERS"), self.stop_command_event
    )

    # Create the rpc, without starting it
    self.rpc_process = multiprocessing.Process(
        target=rpc.serve,
        args=(
            self.config.get("RPC_BIND"),
            self.config.get("RPC_PORT"),
            self.internal_api_endpoint,
            self.logging_port,
            self.stop_event,
        ),
        daemon=True,
    )

    # These variables will contain the handle of the process running the API, when the service is started.
    # It will be an instance of either Popen or Process, depending on the WSGI config setting.
    self.api_proc = None
def carrier(run_bitcoind):
    """Build a Carrier whose bitcoind-reachable flag is already set
    (presumably a pytest fixture depending on the `run_bitcoind` setup)."""
    reachable = Event()
    reachable.set()
    return Carrier(bitcoind_connect_params, reachable)
def carrier():
    """Build a Carrier from the module-level bitcoind connection parameters
    (presumably a pytest fixture for the pre-reachable-flag Carrier API)."""
    return Carrier(bitcoind_connect_params)