def test_request_appointment_responder(new_appt_data):
    # Mirror the watcher test, but this time broadcast the dispute transaction so
    # the appointment ends up handed over to the responder.
    locator = new_appt_data["appointment"]["locator"]
    bitcoin_cli(bitcoind_connect_params).sendrawtransaction(locator_dispute_tx_map[locator])

    response = add_appointment(new_appt_data)
    assert response.status_code == 200

    # Mining a block triggers the watcher
    generate_block()

    response = requests.get(url=TEOS_API + "/get_appointment?locator=" + locator)
    assert response.status_code == 200

    received_appointments = json.loads(response.content)

    # Pop the status first (mutates the entries), then collect the locators
    statuses = [entry.pop("status") for entry in received_appointments]
    locators = [entry["locator"] for entry in received_appointments]

    assert locator in locators and len(received_appointments) == 1
    assert all([status == "dispute_responded" for status in statuses]) and len(statuses) == 1
def test_bitcoin_cli():
    # A working connection to bitcoind should simply not raise. Calling the RPC
    # directly (instead of try/assert True/except/assert False) lets pytest fail
    # the test with the full traceback when the connection is broken, which is
    # strictly more informative and less code.
    bitcoin_cli(bitcoind_connect_params).help()
def get_transaction(self, txid):
    """
    Queries transaction data to ``bitcoind`` given a transaction id.

    Args:
        txid (:obj:`str`): a 32-byte hex-formatted string representing the transaction id.

    Returns:
        :obj:`dict` or :obj:`None`: A dictionary with the transaction data if the transaction can be found on the
        chain. Returns ``None`` otherwise.
    """

    try:
        return bitcoin_cli(self.btc_connect_params).getrawtransaction(txid, 1)

    except JSONRPCException as e:
        # While quite unlikely, a transaction that was already in the blockchain could have been reorged out
        # while we were querying bitcoind for the confirmation count. In such a case the tracker is restarted.
        if e.error.get("code") == RPC_INVALID_ADDRESS_OR_KEY:
            logger.info("Transaction not found in mempool nor blockchain", txid=txid)
        else:
            # Anything else (unlikely but possible) is logged so we can treat it in future releases
            logger.error("JSONRPCException", method="Carrier.get_transaction", error=e.error)

        return None
def get_best_block_hash(self, blocking=False):
    """
    Gets the hash of the current best chain tip.

    Args:
        blocking (:obj:`bool`): whether the call should be blocking (wait for bitcoind to be available) or not.

    Returns:
        :obj:`str` or :obj:`None`: The hash of the block if it can be found.

        Returns :obj:`None` otherwise (not even sure this can actually happen).

    Raises:
        :obj:`ConnectionRefusedError`: if bitcoind cannot be reached.
    """

    if blocking:
        return self._blocking_query(lambda: self.get_best_block_hash())

    try:
        return bitcoin_cli(self.btc_connect_params).getbestblockhash()
    except JSONRPCException as e:
        self.logger.error("Couldn't get block hash", error=e.error)
        return None
def get_block(self, block_hash, blocking=False):
    """
    Gets a block given a block hash by querying ``bitcoind``.

    Args:
        block_hash (:obj:`str`): the block hash to be queried.
        blocking (:obj:`bool`): whether the call should be blocking (wait for bitcoind to be available) or not.

    Returns:
        :obj:`dict` or :obj:`None`: A dictionary containing the requested block data if the block is found.

        Returns :obj:`None` otherwise.

    Raises:
        :obj:`ConnectionRefusedError`: if bitcoind cannot be reached.
    """

    if blocking:
        return self._blocking_query(lambda: self.get_block(block_hash))

    try:
        return bitcoin_cli(self.btc_connect_params).getblock(block_hash)
    except JSONRPCException as e:
        self.logger.error("Couldn't get block from bitcoind", error=e.error)
        return None
def decode_raw_transaction(self, raw_tx, blocking=False):
    """
    Deserializes a given raw transaction (hex encoded) and builds a dictionary representing it with all the
    associated metadata given by ``bitcoind`` (e.g. confirmation count).

    Args:
        raw_tx (:obj:`str`): the hex representation of the transaction.
        blocking (:obj:`bool`): whether the call should be blocking (wait for bitcoind to be available) or not.

    Returns:
        :obj:`dict`: The decoding of the given ``raw_tx`` if the transaction is well formatted.

    Raises:
        :obj:`InvalidTransactionFormat`: If the provided ``raw_tx`` has invalid format.
        :obj:`ConnectionRefusedError`: if bitcoind cannot be reached.
    """

    if blocking:
        return self._blocking_query(lambda: self.decode_raw_transaction(raw_tx))

    try:
        tx = bitcoin_cli(self.btc_connect_params).decoderawtransaction(raw_tx)

    except JSONRPCException as e:
        msg = "Cannot build transaction from decoded data"
        self.logger.error(msg, error=e.error)
        raise InvalidTransactionFormat(msg)

    return tx
def create_dummy_tracker_data(random_txid=False, penalty_rawtx=None):
    # The transaction data below corresponds to a valid transaction. Some tests want genuinely valid data,
    # while others need several distinct penalty_txids instead.
    dispute_txid = "0437cd7f8525ceed2324359c2d0ba26006d92d856a9c20fa0241106ee5a597c9"

    if penalty_rawtx is not None:
        penalty_txid = create_tx_from_hex(penalty_rawtx).tx_id.hex()
    else:
        penalty_txid = "f4184fc596403b9d638783cf57adfe4c75c605f6356fbc91338530e9831e9e16"
        penalty_rawtx = (
            "0100000001c997a5e56e104102fa209c6a852dd90660a20b2d9c352423edce25857fcd3704000000004847304402"
            "204e45e16932b8af514961a1d3a1a25fdf3f4f7732e9d624c6c61548ab5fb8cd410220181522ec8eca07de4860a4"
            "acdd12909d831cc56cbbac4622082221a8768d1d0901ffffffff0200ca9a3b00000000434104ae1a62fe09c5f51b"
            "13905f07f06b99a2f7159b2225f374cd378d71302fa28414e7aab37397f554a7df5f142c21c1b7303b8a0626f1ba"
            "ded5c72a704f7e6cd84cac00286bee0000000043410411db93e1dcdb8a016b49840f8c53bc1eb68a382e97b1482e"
            "cad7b148a6909a5cb2e0eaddfb84ccf9744464f82e160bfa9b8b64f9d4c03f999b8643f656b412a3ac00000000"
        )

    if random_txid is True:
        penalty_txid = get_random_value_hex(32)

    appointment_end = bitcoin_cli(bitcoind_connect_params).getblockcount() + 2
    locator = dispute_txid[:LOCATOR_LEN_HEX]

    return locator, dispute_txid, penalty_txid, penalty_rawtx, appointment_end
def test_update_states_responder_misses_more(run_bitcoind, db_manager, gatekeeper, carrier, block_processor):
    watcher = Watcher(
        db_manager=db_manager,
        gatekeeper=gatekeeper,
        block_processor=block_processor,
        responder=Responder(db_manager, gatekeeper, carrier, block_processor),
        sk_der=generate_keypair()[0].to_der(),
        max_appointments=config.get("MAX_APPOINTMENTS"),
        blocks_in_cache=config.get("LOCATOR_CACHE_SIZE"),
    )

    # Mine five blocks and record each resulting tip
    block_hashes = []
    for _ in range(5):
        generate_block()
        block_hashes.append(bitcoin_cli(bitcoind_connect_params).getbestblockhash())

    # Updating the states should bring both components to the same last known block.
    watcher.awake()
    watcher.responder.awake()
    Builder.update_states(watcher, block_hashes, block_hashes[1:])

    assert db_manager.load_last_block_hash_watcher() == block_hashes[-1]
    assert watcher.responder.last_known_block == block_hashes[-1]
def decode_raw_transaction(self, raw_tx):
    """
    Deserializes a given raw transaction (hex encoded) and builds a dictionary representing it with all the
    associated metadata given by ``bitcoind`` (e.g. confirmation count).

    Args:
        raw_tx (:obj:`str`): the hex representation of the transaction.

    Returns:
        :obj:`dict`: The decoding of the given ``raw_tx`` if the transaction is well formatted.

    Raises:
        :obj:`InvalidTransactionFormat`: If the provided ``raw_tx`` has invalid format.
    """

    try:
        return bitcoin_cli(self.btc_connect_params).decoderawtransaction(raw_tx)
    except JSONRPCException as e:
        msg = "Cannot build transaction from decoded data"
        logger.error(msg, error=e.error)
        raise InvalidTransactionFormat(msg)
def test_do_watch(watcher, temp_db_manager):
    # Work on a fresh db: wipe all previous data and add 5 appointments
    watcher.db_manager = temp_db_manager

    appointments, locator_uuid_map, dispute_txs = create_appointments(APPOINTMENTS)

    # Load the data into the Watcher and into the db
    watcher.locator_uuid_map = locator_uuid_map
    watcher.appointments = {}
    watcher.gatekeeper.registered_users = {}

    # Simulate a register (times out in 10 blocks)
    user_id = get_random_value_hex(16)
    expiry = watcher.block_processor.get_block_count() + 10
    watcher.gatekeeper.registered_users[user_id] = UserInfo(available_slots=100, subscription_expiry=expiry)

    for uuid, appointment in appointments.items():
        watcher.appointments[uuid] = {"locator": appointment.locator, "user_id": user_id}
        # Assume each appointment only takes one slot
        watcher.gatekeeper.registered_users[user_id].appointments[uuid] = 1
        watcher.db_manager.store_watcher_appointment(uuid, appointment.to_dict())
        watcher.db_manager.create_append_locator_map(appointment.locator, uuid)

    watch_thread = Thread(target=watcher.do_watch, daemon=True)
    watch_thread.start()

    # Broadcast the first two disputes
    for raw_dispute in dispute_txs[:2]:
        bitcoin_cli(bitcoind_connect_params).sendrawtransaction(raw_dispute)

    # One block later the appointment count should have dropped by 2 (two breaches)
    generate_blocks_w_delay(1)
    assert len(watcher.appointments) == APPOINTMENTS - 2

    # The rest time out once the subscription expires (9 more blocks) + EXPIRY_DELTA.
    # Mine one extra block to be safe.
    generate_blocks_w_delay(10 + config.get("EXPIRY_DELTA"))
    assert len(watcher.appointments) == 0

    # The Gatekeeper should only keep the two appointments that reached the Responder
    assert len(watcher.gatekeeper.registered_users[user_id].appointments) == 2
def test_locator_cache_init_not_enough_blocks(run_bitcoind, block_processor):
    locator_cache = LocatorCache(config.get("LOCATOR_CACHE_SIZE"))

    # Make sure the chain holds at least 3 blocks
    missing = 3 - block_processor.get_block_count()
    if missing > 0:
        generate_blocks_w_delay(missing)

    # Pretend the chain is only 3 blocks long by initialising from the block at height 2
    third_block_hash = bitcoin_cli(bitcoind_connect_params).getblockhash(2)
    locator_cache.init(third_block_hash, block_processor)

    assert len(locator_cache.blocks) == 3
    for block_hash in locator_cache.blocks:
        assert block_processor.get_block(block_hash)
def test_do_watch(watcher, temp_db_manager):
    # Work on a fresh db: wipe all previous data and add 5 appointments
    watcher.db_manager = temp_db_manager

    appointments, locator_uuid_map, dispute_txs = create_appointments(APPOINTMENTS)

    # Load the data into the Watcher and into the db
    watcher.locator_uuid_map = locator_uuid_map
    watcher.appointments = {}

    for uuid, appointment in appointments.items():
        watcher.appointments[uuid] = {"locator": appointment.locator, "end_time": appointment.end_time}
        watcher.db_manager.store_watcher_appointment(uuid, appointment.to_json())
        watcher.db_manager.create_append_locator_map(appointment.locator, uuid)

    watch_thread = Thread(target=watcher.do_watch, daemon=True)
    watch_thread.start()

    # Broadcast the first two disputes
    for raw_dispute in dispute_txs[:2]:
        bitcoin_cli(bitcoind_connect_params).sendrawtransaction(raw_dispute)

    # After enough blocks the appointment count should have dropped by two
    generate_blocks(START_TIME_OFFSET + END_TIME_OFFSET)
    assert len(watcher.appointments) == APPOINTMENTS - 2

    # The remaining appointments time out after the end (2) + EXPIRY_DELTA.
    # Wait for an additional block to be safe.
    generate_blocks(config.get("EXPIRY_DELTA") + START_TIME_OFFSET + END_TIME_OFFSET)
    assert len(watcher.appointments) == 0
def test_get_all_appointments_responder():
    # Trigger the dispute of every appointment we hold
    known_locators = {appointment["locator"] for appointment in appointments}
    for locator, dispute_tx in locator_dispute_tx_map.items():
        if locator in known_locators:
            bitcoin_cli(bitcoind_connect_params).sendrawtransaction(dispute_tx)

    # Confirm the dispute transactions
    generate_blocks(6)

    # Fetch all appointments from the API
    response = requests.get(url=TEOS_API + "/get_all_appointments")
    received_appointments = json.loads(response.content)

    # Every locator must now sit with the responder and none with the watcher
    responder_trackers = [tracker["locator"] for tracker in received_appointments["responder_trackers"].values()]
    local_locators = [appointment["locator"] for appointment in appointments]

    assert set(responder_trackers) == set(local_locators)
    assert len(received_appointments["watcher_appointments"]) == 0
def test_fix_cache(block_processor):
    # Tests how a reorg makes the cache rebuild itself.
    # Start with a full cache: mining ``cache_size`` blocks guarantees it fills up.
    generate_blocks_w_delay(config.get("LOCATOR_CACHE_SIZE"))

    locator_cache = LocatorCache(config.get("LOCATOR_CACHE_SIZE"))
    locator_cache.init(block_processor.get_best_block_hash(), block_processor)
    assert len(locator_cache.blocks) == locator_cache.cache_size

    # Fake a reorg shallower than ``cache_size``: rewind two blocks into the past.
    tip = block_processor.get_best_block_hash()
    tip_locators = locator_cache.blocks[tip]
    parent = block_processor.get_block(tip).get("previousblockhash")
    parent_locators = locator_cache.blocks[parent]
    fake_tip = block_processor.get_block(parent).get("previousblockhash")
    locator_cache.fix(fake_tip, block_processor)

    # The two rewound blocks must be gone from the cache, along with all their locators
    assert tip not in locator_cache.blocks and parent not in locator_cache.blocks
    for locator in parent_locators + tip_locators:
        assert locator not in locator_cache.cache

    # The fake tip becomes the new tip and two extra blocks fill the bottom
    assert fake_tip in locator_cache.blocks and list(locator_cache.blocks.keys())[-1] == fake_tip
    assert len(locator_cache.blocks) == locator_cache.cache_size

    # Now a full-cache reorg: mine more blocks than the cache can fit and trigger a fix.
    # Keep a copy of the current cache for comparison.
    old_cache_blocks = deepcopy(locator_cache.blocks)
    generate_blocks_w_delay(config.get("LOCATOR_CACHE_SIZE") * 2)
    locator_cache.fix(block_processor.get_best_block_hash(), block_processor)

    # Nothing from the old cache survives in the new one
    for block_hash, locators in old_cache_blocks.items():
        assert block_hash not in locator_cache.blocks
        for locator in locators:
            assert locator not in locator_cache.cache

    # The new cache holds exactly the last ``cache_size`` blocks
    block_count = block_processor.get_block_count()
    for height in range(block_count - locator_cache.cache_size, block_count):
        block_hash = bitcoin_cli(bitcoind_connect_params).getblockhash(height)
        assert block_hash in locator_cache.blocks
        for locator in locator_cache.blocks[block_hash]:
            assert locator in locator_cache.cache
def get_block_count(self):
    """
    Gets the block count of the best chain.

    Returns:
        :obj:`int` or :obj:`None`: The count of the best chain if it can be computed.

        Returns ``None`` otherwise (not even sure this can actually happen).
    """

    try:
        return bitcoin_cli(self.btc_connect_params).getblockcount()
    except JSONRPCException as e:
        logger.error("Couldn't get block count", error=e.error)
        return None
def get_best_block_hash(self):
    """
    Gets the hash of the current best chain tip.

    Returns:
        :obj:`str` or :obj:`None`: The hash of the block if it can be found.

        Returns ``None`` otherwise (not even sure this can actually happen).
    """

    try:
        return bitcoin_cli(self.btc_connect_params).getbestblockhash()
    except JSONRPCException as e:
        logger.error("Couldn't get block hash", error=e.error)
        return None
def generate_dummy_appointment_data(real_height=True, start_time_offset=5, end_time_offset=30):
    # Anchor the appointment window either to the real chain height or to a fixed dummy height
    current_height = bitcoin_cli(bitcoind_connect_params).getblockcount() if real_height else 10

    dispute_tx = create_dummy_transaction()
    dispute_txid = dispute_tx.tx_id.hex()
    penalty_tx = create_dummy_transaction(dispute_txid)

    dummy_appointment_data = {
        "tx": penalty_tx.hex(),
        "tx_id": dispute_txid,
        "start_time": current_height + start_time_offset,
        "end_time": current_height + end_time_offset,
        "to_self_delay": 20,
    }

    # dummy keys for this test
    client_sk, client_pk = generate_keypair()
    client_pk_hex = client_pk.format().hex()

    locator = compute_locator(dispute_txid)
    blob = Blob(dummy_appointment_data.get("tx"))
    encrypted_blob = Cryptographer.encrypt(blob, dummy_appointment_data.get("tx_id"))

    appointment_data = {
        "locator": locator,
        "start_time": dummy_appointment_data.get("start_time"),
        "end_time": dummy_appointment_data.get("end_time"),
        "to_self_delay": dummy_appointment_data.get("to_self_delay"),
        "encrypted_blob": encrypted_blob,
    }

    signature = Cryptographer.sign(Appointment.from_dict(appointment_data).serialize(), client_sk)

    data = {"appointment": appointment_data, "signature": signature, "public_key": client_pk_hex}
    return data, dispute_tx.hex()
def get_block(self, block_hash):
    """
    Gets a block given a block hash by querying ``bitcoind``.

    Args:
        block_hash (:obj:`str`): the block hash to be queried.

    Returns:
        :obj:`dict` or :obj:`None`: A dictionary containing the requested block data if the block is found.

        Returns ``None`` otherwise.
    """

    try:
        return bitcoin_cli(self.btc_connect_params).getblock(block_hash)
    except JSONRPCException as e:
        logger.error("Couldn't get block from bitcoind", error=e.error)
        return None
def test_update_states_watcher_misses_more(db_manager, gatekeeper, carrier, block_processor):
    # Same as before, but this time the data lives in the Responder
    watcher = Watcher(
        db_manager=db_manager,
        gatekeeper=gatekeeper,
        block_processor=block_processor,
        responder=Responder(db_manager, gatekeeper, carrier, block_processor),
        sk_der=generate_keypair()[0].to_der(),
        max_appointments=config.get("MAX_APPOINTMENTS"),
        blocks_in_cache=config.get("LOCATOR_CACHE_SIZE"),
    )

    # Mine five blocks and record each resulting tip
    block_hashes = []
    for _ in range(5):
        generate_block()
        block_hashes.append(bitcoin_cli(bitcoind_connect_params).getbestblockhash())

    watcher.awake()
    watcher.responder.awake()
    Builder.update_states(watcher, block_hashes[1:], block_hashes)

    assert db_manager.load_last_block_hash_watcher() == block_hashes[-1]
    assert db_manager.load_last_block_hash_responder() == block_hashes[-1]
def decode_raw_transaction(self, raw_tx):
    """
    Deserializes a given raw transaction (hex encoded) and builds a dictionary representing it with all the
    associated metadata given by ``bitcoind`` (e.g. confirmation count).

    Args:
        raw_tx (:obj:`str`): The hex representation of the transaction.

    Returns:
        :obj:`dict` or :obj:`None`: The decoding of the given ``raw_tx`` if the transaction is well formatted.

        Returns ``None`` otherwise.
    """

    try:
        return bitcoin_cli(self.btc_connect_params).decoderawtransaction(raw_tx)
    except JSONRPCException as e:
        logger.error("Can't build transaction from decoded data", error=e.error)
        return None
def test_do_watch(temp_db_manager, gatekeeper, carrier, block_processor):
    # Use a fresh responder to keep the test simple
    responder = Responder(temp_db_manager, gatekeeper, carrier, block_processor)
    chain_monitor = ChainMonitor(Queue(), responder.block_queue, block_processor, bitcoind_feed_params)
    chain_monitor.monitor_chain()

    trackers = [create_dummy_tracker(penalty_rawtx=create_dummy_transaction().hex()) for _ in range(20)]
    subscription_expiry = responder.block_processor.get_block_count() + 110

    # Register every tracker with the responder and persist it
    for tracker in trackers:
        tracker_uuid = uuid4().hex

        # Simulate user registration so trackers can properly expire
        responder.gatekeeper.registered_users[tracker.user_id] = UserInfo(
            available_slots=10, subscription_expiry=subscription_expiry
        )

        # Add data to the Responder
        responder.trackers[tracker_uuid] = tracker.get_summary()
        responder.tx_tracker_map[tracker.penalty_txid] = [tracker_uuid]
        responder.missed_confirmations[tracker.penalty_txid] = 0
        responder.unconfirmed_txs.append(tracker.penalty_txid)
        # Assuming the appointment only took a single slot
        responder.gatekeeper.registered_users[tracker.user_id].appointments[tracker_uuid] = 1

        # Persist the info in the db as well
        responder.db_manager.create_triggered_appointment_flag(tracker_uuid)
        responder.db_manager.store_responder_tracker(tracker_uuid, tracker.to_dict())

    # Start watching
    Thread(target=responder.do_watch, daemon=True).start()

    # Broadcast the first five penalties
    sent_txids = []
    for tracker in trackers[:5]:
        bitcoin_cli(bitcoind_connect_params).sendrawtransaction(tracker.penalty_rawtx)
        sent_txids.append(tracker.penalty_txid)

    # Mine a block
    generate_block_w_delay()

    # The broadcast penalties must have left the unconfirmed list
    assert not set(sent_txids).issubset(responder.unconfirmed_txs)

    # CONFIRMATIONS_BEFORE_RETRY+1 blocks later the responder rebroadcasts the 15 remaining unconfirmed txs
    generate_blocks_w_delay(CONFIRMATIONS_BEFORE_RETRY + 1)
    assert len(responder.unconfirmed_txs) == 0
    assert len(responder.trackers) == 20

    # 100 - CONFIRMATIONS_BEFORE_RETRY - 2 more blocks should complete the first 5 trackers
    generate_blocks_w_delay(100 - CONFIRMATIONS_BEFORE_RETRY - 2)
    assert len(responder.unconfirmed_txs) == 0
    assert len(responder.trackers) == 15

    # They must be gone from the Gatekeeper as well
    for tracker in trackers[:5]:
        assert len(responder.gatekeeper.registered_users[tracker.user_id].appointments) == 0

    # CONFIRMATIONS_BEFORE_RETRY more blocks should complete the rest
    generate_blocks_w_delay(CONFIRMATIONS_BEFORE_RETRY)
    assert len(responder.unconfirmed_txs) == 0
    assert len(responder.trackers) == 0

    # And those must be gone from the Gatekeeper too
    for tracker in trackers[5:]:
        assert len(responder.gatekeeper.registered_users[tracker.user_id].appointments) == 0
def test_get_completed_trackers(db_manager, carrier, block_processor):
    initial_height = bitcoin_cli(bitcoind_connect_params).getblockcount()

    responder = Responder(db_manager, carrier, block_processor)
    chain_monitor = ChainMonitor(Queue(), responder.block_queue, block_processor, bitcoind_feed_params)
    chain_monitor.monitor_chain()

    # A complete tracker is one that reached the appointment end with enough confs (> MIN_CONFIRMATIONS).
    # Build three kinds: end reached + enough confs, end reached + not enough confs, and end not reached.
    trackers_end_conf = {
        uuid4().hex: create_dummy_tracker(penalty_rawtx=create_dummy_transaction().hex()) for _ in range(10)
    }

    trackers_end_no_conf = {}
    for _ in range(10):
        tracker = create_dummy_tracker(penalty_rawtx=create_dummy_transaction().hex())
        responder.unconfirmed_txs.append(tracker.penalty_txid)
        trackers_end_no_conf[uuid4().hex] = tracker

    trackers_no_end = {}
    for _ in range(10):
        tracker = create_dummy_tracker(penalty_rawtx=create_dummy_transaction().hex())
        tracker.appointment_end += 10
        trackers_no_end[uuid4().hex] = tracker

    all_trackers = {**trackers_end_conf, **trackers_end_no_conf, **trackers_no_end}

    # Register everything with the responder
    for uuid, tracker in all_trackers.items():
        responder.trackers[uuid] = {
            "locator": tracker.locator,
            "penalty_txid": tracker.penalty_txid,
            "appointment_end": tracker.appointment_end,
        }

    for tracker in all_trackers.values():
        bitcoin_cli(bitcoind_connect_params).sendrawtransaction(tracker.penalty_rawtx)

    # The dummy appointments have an end_appointment time of current + 2, but trackers need at least 6 confs by default
    generate_blocks(6)

    # Check: only the end-reached + confirmed trackers are complete
    completed_trackers = responder.get_completed_trackers(initial_height + 6)
    completed_trackers_ids = list(completed_trackers.keys())
    ended_trackers_keys = list(trackers_end_conf.keys())
    assert set(completed_trackers_ids) == set(ended_trackers_keys)

    # Six more blocks should also confirm trackers_no_end
    generate_blocks(6)
    completed_trackers = responder.get_completed_trackers(initial_height + 12)
    completed_trackers_ids = list(completed_trackers.keys())
    ended_trackers_keys.extend(list(trackers_no_end.keys()))
    assert set(completed_trackers_ids) == set(ended_trackers_keys)
def test_do_watch(temp_db_manager, carrier, block_processor):
    # Use a fresh responder to keep the test simple
    responder = Responder(temp_db_manager, carrier, block_processor)
    chain_monitor = ChainMonitor(Queue(), responder.block_queue, block_processor, bitcoind_feed_params)
    chain_monitor.monitor_chain()

    trackers = [create_dummy_tracker(penalty_rawtx=create_dummy_transaction().hex()) for _ in range(20)]

    # Register every tracker with the responder and persist it
    for tracker in trackers:
        tracker_uuid = uuid4().hex
        responder.trackers[tracker_uuid] = {
            "locator": tracker.locator,
            "penalty_txid": tracker.penalty_txid,
            "appointment_end": tracker.appointment_end,
        }
        responder.tx_tracker_map[tracker.penalty_txid] = [tracker_uuid]
        responder.missed_confirmations[tracker.penalty_txid] = 0
        responder.unconfirmed_txs.append(tracker.penalty_txid)

        # Persist the info in the db as well
        responder.db_manager.create_triggered_appointment_flag(tracker_uuid)
        responder.db_manager.store_responder_tracker(tracker_uuid, tracker.to_json())

    # Start watching
    Thread(target=responder.do_watch, daemon=True).start()

    # Broadcast the first five penalties
    sent_txids = []
    for tracker in trackers[:5]:
        bitcoin_cli(bitcoind_connect_params).sendrawtransaction(tracker.penalty_rawtx)
        sent_txids.append(tracker.penalty_txid)

    # Mine a block
    generate_block()

    # The broadcast penalties must have left the unconfirmed list
    assert not set(sent_txids).issubset(responder.unconfirmed_txs)

    # TODO: test that reorgs can be detected once data persistence is merged (new version of the simulator)

    # Five more blocks should complete those five trackers
    generate_blocks(5)
    assert not set(sent_txids).issubset(responder.tx_tracker_map)

    # Now broadcast the remaining penalties
    sent_txids = []
    for tracker in trackers[5:]:
        bitcoin_cli(bitcoind_connect_params).sendrawtransaction(tracker.penalty_rawtx)
        sent_txids.append(tracker.penalty_txid)

    # Mine enough blocks to complete them all
    generate_blocks(6)
    assert len(responder.tx_tracker_map) == 0
def send_transaction(self, rawtx, txid):
    """
    Tries to send a given raw transaction to the Bitcoin network using ``bitcoind``.

    Args:
        rawtx (:obj:`str`): a (potentially) signed raw transaction ready to be broadcast.
        txid (:obj:`str`): the transaction id corresponding to ``rawtx``.

    Returns:
        :obj:`Receipt`: A receipt reporting whether the transaction was successfully delivered or not and why.
    """

    if txid in self.issued_receipts:
        logger.info("Transaction already sent", txid=txid)
        return self.issued_receipts[txid]

    try:
        logger.info("Pushing transaction to the network", txid=txid, rawtx=rawtx)
        bitcoin_cli(self.btc_connect_params).sendrawtransaction(rawtx)
        receipt = Receipt(delivered=True)

    except JSONRPCException as e:
        errno = e.error.get("code")
        # Since we're pushing a raw transaction to the network we can face several rejections
        if errno in (RPC_VERIFY_REJECTED, RPC_VERIFY_ERROR):
            # DISCUSS: 37-transaction-rejection
            # Both rejection codes get the same handling; the matched code is kept as the receipt reason
            receipt = Receipt(delivered=False, reason=errno)
            logger.error("Transaction couldn't be broadcast", error=e.error)

        elif errno == RPC_VERIFY_ALREADY_IN_CHAIN:
            logger.info("Transaction is already in the blockchain. Getting confirmation count", txid=txid)

            # If the transaction is already in the chain, we get the number of confirmations and watch the tracker
            # until the end of the appointment
            tx_info = self.get_transaction(txid)

            if tx_info is not None:
                confirmations = int(tx_info.get("confirmations"))
                receipt = Receipt(delivered=True, confirmations=confirmations, reason=RPC_VERIFY_ALREADY_IN_CHAIN)
            else:
                # There's a really unlikely edge case where a transaction can be reorged between receiving the
                # notification and querying the data. Notice that this implies the tx being also kicked off the
                # mempool, which again is really unlikely.
                receipt = Receipt(delivered=False, reason=RPC_TX_REORGED_AFTER_BROADCAST)

        elif errno == RPC_DESERIALIZATION_ERROR:
            # Adding this here just for completeness. We should never end up here. The Carrier only sends txs
            # handed by the Responder, who receives them from the Watcher, who checks that the tx can be properly
            # deserialized.
            # Bug fix: the original called "...".format(txid) on a string with no placeholder, which is a no-op
            # and silently dropped the txid from the log. Pass it as a structured field like the other log calls.
            logger.info("Transaction cannot be deserialized", txid=txid)
            receipt = Receipt(delivered=False, reason=RPC_DESERIALIZATION_ERROR)

        else:
            # If something else happens (unlikely but possible) log it so we can treat it in future releases
            logger.error("JSONRPCException", method="Carrier.send_transaction", error=e.error)
            receipt = Receipt(delivered=False, reason=UNKNOWN_JSON_RPC_EXCEPTION)

    self.issued_receipts[txid] = receipt

    return receipt
def test_get_completed_trackers(db_manager, gatekeeper, carrier, block_processor):
    responder = Responder(db_manager, gatekeeper, carrier, block_processor)
    chain_monitor = ChainMonitor(Queue(), responder.block_queue, block_processor, bitcoind_feed_params)
    chain_monitor.monitor_chain()

    # A tracker is complete once its penalty transaction has been irrevocably resolved (i.e. reached 100
    # confirmations). Build 3 kinds of txs: irrevocably resolved, confirmed but not resolved, and unconfirmed.
    trackers_ir_resolved = {
        uuid4().hex: create_dummy_tracker(penalty_rawtx=create_dummy_transaction().hex()) for _ in range(10)
    }
    trackers_confirmed = {
        uuid4().hex: create_dummy_tracker(penalty_rawtx=create_dummy_transaction().hex()) for _ in range(10)
    }

    trackers_unconfirmed = {}
    for _ in range(10):
        tracker = create_dummy_tracker(penalty_rawtx=create_dummy_transaction().hex())
        responder.unconfirmed_txs.append(tracker.penalty_txid)
        trackers_unconfirmed[uuid4().hex] = tracker

    all_trackers = {**trackers_ir_resolved, **trackers_confirmed, **trackers_unconfirmed}

    # Register everything with the Responder
    for uuid, tracker in all_trackers.items():
        responder.trackers[uuid] = tracker.get_summary()

    for tracker in trackers_ir_resolved.values():
        bitcoin_cli(bitcoind_connect_params).sendrawtransaction(tracker.penalty_rawtx)

    generate_block_w_delay()

    for tracker in trackers_confirmed.values():
        bitcoin_cli(bitcoind_connect_params).sendrawtransaction(tracker.penalty_rawtx)

    # ir_resolved now sit at 100 confirmations while confirmed sit at 99
    generate_blocks_w_delay(99)

    # Only the irrevocably resolved trackers are complete at this point
    completed_trackers = responder.get_completed_trackers()
    expected_keys = list(trackers_ir_resolved.keys())
    assert set(completed_trackers) == set(expected_keys)

    # One more block brings the confirmed set to 100 as well
    generate_block_w_delay()
    completed_trackers = responder.get_completed_trackers()
    expected_keys.extend(list(trackers_confirmed.keys()))
    assert set(completed_trackers) == set(expected_keys)