def test_send_block_to_node(self, label, node_index, block, send_rate,
                            expected_time_to_send, cmd_timeout_base,
                            cmd_timeout_base_ibd, cmd_timeout_per_peer,
                            expect_timeout, mocktime=0,
                            additional_conn_blocks=None,
                            additional_conn_send_rate=0):
    """Send *block* to a freshly started node over a rate-limited connection
    and verify the block-download timeout behaviour.

    The node is started with the given download-timeout percentages.  The
    block is announced by headers and then streamed at *send_rate*.  When
    *expect_timeout* is True we expect the node to time out the download
    (logging "Timeout downloading block <hash>" and disconnecting us);
    otherwise the block must become the best tip without a new timeout log
    entry.  *additional_conn_blocks* (optional) are sent on a second,
    separately rate-limited connection part-way through, to exercise the
    per-peer timeout scaling.

    NOTE(review): additional_conn_blocks previously defaulted to a mutable
    list literal; changed to None (identical truthiness behaviour) to avoid
    the shared-mutable-default pitfall.
    """
    node_args = [
        "-genesisactivationheight=1",  # to be able to process large transactions
        f"-blockdownloadtimeoutbasepercent={cmd_timeout_base}",
        f"-blockdownloadtimeoutbaseibdpercent={cmd_timeout_base_ibd}",
        f"-blockdownloadtimeoutperpeerpercent={cmd_timeout_per_peer}",
    ]
    if mocktime:
        node_args.append(f"-mocktime={mocktime}")
    with self.run_node_with_connections(
            title=label, node_index=node_index, args=node_args,
            number_of_connections=2) as (conn, conn_additional):
        timeout_log_line = f"Timeout downloading block {block.hash}"
        # remember how many times the timeout line is already in the log so
        # we can assert on the delta, not an absolute count
        timeout_log_line_count = count_log_msg(conn.rpc, timeout_log_line)
        conn.rate_limit_sending(send_rate)
        send_by_headers(conn, [block], True)
        # sleep for a third of the time needed to send the block, so the
        # download is well under way before we (optionally) add competition
        # (original comment said "half" but the code divides by 3)
        sleep(expected_time_to_send / 3)
        if additional_conn_blocks:
            conn_additional.rate_limit_sending(additional_conn_send_rate)
            send_by_headers(conn_additional, additional_conn_blocks, True)
        if expect_timeout:
            # wait until we are disconnected from the node (this will be a
            # consequence of the block downloading timeout)
            wait_until(lambda: not conn.connected,
                       timeout=expected_time_to_send, check_interval=0.3)
            assert count_log_msg(
                conn.rpc, timeout_log_line) == timeout_log_line_count + 1
        else:
            wait_until(lambda: conn.rpc.getbestblockhash() == block.hash,
                       timeout=expected_time_to_send, check_interval=3)
            assert count_log_msg(
                conn.rpc, timeout_log_line) == timeout_log_line_count
def send_branches(self, first_branch, second_branch, wait):
    """Send two competing branches to the node, one after the other.

    Each branch is a dict with 'conn', 'blocks' and 'do_send_blocks' keys.
    When *wait* is truthy, a ping round-trip on the first connection ensures
    the first branch is fully processed before the second one is sent.
    """
    for branch in (first_branch, second_branch):
        branch_conn = branch['conn']
        send_by_headers(branch_conn, branch['blocks'], branch['do_send_blocks'])
        # synchronise only between the two sends, never after the second
        if wait and branch is first_branch:
            branch_conn.cb.sync_with_ping()
def run_test(self):
    """Trigger safe mode via a headers-only fork within the maximum fork
    distance, then extend the active chain to leave safe mode again.
    """
    with self.run_node_with_connections("Preparation", 0, None, 2) as (conn1, conn2):
        last_block_time = 0
        conn1.rpc.generate(1)

        # Branch 1: the chain that will become the active tip
        # (SAFE_MODE_MAX_FORK_DISTANCE + 1 blocks in total).
        branch_1_root, last_block_time = make_block(
            conn1, last_block_time=last_block_time)
        branch_1_blocks = [branch_1_root]
        for _ in range(SAFE_MODE_MAX_FORK_DISTANCE):
            new_block, last_block_time = make_block(
                conn1, branch_1_blocks[-1], last_block_time=last_block_time)
            branch_1_blocks.append(new_block)

        # Branch 2: a longer competing chain that we will announce by
        # headers only (no block data).
        branch_2_root, last_block_time = make_block(
            conn2, last_block_time=last_block_time)
        branch_2_blocks = [branch_2_root]
        for _ in range(SAFE_MODE_MAX_FORK_DISTANCE + SAFE_MODE_MIN_POW_DIFFERENCE + 2):
            new_block, last_block_time = make_block(
                conn2, branch_2_blocks[-1], last_block_time=last_block_time)
            branch_2_blocks.append(new_block)

        # send main branch that should be active tip (all but the last block)
        send_by_headers(conn1, branch_1_blocks[:SAFE_MODE_MAX_FORK_DISTANCE],
                        do_send_blocks=True)

        # send alternative branch - headers only
        send_by_headers(conn2, branch_2_blocks, do_send_blocks=False)

        # active tip is one before last block from branch 1 and branch 2 has
        # status headers-only
        wait_for_tip(conn1, branch_1_blocks[-2].hash)
        wait_for_tip_status(conn1, branch_2_blocks[-1].hash, "headers-only")

        # we should have entered safe mode (UNKNOWN) as the alternative branch
        # is more than 6 blocks ahead and still within
        # SAFE_MODE_MAX_FORK_DISTANCE blocks of the active tip;
        # getbalance must fail while in safe mode
        try:
            conn1.rpc.getbalance()
            assert False, "Should not come to here, should raise exception in line above."
        except JSONRPCException as e:
            assert e.error[
                "message"] == "Safe mode: Warning: The network does not appear to fully agree! We received headers of a large fork. Still waiting for block data for more details."

        # add one more block to active chain
        send_by_headers(conn1, branch_1_blocks[SAFE_MODE_MAX_FORK_DISTANCE:],
                        do_send_blocks=True)

        # active tip is last block from branch 1
        wait_for_tip(conn1, branch_1_blocks[-1].hash)

        # alternative chain is now more than 288 blocks away so we should
        # exit safe mode; getbalance succeeds again
        conn1.rpc.getbalance()
def run_test(self):
    """Trigger safe mode via a valid fork (checked through getsafemodeinfo),
    then extend the winning chain far enough to leave safe mode.
    """
    with self.run_node_with_connections("Preparation", 0, None, 2) as (conn1, conn2):
        last_block_time = 0
        conn1.rpc.generate(1)

        # Branch 1: will initially be the active chain.
        branch_1_root, last_block_time = make_block(
            conn1, last_block_time=last_block_time)
        branch_1_blocks = [branch_1_root]
        for _ in range(SAFE_MODE_DEFAULT_MIN_FORK_LENGTH + 1):
            new_block, last_block_time = make_block(
                conn1, branch_1_blocks[-1], last_block_time=last_block_time)
            branch_1_blocks.append(new_block)

        # Branch 2: longer chain that will eventually take over.
        branch_2_root, last_block_time = make_block(
            conn2, last_block_time=last_block_time)
        branch_2_blocks = [branch_2_root]
        for _ in range(SAFE_MODE_DEFAULT_MAX_FORK_DISTANCE):
            new_block, last_block_time = make_block(
                conn2, branch_2_blocks[-1], last_block_time=last_block_time)
            branch_2_blocks.append(new_block)

        # send first branch that should be active tip
        send_by_headers(conn1, branch_1_blocks, do_send_blocks=True)

        # wait for active tip
        wait_for_tip(conn1, branch_1_blocks[-1].hash)

        # send second branch with more POW (only a prefix for now)
        send_by_headers(
            conn2, branch_2_blocks[:SAFE_MODE_DEFAULT_MIN_FORK_LENGTH + 3],
            do_send_blocks=True)

        # active tip is from branch 2 and branch 1 has status valid-fork
        wait_for_tip(
            conn1, branch_2_blocks[SAFE_MODE_DEFAULT_MIN_FORK_LENGTH + 2].hash)
        wait_for_tip_status(conn1, branch_1_blocks[-1].hash, "valid-fork")

        # we should have entered safe mode (VALID) because there is a valid
        # fork with SAFE_MODE_DEFAULT_MIN_VALID_FORK_POW pow and the last
        # common block is less than SAFE_MODE_DEFAULT_MAX_VALID_FORK_DISTANCE
        # from the active tip
        assert conn1.rpc.getsafemodeinfo()["safemodeenabled"]

        # send more blocks of the second branch
        send_by_headers(conn1,
                        branch_2_blocks[SAFE_MODE_DEFAULT_MIN_FORK_LENGTH + 3:],
                        do_send_blocks=True)

        # active tip is last block from branch 2
        wait_for_tip(conn1, branch_2_blocks[-1].hash)

        # we should exit safe mode because the fork base is now too far from
        # the active tip
        assert not conn1.rpc.getsafemodeinfo()["safemodeenabled"]
def run_test(self):
    """Trigger safe mode via a valid fork (checked through getbalance raising
    a JSONRPCException), then extend the winning chain to leave safe mode.
    """
    with self.run_node_with_connections("Preparation", 0, None, 2) as (conn1, conn2):
        last_block_time = 0
        conn1.rpc.generate(1)

        # Branch 1: will initially be the active chain.
        branch_1_root, last_block_time = make_block(conn1, last_block_time=last_block_time)
        branch_1_blocks = [branch_1_root]
        for _ in range(SAFE_MODE_MIN_VALID_FORK_LENGTH + 1):
            new_block, last_block_time = make_block(conn1, branch_1_blocks[-1],
                                                    last_block_time=last_block_time)
            branch_1_blocks.append(new_block)

        # Branch 2: longer chain that will eventually take over.
        branch_2_root, last_block_time = make_block(conn2, last_block_time=last_block_time)
        branch_2_blocks = [branch_2_root]
        for _ in range(SAFE_MODE_MAX_VALID_FORK_DISTANCE):
            new_block, last_block_time = make_block(conn2, branch_2_blocks[-1],
                                                    last_block_time=last_block_time)
            branch_2_blocks.append(new_block)

        # send first branch that should be active tip
        send_by_headers(conn1, branch_1_blocks, do_send_blocks=True)

        # wait for active tip
        wait_for_tip(conn1, branch_1_blocks[-1].hash)

        # send second branch with more POW (only a prefix for now)
        send_by_headers(conn2, branch_2_blocks[:SAFE_MODE_MIN_VALID_FORK_LENGTH + 3],
                        do_send_blocks=True)

        # active tip is from branch 2 and branch 1 has status valid-fork
        wait_for_tip(conn1, branch_2_blocks[SAFE_MODE_MIN_VALID_FORK_LENGTH + 2].hash)
        wait_for_tip_status(conn1, branch_1_blocks[-1].hash, "valid-fork")

        # we should have entered safe mode (VALID) because there is a valid
        # fork with SAFE_MODE_MIN_VALID_FORK_POW pow and the last common
        # block is less than SAFE_MODE_MAX_VALID_FORK_DISTANCE from the
        # active tip; getbalance must fail while in safe mode
        try:
            conn1.rpc.getbalance()
            assert False, "Should not come to here, should raise exception in line above."
        except JSONRPCException as e:
            assert e.error["message"] == "Safe mode: Warning: The network does not appear to fully agree! Some miners appear to be experiencing issues. A large valid fork has been detected."

        # send more blocks of the second branch
        send_by_headers(conn1, branch_2_blocks[SAFE_MODE_MIN_VALID_FORK_LENGTH + 3:],
                        do_send_blocks=True)

        # active tip is last block from branch 2
        wait_for_tip(conn1, branch_2_blocks[-1].hash)

        # we should exit safe mode because the fork base is too far from the
        # active tip; getbalance succeeds again
        conn1.rpc.getbalance()
def run_test(self):
    """Verify that a fork whose root block is invalid does not put the node
    into safe mode, neither while headers-only nor after it is marked invalid.
    """
    with self.run_node_with_connections("Preparation", 0, None, 2) as (conn1, conn2):
        last_block_time = 0
        conn1.rpc.generate(1)

        # Branch 1: the valid active chain.
        branch_1_root, last_block_time = make_block(
            conn1, last_block_time=last_block_time)
        branch_1_blocks = [branch_1_root]
        for _ in range(SAFE_MODE_MAX_FORK_DISTANCE):
            new_block, last_block_time = make_block(
                conn1, branch_1_blocks[-1], last_block_time=last_block_time)
            branch_1_blocks.append(new_block)

        # Branch 2: its root block is deliberately invalid (makeValid=False),
        # so the whole branch will be rejected once block data arrives.
        branch_2_root, last_block_time = make_block(
            conn2, makeValid=False, last_block_time=last_block_time)
        branch_2_blocks = [branch_2_root]
        for _ in range(SAFE_MODE_MAX_FORK_DISTANCE + SAFE_MODE_MIN_POW_DIFFERENCE + 1):
            new_block, last_block_time = make_block(
                conn2, branch_2_blocks[-1], last_block_time=last_block_time)
            branch_2_blocks.append(new_block)

        # send first branch that should be active tip
        send_by_headers(conn1, branch_1_blocks, do_send_blocks=True)

        # wait for active tip
        wait_for_tip(conn1, branch_1_blocks[-1].hash)

        # send second branch with more POW - headers only
        send_by_headers(conn2, branch_2_blocks, do_send_blocks=False)

        # active tip should be from first branch and second branch should
        # have headers-only status
        wait_for_tip(conn1, branch_1_blocks[-1].hash)
        wait_for_tip_status(conn1, branch_2_blocks[-1].hash, "headers-only")

        # we should not be in safe mode (getbalance succeeds)
        conn1.rpc.getbalance()

        # From time to time this test can run faster than expected and
        # the older blocks for batch 2 headers are not yet requested.
        # In that case they will be rejected due to being too far away
        # from the tip. In that case we need to send them again once they
        # are requested.
        def on_getdata(conn, msg):
            # Re-serve any branch-2 block the node asks for.
            for i in msg.inv:
                if i.type != 2:  # MSG_BLOCK
                    error_msg = f"Unexpected data requested {i}"
                    self.log.error(error_msg)
                    raise NotImplementedError(error_msg)
                for block in branch_2_blocks:
                    if int(block.hash, 16) == i.hash:
                        conn.send_message(msg_block(block))
                        break
        conn2.cb.on_getdata = on_getdata

        # send second branch full blocks
        for block in branch_2_blocks:
            conn2.send_message(msg_block(block))

        # second branch should now be invalid
        wait_for_tip_status(conn1, branch_2_blocks[-1].hash, "invalid")
        wait_for_tip(conn1, branch_1_blocks[-1].hash)

        # we should not be in safe mode
        conn1.rpc.getbalance()
def run_test(self):
    """Walk a fork through the headers-only -> invalid safe-mode states and
    verify the safe-mode message changes accordingly, then grow the active
    chain until safe mode is left.
    """
    with self.run_node_with_connections("Preparation", 0, None, 2) as (conn1, conn2):
        last_block_time = 0
        conn1.rpc.generate(1)

        # Branch 1: valid active chain (11 blocks).
        branch_1_root, last_block_time = make_block(
            conn1, last_block_time=last_block_time)
        branch_1_blocks = [branch_1_root]
        for _ in range(10):
            new_block, last_block_time = make_block(
                conn1, branch_1_blocks[-1], last_block_time=last_block_time)
            branch_1_blocks.append(new_block)

        # Branch 2: 31 blocks; the root block is invalid (makeValid=False).
        branch_2_root, last_block_time = make_block(
            conn2, makeValid=False, last_block_time=last_block_time)
        branch_2_blocks = [branch_2_root]
        for _ in range(30):
            new_block, last_block_time = make_block(
                conn2, branch_2_blocks[-1], last_block_time=last_block_time)
            branch_2_blocks.append(new_block)

        # send main branch that should be active tip
        send_by_headers(conn1, branch_1_blocks, do_send_blocks=True)

        # send block header of the first block of branch 2 but not send block itself
        send_by_headers(conn2, branch_2_blocks[:1], do_send_blocks=False)

        # send first part of the blocks from the second branch (with data);
        # without the first block's data they stay headers-only
        send_by_headers(conn2, branch_2_blocks[1:20], do_send_blocks=True)

        # active tip is last block from branch 1 and branch 2 has status headers-only
        wait_for_tip(conn1, branch_1_blocks[-1].hash)
        wait_for_tip_status(conn1, branch_2_blocks[19].hash, "headers-only")

        # we should have entered safe mode (UNKNOWN) because we don't have
        # data of the first block
        try:
            conn1.rpc.getbalance()
            assert False, "Should not come to here, should raise exception in line above."
        except JSONRPCException as e:
            assert e.error[
                "message"] == "Safe mode: Warning: The network does not appear to fully agree! We received headers of a large fork. Still waiting for block data for more details."

        # send headers only for the rest of the second branch
        send_by_headers(conn2, branch_2_blocks[20:], do_send_blocks=False)

        # we should remain in the safe mode with UNKNOWN
        try:
            conn1.rpc.getbalance()
            assert False, "Should not come to here, should raise exception in line above."
        except JSONRPCException as e:
            assert e.error[
                "message"] == "Safe mode: Warning: The network does not appear to fully agree! We received headers of a large fork. Still waiting for block data for more details."

        # send contents of first block of second branch
        # this block is invalid and should invalidate whole second branch
        conn2.send_message(msg_block(branch_2_blocks[0]))

        # make sure that block is processed before doing any asserts by waiting for reject
        # we cannot use sync_with_ping here because we sent invalid block and
        # connection will be banned and closed
        conn2.cb.wait_for_reject()

        # active tip should still be from branch 1 and branch 2 should be invalid
        wait_for_tip(conn1, branch_1_blocks[-1].hash)
        wait_for_tip_status(conn1, branch_2_blocks[-1].hash, "invalid")

        # safe mode message should have now changed - we have an invalid
        # chain that triggers safe mode
        try:
            conn1.rpc.getbalance()
            assert False, "Should not come to here, should raise exception in line above."
        except JSONRPCException as e:
            assert e.error[
                "message"] == "Safe mode: Warning: We do not appear to fully agree with our peers! You may need to upgrade, or other nodes may need to upgrade. A large invalid fork has been detected."

        # add more blocks to active chain so fork will no longer have more
        # than SAFE_MODE_MIN_POW_DIFFERENCE blocks advantage
        new_block, last_block_time = make_block(
            conn1, branch_1_blocks[-1], last_block_time=last_block_time)
        branch_1_aditional_blocks = [new_block]
        for _ in range(20 - SAFE_MODE_MIN_POW_DIFFERENCE):
            new_block, last_block_time = make_block(
                conn1, branch_1_aditional_blocks[-1],
                last_block_time=last_block_time)
            branch_1_aditional_blocks.append(new_block)

        # send additional blocks with data to active chain
        send_by_headers(conn1, branch_1_aditional_blocks, do_send_blocks=True)

        # check that active tip is from branch 1
        wait_for_tip(conn1, branch_1_aditional_blocks[-1].hash)

        # we are not in the safe mode any more: the fork is no longer
        # 6 blocks ahead of the active chain
        conn1.rpc.getbalance()
def run_test(self):
    """Exercise safe-mode state transitions together with the safe-mode
    webhook: a reorg to a new chain, a headers-only extension, sending its
    blocks, and finally invalidating the extension (a rollback-reorg).
    """
    # Local HTTP server acting as the safe-mode webhook endpoint.
    self.PORT = 8765
    self.webhook_messages = []
    self.server = HTTPServer(('', self.PORT), self.make_handler)
    self.start_server()
    args = [
        f"-safemodewebhookurl=http://127.0.0.1:{self.PORT}/safemode",
    ]
    with self.run_node_with_connections("Test Reorg", 0, args, 2) as (conn, conn2):
        conn.rpc.generate(1)

        root_block, root_block_time = make_block(conn, last_block_time=0)
        self.last_block_time = root_block_time
        send_by_headers(conn, [root_block], do_send_blocks=True)
        wait_for_tip(conn, root_block.hash)

        # the main chain, just long enough to be able to trigger the safe
        # mode after the reorg
        main_chain = self.make_chain(conn, root_block,
                                     SAFE_MODE_DEFAULT_MIN_FORK_LENGTH)
        expected_main_chain_fork_data = {
            "forkfirstblock": main_chain[0].hash,
            "tips": {main_chain[-1].hash},
            "lastcommonblock": root_block.hash
        }

        # the new chain, just long enough to be able to trigger the reorg
        new_chain = self.make_chain(conn, root_block, len(main_chain) + 1)
        expected_new_chain_fork_data = {
            "forkfirstblock": new_chain[0].hash,
            "tips": {new_chain[-1].hash},
            "lastcommonblock": root_block.hash
        }

        # sending the main chain
        send_by_headers(conn, main_chain, do_send_blocks=True)
        wait_for_tip(conn, main_chain[-1].hash)

        # send headers of the new chain and verify that we are in the safe mode
        send_by_headers(conn, new_chain, do_send_blocks=False)
        wait_for_tip_status(conn, new_chain[-1].hash, "headers-only")
        self.wait_for_safe_mode_data(conn.rpc, [expected_new_chain_fork_data])
        self.check_last_webhook_msg_reorged_from(None)
        self.webhook_messages = []

        # now send blocks of the new chain
        for bl in new_chain:
            conn.send_message(msg_block(bl))

        # a reorg happened, tip should be at last block of the new chain
        wait_for_tip(conn, new_chain[-1].hash)

        # still in the safe mode, but the fork is now the old main chain
        self.wait_for_safe_mode_data(conn.rpc, [expected_main_chain_fork_data])

        # last block caused a reorg, check that we got a correct notification
        self.check_last_webhook_msg_reorged_from(main_chain[-1].hash,
                                                 len(main_chain))

        # extending the new chain, just enough to be able to trigger the
        # safe mode after sending headers
        new_chain_extension = self.make_chain(
            conn, new_chain[-1], SAFE_MODE_DEFAULT_MIN_FORK_LENGTH)
        expected_new_chain_ext_fork_data = {
            "forkfirstblock": new_chain_extension[0].hash,
            "tips": {new_chain_extension[-1].hash},
            "lastcommonblock": new_chain[-1].hash
        }

        # sending the new chain extension (headers only)
        send_by_headers(conn, new_chain_extension, do_send_blocks=False)
        wait_for_tip_status(conn, new_chain_extension[-1].hash, "headers-only")

        # two forks: the main chain from before and the new chain extension
        self.wait_for_safe_mode_data(conn.rpc, [
            expected_main_chain_fork_data,
            expected_new_chain_ext_fork_data,
        ])

        # no reorg
        self.check_last_webhook_msg_reorged_from(None)

        # now send blocks of the new chain extension
        for bl in new_chain_extension:
            conn.send_message(msg_block(bl))

        # the tip has advanced
        wait_for_tip(conn, new_chain_extension[-1].hash)
        self.wait_for_safe_mode_data(conn.rpc, [
            expected_main_chain_fork_data,
        ])

        # still no reorg
        self.check_last_webhook_msg_reorged_from(None)

        # invalidating first block of the new chain extension
        conn.rpc.invalidateblock(new_chain_extension[0].hash)

        # rolled back
        wait_for_tip(conn, new_chain[-1].hash)
        self.wait_for_safe_mode_data(conn.rpc, [
            expected_main_chain_fork_data,
            expected_new_chain_ext_fork_data,
        ])

        # rolling back is qualified as a reorg
        self.check_last_webhook_msg_reorged_from(
            new_chain_extension[-1].hash, len(new_chain_extension))

    self.kill_server()
def run_test(self):
    """End-to-end test of the dsdetected (double-spend detected) P2P message.

    Sends a long series of malformed/invalid dsdetected messages (each must
    be ignored by the webhook and must raise the peer's banscore), then a
    valid one (webhook notified), a duplicate (ignored), an aged one
    (ignored), and finally a batch of randomly generated valid messages.
    """
    # Turn on a webhook server
    self.start_webhook_server()

    # Create a P2P connection
    node = self.nodes[0]
    peer = NodeConnCB()
    connection = NodeConn('127.0.0.1', p2p_port(0), node, peer)
    peer.add_connection(connection)
    NetworkThread().start()
    peer.wait_for_verack()

    # Create an initial block with a coinbase we will split into multiple utxos
    initialBlock, _ = make_block(connection)
    coinbaseTx = initialBlock.vtx[0]
    send_by_headers(connection, [initialBlock], do_send_blocks=True)
    wait_for_tip(connection, initialBlock.hash)

    # Mature the coinbase and fetch the current tip as a parent block.
    node.generate(101)
    block101hex = node.getblock(node.getbestblockhash(), False)
    block101dict = node.getblock(node.getbestblockhash(), 2)
    block101 = FromHex(CBlock(), block101hex)
    block101.height = block101dict['height']
    block101.rehash()

    # Create a block with a transaction spending coinbaseTx of a previous
    # block and making multiple outputs for future transactions to spend
    utxoBlock, _ = make_block(connection, parent_block=block101)
    utxoTx = create_tx(coinbaseTx, 0, 1 * COIN)
    # Create additional 48 outputs (we let 1 COIN as fee)
    for _ in range(48):
        utxoTx.vout.append(CTxOut(1 * COIN, CScript([OP_TRUE])))
    # Add to block
    utxoTx.rehash()
    utxoBlock.vtx.append(utxoTx)
    utxoBlock.hashMerkleRoot = utxoBlock.calc_merkle_root()
    utxoBlock.solve()
    send_by_headers(connection, [utxoBlock], do_send_blocks=True)
    wait_for_tip(connection, utxoBlock.hash)

    # Make sure serialization/deserialization works as expected
    # Create dsdetected message. The content is not important here.
    dsdMessage = msg_dsdetected(blocksDetails=[
        BlockDetails(
            [CBlockHeader(utxoBlock), CBlockHeader(initialBlock)],
            DSMerkleProof(1, utxoTx, utxoBlock.hashMerkleRoot,
                          [MerkleProofNode(utxoBlock.vtx[0].sha256)]))
    ])
    dsdBytes = dsdMessage.serialize()
    dsdMessageDeserialized = msg_dsdetected()
    dsdMessageDeserialized.deserialize(BytesIO(dsdBytes))
    assert_equal(str(dsdMessage), str(dsdMessageDeserialized))

    # Send a message containing random bytes. Webhook should not receive
    # the notification.
    peer.send_and_ping(fake_msg_dsdetected())
    assert_equal(self.get_JSON_notification(), None)

    # Create two blocks with transactions spending the same utxo
    blockA, _ = make_block(connection, parent_block=utxoBlock)
    blockB, _ = make_block(connection, parent_block=utxoBlock)
    blockF, _ = make_block(connection, parent_block=utxoBlock)
    txA = create_tx(utxoBlock.vtx[1], 0, int(0.8 * COIN))
    txB = create_tx(utxoBlock.vtx[1], 0, int(0.9 * COIN))
    txF = create_tx(utxoBlock.vtx[1], 0, int(0.7 * COIN))
    txA.rehash()
    txB.rehash()
    txF.rehash()
    blockA.vtx.append(txA)
    blockB.vtx.append(txB)
    blockF.vtx.append(txF)
    blockA.hashMerkleRoot = blockA.calc_merkle_root()
    blockB.hashMerkleRoot = blockB.calc_merkle_root()
    blockF.hashMerkleRoot = blockF.calc_merkle_root()
    blockA.calc_sha256()
    blockB.calc_sha256()
    blockF.calc_sha256()
    blockA.solve()
    blockB.solve()
    blockF.solve()

    # Remember the banscore; each rejected message below should raise it.
    start_banscore = node.getpeerinfo()[0]['banscore']

    # Webhook should not receive the notification if we send dsdetected
    # message with only one block detail.
    dsdMessage = msg_dsdetected(blocksDetails=[
        BlockDetails(
            [CBlockHeader(blockA)],
            DSMerkleProof(1, txA, blockA.hashMerkleRoot,
                          [MerkleProofNode(blockA.vtx[0].sha256)]))
    ])
    peer.send_and_ping(dsdMessage)
    assert_equal(self.get_JSON_notification(), None)

    # Webhook should not receive the notification if we send dsdetected
    # message with two block details and one is containing no headers.
    dsdMessage = msg_dsdetected(blocksDetails=[
        BlockDetails(
            [CBlockHeader(blockA)],
            DSMerkleProof(1, txA, blockA.hashMerkleRoot,
                          [MerkleProofNode(blockA.vtx[0].sha256)])),
        BlockDetails(
            [],
            DSMerkleProof(1, txB, blockB.hashMerkleRoot,
                          [MerkleProofNode(blockB.vtx[0].sha256)]))
    ])
    peer.send_and_ping(dsdMessage)
    assert_equal(self.get_JSON_notification(), None)

    # Webhook should not receive the notification if we send dsdetected
    # message where last headers in block details do not have a common
    # previous block hash.
    dsdMessage = msg_dsdetected(blocksDetails=[
        BlockDetails(
            [CBlockHeader(blockA)],
            DSMerkleProof(1, txA, blockA.hashMerkleRoot,
                          [MerkleProofNode(blockA.vtx[0].sha256)])),
        BlockDetails(
            [CBlockHeader(utxoBlock)],
            DSMerkleProof(1, txB, blockB.hashMerkleRoot,
                          [MerkleProofNode(blockB.vtx[0].sha256)]))
    ])
    peer.send_and_ping(dsdMessage)
    assert_equal(self.get_JSON_notification(), None)

    # Webhook should not receive the notification if we send dsdetected
    # message where block details does not have headers in proper order.
    dsdMessage = msg_dsdetected(blocksDetails=[
        BlockDetails(
            [CBlockHeader(blockA)],
            DSMerkleProof(1, txA, blockA.hashMerkleRoot,
                          [MerkleProofNode(blockA.vtx[0].sha256)])),
        BlockDetails(
            [CBlockHeader(utxoBlock), CBlockHeader(blockB)],
            DSMerkleProof(1, txB, blockB.hashMerkleRoot,
                          [MerkleProofNode(blockB.vtx[0].sha256)]))
    ])
    peer.send_and_ping(dsdMessage)
    assert_equal(self.get_JSON_notification(), None)

    # Webhook should not receive the notification if we send dsdetected
    # message with the empty merkle proof.
    dsdMessage = msg_dsdetected(blocksDetails=[
        BlockDetails(
            [CBlockHeader(blockA)],
            DSMerkleProof(1, txA, blockA.hashMerkleRoot,
                          [MerkleProofNode(blockA.vtx[0].sha256)])),
        BlockDetails([CBlockHeader(blockB)], DSMerkleProof())
    ])
    peer.send_and_ping(dsdMessage)
    assert_equal(self.get_JSON_notification(), None)

    # Webhook should not receive the notification if we send dsdetected
    # message with the wrong index in the merkle proof (merkle root
    # validation should fail)
    dsdMessage = msg_dsdetected(blocksDetails=[
        BlockDetails(
            [CBlockHeader(blockA)],
            DSMerkleProof(1, txA, blockA.hashMerkleRoot,
                          [MerkleProofNode(blockA.vtx[0].sha256)])),
        BlockDetails(
            [CBlockHeader(blockB)],
            DSMerkleProof(0, txB, blockB.hashMerkleRoot,
                          [MerkleProofNode(blockB.vtx[0].sha256)]))
    ])
    peer.send_and_ping(dsdMessage)
    assert_equal(self.get_JSON_notification(), None)

    # Webhook should not receive the notification if we send dsdetected
    # message with the wrong transaction in the merkle proof (merkle root
    # validation should fail)
    dsdMessage = msg_dsdetected(blocksDetails=[
        BlockDetails(
            [CBlockHeader(blockA)],
            DSMerkleProof(1, txA, blockA.hashMerkleRoot,
                          [MerkleProofNode(blockA.vtx[0].sha256)])),
        BlockDetails(
            [CBlockHeader(blockB)],
            DSMerkleProof(1, txA, blockB.hashMerkleRoot,
                          [MerkleProofNode(blockB.vtx[0].sha256)]))
    ])
    peer.send_and_ping(dsdMessage)
    assert_equal(self.get_JSON_notification(), None)

    # Webhook should not receive the notification if we send dsdetected
    # message with the wrong merkle root (merkle root validation should fail)
    dsdMessage = msg_dsdetected(blocksDetails=[
        BlockDetails(
            [CBlockHeader(blockA)],
            DSMerkleProof(1, txA, blockA.hashMerkleRoot,
                          [MerkleProofNode(blockA.vtx[0].sha256)])),
        BlockDetails(
            [CBlockHeader(blockB)],
            DSMerkleProof(1, txB, blockA.hashMerkleRoot,
                          [MerkleProofNode(blockB.vtx[0].sha256)]))
    ])
    peer.send_and_ping(dsdMessage)
    assert_equal(self.get_JSON_notification(), None)

    # Webhook should not receive the notification if we send dsdetected
    # message with the wrong merkle proof (merkle root validation should fail)
    dsdMessage = msg_dsdetected(blocksDetails=[
        BlockDetails(
            [CBlockHeader(blockA)],
            DSMerkleProof(1, txA, blockA.hashMerkleRoot,
                          [MerkleProofNode(blockA.vtx[0].sha256)])),
        BlockDetails(
            [CBlockHeader(blockB)],
            DSMerkleProof(1, txB, blockB.hashMerkleRoot,
                          [MerkleProofNode(blockA.hashMerkleRoot)]))
    ])
    peer.send_and_ping(dsdMessage)
    assert_equal(self.get_JSON_notification(), None)

    # Webhook should not receive the notification if we send dsdetected
    # message with the merkle proof having an additional unexpected node
    # (merkle root validation should fail)
    dsdMessage = msg_dsdetected(blocksDetails=[
        BlockDetails(
            [CBlockHeader(blockA)],
            DSMerkleProof(1, txA, blockA.hashMerkleRoot,
                          [MerkleProofNode(blockA.vtx[0].sha256)])),
        BlockDetails([CBlockHeader(blockB)],
                     DSMerkleProof(1, txB, blockB.hashMerkleRoot, [
                         MerkleProofNode(blockB.vtx[0].sha256),
                         MerkleProofNode(blockA.hashMerkleRoot)
                     ]))
    ])
    peer.send_and_ping(dsdMessage)
    assert_equal(self.get_JSON_notification(), None)

    # Webhook should not receive the notification if we send dsdetected
    # message with the valid proof, but transaction is a coinbase transaction
    dsdMessage = msg_dsdetected(blocksDetails=[
        BlockDetails(
            [CBlockHeader(blockA)],
            DSMerkleProof(1, txA, blockA.hashMerkleRoot,
                          [MerkleProofNode(blockA.vtx[0].sha256)])),
        BlockDetails(
            [CBlockHeader(blockB)],
            DSMerkleProof(0, blockB.vtx[0], blockB.hashMerkleRoot,
                          [MerkleProofNode(blockB.vtx[1].sha256)]))
    ])
    peer.send_and_ping(dsdMessage)
    assert_equal(self.get_JSON_notification(), None)

    # Webhook should not receive the notification if we send dsdetected
    # message with transactions that are not double spending
    # Create a block similar as before, but with a transaction spending a
    # different utxo
    blockC, _ = make_block(connection, parent_block=utxoBlock)
    txC = create_tx(utxoBlock.vtx[1], 1, int(0.7 * COIN))
    blockC.vtx.append(txC)
    blockC.hashMerkleRoot = blockC.calc_merkle_root()
    blockC.solve()
    dsdMessage = msg_dsdetected(blocksDetails=[
        BlockDetails(
            [CBlockHeader(blockA)],
            DSMerkleProof(1, txA, blockA.hashMerkleRoot,
                          [MerkleProofNode(blockA.vtx[0].sha256)])),
        BlockDetails(
            [CBlockHeader(blockC)],
            DSMerkleProof(1, txC, blockC.hashMerkleRoot,
                          [MerkleProofNode(blockC.vtx[0].sha256)]))
    ])
    peer.send_and_ping(dsdMessage)
    assert_equal(self.get_JSON_notification(), None)

    # Webhook should not receive the notification if the two double spending
    # transactions are actually the same transaction (having same txid)
    # Create a block similar as before, but containing the same txA
    blockD, _ = make_block(connection, parent_block=utxoBlock)
    blockD.vtx.append(txA)
    blockD.hashMerkleRoot = blockD.calc_merkle_root()
    blockD.solve()
    dsdMessage = msg_dsdetected(blocksDetails=[
        BlockDetails(
            [CBlockHeader(blockA)],
            DSMerkleProof(1, txA, blockA.hashMerkleRoot,
                          [MerkleProofNode(blockA.vtx[0].sha256)])),
        BlockDetails(
            [CBlockHeader(blockD)],
            DSMerkleProof(1, txA, blockD.hashMerkleRoot,
                          [MerkleProofNode(blockD.vtx[0].sha256)]))
    ])
    peer.send_and_ping(dsdMessage)
    assert_equal(self.get_JSON_notification(), None)

    # Webhook should not receive the notification if the header cannot pass
    # the PoW check.
    # NOTE(review): PoW is so easy in regtest that a freshly solved nonce is
    # often already valid, hence we have to select the nonce carefully: walk
    # candidate nonces until solve() skips one (solved nNonce > candidate),
    # which means that candidate does NOT satisfy PoW — then use it.
    blockE, _ = make_block(connection, parent_block=utxoBlock)
    blockE.vtx.append(txB)
    blockE.hashMerkleRoot = blockE.calc_merkle_root()
    nonce = blockE.nNonce
    while True:
        blockE.solve()
        if blockE.nNonce > nonce:
            blockE.nNonce = nonce
            break
        nonce += 1
        blockE.nNonce = nonce
    dsdMessage = msg_dsdetected(blocksDetails=[
        BlockDetails(
            [CBlockHeader(blockA)],
            DSMerkleProof(1, txA, blockA.hashMerkleRoot,
                          [MerkleProofNode(blockA.vtx[0].sha256)])),
        BlockDetails(
            [CBlockHeader(blockE)],
            DSMerkleProof(1, txB, blockE.hashMerkleRoot,
                          [MerkleProofNode(blockE.vtx[0].sha256)]))
    ])
    peer.send_and_ping(dsdMessage)
    assert_equal(self.get_JSON_notification(), None)

    # Each rejected dsdetected message adds 10 banscore points.
    end_banscore = node.getpeerinfo()[0]['banscore']
    assert ((end_banscore - start_banscore) / 10 ==
            13)  # because we have 13 negative tests so far

    # Finally, webhook should receive the notification if we send a proper
    # dsdetected message
    dsdMessage = msg_dsdetected(blocksDetails=[
        BlockDetails(
            [CBlockHeader(blockA)],
            DSMerkleProof(1, txA, blockA.hashMerkleRoot,
                          [MerkleProofNode(blockA.vtx[0].sha256)])),
        BlockDetails(
            [CBlockHeader(blockB)],
            DSMerkleProof(1, txB, blockB.hashMerkleRoot,
                          [MerkleProofNode(blockB.vtx[0].sha256)]))
    ])
    peer.send_and_ping(dsdMessage)
    json_notification = self.get_JSON_notification()
    # remove divergentBlockHash so we can compare with the ds-message
    assert (json_notification != None)
    for e in json_notification['blocks']:
        del e['divergentBlockHash']
    assert_equal(str(dsdMessage),
                 str(msg_dsdetected(json_notification=json_notification)))

    # Repeat previous test but change the order of the BlockDetails, the
    # node should identify this as a duplicate
    dsdMessage = msg_dsdetected(blocksDetails=[
        BlockDetails(
            [CBlockHeader(blockB)],
            DSMerkleProof(1, txB, blockB.hashMerkleRoot,
                          [MerkleProofNode(blockB.vtx[0].sha256)])),
        BlockDetails(
            [CBlockHeader(blockA)],
            DSMerkleProof(1, txA, blockA.hashMerkleRoot,
                          [MerkleProofNode(blockA.vtx[0].sha256)]))
    ])
    peer.send_and_ping(dsdMessage)
    assert_equal(self.get_JSON_notification(), None)

    # repeat previous test but generate many blocks in the node to age the
    # notification message. Very old notification messages shall be ignored.
    # We use the same thresholds as safe mode.
    # We will hardcode this threshold for now until branch we depend on is merged
    node.generate(289)
    dsdMessage = msg_dsdetected(blocksDetails=[
        BlockDetails(
            [CBlockHeader(blockA)],
            DSMerkleProof(1, txA, blockA.hashMerkleRoot,
                          [MerkleProofNode(blockA.vtx[0].sha256)])),
        BlockDetails(
            [CBlockHeader(blockF)],
            DSMerkleProof(1, txF, blockF.hashMerkleRoot,
                          [MerkleProofNode(blockF.vtx[0].sha256)]))
    ])
    peer.send_and_ping(dsdMessage)
    assert_equal(self.get_JSON_notification(), None)

    # Create number of random valid block trees and send dsdetected P2P
    # message for each
    maxNumberOfBranches = 10
    maxNumberOfBlocksPerBranch = 30
    for _ in range(10):
        blockTree = self.createRandomBlockTree(maxNumberOfBranches,
                                               maxNumberOfBlocksPerBranch,
                                               utxoBlock, [utxoBlock.vtx[1]])
        dsdMessage = self.createDsDetectedMessageFromBlockTree(blockTree)
        peer.send_and_ping(dsdMessage)
        # Notification should be received as generated dsdetected message is valid
        json_notification = self.get_JSON_notification()
        # remove divergentBlockHash so we can compare with the ds-message
        assert (json_notification != None)
        for e in json_notification['blocks']:
            del e['divergentBlockHash']
        assert_equal(
            str(dsdMessage),
            str(msg_dsdetected(json_notification=json_notification)))

    self.stop_webhook_server()
def run_rest_case(self, min_fork_len, max_height_difference, max_fork_distance):
    """Exercise safe-mode activation exactly at the configured limit values.

    Builds an active main chain plus three forks, each sitting precisely at one
    of the safe-mode activation thresholds:

    * ``min_fork_len``          -- minimal fork length (``-safemodeminforklength``)
    * ``max_height_difference`` -- minimal tip height difference
                                   (``-safemodeminblockdifference``)
    * ``max_fork_distance``     -- maximal distance of the fork root from the
                                   active tip (``-safemodemaxforkdistance``)

    Then verifies the reported safe-mode data after each fork is announced,
    across node restarts (once with the original limits and once with limits
    off by one so no fork qualifies), and through the
    ``ignoresafemodeforblock`` / ``reconsidersafemodeforblock`` RPCs.
    """
    args = [
        f"-safemodemaxforkdistance={max_fork_distance}",
        f"-safemodeminforklength={min_fork_len}",
        f"-safemodeminblockdifference={max_height_difference}",
        f"-safemodewebhookurl=http://127.0.0.1:{self.PORT}/safemode",
    ]

    with self.run_node_with_connections("Preparation", 0, args, 2) as (conn1, conn2):
        conn1.rpc.generate(1)

        root_block, root_block_time = make_block(conn1, last_block_time=0)
        self.last_block_time = root_block_time
        send_by_headers(conn1, [root_block], do_send_blocks=True)
        wait_for_tip(conn1, root_block.hash)

        # We will create
        # ========================================================
        # mc -> main chain, mc[N] is the active tip
        # sf -> short fork
        # df -> distant fork
        # ld -> low height difference fork
        #
        #        |--------------max_fork_distance------------------------|
        # root - mc[0] - mc[1] - mc[2] - mc[3] - ... - mc[N-1] - mc[N]
        #   |      \                                      \
        #   |       \                                       sf[0] - sf[1] - ... - sf[N]
        #   |        \                                      |-----min_fork_len------|
        #    \        \
        #     \        ld[0] - ... - ld[N]
        #      \       |---max_height_difference---| -> (if negative ld[N] is behind active tip, in front otherwise)
        #       \
        #        df[0] - df[1] - ... - df[N]
        #

        # the main chain, make it long enough to be able to create distant fork
        main_chain = self.make_chain(conn1, root_block, max_fork_distance)

        # the distant fork, last common block is at limit of acceptance
        distant_fork_len = max(
            max_fork_distance + max_height_difference,
            min_fork_len) + 10  # make it longer than necessary
        distant_fork = self.make_chain(conn1, root_block, distant_fork_len)
        expected_distant_fork_data = {
            "forkfirstblock": distant_fork[0].hash,
            "tips": {distant_fork[-1].hash},
            "lastcommonblock": root_block.hash
        }

        # the short fork, fork with minimal acceptable length
        short_fork = self.make_chain(conn1, main_chain[-2], min_fork_len)
        expected_short_fork_data = {
            "forkfirstblock": short_fork[0].hash,
            "tips": {short_fork[-1].hash},
            "lastcommonblock": main_chain[-2].hash
        }

        # the low height difference fork; a fork whose tip is at minimal acceptable height relative to the chain tip
        low_height_difference_fork_len = len(
            main_chain
        ) + max_height_difference - 1  # minus 1 is because we are starting at first block of the main chain
        low_height_difference_fork = self.make_chain(
            conn1, main_chain[0], low_height_difference_fork_len)
        expected_low_height_difference_fork_data = {
            "forkfirstblock": low_height_difference_fork[0].hash,
            "tips": {low_height_difference_fork[-1].hash},
            "lastcommonblock": main_chain[0].hash
        }

        # send main branch that should be the active chain
        send_by_headers(conn1, main_chain, do_send_blocks=True)
        wait_for_tip(conn1, main_chain[-1].hash)

        # no forks yet, not in the safe mode
        self.wait_for_safe_mode_data(conn1.rpc, [])  # not in safe mode

        send_by_headers(conn1, distant_fork, do_send_blocks=False)
        wait_for_tip_status(conn1, distant_fork[-1].hash, "headers-only")

        # distant fork triggers the safe mode
        self.wait_for_safe_mode_data(conn1.rpc, [expected_distant_fork_data])

        send_by_headers(conn1, short_fork, do_send_blocks=False)
        wait_for_tip_status(conn1, short_fork[-1].hash, "headers-only")

        # two forks triggering the safe mode: distant fork and short fork
        self.wait_for_safe_mode_data(conn1.rpc, [
            expected_distant_fork_data,
            expected_short_fork_data,
        ])

        send_by_headers(conn1, low_height_difference_fork, do_send_blocks=False)
        wait_for_tip_status(conn1, low_height_difference_fork[-1].hash,
                            "headers-only")

        # all three forks triggering the safe mode
        self.wait_for_safe_mode_data(conn1.rpc, [
            expected_distant_fork_data,
            expected_low_height_difference_fork_data,
            expected_short_fork_data,
        ])

    # the node is stopped here; forget webhook messages received so far
    self.webhook_messages = []

    args_off_by_one = [
        f"-safemodemaxforkdistance={max_fork_distance-1}",
        f"-safemodeminforklength={min_fork_len+1}",
        f"-safemodeminblockdifference={max_height_difference+1}",
        f"-safemodewebhookurl=http://127.0.0.1:{self.PORT}/safemode",
    ]

    # Restarting the node with limits off by 1 so no fork satisfies safe mode activation criteria
    with self.run_node_with_connections("Preparation", 0, args_off_by_one, 2) as (conn1, conn2):
        # The node is not in the safe mode, no forks
        self.wait_for_safe_mode_data(conn1.rpc, [],
                                     check_webhook_messages=False)
        assert len(
            self.webhook_messages
        ) == 0  # we are starting without safe mode, the message is not sent

    # Restarting the node with original params, the node should be in the safe mode again
    with self.run_node_with_connections("Preparation", 0, args, 2) as (conn1, conn2):
        # the safe mode is at the same state as before first restart
        self.wait_for_safe_mode_data(conn1.rpc, [
            expected_distant_fork_data,
            expected_low_height_difference_fork_data,
            expected_short_fork_data,
        ])

        # We will add three more extensions to the chain
        # =====================================================
        # ... - mc[N-1] - mc[N] - mc_extension        sf_extension_2
        #          \                                 /
        #           sf[0] - sf[1] - ... - sf[N-1] - sf[N]
        #                                    \
        #                                     sf_extension

        short_fork_extension = self.make_chain(conn1, short_fork[-2], 1)
        send_by_headers(conn1, short_fork_extension, do_send_blocks=False)

        # when adding a new tip to the short branch we will just add a new tip to an existing fork
        expected_short_fork_data["tips"].add(short_fork_extension[-1].hash)
        self.wait_for_safe_mode_data(conn1.rpc, [
            expected_distant_fork_data,
            expected_low_height_difference_fork_data,
            expected_short_fork_data,
        ])

        # ignore tips of the short branch making it not triggering safe mode any more
        conn1.rpc.ignoresafemodeforblock(short_fork_extension[-1].hash)
        conn1.rpc.ignoresafemodeforblock(short_fork[-1].hash)
        self.wait_for_safe_mode_data(conn1.rpc, [
            expected_distant_fork_data,
            expected_low_height_difference_fork_data,
        ])

        # reconsidering previously ignored blocks
        conn1.rpc.reconsidersafemodeforblock(short_fork_extension[-1].hash)
        conn1.rpc.reconsidersafemodeforblock(short_fork[-1].hash)
        self.wait_for_safe_mode_data(conn1.rpc, [
            expected_distant_fork_data,
            expected_low_height_difference_fork_data,
            expected_short_fork_data,
        ])

        # ignoring root of the short fork, short fork will not trigger the safe mode.
        conn1.rpc.ignoresafemodeforblock(short_fork[0].hash)
        self.wait_for_safe_mode_data(conn1.rpc, [
            expected_distant_fork_data,
            expected_low_height_difference_fork_data,
        ])

        # extend ignored short fork with one more tip, we should ignore this block also because its ancestor is ignored
        short_fork_extension_2 = self.make_chain(conn1, short_fork[-2], 1)
        send_by_headers(conn1, short_fork_extension_2, do_send_blocks=True)
        wait_for_tip_status(conn1, short_fork_extension_2[-1].hash,
                            "headers-only")
        self.wait_for_safe_mode_data(conn1.rpc, [
            expected_distant_fork_data,
            expected_low_height_difference_fork_data,
        ])

        # but when it will be reconsidered the new tip should be visible
        expected_short_fork_data["tips"].add(
            short_fork_extension_2[-1].hash)

        # reconsidering one of the tips of the short fork will revert ignoring of the root block
        conn1.rpc.reconsidersafemodeforblock(short_fork[-1].hash)
        self.wait_for_safe_mode_data(conn1.rpc, [
            expected_distant_fork_data,
            expected_low_height_difference_fork_data,
            expected_short_fork_data,
        ])

        main_chain_extension = self.make_chain(conn1, main_chain[-1], 1)
        send_by_headers(conn1, main_chain_extension, do_send_blocks=True)

        # we have extended the main chain, so the distant fork became too distant and the
        # low-height-difference fork's tip fell too low -> those forks no longer trigger safe mode
        self.wait_for_safe_mode_data(conn1.rpc, [
            expected_short_fork_data,
        ])

        # we are now invalidating the main chain extension so the distant and low-height forks
        # are triggering the safe mode again
        conn1.rpc.invalidateblock(main_chain_extension[0].hash)
        self.wait_for_safe_mode_data(conn1.rpc, [
            expected_distant_fork_data,
            expected_low_height_difference_fork_data,
            expected_short_fork_data,
        ])
def run_test(self):
    """Verify that safe mode triggered by a valid fork survives a node restart
    and is deactivated once the fork base drops too far behind the active tip.

    Branch 1 (via conn1) is the main chain; branch 2 (via conn2) is a fully
    validated fork. Both branches are built with full block data.
    """
    with self.run_node_with_connections("Preparation", 0, None, 2) as (conn1, conn2):
        last_block_time = 0
        conn1.rpc.generate(1)

        # Branch 1: the main chain, long enough to later push the fork
        # root beyond the maximal accepted fork distance.
        branch_1_root, last_block_time = make_block(
            conn1, last_block_time=last_block_time)
        branch_1_blocks = [branch_1_root]
        for _ in range(SAFE_MODE_DEFAULT_MAX_FORK_DISTANCE):
            new_block, last_block_time = make_block(
                conn1, branch_1_blocks[-1], last_block_time=last_block_time)
            branch_1_blocks.append(new_block)

        # Branch 2: a competing fork just over the minimal fork length.
        branch_2_root, last_block_time = make_block(
            conn2, last_block_time=last_block_time)
        branch_2_blocks = [branch_2_root]
        for _ in range(SAFE_MODE_DEFAULT_MIN_FORK_LENGTH + 1):
            new_block, last_block_time = make_block(
                conn2, branch_2_blocks[-1], last_block_time=last_block_time)
            branch_2_blocks.append(new_block)

        # send main branch that should be the active tip
        send_by_headers(
            conn1,
            branch_1_blocks[:SAFE_MODE_DEFAULT_MIN_FORK_LENGTH + 2],
            do_send_blocks=True)

        # send alternative branch (with full blocks, so it gets validated)
        send_by_headers(conn2, branch_2_blocks, do_send_blocks=True)

        # active tip is from branch 1 and branch 2 has status valid-headers
        wait_for_tip(
            conn1,
            branch_1_blocks[SAFE_MODE_DEFAULT_MIN_FORK_LENGTH + 1].hash)
        wait_for_tip_status(conn1, branch_2_blocks[-1].hash, "valid-headers")

        # we should have entered safe mode with VALID because there is a valid fork with
        # SAFE_MODE_DEFAULT_MIN_VALID_FORK_POW pow and the last common block is less than
        # SAFE_MODE_DEFAULT_MAX_VALID_FORK_DISTANCE from the active tip
        assert conn1.rpc.getsafemodeinfo()["safemodeenabled"]

    with self.run_node_with_connections("Restart node in safe mode", 0, None, 1) as conn:
        conn1 = conn[0]

        # check that we are still in safe mode after restart
        assert conn1.rpc.getsafemodeinfo()["safemodeenabled"]

        # send the rest of the main branch; it should become the active tip
        send_by_headers(conn1,
                        branch_1_blocks[SAFE_MODE_DEFAULT_MIN_FORK_LENGTH + 2:],
                        do_send_blocks=True)

        # active tip is the last block from branch 1
        wait_for_tip(conn1, branch_1_blocks[-1].hash)

        # we should exit safe mode because the fork base is now too far from the active tip
        assert not conn1.rpc.getsafemodeinfo()["safemodeenabled"]
def run_test(self):
    """Verify safe mode is driven by the fork-root distance from the active tip.

    A headers-only fork with more POW is announced at exactly the maximal
    accepted distance, so safe mode stays off; invalidating the active tip
    shortens that distance and activates safe mode; reconsidering the tip
    deactivates it again. Finally the fork's (invalid) blocks are delivered
    and the fork must end up with chain-tip status "invalid" without
    triggering safe mode.
    """
    MAX_FORK_DISTANCE = 10
    MIN_FORK_LENGTH = 3
    MIN_FORK_DIFFERENCE = 1

    args = [f"-safemodemaxforkdistance={MAX_FORK_DISTANCE}",
            f"-safemodeminforklength={MIN_FORK_LENGTH}",
            f"-safemodeminblockdifference={MIN_FORK_DIFFERENCE}", ]

    with self.run_node_with_connections("Preparation", 0, args, 2) as (conn1, conn2):
        last_block_time = 0
        conn1.rpc.generate(1)

        # Branch 1: the active chain, MAX_FORK_DISTANCE + 1 blocks long.
        branch_1_root, last_block_time = make_block(conn1, last_block_time=last_block_time)
        branch_1_blocks = [branch_1_root]
        for _ in range(MAX_FORK_DISTANCE):
            new_block, last_block_time = make_block(conn1, branch_1_blocks[-1], last_block_time=last_block_time)
            branch_1_blocks.append(new_block)

        # Branch 2: a longer fork whose root block is deliberately invalid
        # (makeValid=False), so block validation must eventually reject it.
        branch_2_root, last_block_time = make_block(conn2, makeValid=False, last_block_time=last_block_time)
        branch_2_blocks = [branch_2_root]
        for _ in range(MAX_FORK_DISTANCE + MIN_FORK_DIFFERENCE + 1):
            new_block, last_block_time = make_block(conn2, branch_2_blocks[-1], last_block_time=last_block_time)
            branch_2_blocks.append(new_block)

        # send first branch that should be the active tip
        send_by_headers(conn1, branch_1_blocks, do_send_blocks=True)
        wait_for_tip(conn1, branch_1_blocks[-1].hash)

        # send second branch (headers only) with more POW
        send_by_headers(conn2, branch_2_blocks, do_send_blocks=False)
        wait_for_tip(conn1, branch_1_blocks[-1].hash)
        wait_for_tip_status(conn1, branch_2_blocks[-1].hash, "headers-only")

        # we should not be in safe mode (distance to the fork is too large)
        assert not conn1.rpc.getsafemodeinfo()["safemodeenabled"]

        conn1.rpc.invalidateblock(branch_1_blocks[-1].hash)
        wait_for_tip(conn1, branch_1_blocks[-2].hash)
        # here we have shortened distance from the active tip to the fork root
        # so the safe mode should be activated
        assert conn1.rpc.getsafemodeinfo()["safemodeenabled"]

        conn1.rpc.reconsiderblock(branch_1_blocks[-1].hash)
        wait_for_tip(conn1, branch_1_blocks[-1].hash)
        # returning to the old state (distance to the fork is too large)
        assert not conn1.rpc.getsafemodeinfo()["safemodeenabled"]

        # From time to time this test can run faster than expected and
        # the older blocks for batch 2 headers are not yet requested.
        # In that case they will be rejected due to being too far away
        # from the tip. In that case we need to send them again once they
        # are requested.
        def on_getdata(conn, msg):
            # Re-serve any branch-2 block the node asks for; anything other
            # than a block inventory item is unexpected here.
            for i in msg.inv:
                if i.type != 2:  # MSG_BLOCK
                    error_msg = f"Unexpected data requested {i}"
                    self.log.error(error_msg)
                    raise NotImplementedError(error_msg)
                for block in branch_2_blocks:
                    if int(block.hash, 16) == i.hash:
                        conn.send_message(msg_block(block))
                        break
        conn2.cb.on_getdata = on_getdata

        # send second branch full blocks
        for block in branch_2_blocks:
            conn2.send_message(msg_block(block))

        # second branch should now be invalid
        wait_for_tip_status(conn1, branch_2_blocks[-1].hash, "invalid")
        wait_for_tip(conn1, branch_1_blocks[-1].hash)

        # we should not be in safe mode
        assert not conn1.rpc.getsafemodeinfo()["safemodeenabled"]