def on_getheaders(self, message):
    """Search back through our block store for the locator, and reply with a headers message if found."""
    locator, hash_stop = message.locator, message.hashstop

    # Nothing to serve if we have no blocks; the most recently stored
    # block is assumed to be our tip.
    if not self.block_store:
        return

    maxheaders = 2000
    found = [self.block_store[self.last_block_hash]]
    # Walk backwards from the tip until we reach a hash the peer already
    # knows (in the locator), the requested hashstop, or a gap in our store.
    while found[-1].sha256 not in locator.vHave:
        parent_hash = found[-1].hashPrevBlock
        if parent_hash not in self.block_store:
            logger.debug('block hash {} not found in block store'.format(hex(parent_hash)))
            break
        parent_header = CBlockHeader(self.block_store[parent_hash])
        found.append(parent_header)
        if parent_header.sha256 == hash_stop:
            # The peer asked us to stop at this header.
            break

    # Keep at most maxheaders entries, reversed into oldest-first order
    # as the headers message expects.
    response = msg_headers(found[:-maxheaders - 1:-1])
    if response is not None:
        self.send_message(response)
def send_blocks_and_test(self, blocks, node, *, success=True, force_send=False, reject_reason=None, expect_disconnect=False, timeout=60):
    """Send blocks to test node and test whether the tip advances.

     - add all blocks to our block_store
     - send a headers message for the final block
     - the on_getheaders handler will ensure that any getheaders are responded to
     - if force_send is False: wait for getdata for each of the blocks. The
       on_getdata handler will ensure that any getdata messages are responded to.
       Otherwise send the full block unsolicited.
     - if success is True: assert that the node's tip advances to the most recent block
     - if success is False: assert that the node's tip doesn't advance
     - if reject_reason is set: assert that the correct reject message is logged"""
    with mininode_lock:
        # Record every block so on_getheaders/on_getdata can serve them;
        # the last block appended becomes our notion of the tip.
        for block in blocks:
            self.block_store[block.sha256] = block
            self.last_block_hash = block.sha256

    # assert_debug_log with an empty expected_msgs list is a no-op check.
    reject_reason = [reject_reason] if reject_reason else []
    with node.assert_debug_log(expected_msgs=reject_reason):
        if force_send:
            # Push the full blocks unsolicited.
            for b in blocks:
                self.send_message(msg_block(block=b))
        else:
            # Announce all headers, then wait until the node requests the
            # final block; on_getdata replies from block_store.
            self.send_message(msg_headers([CBlockHeader(block) for block in blocks]))
            wait_until(lambda: blocks[-1].sha256 in self.getdata_requests, timeout=timeout, lock=mininode_lock)

        if expect_disconnect:
            self.wait_for_disconnect(timeout=timeout)
        else:
            # A ping/pong round trip guarantees the node finished
            # processing everything we sent before it.
            self.sync_with_ping(timeout=timeout)

        if success:
            wait_until(lambda: node.getbestblockhash() == blocks[-1].hash, timeout=timeout)
        else:
            assert node.getbestblockhash() != blocks[-1].hash
def send_blocks_and_test(self, blocks, node, *, success=True, request_block=True, reject_code=None, reject_reason=None, timeout=60):
    """Send blocks to test node and test whether the tip advances.

     - add all blocks to our block_store
     - send a headers message for the final block
     - the on_getheaders handler will ensure that any getheaders are responded to
     - if request_block is True: wait for getdata for each of the blocks. The
       on_getdata handler will ensure that any getdata messages are responded to
     - if success is True: assert that the node's tip advances to the most recent block
     - if success is False: assert that the node's tip doesn't advance
     - if reject_code and reject_reason are set: assert that the correct reject
       message is received"""
    with mininode_lock:
        # Reset reject tracking so we only observe rejects caused by
        # the blocks sent in this call.
        self.reject_code_received = None
        self.reject_reason_received = None

        # Record every block so on_getheaders/on_getdata can serve them;
        # the last block appended becomes our notion of the tip.
        for block in blocks:
            self.block_store[block.sha256] = block
            self.last_block_hash = block.sha256

    # Announce only the final header; the node's getheaders/getdata
    # round trips will pull in whatever else it is missing.
    self.send_message(msg_headers([CBlockHeader(blocks[-1])]))

    if request_block:
        wait_until(lambda: blocks[-1].sha256 in self.getdata_requests, timeout=timeout, lock=mininode_lock)

    if success:
        wait_until(lambda: node.getbestblockhash() == blocks[-1].hash, timeout=timeout)
    else:
        assert node.getbestblockhash() != blocks[-1].hash

    if reject_code is not None:
        wait_until(lambda: self.reject_code_received == reject_code, lock=mininode_lock)
    if reject_reason is not None:
        wait_until(lambda: self.reject_reason_received == reject_reason, lock=mininode_lock)
def send_header_for_blocks(self, new_blocks):
    """Announce new_blocks to the peer in a single headers message."""
    announcement = msg_headers()
    announcement.headers = [CBlockHeader(block) for block in new_blocks]
    self.send_message(announcement)
def run_test(self):
    """Exercise acceptance rules for unsolicited (unrequested) blocks.

    Walks through nine numbered scenarios covering: unrequested blocks at
    the tip, blocks on a fork that cannot yet be fully validated,
    completing a more-work chain, invalid blocks deep in a reorg, and
    disconnection on headers that extend a known-invalid chain.
    """
    # Setup the p2p connections
    # test_node connects to node0 (not whitelisted)
    test_node = self.nodes[0].add_p2p_connection(P2PInterface())
    # min_work_node connects to node1 (whitelisted)
    min_work_node = self.nodes[1].add_p2p_connection(P2PInterface())

    # 1. Have nodes mine a block (leave IBD)
    [ n.generate(1) for n in self.nodes ]
    tips = [ int("0x" + n.getbestblockhash(), 0) for n in self.nodes ]

    # 2. Send one block that builds on each tip.
    # This should be accepted by node0
    blocks_h2 = []  # the height 2 blocks on each node's chain
    block_time = int(time.time()) + 1
    for i in range(2):
        blocks_h2.append(create_block(tips[i], create_coinbase(2), block_time))
        blocks_h2[i].solve()
        block_time += 1
    test_node.send_message(msg_block(blocks_h2[0]))
    min_work_node.send_message(msg_block(blocks_h2[1]))

    for x in [test_node, min_work_node]:
        x.sync_with_ping()
    assert_equal(self.nodes[0].getblockcount(), 2)
    assert_equal(self.nodes[1].getblockcount(), 1)
    self.log.info("First height 2 block accepted by node0; correctly rejected by node1")

    # 3. Send another block that builds on genesis.
    block_h1f = create_block(int("0x" + self.nodes[0].getblockhash(0), 0), create_coinbase(1), block_time)
    block_time += 1
    block_h1f.solve()
    test_node.send_message(msg_block(block_h1f))

    test_node.sync_with_ping()
    # The fork block should appear as a headers-only chaintip: known, but
    # not downloaded/validated.
    tip_entry_found = False
    for x in self.nodes[0].getchaintips():
        if x['hash'] == block_h1f.hash:
            assert_equal(x['status'], "headers-only")
            tip_entry_found = True
    assert(tip_entry_found)
    assert_raises_rpc_error(-1, "Block not found on disk", self.nodes[0].getblock, block_h1f.hash)

    # 4. Send another two block that build on the fork.
    block_h2f = create_block(block_h1f.sha256, create_coinbase(2), block_time)
    block_time += 1
    block_h2f.solve()
    test_node.send_message(msg_block(block_h2f))

    test_node.sync_with_ping()
    # Since the earlier block was not processed by node, the new block
    # can't be fully validated.
    tip_entry_found = False
    for x in self.nodes[0].getchaintips():
        if x['hash'] == block_h2f.hash:
            assert_equal(x['status'], "headers-only")
            tip_entry_found = True
    assert(tip_entry_found)

    # But this block should be accepted by node since it has equal work.
    # (getblock succeeding proves the block data was stored.)
    self.nodes[0].getblock(block_h2f.hash)
    self.log.info("Second height 2 block accepted, but not reorg'ed to")

    # 4b. Now send another block that builds on the forking chain.
    block_h3 = create_block(block_h2f.sha256, create_coinbase(3), block_h2f.nTime+1)
    block_h3.solve()
    test_node.send_message(msg_block(block_h3))

    test_node.sync_with_ping()
    # Since the earlier block was not processed by node, the new block
    # can't be fully validated.
    tip_entry_found = False
    for x in self.nodes[0].getchaintips():
        if x['hash'] == block_h3.hash:
            assert_equal(x['status'], "headers-only")
            tip_entry_found = True
    assert(tip_entry_found)
    # NOTE(review): getblock is called twice on block_h3 below; the second
    # call is redundant but harmless.
    self.nodes[0].getblock(block_h3.hash)

    # But this block should be accepted by node since it has more work.
    self.nodes[0].getblock(block_h3.hash)
    self.log.info("Unrequested more-work block accepted")

    # 4c. Now mine 288 more blocks and deliver; all should be processed but
    # the last (height-too-high) on node (as long as it is not missing any headers)
    tip = block_h3
    all_blocks = []
    for i in range(288):
        next_block = create_block(tip.sha256, create_coinbase(i + 4), tip.nTime+1)
        next_block.solve()
        all_blocks.append(next_block)
        tip = next_block

    # Now send the block at height 5 and check that it wasn't accepted (missing header)
    test_node.send_message(msg_block(all_blocks[1]))
    test_node.sync_with_ping()
    assert_raises_rpc_error(-5, "Block not found", self.nodes[0].getblock, all_blocks[1].hash)
    assert_raises_rpc_error(-5, "Block not found", self.nodes[0].getblockheader, all_blocks[1].hash)

    # The block at height 5 should be accepted if we provide the missing header, though
    headers_message = msg_headers()
    headers_message.headers.append(CBlockHeader(all_blocks[0]))
    test_node.send_message(headers_message)
    test_node.send_message(msg_block(all_blocks[1]))
    test_node.sync_with_ping()
    self.nodes[0].getblock(all_blocks[1].hash)

    # Now send the blocks in all_blocks
    for i in range(288):
        test_node.send_message(msg_block(all_blocks[i]))
    test_node.sync_with_ping()

    # Blocks 1-287 should be accepted, block 288 should be ignored because it's too far ahead
    for x in all_blocks[:-1]:
        self.nodes[0].getblock(x.hash)
    assert_raises_rpc_error(-1, "Block not found on disk", self.nodes[0].getblock, all_blocks[-1].hash)

    # 5. Test handling of unrequested block on the node that didn't process
    # Should still not be processed (even though it has a child that has more
    # work).

    # The node should have requested the blocks at some point, so
    # disconnect/reconnect first
    self.nodes[0].disconnect_p2ps()
    self.nodes[1].disconnect_p2ps()

    test_node = self.nodes[0].add_p2p_connection(P2PInterface())

    test_node.send_message(msg_block(block_h1f))

    test_node.sync_with_ping()
    assert_equal(self.nodes[0].getblockcount(), 2)
    self.log.info("Unrequested block that would complete more-work chain was ignored")

    # 6. Try to get node to request the missing block.
    # Poke the node with an inv for block at height 3 and see if that
    # triggers a getdata on block 2 (it should if block 2 is missing).
    with mininode_lock:
        # Clear state so we can check the getdata request
        test_node.last_message.pop("getdata", None)
        test_node.send_message(msg_inv([CInv(2, block_h3.sha256)]))

    test_node.sync_with_ping()
    with mininode_lock:
        getdata = test_node.last_message["getdata"]

    # Check that the getdata includes the right block
    assert_equal(getdata.inv[0].hash, block_h1f.sha256)
    self.log.info("Inv at tip triggered getdata for unprocessed block")

    # 7. Send the missing block for the third time (now it is requested)
    test_node.send_message(msg_block(block_h1f))

    test_node.sync_with_ping()
    # The node can now connect the whole fork chain and reorg to it.
    assert_equal(self.nodes[0].getblockcount(), 290)
    self.nodes[0].getblock(all_blocks[286].hash)
    assert_equal(self.nodes[0].getbestblockhash(), all_blocks[286].hash)
    assert_raises_rpc_error(-1, "Block not found on disk", self.nodes[0].getblock, all_blocks[287].hash)
    self.log.info("Successfully reorged to longer chain from non-whitelisted peer")

    # 8. Create a chain which is invalid at a height longer than the
    # current chain, but which has more blocks on top of that
    block_289f = create_block(all_blocks[284].sha256, create_coinbase(289), all_blocks[284].nTime+1)
    block_289f.solve()
    block_290f = create_block(block_289f.sha256, create_coinbase(290), block_289f.nTime+1)
    block_290f.solve()
    block_291 = create_block(block_290f.sha256, create_coinbase(291), block_290f.nTime+1)
    # block_291 spends a coinbase below maturity!
    block_291.vtx.append(create_tx_with_script(block_290f.vtx[0], 0, script_sig=b"42", amount=1))
    block_291.hashMerkleRoot = block_291.calc_merkle_root()
    block_291.solve()
    block_292 = create_block(block_291.sha256, create_coinbase(292), block_291.nTime+1)
    block_292.solve()

    # Now send all the headers on the chain and enough blocks to trigger reorg
    headers_message = msg_headers()
    headers_message.headers.append(CBlockHeader(block_289f))
    headers_message.headers.append(CBlockHeader(block_290f))
    headers_message.headers.append(CBlockHeader(block_291))
    headers_message.headers.append(CBlockHeader(block_292))
    test_node.send_message(headers_message)

    test_node.sync_with_ping()
    tip_entry_found = False
    for x in self.nodes[0].getchaintips():
        if x['hash'] == block_292.hash:
            assert_equal(x['status'], "headers-only")
            tip_entry_found = True
    assert(tip_entry_found)
    assert_raises_rpc_error(-1, "Block not found on disk", self.nodes[0].getblock, block_292.hash)

    test_node.send_message(msg_block(block_289f))
    test_node.send_message(msg_block(block_290f))

    test_node.sync_with_ping()
    self.nodes[0].getblock(block_289f.hash)
    self.nodes[0].getblock(block_290f.hash)

    test_node.send_message(msg_block(block_291))

    # At this point we've sent an obviously-bogus block, wait for full processing
    # without assuming whether we will be disconnected or not
    try:
        # Only wait a short while so the test doesn't take forever if we do get
        # disconnected
        test_node.sync_with_ping(timeout=1)
    except AssertionError:
        test_node.wait_for_disconnect()

        self.nodes[0].disconnect_p2ps()
        test_node = self.nodes[0].add_p2p_connection(P2PInterface())

    # We should have failed reorg and switched back to 290 (but have block 291)
    assert_equal(self.nodes[0].getblockcount(), 290)
    assert_equal(self.nodes[0].getbestblockhash(), all_blocks[286].hash)
    assert_equal(self.nodes[0].getblock(block_291.hash)["confirmations"], -1)

    # Now send a new header on the invalid chain, indicating we're forked off, and expect to get disconnected
    block_293 = create_block(block_292.sha256, create_coinbase(293), block_292.nTime+1)
    block_293.solve()
    headers_message = msg_headers()
    headers_message.headers.append(CBlockHeader(block_293))
    test_node.send_message(headers_message)
    test_node.wait_for_disconnect()

    # 9. Connect node1 to node0 and ensure it is able to sync
    connect_nodes(self.nodes[0], 1)
    sync_blocks([self.nodes[0], self.nodes[1]])
    self.log.info("Successfully synced nodes 1 and 0")
def test_oversized_headers_msg(self):
    """Check handling of a headers message one entry over the protocol limit."""
    n_headers = MAX_HEADERS_RESULTS + 1
    oversized = msg_headers([CBlockHeader() for _ in range(n_headers)])
    self.test_oversized_msg(oversized, n_headers)
def run_test(self):
    """Exercise acceptance rules for unsolicited (unrequested) blocks.

    Same nine scenarios as the classic p2p-unrequested-blocks test,
    written against the newer framework API (send_and_ping, p2p_lock,
    MSG_BLOCK, self.connect_nodes / self.sync_blocks).
    """
    test_node = self.nodes[0].add_p2p_connection(P2PInterface())
    min_work_node = self.nodes[1].add_p2p_connection(P2PInterface())

    # 1. Have nodes mine a block (leave IBD)
    [ n.generatetoaddress(1, n.get_deterministic_priv_key().address) for n in self.nodes ]
    tips = [int("0x" + n.getbestblockhash(), 0) for n in self.nodes]

    # 2. Send one block that builds on each tip.
    # This should be accepted by node0
    blocks_h2 = []  # the height 2 blocks on each node's chain
    block_time = int(time.time()) + 1
    for i in range(2):
        blocks_h2.append(
            create_block(tips[i], create_coinbase(2), block_time))
        blocks_h2[i].solve()
        block_time += 1
    test_node.send_and_ping(msg_block(blocks_h2[0]))
    min_work_node.send_and_ping(msg_block(blocks_h2[1]))

    assert_equal(self.nodes[0].getblockcount(), 2)
    assert_equal(self.nodes[1].getblockcount(), 1)
    self.log.info(
        "First height 2 block accepted by node0; correctly rejected by node1"
    )

    # 3. Send another block that builds on genesis.
    block_h1f = create_block(int("0x" + self.nodes[0].getblockhash(0), 0),
                             create_coinbase(1), block_time)
    block_time += 1
    block_h1f.solve()
    test_node.send_and_ping(msg_block(block_h1f))

    # The fork block should appear as a headers-only chaintip: known,
    # but not downloaded/validated.
    tip_entry_found = False
    for x in self.nodes[0].getchaintips():
        if x['hash'] == block_h1f.hash:
            assert_equal(x['status'], "headers-only")
            tip_entry_found = True
    assert tip_entry_found
    assert_raises_rpc_error(-1, "Block not found on disk",
                            self.nodes[0].getblock, block_h1f.hash)

    # 4. Send another two block that build on the fork.
    block_h2f = create_block(block_h1f.sha256, create_coinbase(2), block_time)
    block_time += 1
    block_h2f.solve()
    test_node.send_and_ping(msg_block(block_h2f))

    # Since the earlier block was not processed by node, the new block
    # can't be fully validated.
    tip_entry_found = False
    for x in self.nodes[0].getchaintips():
        if x['hash'] == block_h2f.hash:
            assert_equal(x['status'], "headers-only")
            tip_entry_found = True
    assert tip_entry_found

    # But this block should be accepted by node since it has equal work.
    # (getblock succeeding proves the block data was stored.)
    self.nodes[0].getblock(block_h2f.hash)
    self.log.info("Second height 2 block accepted, but not reorg'ed to")

    # 4b. Now send another block that builds on the forking chain.
    block_h3 = create_block(block_h2f.sha256, create_coinbase(3),
                            block_h2f.nTime + 1)
    block_h3.solve()
    test_node.send_and_ping(msg_block(block_h3))

    # Since the earlier block was not processed by node, the new block
    # can't be fully validated.
    tip_entry_found = False
    for x in self.nodes[0].getchaintips():
        if x['hash'] == block_h3.hash:
            assert_equal(x['status'], "headers-only")
            tip_entry_found = True
    assert tip_entry_found
    # NOTE(review): getblock is called twice on block_h3 below; the second
    # call is redundant but harmless.
    self.nodes[0].getblock(block_h3.hash)

    # But this block should be accepted by node since it has more work.
    self.nodes[0].getblock(block_h3.hash)
    self.log.info("Unrequested more-work block accepted")

    # 4c. Now mine 288 more blocks and deliver; all should be processed but
    # the last (height-too-high) on node (as long as it is not missing any headers)
    tip = block_h3
    all_blocks = []
    for i in range(288):
        next_block = create_block(tip.sha256, create_coinbase(i + 4), tip.nTime + 1)
        next_block.solve()
        all_blocks.append(next_block)
        tip = next_block

    # Now send the block at height 5 and check that it wasn't accepted (missing header)
    test_node.send_and_ping(msg_block(all_blocks[1]))
    assert_raises_rpc_error(-5, "Block not found",
                            self.nodes[0].getblock, all_blocks[1].hash)
    assert_raises_rpc_error(-5, "Block not found",
                            self.nodes[0].getblockheader, all_blocks[1].hash)

    # The block at height 5 should be accepted if we provide the missing header, though
    headers_message = msg_headers()
    headers_message.headers.append(CBlockHeader(all_blocks[0]))
    test_node.send_message(headers_message)
    test_node.send_and_ping(msg_block(all_blocks[1]))
    self.nodes[0].getblock(all_blocks[1].hash)

    # Now send the blocks in all_blocks
    for i in range(288):
        test_node.send_message(msg_block(all_blocks[i]))
    test_node.sync_with_ping()

    # Blocks 1-287 should be accepted, block 288 should be ignored because it's too far ahead
    for x in all_blocks[:-1]:
        self.nodes[0].getblock(x.hash)
    assert_raises_rpc_error(-1, "Block not found on disk",
                            self.nodes[0].getblock, all_blocks[-1].hash)

    # 5. Test handling of unrequested block on the node that didn't process
    # Should still not be processed (even though it has a child that has more
    # work).

    # The node should have requested the blocks at some point, so
    # disconnect/reconnect first
    self.nodes[0].disconnect_p2ps()
    self.nodes[1].disconnect_p2ps()

    test_node = self.nodes[0].add_p2p_connection(P2PInterface())

    test_node.send_and_ping(msg_block(block_h1f))

    assert_equal(self.nodes[0].getblockcount(), 2)
    self.log.info(
        "Unrequested block that would complete more-work chain was ignored"
    )

    # 6. Try to get node to request the missing block.
    # Poke the node with an inv for block at height 3 and see if that
    # triggers a getdata on block 2 (it should if block 2 is missing).
    with p2p_lock:
        # Clear state so we can check the getdata request
        test_node.last_message.pop("getdata", None)
        test_node.send_message(msg_inv([CInv(MSG_BLOCK, block_h3.sha256)]))

    test_node.sync_with_ping()
    with p2p_lock:
        getdata = test_node.last_message["getdata"]

    # Check that the getdata includes the right block
    assert_equal(getdata.inv[0].hash, block_h1f.sha256)
    self.log.info("Inv at tip triggered getdata for unprocessed block")

    # 7. Send the missing block for the third time (now it is requested)
    test_node.send_and_ping(msg_block(block_h1f))

    # The node can now connect the whole fork chain and reorg to it.
    assert_equal(self.nodes[0].getblockcount(), 290)
    self.nodes[0].getblock(all_blocks[286].hash)
    assert_equal(self.nodes[0].getbestblockhash(), all_blocks[286].hash)
    assert_raises_rpc_error(-1, "Block not found on disk",
                            self.nodes[0].getblock, all_blocks[287].hash)
    self.log.info("Successfully reorged to longer chain")

    # 8. Create a chain which is invalid at a height longer than the
    # current chain, but which has more blocks on top of that
    block_289f = create_block(all_blocks[284].sha256, create_coinbase(289),
                              all_blocks[284].nTime + 1)
    block_289f.solve()
    block_290f = create_block(block_289f.sha256, create_coinbase(290),
                              block_289f.nTime + 1)
    block_290f.solve()
    block_291 = create_block(block_290f.sha256, create_coinbase(291),
                             block_290f.nTime + 1)
    # block_291 spends a coinbase below maturity!
    block_291.vtx.append(
        create_tx_with_script(block_290f.vtx[0], 0, script_sig=b"42", amount=1))
    block_291.hashMerkleRoot = block_291.calc_merkle_root()
    block_291.solve()
    block_292 = create_block(block_291.sha256, create_coinbase(292),
                             block_291.nTime + 1)
    block_292.solve()

    # Now send all the headers on the chain and enough blocks to trigger reorg
    headers_message = msg_headers()
    headers_message.headers.append(CBlockHeader(block_289f))
    headers_message.headers.append(CBlockHeader(block_290f))
    headers_message.headers.append(CBlockHeader(block_291))
    headers_message.headers.append(CBlockHeader(block_292))
    test_node.send_and_ping(headers_message)

    tip_entry_found = False
    for x in self.nodes[0].getchaintips():
        if x['hash'] == block_292.hash:
            assert_equal(x['status'], "headers-only")
            tip_entry_found = True
    assert tip_entry_found
    assert_raises_rpc_error(-1, "Block not found on disk",
                            self.nodes[0].getblock, block_292.hash)

    test_node.send_message(msg_block(block_289f))
    test_node.send_and_ping(msg_block(block_290f))

    self.nodes[0].getblock(block_289f.hash)
    self.nodes[0].getblock(block_290f.hash)

    test_node.send_message(msg_block(block_291))

    # At this point we've sent an obviously-bogus block, wait for full processing
    # without assuming whether we will be disconnected or not
    try:
        # Only wait a short while so the test doesn't take forever if we do get
        # disconnected
        test_node.sync_with_ping(timeout=1)
    except AssertionError:
        test_node.wait_for_disconnect()

        self.nodes[0].disconnect_p2ps()
        test_node = self.nodes[0].add_p2p_connection(P2PInterface())

    # We should have failed reorg and switched back to 290 (but have block 291)
    assert_equal(self.nodes[0].getblockcount(), 290)
    assert_equal(self.nodes[0].getbestblockhash(), all_blocks[286].hash)
    assert_equal(self.nodes[0].getblock(block_291.hash)["confirmations"], -1)

    # Now send a new header on the invalid chain, indicating we're forked off, and expect to get disconnected
    block_293 = create_block(block_292.sha256, create_coinbase(293),
                             block_292.nTime + 1)
    block_293.solve()
    headers_message = msg_headers()
    headers_message.headers.append(CBlockHeader(block_293))
    test_node.send_message(headers_message)
    test_node.wait_for_disconnect()

    # 9. Connect node1 to node0 and ensure it is able to sync
    self.connect_nodes(0, 1)
    self.sync_blocks([self.nodes[0], self.nodes[1]])
    self.log.info("Successfully synced nodes 1 and 0")
def run_test(self):
    """Exercise acceptance rules for unsolicited (unrequested) blocks.

    Variant of the p2p-unrequested-blocks test for a chain whose
    coinbases must be signed and whose UTXO set is tracked with
    UTXOManager snapshots (setup_stake_coins / get_tip_snapshot_meta).
    """
    self.setup_stake_coins(*self.nodes)

    # Setup the p2p connections
    # test_node connects to node0 (not whitelisted)
    test_node = self.nodes[0].add_p2p_connection(P2PInterface())
    # min_work_node connects to node1 (whitelisted)
    min_work_node = self.nodes[1].add_p2p_connection(P2PInterface())

    # Track spendable outputs for the fork chain we are about to build,
    # seeded from the genesis coin.
    fork_snapshot_meta = get_tip_snapshot_meta(self.nodes[0])
    utxo_manager = UTXOManager(self.nodes[0], fork_snapshot_meta)

    genesis_coin = get_unspent_coins(self.nodes[0], 1)[0]
    genesis_txout = CTxOut(
        int(genesis_coin['amount'] * UNIT),
        CScript(hex_str_to_bytes(genesis_coin['scriptPubKey'])))
    genesis_utxo = [
        UTXO(
            0, TxType.COINBASE,
            COutPoint(int(genesis_coin['txid'], 16), genesis_coin['vout']),
            genesis_txout)
    ]
    utxo_manager.available_outputs = genesis_utxo

    self.log.info("1. Have nodes mine a block (leave IBD)")
    [n.generate(1) for n in self.nodes]
    tips = [int("0x" + n.getbestblockhash(), 0) for n in self.nodes]
    tip_snapshot_meta = get_tip_snapshot_meta(self.nodes[0])

    self.log.info(
        "2. Send one block that builds on each tip. This should be accepted by node0."
    )
    blocks_h2 = []  # the height 2 blocks on each node's chain
    block_time = int(time.time()) + 1
    coin = get_unspent_coins(self.nodes[0], 1)[0]
    for i in range(2):
        coinbase = sign_coinbase(
            self.nodes[0], create_coinbase(2, coin, tip_snapshot_meta.hash))
        blocks_h2.append(create_block(tips[i], coinbase, block_time))
        blocks_h2[i].solve()
        block_time += 1
    test_node.send_message(msg_block(blocks_h2[0]))
    min_work_node.send_message(msg_block(blocks_h2[1]))

    for x in [test_node, min_work_node]:
        x.sync_with_ping()
    assert_equal(self.nodes[0].getblockcount(), 2)
    assert_equal(self.nodes[1].getblockcount(), 1)
    self.log.info(
        "First height 2 block accepted by node0; correctly rejected by node1"
    )

    self.log.info("3. Send another block that builds on genesis.")
    coinbase = utxo_manager.get_coinbase(1, n_pieces=300)
    block_h1f = create_block(int("0x" + self.nodes[0].getblockhash(0), 0),
                             coinbase, block_time)
    block_time += 1
    block_h1f.solve()
    test_node.send_message(msg_block(block_h1f))
    utxo_manager.process(coinbase, 1)

    test_node.sync_with_ping()
    # The fork block should appear as a headers-only chaintip: known,
    # but not downloaded/validated.
    tip_entry_found = False
    for x in self.nodes[0].getchaintips():
        if x['hash'] == block_h1f.hash:
            assert_equal(x['status'], "headers-only")
            tip_entry_found = True
    assert tip_entry_found
    assert_raises_rpc_error(-1, "Block not found on disk",
                            self.nodes[0].getblock, block_h1f.hash)

    self.log.info("4. Send another two block that build on the fork.")
    coinbase = utxo_manager.get_coinbase(2)
    block_h2f = create_block(block_h1f.sha256, coinbase, block_time)
    block_time += 1
    block_h2f.solve()
    test_node.send_message(msg_block(block_h2f))
    utxo_manager.process(coinbase, 2)

    test_node.sync_with_ping()
    # Since the earlier block was not processed by node, the new block
    # can't be fully validated.
    tip_entry_found = False
    for x in self.nodes[0].getchaintips():
        if x['hash'] == block_h2f.hash:
            assert_equal(x['status'], "headers-only")
            tip_entry_found = True
    assert tip_entry_found

    # But this block should be accepted by node since it has equal work.
    self.nodes[0].getblock(block_h2f.hash)
    self.log.info("Second height 2 block accepted, but not reorg'ed to")

    self.log.info(
        "4b. Now send another block that builds on the forking chain.")
    coinbase = utxo_manager.get_coinbase(3)
    block_h3 = create_block(block_h2f.sha256, coinbase, block_h2f.nTime + 1)
    block_h3.solve()
    test_node.send_message(msg_block(block_h3))
    utxo_manager.process(coinbase, 3)

    test_node.sync_with_ping()
    # Since the earlier block was not processed by node, the new block
    # can't be fully validated.
    tip_entry_found = False
    for x in self.nodes[0].getchaintips():
        if x['hash'] == block_h3.hash:
            assert_equal(x['status'], "headers-only")
            tip_entry_found = True
    assert tip_entry_found
    # NOTE(review): getblock is called twice on block_h3 below; the second
    # call is redundant but harmless.
    self.nodes[0].getblock(block_h3.hash)

    # But this block should be accepted by node since it has more work.
    self.nodes[0].getblock(block_h3.hash)
    self.log.info("Unrequested more-work block accepted")

    self.log.info("4c. Now mine 288 more blocks and deliver")
    # all should be processed but
    # the last (height-too-high) on node (as long as it is not missing any headers)
    tip = block_h3
    all_blocks = []
    for height in range(4, 292):
        coinbase = utxo_manager.get_coinbase(height)
        next_block = create_block(tip.sha256, coinbase, tip.nTime + 1)
        next_block.solve()
        all_blocks.append(next_block)
        tip = next_block
        utxo_manager.process(coinbase, height)

    # Now send the block at height 5 and check that it wasn't accepted (missing header)
    test_node.send_message(msg_block(all_blocks[1]))
    test_node.sync_with_ping()
    assert_raises_rpc_error(-5, "Block not found",
                            self.nodes[0].getblock, all_blocks[1].hash)
    assert_raises_rpc_error(-5, "Block not found",
                            self.nodes[0].getblockheader, all_blocks[1].hash)

    # The block at height 5 should be accepted if we provide the missing header, though
    headers_message = msg_headers()
    headers_message.headers.append(CBlockHeader(all_blocks[0]))
    test_node.send_message(headers_message)
    test_node.send_message(msg_block(all_blocks[1]))
    test_node.sync_with_ping()
    self.nodes[0].getblock(all_blocks[1].hash)

    # Now send the blocks in all_blocks
    for i in range(288):
        test_node.send_message(msg_block(all_blocks[i]))
    test_node.sync_with_ping()

    # Blocks 1-287 should be accepted, block 288 should be ignored because it's too far ahead
    for x in all_blocks[:-1]:
        self.nodes[0].getblock(x.hash)
    assert_raises_rpc_error(-1, "Block not found on disk",
                            self.nodes[0].getblock, all_blocks[-1].hash)

    self.log.info(
        "5. Test handling of unrequested block on the node that didn't process"
    )
    # Should still not be processed (even though it has a child that has more
    # work).

    # The node should have requested the blocks at some point, so
    # disconnect/reconnect first
    self.nodes[0].disconnect_p2ps()
    self.nodes[1].disconnect_p2ps()

    test_node = self.nodes[0].add_p2p_connection(P2PInterface())

    test_node.send_message(msg_block(block_h1f))

    test_node.sync_with_ping()
    assert_equal(self.nodes[0].getblockcount(), 2)
    self.log.info(
        "Unrequested block that would complete more-work chain was ignored"
    )

    self.log.info("6. Try to get node to request the missing block.")
    # Poke the node with an inv for block at height 3 and see if that
    # triggers a getdata on block 2 (it should if block 2 is missing).
    with mininode_lock:
        # Clear state so we can check the getdata request
        test_node.last_message.pop("getdata", None)
        test_node.send_message(msg_inv([CInv(2, block_h3.sha256)]))

    test_node.sync_with_ping()
    with mininode_lock:
        getdata = test_node.last_message["getdata"]

    # Check that the getdata includes the right block
    assert_equal(getdata.inv[0].hash, block_h1f.sha256)
    self.log.info("Inv at tip triggered getdata for unprocessed block")

    self.log.info(
        "7. Send the missing block for the third time (now it is requested)"
    )
    test_node.send_message(msg_block(block_h1f))

    test_node.sync_with_ping()
    # The node can now connect the whole fork chain and reorg to it.
    assert_equal(self.nodes[0].getblockcount(), 290)
    self.nodes[0].getblock(all_blocks[286].hash)
    assert_equal(self.nodes[0].getbestblockhash(), all_blocks[286].hash)
    assert_raises_rpc_error(-1, "Block not found on disk",
                            self.nodes[0].getblock, all_blocks[287].hash)
    self.log.info(
        "Successfully reorged to longer chain from non-whitelisted peer")

    self.log.info(
        "8. Create a chain which is invalid at a height longer than the")
    # current chain, but which has more blocks on top of that

    # Reset utxo managers to current state
    utxo_fork_manager = UTXOManager(self.nodes[0],
                                    get_tip_snapshot_meta(self.nodes[0]))
    utxo_fork_manager.available_outputs = utxo_manager.available_outputs
    utxo_manager = UTXOManager(self.nodes[0],
                               get_tip_snapshot_meta(self.nodes[0]))
    utxo_manager.available_outputs = utxo_fork_manager.available_outputs

    # Create one block on top of the valid chain
    coinbase = utxo_manager.get_coinbase(291)
    valid_block = create_block(all_blocks[286].sha256, coinbase,
                               all_blocks[286].nTime + 1)
    valid_block.solve()
    test_node.send_and_ping(msg_block(valid_block))
    assert_equal(self.nodes[0].getblockcount(), 291)

    # Create three blocks on a fork, but make the second one invalid
    coinbase = utxo_fork_manager.get_coinbase(291)
    block_291f = create_block(all_blocks[286].sha256, coinbase,
                              all_blocks[286].nTime + 1)
    block_291f.solve()
    utxo_fork_manager.process(coinbase, 291)
    coinbase = utxo_fork_manager.get_coinbase(292)
    block_292f = create_block(block_291f.sha256, coinbase,
                              block_291f.nTime + 1)
    # block_292f spends a coinbase below maturity!
    block_292f.vtx.append(
        create_tx_with_script(block_291f.vtx[0], 0, script_sig=b"42", amount=1))
    block_292f.compute_merkle_trees()
    block_292f.solve()
    utxo_fork_manager.process(coinbase, 292)
    utxo_fork_manager.process(block_292f.vtx[1], 292)
    coinbase = utxo_fork_manager.get_coinbase(293)
    block_293f = create_block(block_292f.sha256, coinbase,
                              block_292f.nTime + 1)
    block_293f.solve()
    utxo_fork_manager.process(coinbase, 293)

    # Now send all the headers on the chain and enough blocks to trigger reorg
    headers_message = msg_headers()
    headers_message.headers.append(CBlockHeader(block_291f))
    headers_message.headers.append(CBlockHeader(block_292f))
    headers_message.headers.append(CBlockHeader(block_293f))
    test_node.send_message(headers_message)

    test_node.sync_with_ping()
    tip_entry_found = False
    for x in self.nodes[0].getchaintips():
        if x['hash'] == block_293f.hash:
            assert_equal(x['status'], "headers-only")
            tip_entry_found = True
    assert tip_entry_found
    assert_raises_rpc_error(-1, "Block not found on disk",
                            self.nodes[0].getblock, block_293f.hash)

    test_node.send_message(msg_block(block_291f))

    test_node.sync_with_ping()
    self.nodes[0].getblock(block_291f.hash)

    test_node.send_message(msg_block(block_292f))

    # At this point we've sent an obviously-bogus block, wait for full processing
    # without assuming whether we will be disconnected or not
    try:
        # Only wait a short while so the test doesn't take forever if we do get
        # disconnected
        test_node.sync_with_ping(timeout=1)
    except AssertionError:
        test_node.wait_for_disconnect()

        self.nodes[0].disconnect_p2ps()
        test_node = self.nodes[0].add_p2p_connection(P2PInterface())

    # We should have failed the reorg and stayed on the valid chain at
    # height 291 (but still have block_292f's data).
    assert_equal(self.nodes[0].getblockcount(), 291)
    assert_equal(self.nodes[0].getbestblockhash(), valid_block.hash)
    assert_equal(self.nodes[0].getblock(block_292f.hash)["confirmations"], -1)

    # Now send a new header on the invalid chain, indicating we're forked off, and expect to get disconnected
    coinbase = utxo_fork_manager.get_coinbase(294)
    block_294f = create_block(block_293f.sha256, coinbase,
                              block_293f.nTime + 1)
    block_294f.solve()
    headers_message = msg_headers()
    headers_message.headers.append(CBlockHeader(block_294f))
    test_node.send_message(headers_message)
    test_node.wait_for_disconnect()

    self.log.info(
        "9. Connect node1 to node0 and ensure it is able to sync")
    connect_nodes(self.nodes[0], 1)
    sync_blocks([self.nodes[0], self.nodes[1]])
    self.log.info("Successfully synced nodes 1 and 0")
def run_test(self):
    """Exercise BIP152 compact-block bandwidth-mode selection on -blocksonly nodes.

    Checks that:
    - a -blocksonly node never requests high-bandwidth mode (sendcmpct announce=True),
    - a normal node does request it after receiving a new tip block,
    - a -blocksonly node requests getdata(BLOCK) rather than getdata(CMPCT) in
      low-bandwidth mode, while normal nodes still use getdata(CMPCT),
    - a -blocksonly node still *serves* compact blocks on request.
    """
    # Nodes will only request hb compact blocks mode when they're out of IBD
    for node in self.nodes:
        assert not node.getblockchaininfo()['initialblockdownload']
    p2p_conn_blocksonly = self.nodes[0].add_p2p_connection(P2PInterface())
    p2p_conn_high_bw = self.nodes[1].add_p2p_connection(P2PInterface())
    p2p_conn_low_bw = self.nodes[3].add_p2p_connection(P2PInterface())
    # Each node has announced sendcmpct twice during handshake (versions 1 and 2
    # — presumably; TODO confirm against the framework's version negotiation).
    for conn in [p2p_conn_blocksonly, p2p_conn_high_bw, p2p_conn_low_bw]:
        assert_equal(conn.message_count['sendcmpct'], 2)
        conn.send_and_ping(msg_sendcmpct(announce=False, version=2))
    # Nodes:
    #  0 -> blocksonly
    #  1 -> high bandwidth
    #  2 -> miner
    #  3 -> low bandwidth
    #
    # Topology:
    # p2p_conn_blocksonly ---> node0
    # p2p_conn_high_bw ---> node1
    # p2p_conn_low_bw ---> node3
    # node2 (no connections)
    #
    # node2 produces blocks that are passed to the rest of the nodes
    # through the respective p2p connections.
    self.log.info("Test that -blocksonly nodes do not select peers for BIP152 high bandwidth mode")
    block0 = self.build_block_on_tip()

    # A -blocksonly node should not request BIP152 high bandwidth mode upon
    # receiving a new valid block at the tip.
    p2p_conn_blocksonly.send_and_ping(msg_block(block0))
    assert_equal(int(self.nodes[0].getbestblockhash(), 16), block0.sha256)
    # sendcmpct count unchanged (still 2): no high-bandwidth request was sent.
    assert_equal(p2p_conn_blocksonly.message_count['sendcmpct'], 2)
    assert_equal(p2p_conn_blocksonly.last_message['sendcmpct'].announce, False)

    # A normal node participating in transaction relay should request BIP152
    # high bandwidth mode upon receiving a new valid block at the tip.
    p2p_conn_high_bw.send_and_ping(msg_block(block0))
    assert_equal(int(self.nodes[1].getbestblockhash(), 16), block0.sha256)
    # A third sendcmpct with announce=True is the high-bandwidth request.
    p2p_conn_high_bw.wait_until(lambda: p2p_conn_high_bw.message_count['sendcmpct'] == 3)
    assert_equal(p2p_conn_high_bw.last_message['sendcmpct'].announce, True)

    # Don't send a block from the p2p_conn_low_bw so the low bandwidth node
    # doesn't select it for BIP152 high bandwidth relay.
    self.nodes[3].submitblock(block0.serialize().hex())

    self.log.info("Test that -blocksonly nodes send getdata(BLOCK) instead"
                  " of getdata(CMPCT) in BIP152 low bandwidth mode")
    block1 = self.build_block_on_tip()
    # Announce via headers only; the node must fetch the block itself.
    p2p_conn_blocksonly.send_message(msg_headers(headers=[CBlockHeader(block1)]))
    p2p_conn_blocksonly.sync_send_with_ping()
    assert_equal(p2p_conn_blocksonly.last_message['getdata'].inv, [CInv(MSG_BLOCK | MSG_WITNESS_FLAG, block1.sha256)])
    p2p_conn_high_bw.send_message(msg_headers(headers=[CBlockHeader(block1)]))
    p2p_conn_high_bw.sync_send_with_ping()
    assert_equal(p2p_conn_high_bw.last_message['getdata'].inv, [CInv(MSG_CMPCT_BLOCK, block1.sha256)])

    self.log.info("Test that getdata(CMPCT) is still sent on BIP152 low bandwidth connections"
                  " when no -blocksonly nodes are involved")
    # NOTE(review): this path uses send_and_ping + sync_with_ping where the two
    # cases above use send_message + sync_send_with_ping — verify this mixed
    # usage is intentional and not a drift from the other branches.
    p2p_conn_low_bw.send_and_ping(msg_headers(headers=[CBlockHeader(block1)]))
    p2p_conn_low_bw.sync_with_ping()
    assert_equal(p2p_conn_low_bw.last_message['getdata'].inv, [CInv(MSG_CMPCT_BLOCK, block1.sha256)])

    self.log.info("Test that -blocksonly nodes still serve compact blocks")

    def test_for_cmpctblock(block):
        # True once the peer has received a cmpctblock whose header hashes to
        # the given block's hash.
        if 'cmpctblock' not in p2p_conn_blocksonly.last_message:
            return False
        return p2p_conn_blocksonly.last_message['cmpctblock'].header_and_shortids.header.rehash() == block.sha256

    # Explicitly request a compact block from the -blocksonly node.
    p2p_conn_blocksonly.send_message(msg_getdata([CInv(MSG_CMPCT_BLOCK, block0.sha256)]))
    p2p_conn_blocksonly.wait_until(lambda: test_for_cmpctblock(block0))

    # Request BIP152 high bandwidth mode from the -blocksonly node.
    p2p_conn_blocksonly.send_and_ping(msg_sendcmpct(announce=True, version=2))
    block2 = self.build_block_on_tip()
    # block1 must be submitted first so block2 connects to the tip.
    self.nodes[0].submitblock(block1.serialize().hex())
    self.nodes[0].submitblock(block2.serialize().hex())
    # In high-bandwidth mode the node announces block2 as a cmpctblock unsolicited.
    p2p_conn_blocksonly.wait_until(lambda: test_for_cmpctblock(block2))
def run_test(self):
    """Stress the node with header-spam and verify its DoS protections.

    Phases:
    1. Send many batches of stakeless/invalid headers and check the node logs
       DoS-limit errors, penalizes the peer, and does not forward bad headers.
    2. Optionally wait for unfilled header entries to decay out of the block index.
    3. Restart the node and check only the real headers were persisted.
    4. Send thousands of duplicate headers and check the duplicate-DoS log line.
    """
    check_blockindex_decay = True
    # NOTE(review): dos_nodes is immediately overwritten below — the
    # self.num_nodes assignment is dead. Presumably kept to ease flipping the
    # test back to all-node mode; confirm before removing.
    dos_nodes = self.num_nodes
    dos_nodes = 1
    nodes = self.nodes
    self.connect_nodes(0, 1)
    p2p_conns = []
    for i in range(dos_nodes):
        p2p_conns.append(self.nodes[i].add_p2p_connection(TestP2PConn(2)))
    # Import a deterministic wallet so node0 can stake blocks.
    nodes[0].extkeyimportmaster('pact mammal barrel matrix local final lecture chunk wasp survey bid various book strong spread fall ozone daring like topple door fatigue limb olympic', '', 'true')
    nodes[0].getnewextaddress('lblExtTest')
    nodes[0].rescanblockchain()
    self.log.info('Generating blocks.')
    nodes[0].walletsettings('stakelimit', {'height': 20})
    nodes[0].reservebalance(False)
    self.wait_for_height(nodes[0], 20, 2000)
    # Let the test nodes get in sync
    for i in range(dos_nodes):
        self.nodes[i].p2ps[0].wait_for_verack()
    MAX_HEADERS = 10
    ITERATIONS = 200
    block_count = nodes[0].getblockcount()
    pastBlockHash = nodes[0].getblockhash(block_count - MAX_HEADERS - 1)
    # In each iteration, send a `headers` message with the maximal number of entries
    # NOTE(review): `t` is computed but never used in this function.
    t = int(time.time() + 15) & 0xfffffff0
    self.log.info('Initial blockindexsize: %d\n' % (nodes[0].getblockchaininfo()['blockindexsize']))
    self.log.info('Generating lots of headers with no stake\n')
    sent = 0
    for i in range(ITERATIONS):
        if i % 25 == 0:
            self.log.info('Iteration %d of %d sent, %d headers' % (i, ITERATIONS, MAX_HEADERS))
        # Each batch forks off the same past block; hashMerkleRoot=i makes
        # every iteration's headers unique.
        prevBlockHash = int(pastBlockHash, 16)
        blocks = []
        for b in range(MAX_HEADERS):
            target_block_hash = nodes[0].getblockhash(block_count - MAX_HEADERS + b)
            block = self.create_block_header(nodes[0], hashPrevBlock=prevBlockHash, hashMerkleRoot=i, target_block_hash=target_block_hash)
            prevBlockHash = int(block.hash, 16)
            blocks.append(block)
        msg = msg_headers()
        msg.headers.extend(blocks)
        sent += len(blocks)
        # time.sleep(0.2)
        # NOTE(review): this inner loop shadows the outer `i`; harmless here
        # because the outer for rebinds `i` each pass, but rename on next edit.
        for i in range(dos_nodes):
            p2p_conns[i].send_message(msg)
    time.sleep(2)
    self.log.info('\nChecking how many headers were stored')
    self.log.info('Number of headers sent: %d' % (sent))
    self.log.info('blockindexsize: %d' % (nodes[0].getblockchaininfo()['blockindexsize']))
    log_path = self.options.tmpdir + '/node0/regtest/debug.log'
    self.log.info('Reading log file: ' + log_path)
    # Scan node0's debug log for the expected DoS rejection lines.
    found_error_line = False
    found_misbehave_line = False
    with open(log_path, 'r', encoding='utf8') as fp:
        for line in fp:
            if not found_error_line and line.find('ERROR: AcceptBlockHeader: DoS limits') > -1:
                found_error_line = True
                self.log.info('Found line in log: ' + line)
            if not found_misbehave_line and line.find('): invalid header received') > -1:
                found_misbehave_line = True
                self.log.info('Found line in log: ' + line)
            if found_error_line and found_misbehave_line:
                break
    assert (found_error_line)
    assert (found_misbehave_line)
    # The spamming peer should have accumulated loose headers and banscore.
    peer_info = nodes[0].getpeerinfo()
    assert (peer_info[1]['loose_headers'] >= 200)
    assert (peer_info[1]['banscore'] > 100)
    # Verify node under DOS isn't forwarding bad headers
    peer_info1 = nodes[1].getpeerinfo()
    assert (peer_info1[0]['loose_headers'] == 0)
    assert (peer_info1[0]['banscore'] == 0)
    if check_blockindex_decay:
        self.log.info('Waiting for unfilled headers to decay')
        # Poll for up to ~200s until the block index shrinks back to the
        # 21 persistent entries (genesis + 20 staked blocks).
        for i in range(10):
            time.sleep(20)
            index_size = nodes[0].getblockchaininfo()['blockindexsize']
            self.log.info('waiting %d, blockindexsize: %d' % (i, index_size))
            if index_size <= 21:
                break
        assert (nodes[0].getblockchaininfo()['blockindexsize'] == 21)
        self.log.info('Reading log file: ' + log_path)
        found_misbehave_line = False
        with open(log_path, 'r', encoding='utf8') as fp:
            for line in fp:
                if line.find('Block not received') > -1:
                    found_misbehave_line = True
                    self.log.info('Found line in log: ' + line)
                    break
        assert (found_misbehave_line)
    self.log.info('Replace headers for next test')
    t = int(time.time() + 15) & 0xfffffff0
    self.log.info('Initial blockindexsize: %d\n' % (nodes[0].getblockchaininfo()['blockindexsize']))
    self.log.info('Generating lots of headers with no stake\n')
    # Second spam round, identical to the first, to refill the index
    # before the restart/persistence check below.
    sent = 0
    for i in range(ITERATIONS):
        if i % 25 == 0:
            self.log.info('Iteration %d of %d sent, %d headers' % (i, ITERATIONS, MAX_HEADERS))
        prevBlockHash = int(pastBlockHash, 16)
        blocks = []
        for b in range(MAX_HEADERS):
            target_block_hash = nodes[0].getblockhash(block_count - MAX_HEADERS + b)
            block = self.create_block_header(nodes[0], hashPrevBlock=prevBlockHash, hashMerkleRoot=i, target_block_hash=target_block_hash)
            prevBlockHash = int(block.hash, 16)
            blocks.append(block)
        msg = msg_headers()
        msg.headers.extend(blocks)
        sent += len(blocks)
        # time.sleep(0.2)
        for i in range(dos_nodes):
            p2p_conns[i].send_message(msg)
    self.log.info('Number of headers sent: %d' % (sent))
    self.log.info('blockindexsize: %d' % (nodes[0].getblockchaininfo()['blockindexsize']))
    self.log.info('Restart and check how many block headers were saved to disk')
    self.stop_node(0)
    self.start_node(0, self.extra_args[0] + ['-wallet=default_wallet', ])
    time.sleep(2)
    self.connect_nodes(0, 1)
    self.log.info('After restart blockindexsize: %d' % (nodes[0].getblockchaininfo()['blockindexsize']))
    # Only the 21 real headers should have been persisted across restart.
    assert (nodes[0].getblockchaininfo()['blockindexsize'] == 21)
    self.log.info('sending many duplicate headers\n\n')
    # Re-attach the p2p connection dropped by the restart.
    self.nodes[0].add_p2p_connection(p2p_conns[0], wait_for_verack=False)
    for i in range(dos_nodes):
        self.nodes[i].p2ps[0].wait_for_verack()
    self.log.info("Initial blockindexsize: %d\n" % (nodes[0].getblockchaininfo()['blockindexsize']))
    DUPLICATE_ITERATIONS = 3000
    target_block_hash = nodes[0].getblockhash(20)
    block = self.get_block_header(nodes[0], target_block_hash=target_block_hash)
    prevBlockHash = int(block.hash, 16)
    sent = 0
    # Spam the exact same header repeatedly to trip the duplicate limiter.
    for i in range(DUPLICATE_ITERATIONS):
        if i % 250 == 0:
            self.log.info('Iteration %d of %d, sent %d duplicate headers' % (i, DUPLICATE_ITERATIONS, sent))
        blocks = []
        blocks.append(block)
        msg = msg_headers()
        msg.headers.extend(blocks)
        sent += len(blocks)
        # time.sleep(0.2)
        for i in range(dos_nodes):
            p2p_conns[i].send_message(msg)
    time.sleep(2)
    self.log.info("blockindexsize: %d\n" % (nodes[0].getblockchaininfo()['blockindexsize']))
    self.log.info('Reading log file: ' + log_path)
    found_dos_line = False
    with open(log_path, 'r', encoding='utf8') as fp:
        for line in fp:
            if line.find('Too many duplicates') > -1:
                found_dos_line = True
                self.log.info('Found line in log: ' + line)
                break
    assert (found_dos_line)
def run_test(self):
    """Test block-header finalization depth enforcement.

    node0 enforces header finalization; node1 does not. Headers that fork
    above the finalized depth are accepted by both. A header forking below
    the finalized block earns node0's peer a 50-point penalty on the first
    offense and disconnection on the second, while node1 keeps accepting.
    """
    # Setup the p2p connections
    # node_with_finalheaders connects to node0
    node_with_finalheaders = self.nodes[0].add_p2p_connection(P2PInterface())
    # node_without_finalheaders connects to node1
    node_without_finalheaders = self.nodes[1].add_p2p_connection(P2PInterface())
    genesis_hash = [n.getbestblockhash() for n in self.nodes]
    assert_equal(genesis_hash[0], genesis_hash[1])
    assert_equal(self.nodes[0].getblockcount(), 0)
    assert_equal(self.nodes[1].getblockcount(), 0)
    # Have nodes mine enough blocks to get them to finalize
    for i in range(2 * DEFAULT_MAXREORGDEPTH + 1):
        [self.generatetoaddress(n, 1, n.get_deterministic_priv_key().address) for n in self.nodes]
        assert_equal(self.nodes[0].getblockcount(), i + 1)
        assert_equal(self.nodes[1].getblockcount(), i + 1)
    assert_equal(self.nodes[0].getblockcount(), 2 * DEFAULT_MAXREORGDEPTH + 1)
    assert_equal(self.nodes[1].getblockcount(), 2 * DEFAULT_MAXREORGDEPTH + 1)
    # Finalized block's height is now 10

    def construct_header_for(node, height, time_stamp):
        # Build (and PoW-solve) a replacement header whose parent is the
        # block below `height` on `node`'s chain.
        parent_hash = node.getblockhash(height - 1)
        return mine_header(parent_hash, create_coinbase(height), time_stamp)

    # For both nodes:
    # Replacement headers for block from tip down to last
    # non-finalized block should be accepted.
    block_time = int(time.time())
    node_0_blockheight = self.nodes[0].getblockcount()
    node_1_blockheight = self.nodes[1].getblockcount()
    for i in range(1, DEFAULT_MAXREORGDEPTH):
        # Create a header for node 0 and submit it
        headers_message = msg_headers()
        headers_message.headers.append(construct_header_for(self.nodes[0], node_0_blockheight - i, block_time))
        node_with_finalheaders.send_and_ping(headers_message)
        # Create a header for node 1 and submit it
        headers_message = msg_headers()
        headers_message.headers.append(construct_header_for(self.nodes[1], node_1_blockheight - i, block_time))
        node_without_finalheaders.send_and_ping(headers_message)
        # Both nodes remain connected in this loop because
        # the new headers do not attempt to replace the finalized block
        assert node_with_finalheaders.is_connected
        assert node_without_finalheaders.is_connected

    # Now, headers that would replace the finalized block...
    # The header-finalizing node should reject the deeper header
    # and get a DoS score of 50 while the non-header-finalizing node
    # will accept the header.
    headers_message = msg_headers()
    headers_message.headers.append(construct_header_for(self.nodes[0], node_0_blockheight - DEFAULT_MAXREORGDEPTH - 1, block_time))
    # Node 0 has not yet been disconnected, but it got a rejection logged and penalized
    expected_header_rejection_msg = ["peer=0 (0 -> 50) reason: bad-header-finalization", ]
    with self.nodes[0].assert_debug_log(expected_msgs=expected_header_rejection_msg, timeout=10):
        node_with_finalheaders.send_and_ping(headers_message)
    # The long sleep below is for GitLab CI.
    # On local modern test machines a sleep of 1 second worked
    # very reliably.
    time.sleep(4)
    assert node_with_finalheaders.is_connected
    headers_message = msg_headers()
    # NOTE(review): node_0_blockheight is used here for node 1's header; the
    # two heights were asserted equal above so this is equivalent, but
    # node_1_blockheight would be the consistent choice — confirm intent.
    headers_message.headers.append(construct_header_for(self.nodes[1], node_0_blockheight - DEFAULT_MAXREORGDEPTH - 1, block_time))
    node_without_finalheaders.send_message(headers_message)
    time.sleep(1)
    assert node_without_finalheaders.is_connected

    # Now, one more header on both...
    # The header-finalizing node should disconnect while the
    # non-header-finalizing node will accept the header.
    headers_message = msg_headers()
    headers_message.headers.append(construct_header_for(self.nodes[0], node_0_blockheight - DEFAULT_MAXREORGDEPTH - 1, block_time))
    # Node 0 should disconnect when we send again
    expected_header_rejection_msg = ["peer=0 (50 -> 100) reason: bad-header-finalization", ]
    with self.nodes[0].assert_debug_log(expected_msgs=expected_header_rejection_msg, timeout=10):
        node_with_finalheaders.send_message(headers_message)
    # Again, a long sleep below only for GitLab CI.
    time.sleep(4)
    assert not node_with_finalheaders.is_connected
    headers_message = msg_headers()
    headers_message.headers.append(construct_header_for(self.nodes[1], node_0_blockheight - DEFAULT_MAXREORGDEPTH - 1, block_time))
    node_without_finalheaders.send_message(headers_message)
    time.sleep(1)
    assert node_without_finalheaders.is_connected
def send_header(self, block):
    """Announce a single block to the peer as a one-entry ``headers`` message."""
    announcement = msg_headers()
    announcement.headers = [CBlockHeader(block)]
    self.send_message(announcement)
def run_test(self):
    """Test serving of stale-chain blocks and headers over p2p.

    A recently-staled block is still served via getdata/getheaders, but once
    the active chain's tip is much newer than the stale block (mocktime jump
    of ~60 days), requests for it go unanswered — while arbitrarily old
    blocks on the *active* chain are always served.
    """
    node0 = self.nodes[0].add_p2p_connection(P2PInterface())

    # Set node time to 60 days ago
    self.nodes[0].setmocktime(int(time.time()) - 60 * 24 * 60 * 60)

    # Generating a chain of 10 blocks
    block_hashes = self.nodes[0].generatetoaddress(10, self.nodes[0].get_deterministic_priv_key().address)

    # Create longer chain starting 2 blocks before current tip
    height = len(block_hashes) - 2
    block_hash = block_hashes[height - 1]
    block_time = self.nodes[0].getblockheader(block_hash)["mediantime"] + 1
    new_blocks = self.build_chain(5, block_hash, height, block_time)

    # Force reorg to a longer chain
    node0.send_message(msg_headers(new_blocks))
    node0.wait_for_getdata([x.sha256 for x in new_blocks])
    for block in new_blocks:
        node0.send_and_ping(msg_block(block))

    # Check that reorg succeeded: 8 common blocks + 5 new = height 13.
    assert_equal(self.nodes[0].getblockcount(), 13)

    # The old tip is now stale (off the active chain).
    stale_hash = int(block_hashes[-1], 16)

    # Check that getdata request for stale block succeeds
    self.send_block_request(stale_hash, node0)

    def test_function():
        return self.last_block_equals(stale_hash, node0)
    self.wait_until(test_function, timeout=3)

    # Check that getheader request for stale block header succeeds
    self.send_header_request(stale_hash, node0)

    def test_function():
        return self.last_header_equals(stale_hash, node0)
    self.wait_until(test_function, timeout=3)

    # Longest chain is extended so stale is much older than chain tip
    self.nodes[0].setmocktime(0)
    tip = self.nodes[0].generatetoaddress(1, self.nodes[0].get_deterministic_priv_key().address)[0]
    assert_equal(self.nodes[0].getblockcount(), 14)

    # Send getdata & getheaders to refresh last received getheader message
    block_hash = int(tip, 16)
    self.send_block_request(block_hash, node0)
    self.send_header_request(block_hash, node0)
    node0.sync_with_ping()

    # Request for very old stale block should now fail
    # (fixed sleep because there is no message to wait for — absence of a
    # reply is the expected outcome).
    self.send_block_request(stale_hash, node0)
    time.sleep(3)
    assert not self.last_block_equals(stale_hash, node0)

    # Request for very old stale block header should now fail
    self.send_header_request(stale_hash, node0)
    time.sleep(3)
    assert not self.last_header_equals(stale_hash, node0)

    # Verify we can fetch very old blocks and headers on the active chain
    block_hash = int(block_hashes[2], 16)
    self.send_block_request(block_hash, node0)
    self.send_header_request(block_hash, node0)
    node0.sync_with_ping()

    self.send_block_request(block_hash, node0)

    def test_function():
        return self.last_block_equals(block_hash, node0)
    self.wait_until(test_function, timeout=3)

    self.send_header_request(block_hash, node0)

    def test_function():
        return self.last_header_equals(block_hash, node0)
    self.wait_until(test_function, timeout=3)
def _find_best_chain_on_unconnected_block(self):
    """Check that POP score comparisons are not triggered by an unconnected block.

    Generates sibling orphan candidates, then feeds the node a header for a
    missing block followed by a block that builds on it; since the parent was
    never delivered, the tip must not move and no POP score comparisons may
    occur.
    """
    self.log.warning("starting _find_best_chain_on_unconnected_block()")
    lastblock = self.nodes[0].getblockcount()

    candidates = []
    for i in range(self.orphans_to_generate):
        addr1 = self.nodes[0].getnewaddress()
        hash = self.nodes[0].generatetoaddress(nblocks=1, address=addr1)[-1]
        candidates.append(hash)
        # Invalidate the freshly generated block so the next iteration mines
        # a sibling at the same height, producing competing candidates.
        self.invalidatedheight = lastblock + 1
        self.invalidated = self.nodes[0].getblockhash(self.invalidatedheight)
        self.nodes[0].invalidateblock(self.invalidated)
        new_lastblock = self.nodes[0].getblockcount()
        assert new_lastblock == lastblock

    # Restore all candidates; the node picks one as the best tip.
    for c in candidates:
        self.nodes[0].reconsiderblock(c)

    self.log.info("node0 generated {} orphans".format(self.orphans_to_generate))
    assert self.get_best_block(self.nodes[0])['height'] == lastblock + 1

    # Baseline POP score comparison count before the p2p interaction.
    compares_before = self.nodes[0].getpopscorestats()['stats']['popScoreComparisons']

    # connect to fake node
    self.bn = BaseNode(self.log)
    self.nodes[0].add_p2p_connection(self.bn)

    # generate 2 blocks to send from the fake node
    block_to_connect_hash = self.nodes[0].getblockhash(lastblock)
    block_to_connect = self.nodes[0].getblock(block_to_connect_hash)
    tip = int(block_to_connect_hash, 16)
    height = block_to_connect["height"] + 1
    block_time = block_to_connect["time"] + 1

    # block1 is only ever announced via its header — never delivered.
    block1 = create_block(self.popctx, tip, create_coinbase(height), block_time)
    block1.solve()
    self.missing_block = block1

    headers_message = msg_headers()
    headers_message.headers = [CBlockHeader(block1)]
    self.nodes[0].p2p.send_and_ping(headers_message)
    self.popctx.accept_block(height, block1.hash, block_to_connect_hash)

    # block2 builds on the missing block1, so the node cannot connect it.
    tip = int(block1.hash, 16)
    height = height + 1
    block_time = block_time + 1
    block2 = create_block(self.popctx, tip, create_coinbase(height + 1), block_time + 1)
    block2.solve()
    self.connecting_block = block2

    block_message = msg_block(block2)
    self.nodes[0].p2p.send_and_ping(block_message)

    # Tip must not have moved: block2's parent was never received.
    prevbest = self.nodes[0].getblockhash(lastblock + 1)
    newbest = self.nodes[0].getbestblockhash()
    assert newbest == prevbest, "bad tip. \n\tExpected : {}\n\tGot : {}".format(prevbest, newbest)

    compares_after = self.nodes[0].getpopscorestats()['stats']['popScoreComparisons']
    test_comparisons = compares_after - compares_before
    # No comparisons should have happened for an unconnectable block.
    assert test_comparisons == 0, "Expected {} comparisons, got {}".format(self.orphans_to_generate, test_comparisons)
    self.log.info("node0 made {} POP score comparisons".format(test_comparisons))

    assert self.get_best_block(self.nodes[0])['height'] == lastblock + 1
    self.log.warning("_find_best_chain_on_unconnected_block() succeeded!")