def run_test(self):
    node0 = NodeConnCB()
    connections = [
        NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], node0)
    ]
    node0.add_connection(connections[0])

    NetworkThread().start()
    node0.wait_for_verack()

    # Set node time to 60 days ago
    self.nodes[0].setmocktime(int(time.time()) - 60 * 24 * 60 * 60)

    # Generate a chain of 10 blocks
    block_hashes = self.nodes[0].generate(nblocks=10)

    # Create longer chain starting 2 blocks before current tip
    height = len(block_hashes) - 2
    block_hash = block_hashes[height - 1]
    block_time = self.nodes[0].getblockheader(block_hash)["mediantime"] + 1

    new_blocks = self.build_chain(5, block_hash, height, block_time)

    # Force reorg to a longer chain
    node0.send_message(MsgHeaders(new_blocks))
    node0.wait_for_getdata()
    for block in new_blocks:
        node0.send_and_ping(MsgBlock(block))

    # Check that reorg succeeded
    assert_equal(self.nodes[0].getblockcount(), 13)

    stale_hash = int(block_hashes[-1], 16)

    # Check that getdata request for stale block succeeds
    self.send_block_request(stale_hash, node0)
    test_function = lambda: self.last_block_equals(stale_hash, node0)
    wait_until(test_function, timeout=3,
               err_msg="stale block not returned via getdata")

    # Check that getheaders request for stale block header succeeds
    self.send_header_request(stale_hash, node0)
    test_function = lambda: self.last_header_equals(stale_hash, node0)
    wait_until(test_function, timeout=3,
               err_msg="stale header not returned via getheaders")

    # Longest chain is extended so stale is much older than chain tip
    self.nodes[0].setmocktime(0)
    tip = self.nodes[0].generate(nblocks=1)[0]
    assert_equal(self.nodes[0].getblockcount(), 14)

    # Send getdata & getheaders to refresh last received getheader message
    block_hash = int(tip, 16)
    self.send_block_request(block_hash, node0)
    self.send_header_request(block_hash, node0)
    node0.sync_with_ping()

    # Request for very old stale block should now fail
    self.send_block_request(stale_hash, node0)
    time.sleep(3)
    assert not self.last_block_equals(stale_hash, node0)

    # Request for very old stale block header should now fail
    self.send_header_request(stale_hash, node0)
    time.sleep(3)
    assert not self.last_header_equals(stale_hash, node0)

    # Verify we can fetch very old blocks and headers on the active chain
    block_hash = int(block_hashes[2], 16)
    self.send_block_request(block_hash, node0)
    self.send_header_request(block_hash, node0)
    node0.sync_with_ping()

    self.send_block_request(block_hash, node0)
    test_function = lambda: self.last_block_equals(block_hash, node0)
    wait_until(test_function, timeout=3,
               err_msg="old block on active chain not returned")

    self.send_header_request(block_hash, node0)
    test_function = lambda: self.last_header_equals(block_hash, node0)
    wait_until(test_function, timeout=3,
               err_msg="old header on active chain not returned")
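# NOTE: the helpers called in run_test above (build_chain, send_block_request,
# send_header_request, last_block_equals, last_header_equals) are defined
# elsewhere in this test class. Below is a minimal sketch of what they are
# assumed to do; the message-class names MsgGetdata and MsgGetheaders, and the
# last_message cache on the test peer, are assumptions based on the
# surrounding code, not taken from this excerpt.

def build_chain(self, nblocks, prev_hash, prev_height, prev_median_time):
    # Build and solve a chain of nblocks blocks on top of the given block.
    blocks = []
    for _ in range(nblocks):
        coinbase = create_coinbase(prev_height + 1)
        block_time = prev_median_time + 1
        block = create_block(int(prev_hash, 16), coinbase, block_time)
        block.solve()
        blocks.append(block)
        prev_hash = block.hash
        prev_height += 1
        prev_median_time = block_time
    return blocks

def send_block_request(self, block_hash, node):
    # Ask the node for a block via getdata (inv type 2 = block).
    msg = MsgGetdata()
    msg.inv.append(CInv(2, block_hash))
    node.send_message(msg)

def send_header_request(self, block_hash, node):
    # Ask the node for a single header via getheaders with hashstop set.
    msg = MsgGetheaders()
    msg.hashstop = block_hash
    node.send_message(msg)

def last_block_equals(self, expected_hash, node):
    # True if the most recently received block message carries expected_hash.
    block = node.last_message.get("block")
    return block and block.block.rehash() == expected_hash

def last_header_equals(self, expected_hash, node):
    # True if the most recently received headers message carries expected_hash.
    headers = node.last_message.get("headers")
    return (headers and headers.headers and
            headers.headers[0].rehash() == expected_hash)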
def send_header_for_blocks(self, new_blocks):
    headers_message = MsgHeaders()
    headers_message.headers = [CBlockHeader(b) for b in new_blocks]
    self.send_message(headers_message)
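# Hypothetical usage sketch of the helper above: announce freshly built blocks
# so the peer requests them with getdata, e.g.
#
#   test_node.send_header_for_blocks(new_blocks)
#   test_node.wait_for_getdata()
#
# This matches the manual MsgHeaders construction in the acceptblock test
# below, where headers are collected and sent in a single message.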
def run_test(self):
    # Setup the p2p connections and start up the network thread.
    test_node = NodeConnCB()      # connects to node0 (not whitelisted)
    white_node = NodeConnCB()     # connects to node1 (whitelisted)
    min_work_node = NodeConnCB()  # connects to node2 (not whitelisted, enforces minimum chain work)

    connections = [
        NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], test_node),
        NodeConn('127.0.0.1', p2p_port(1), self.nodes[1], white_node),
        NodeConn('127.0.0.1', p2p_port(2), self.nodes[2], min_work_node)
    ]
    test_node.add_connection(connections[0])
    white_node.add_connection(connections[1])
    min_work_node.add_connection(connections[2])

    NetworkThread().start()  # Start up network handling in another thread

    # Test logic begins here
    test_node.wait_for_verack()
    white_node.wait_for_verack()
    min_work_node.wait_for_verack()

    # 1. Have each node mine a block (nodes 1/2 leave IBD)
    [n.generate(1) for n in self.nodes]
    tips = [int("0x" + n.getbestblockhash(), 0) for n in self.nodes]

    # 2. Send one block that builds on each tip.
    # This should be accepted by node0 and node1; node2 should reject it
    # because the chain does not yet meet its minimum-chain-work requirement.
    blocks_h2 = []  # the height 2 blocks on each node's chain
    block_time = int(time.time()) + 1
    for i in range(3):
        blocks_h2.append(
            create_block(tips[i], create_coinbase(2), block_time))
        blocks_h2[i].solve()
        block_time += 1
    test_node.send_message(MsgBlock(blocks_h2[0]))
    white_node.send_message(MsgBlock(blocks_h2[1]))
    min_work_node.send_message(MsgBlock(blocks_h2[2]))

    for x in [test_node, white_node, min_work_node]:
        x.sync_with_ping()
    assert_equal(self.nodes[0].getblockcount(), 2)
    assert_equal(self.nodes[1].getblockcount(), 2)
    assert_equal(self.nodes[2].getblockcount(), 1)
    self.log.info(
        "First height 2 block accepted by node0/node1; correctly rejected by node2")

    # 3. Send another block that builds on the original tip.
    blocks_h2f = []  # Blocks at height 2 that fork off the main chain
    for i in range(2):
        blocks_h2f.append(
            create_block(tips[i], create_coinbase(2), blocks_h2[i].nTime + 1))
        blocks_h2f[i].solve()
    test_node.send_message(MsgBlock(blocks_h2f[0]))
    white_node.send_message(MsgBlock(blocks_h2f[1]))

    for x in [test_node, white_node]:
        x.sync_with_ping()
    for x in self.nodes[0].getchaintips():
        if x['hash'] == blocks_h2f[0].hash:
            assert_equal(x['status'], "headers-only")

    for x in self.nodes[1].getchaintips():
        if x['hash'] == blocks_h2f[1].hash:
            assert_equal(x['status'], "valid-headers")

    self.log.info(
        "Second height 2 block accepted only from whitelisted peer")

    # 4. Now send another block that builds on the forking chain.
    blocks_h3 = []
    for i in range(2):
        blocks_h3.append(
            create_block(blocks_h2f[i].sha256, create_coinbase(3),
                         blocks_h2f[i].nTime + 1))
        blocks_h3[i].solve()
    test_node.send_message(MsgBlock(blocks_h3[0]))
    white_node.send_message(MsgBlock(blocks_h3[1]))

    for x in [test_node, white_node]:
        x.sync_with_ping()

    # Since the earlier block was not processed by node0, the new block
    # can't be fully validated.
    for x in self.nodes[0].getchaintips():
        if x['hash'] == blocks_h3[0].hash:
            assert_equal(x['status'], "headers-only")

    # But this block should be accepted by node0 since it has more work.
    self.nodes[0].getblock(blocks_h3[0].hash)
    self.log.info(
        "Unrequested more-work block accepted from non-whitelisted peer")

    # Node1 should have accepted and reorged.
    assert_equal(self.nodes[1].getblockcount(), 3)
    self.log.info(
        "Successfully reorged to length 3 chain from whitelisted peer")

    # 4b. Now mine 288 more blocks and deliver; all should be processed but
    # the last (height-too-high) on node0. Node1 should process the tip if
    # we give it the headers chain leading to the tip.
    tips = blocks_h3
    headers_message = MsgHeaders()
    all_blocks = []  # node0's blocks
    for j in range(2):
        for i in range(288):
            next_block = create_block(tips[j].sha256, create_coinbase(i + 4),
                                      tips[j].nTime + 1)
            next_block.solve()
            if j == 0:
                test_node.send_message(MsgBlock(next_block))
                all_blocks.append(next_block)
            else:
                headers_message.headers.append(CBlockHeader(next_block))
            tips[j] = next_block

    time.sleep(2)
    # Blocks 1-287 should be accepted, block 288 should be ignored because
    # it's too far ahead
    for x in all_blocks[:-1]:
        self.nodes[0].getblock(x.hash)
    assert_raises_rpc_error(-1, "Block not found on disk",
                            self.nodes[0].getblock, all_blocks[-1].hash)

    headers_message.headers.pop()  # Ensure the last block is unrequested
    white_node.send_message(headers_message)    # Send headers leading to tip
    white_node.send_message(MsgBlock(tips[1]))  # Now deliver the tip
    white_node.sync_with_ping()
    self.nodes[1].getblock(tips[1].hash)
    self.log.info(
        "Unrequested block far ahead of tip accepted from whitelisted peer")

    # 5. Test handling of unrequested block on the node that didn't process
    # it. Should still not be processed (even though it has a child that has
    # more work).
    test_node.send_message(MsgBlock(blocks_h2f[0]))

    # If node0 were still to process the block after the ping returns (and
    # incorrectly advance its tip), the false pass here would be caught later
    # on, when we verify that an inv triggers a getdata request for this
    # block.
    test_node.sync_with_ping()
    assert_equal(self.nodes[0].getblockcount(), 2)
    self.log.info(
        "Unrequested block that would complete more-work chain was ignored")

    # 6. Try to get node to request the missing block.
    # Poke the node with an inv for block at height 3 and see if that
    # triggers a getdata on block 2 (it should if block 2 is missing).
    with mininode_lock:
        # Clear state so we can check the getdata request
        test_node.last_message.pop("getdata", None)
        test_node.send_message(MsgInv([CInv(2, blocks_h3[0].sha256)]))

    test_node.sync_with_ping()
    with mininode_lock:
        getdata = test_node.last_message["getdata"]

    # Check that the getdata includes the right block
    assert_equal(getdata.inv[0].hash, blocks_h2f[0].sha256)
    self.log.info("Inv at tip triggered getdata for unprocessed block")

    # 7. Send the missing block for the third time (now it is requested)
    test_node.send_message(MsgBlock(blocks_h2f[0]))

    test_node.sync_with_ping()
    assert_equal(self.nodes[0].getblockcount(), 290)
    self.log.info(
        "Successfully reorged to longer chain from non-whitelisted peer")

    # 8. Connect node2 to node0 and ensure it is able to sync
    connect_nodes(self.nodes[0], 2)
    sync_blocks([self.nodes[0], self.nodes[2]])
    self.log.info("Successfully synced nodes 2 and 0")

    [c.disconnect_node() for c in connections]
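# For context, the three-node topology used above assumes a node configuration
# roughly like the sketch below. The hook name set_test_params and the exact
# option values (notably the minimumchainwork threshold) are assumptions, not
# taken from this excerpt: node1 is whitelisted so it accepts unrequested
# blocks from its peer, and node2 requires a minimum amount of chain work
# before accepting blocks.
def set_test_params(self):
    self.setup_clean_chain = True
    self.num_nodes = 3
    self.extra_args = [[],
                       ["-whitelist=127.0.0.1"],
                       ["-minimumchainwork=0x10"]]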