def run_test(self):
    # Connect to node0
    node0 = BaseNode()
    connections = [NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], node0)]
    node0.add_connection(connections[0])

    NetworkThread().start()  # Start up network handling in another thread
    node0.wait_for_verack()

    # Build the blockchain
    self.tip = int(self.nodes[0].getbestblockhash(), 16)
    self.block_time = self.nodes[0].getblock(
        self.nodes[0].getbestblockhash())['time'] + 1

    self.blocks = []

    # Get a pubkey for the coinbase TXO
    coinbase_key = ECKey()
    coinbase_key.generate()
    coinbase_pubkey = coinbase_key.get_pubkey().get_bytes()

    # Create the first block with a coinbase output to our key
    height = 1
    block = create_block(self.tip, create_coinbase(height, coinbase_pubkey),
                         self.block_time)
    self.blocks.append(block)
    self.block_time += 1
    block.solve()
    # Save the coinbase for later
    self.block1 = block
    self.tip = block.x16r
    height += 1

    # Bury the block 100 deep so the coinbase output is spendable
    for i in range(100):
        block = create_block(self.tip, create_coinbase(height),
                             self.block_time)
        block.solve()
        self.blocks.append(block)
        self.tip = block.x16r
        self.block_time += 1
        height += 1

    # Create a transaction spending the coinbase output with an invalid (null) signature
    tx = CTransaction()
    tx.vin.append(
        CTxIn(COutPoint(self.block1.vtx[0].x16r, 0), script_sig=b""))
    tx.vout.append(CTxOut(49 * 100000000, CScript([OP_TRUE])))
    tx.calc_x16r()

    block102 = create_block(self.tip, create_coinbase(height),
                            self.block_time)
    self.block_time += 1
    block102.vtx.extend([tx])
    block102.hashMerkleRoot = block102.calc_merkle_root()
    block102.rehash()
    block102.solve()
    self.blocks.append(block102)
    self.tip = block102.x16r
    self.block_time += 1
    height += 1

    # Bury the assumed valid block 2100 deep
    for i in range(2100):
        block = create_block(self.tip, create_coinbase(height),
                             self.block_time)
        block.nVersion = 4
        block.solve()
        self.blocks.append(block)
        self.tip = block.x16r
        self.block_time += 1
        height += 1

    # Start node1 and node2 with assumevalid so they accept a block with a bad signature.
    self.start_node(1, extra_args=["-assumevalid=" + hex(block102.x16r)])
    node1 = BaseNode()  # connects to node1
    connections.append(
        NodeConn('127.0.0.1', p2p_port(1), self.nodes[1], node1))
    node1.add_connection(connections[1])
    node1.wait_for_verack()

    self.start_node(2, extra_args=["-assumevalid=" + hex(block102.x16r)])
    node2 = BaseNode()  # connects to node2
    connections.append(
        NodeConn('127.0.0.1', p2p_port(2), self.nodes[2], node2))
    node2.add_connection(connections[2])
    node2.wait_for_verack()

    # Send header lists to all three nodes
    node0.send_header_for_blocks(self.blocks[0:2000])
    node0.send_header_for_blocks(self.blocks[2000:])
    node1.send_header_for_blocks(self.blocks[0:2000])
    node1.send_header_for_blocks(self.blocks[2000:])
    node2.send_header_for_blocks(self.blocks[0:200])

    # Send blocks to node0. Block 102 will be rejected.
    self.send_blocks_until_disconnected(node0)
    self.assert_blockchain_height(self.nodes[0], 101)

    # Send all blocks to node1. All blocks will be accepted.
    for i in range(2202):
        node1.send_message(MsgBlock(self.blocks[i]))
    # Syncing 2200 blocks can take a while on slow systems. Give it plenty of time to sync.
    node1.sync_with_ping(120)
    assert_equal(
        self.nodes[1].getblock(self.nodes[1].getbestblockhash())['height'],
        2202)

    # Send blocks to node2. Block 102 will be rejected.
    self.send_blocks_until_disconnected(node2)
    self.assert_blockchain_height(self.nodes[2], 101)
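
# run_test above relies on two helpers that are defined elsewhere in this test.
# A minimal sketch of what they might look like, loosely following the upstream
# assumevalid test (attribute names such as p2p_conn.connection are assumptions
# about this framework, not taken from this file):

def send_blocks_until_disconnected(self, p2p_conn):
    """Keep sending blocks to the peer until it disconnects us (sketch)."""
    for block in self.blocks:
        if not p2p_conn.connection:
            break
        try:
            p2p_conn.send_message(MsgBlock(block))
        except IOError:
            # The node hung up mid-send, which is the expected outcome here.
            break

def assert_blockchain_height(self, node, height):
    """Wait for the node's tip to reach `height`; fail if it overshoots or stalls (sketch)."""
    timeout = 10
    while timeout > 0:
        time.sleep(0.25)
        timeout -= 0.25
        current_height = node.getblock(node.getbestblockhash())['height']
        if current_height > height:
            assert False, "blockchain too long: " + str(current_height)
        if current_height == height:
            return
    assert False, "blockchain stalled below height " + str(height)
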
def run_test(self):
    # Setup the p2p connections and start up the network thread.
    test_node = NodeConnCB()      # connects to node0 (not whitelisted)
    white_node = NodeConnCB()     # connects to node1 (whitelisted)
    min_work_node = NodeConnCB()  # connects to node2 (not whitelisted)

    connections = [NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], test_node),
                   NodeConn('127.0.0.1', p2p_port(1), self.nodes[1], white_node),
                   NodeConn('127.0.0.1', p2p_port(2), self.nodes[2], min_work_node)]
    test_node.add_connection(connections[0])
    white_node.add_connection(connections[1])
    min_work_node.add_connection(connections[2])

    NetworkThread().start()  # Start up network handling in another thread

    # Test logic begins here
    test_node.wait_for_verack()
    white_node.wait_for_verack()
    min_work_node.wait_for_verack()

    # 1. Have nodes mine a block (nodes1/2 leave IBD)
    [n.generate(1) for n in self.nodes]
    tips = [int("0x" + n.getbestblockhash(), 0) for n in self.nodes]

    # 2. Send one block that builds on each tip.
    # This should be accepted by nodes 1/2
    blocks_h2 = []  # the height 2 blocks on each node's chain
    block_time = int(time.time()) + 1
    for i in range(3):
        blocks_h2.append(create_block(tips[i], create_coinbase(2), block_time))
        blocks_h2[i].solve()
        block_time += 1
    test_node.send_message(MsgBlock(blocks_h2[0]))
    white_node.send_message(MsgBlock(blocks_h2[1]))
    min_work_node.send_message(MsgBlock(blocks_h2[2]))

    for x in [test_node, white_node, min_work_node]:
        x.sync_with_ping()
    assert_equal(self.nodes[0].getblockcount(), 2)
    assert_equal(self.nodes[1].getblockcount(), 2)
    assert_equal(self.nodes[2].getblockcount(), 1)
    self.log.info("First height 2 block accepted by node0/node1; correctly rejected by node2")

    # 3. Send another block that builds on the original tip.
    blocks_h2f = []  # Blocks at height 2 that fork off the main chain
    for i in range(2):
        blocks_h2f.append(create_block(tips[i], create_coinbase(2),
                                       blocks_h2[i].nTime + 1))
        blocks_h2f[i].solve()
    test_node.send_message(MsgBlock(blocks_h2f[0]))
    white_node.send_message(MsgBlock(blocks_h2f[1]))

    for x in [test_node, white_node]:
        x.sync_with_ping()
    for x in self.nodes[0].getchaintips():
        if x['hash'] == blocks_h2f[0].hash:
            assert_equal(x['status'], "headers-only")

    for x in self.nodes[1].getchaintips():
        if x['hash'] == blocks_h2f[1].hash:
            assert_equal(x['status'], "valid-headers")

    self.log.info("Second height 2 block accepted only from whitelisted peer")

    # 4. Now send another block that builds on the forking chain.
    blocks_h3 = []
    for i in range(2):
        blocks_h3.append(create_block(blocks_h2f[i].sha256, create_coinbase(3),
                                      blocks_h2f[i].nTime + 1))
        blocks_h3[i].solve()
    test_node.send_message(MsgBlock(blocks_h3[0]))
    white_node.send_message(MsgBlock(blocks_h3[1]))

    for x in [test_node, white_node]:
        x.sync_with_ping()
    # Since the earlier block was not processed by node0, the new block
    # can't be fully validated.
    for x in self.nodes[0].getchaintips():
        if x['hash'] == blocks_h3[0].hash:
            assert_equal(x['status'], "headers-only")

    # But this block should be accepted by node0 since it has more work.
    self.nodes[0].getblock(blocks_h3[0].hash)
    self.log.info("Unrequested more-work block accepted from non-whitelisted peer")

    # Node1 should have accepted and reorged.
    assert_equal(self.nodes[1].getblockcount(), 3)
    self.log.info("Successfully reorged to length 3 chain from whitelisted peer")

    # 4b. Now mine 288 more blocks and deliver; all should be processed but
    # the last (height-too-high) on node0. Node1 should process the tip if
    # we give it the headers chain leading to the tip.
    tips = blocks_h3
    headers_message = MsgHeaders()
    all_blocks = []  # node0's blocks
    for j in range(2):
        for i in range(288):
            next_block = create_block(tips[j].sha256, create_coinbase(i + 4),
                                      tips[j].nTime + 1)
            next_block.solve()
            if j == 0:
                test_node.send_message(MsgBlock(next_block))
                all_blocks.append(next_block)
            else:
                headers_message.headers.append(CBlockHeader(next_block))
            tips[j] = next_block

    time.sleep(2)
    # Blocks 1-287 should be accepted, block 288 should be ignored because it's too far ahead
    for x in all_blocks[:-1]:
        self.nodes[0].getblock(x.hash)
    assert_raises_rpc_error(-1, "Block not found on disk",
                            self.nodes[0].getblock, all_blocks[-1].hash)

    headers_message.headers.pop()  # Ensure the last block is unrequested
    white_node.send_message(headers_message)  # Send headers leading to tip
    white_node.send_message(MsgBlock(tips[1]))  # Now deliver the tip
    white_node.sync_with_ping()
    self.nodes[1].getblock(tips[1].hash)
    self.log.info("Unrequested block far ahead of tip accepted from whitelisted peer")

    # 5. Test handling of unrequested block on the node that didn't process it.
    # Should still not be processed (even though it has a child that has more
    # work).
    test_node.send_message(MsgBlock(blocks_h2f[0]))

    # Here, if the sleep is too short, the test could falsely succeed (if the
    # node hasn't processed the block by the time the sleep returns, and then
    # the node processes it and incorrectly advances the tip).
    # But this would be caught later on, when we verify that an inv triggers
    # a getdata request for this block.
    test_node.sync_with_ping()
    assert_equal(self.nodes[0].getblockcount(), 2)
    self.log.info("Unrequested block that would complete more-work chain was ignored")

    # 6. Try to get node to request the missing block.
    # Poke the node with an inv for block at height 3 and see if that
    # triggers a getdata on block 2 (it should if block 2 is missing).
    with mininode_lock:
        # Clear state so we can check the getdata request
        test_node.last_message.pop("getdata", None)
        test_node.send_message(MsgInv([CInv(2, blocks_h3[0].sha256)]))

    test_node.sync_with_ping()
    with mininode_lock:
        getdata = test_node.last_message["getdata"]

    # Check that the getdata includes the right block
    assert_equal(getdata.inv[0].hash, blocks_h2f[0].sha256)
    self.log.info("Inv at tip triggered getdata for unprocessed block")

    # 7. Send the missing block for the third time (now it is requested)
    test_node.send_message(MsgBlock(blocks_h2f[0]))

    test_node.sync_with_ping()
    assert_equal(self.nodes[0].getblockcount(), 290)
    self.log.info("Successfully reorged to longer chain from non-whitelisted peer")

    # 8. Connect node2 to node0 and ensure it is able to sync
    connect_nodes(self.nodes[0], 2)
    sync_blocks([self.nodes[0], self.nodes[2]])
    self.log.info("Successfully synced nodes 2 and 0")

    [c.disconnect_node() for c in connections]
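
# The node configuration that makes node1 behave as the whitelisted peer and
# node2 enforce a minimum-work requirement is set up outside this excerpt. A
# plausible set_test_params, modelled on the upstream p2p-acceptblock test (the
# exact flags are an assumption, not taken from this file):

def set_test_params(self):
    self.setup_clean_chain = True
    self.num_nodes = 3
    # node0: default policy; node1: whitelists our P2P peer, so unrequested
    # blocks are processed; node2: requires more chain work than a single
    # block provides, so it rejects the first height-2 block.
    self.extra_args = [[], ["-whitelist=127.0.0.1"], ["-minimumchainwork=0x10"]]
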
def run_test(self):
    node0 = NodeConnCB()
    connections = [NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], node0)]
    node0.add_connection(connections[0])
    NetworkThread().start()  # Start up network handling in another thread

    # wait_for_verack ensures that the P2P connection is fully up.
    node0.wait_for_verack()

    self.log.info("Mining %d blocks", CLTV_HEIGHT - 2)
    self.coinbase_blocks = self.nodes[0].generate(CLTV_HEIGHT - 2)
    self.nodeaddress = self.nodes[0].getnewaddress()

    self.log.info(
        "Test that an invalid-according-to-CLTV transaction can still appear in a block")

    spendtx = create_transaction(self.nodes[0], self.coinbase_blocks[0],
                                 self.nodeaddress, 1.0)
    cltv_invalidate(spendtx)
    spendtx.rehash()

    tip = self.nodes[0].getbestblockhash()
    block_time = self.nodes[0].getblockheader(tip)['mediantime'] + 1
    block = create_block(int(tip, 16), create_coinbase(CLTV_HEIGHT - 1),
                         block_time)
    block.nVersion = 3
    block.vtx.append(spendtx)
    block.hashMerkleRoot = block.calc_merkle_root()
    block.solve()

    node0.send_and_ping(MsgBlock(block))
    assert_equal(self.nodes[0].getbestblockhash(), block.hash)

    self.log.info("Test that blocks must now be at least version 4")
    tip = block.sha256
    block_time += 1
    block = create_block(tip, create_coinbase(CLTV_HEIGHT), block_time)
    block.nVersion = 3
    block.solve()
    node0.send_and_ping(MsgBlock(block))
    assert_equal(int(self.nodes[0].getbestblockhash(), 16), tip)

    wait_until(lambda: "reject" in node0.last_message.keys(),
               lock=mininode_lock, err_msg="last_message")
    with mininode_lock:
        assert_equal(node0.last_message["reject"].code, REJECT_OBSOLETE)
        assert_equal(node0.last_message["reject"].reason,
                     b'bad-version(0x00000003)')
        assert_equal(node0.last_message["reject"].data, block.sha256)
        del node0.last_message["reject"]

    self.log.info(
        "Test that invalid-according-to-CLTV transactions cannot appear in a block")
    block.nVersion = 4

    spendtx = create_transaction(self.nodes[0], self.coinbase_blocks[1],
                                 self.nodeaddress, 1.0)
    cltv_invalidate(spendtx)
    spendtx.rehash()

    # First we show that this tx is valid except for CLTV by getting it
    # accepted to the mempool (which we can achieve with
    # -promiscuousmempoolflags).
    node0.send_and_ping(MsgTx(spendtx))
    assert spendtx.hash in self.nodes[0].getrawmempool()

    # Now we verify that a block with this transaction is invalid.
    block.vtx.append(spendtx)
    block.hashMerkleRoot = block.calc_merkle_root()
    block.solve()

    node0.send_and_ping(MsgBlock(block))
    assert_equal(int(self.nodes[0].getbestblockhash(), 16), tip)

    wait_until(lambda: "reject" in node0.last_message.keys(),
               lock=mininode_lock, err_msg="last_message")
    with mininode_lock:
        assert node0.last_message["reject"].code in [
            REJECT_INVALID, REJECT_NONSTANDARD]
        assert_equal(node0.last_message["reject"].data, block.sha256)
        if node0.last_message["reject"].code == REJECT_INVALID:
            # Generic rejection when a block is invalid
            assert_equal(node0.last_message["reject"].reason,
                         b'block-validation-failed')
        else:
            assert b'Negative locktime' in node0.last_message["reject"].reason

    self.log.info(
        "Test that a version 4 block with a valid-according-to-CLTV transaction is accepted")
    spendtx = cltv_validate(self.nodes[0], spendtx, CLTV_HEIGHT - 1)
    spendtx.rehash()

    block.vtx.pop(1)
    block.vtx.append(spendtx)
    block.hashMerkleRoot = block.calc_merkle_root()
    block.solve()

    node0.send_and_ping(MsgBlock(block))
    assert_equal(int(self.nodes[0].getbestblockhash(), 16), block.sha256)
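
# cltv_invalidate() and cltv_validate() are defined elsewhere in this test. A
# sketch of the simpler of the two, modelled on the upstream BIP65 test helper
# (the version in this codebase may differ); cltv_validate is its counterpart
# and rewrites the scriptSig and nLockTime so the CHECKLOCKTIMEVERIFY check passes:

def cltv_invalidate(tx):
    """Modify the scriptSig of vin 0 so the tx fails CLTV (sketch).

    Prepends -1 CHECKLOCKTIMEVERIFY DROP to the existing scriptSig, which the
    interpreter rejects with a 'Negative locktime' error once CLTV is enforced.
    """
    tx.vin[0].scriptSig = CScript([OP_1NEGATE, OP_CHECKLOCKTIMEVERIFY, OP_DROP] +
                                  list(CScript(tx.vin[0].scriptSig)))
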
def run_test(self):
    # Setup the p2p connections and start up the network thread.
    inv_node = TestNode()
    test_node = TestNode()

    self.p2p_connections = [inv_node, test_node]

    connections = [
        NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], inv_node),
        NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], test_node, services=0)
    ]
    # Set nServices to 0 for test_node, so no block download will occur outside of
    # direct fetching
    inv_node.add_connection(connections[0])
    test_node.add_connection(connections[1])

    NetworkThread().start()  # Start up network handling in another thread

    # Test logic begins here
    inv_node.wait_for_verack()
    test_node.wait_for_verack()

    # Ensure verack's have been processed by our peer
    inv_node.sync_with_ping()
    test_node.sync_with_ping()

    tip = int(self.nodes[0].getbestblockhash(), 16)

    # PART 1
    # 1. Mine a block; expect inv announcements each time
    self.log.info("Part 1: headers don't start before sendheaders message...")
    block_time = 0
    for i in range(4):
        old_tip = tip
        tip = self.mine_blocks(1)
        assert_equal(inv_node.check_last_announcement(inv=[tip]), True)
        assert_equal(test_node.check_last_announcement(inv=[tip]), True)
        # Try a few different responses; none should affect next announcement
        if i == 0:
            # first request the block
            test_node.get_data([tip])
            test_node.wait_for_block(tip)
        elif i == 1:
            # next try requesting header and block
            test_node.get_headers(locator=[old_tip], hashstop=tip)
            test_node.get_data([tip])
            test_node.wait_for_block(tip)
            test_node.clear_last_announcement()  # since we requested headers...
        elif i == 2:
            # this time announce own block via headers
            height = self.nodes[0].getblockcount()
            last_time = self.nodes[0].getblock(
                self.nodes[0].getbestblockhash())['time']
            block_time = last_time + 1
            new_block = create_block(tip, create_coinbase(height + 1), block_time)
            new_block.solve()
            test_node.send_header_for_blocks([new_block])
            test_node.wait_for_getdata([new_block.sha256])
            test_node.send_message(MsgBlock(new_block))
            test_node.sync_with_ping()  # make sure this block is processed
            inv_node.clear_last_announcement()
            test_node.clear_last_announcement()

    self.log.info("Part 1: success!")

    self.log.info("Part 2: announce blocks with headers after sendheaders message...")
    # PART 2
    # 2. Send a sendheaders message and test that headers announcements
    # commence and keep working.
    test_node.send_message(MsgSendHeaders())
    prev_tip = int(self.nodes[0].getbestblockhash(), 16)
    test_node.get_headers(locator=[prev_tip], hashstop=0)
    test_node.sync_with_ping()

    # Now that we've synced headers, headers announcements should work
    tip = self.mine_blocks(1)
    assert_equal(inv_node.check_last_announcement(inv=[tip]), True)
    assert_equal(test_node.check_last_announcement(headers=[tip]), True)

    height = self.nodes[0].getblockcount() + 1
    block_time += 10  # Advance far enough ahead
    for i in range(10):
        # Mine i blocks, and alternate announcing either via
        # inv (of tip) or via headers. After each, new blocks
        # mined by the node should successfully be announced
        # with block header, even though the blocks are never requested
        for j in range(2):
            blocks = []
            for _ in range(i + 1):
                blocks.append(create_block(tip, create_coinbase(height), block_time))
                blocks[-1].solve()
                tip = blocks[-1].sha256
                block_time += 1
                height += 1
            if j == 0:
                # Announce via inv
                test_node.send_block_inv(tip)
                test_node.wait_for_getheaders()
                # Should have received a getheaders now
                test_node.send_header_for_blocks(blocks)
                # Test that duplicate inv's won't result in duplicate
                # getdata requests, or duplicate headers announcements
                [inv_node.send_block_inv(x.sha256) for x in blocks]
                test_node.wait_for_getdata([x.sha256 for x in blocks])
                inv_node.sync_with_ping()
            else:
                # Announce via headers
                test_node.send_header_for_blocks(blocks)
                test_node.wait_for_getdata([x.sha256 for x in blocks])
                # Test that duplicate headers won't result in duplicate
                # getdata requests (the check is further down)
                inv_node.send_header_for_blocks(blocks)
                inv_node.sync_with_ping()
            [test_node.send_message(MsgBlock(x)) for x in blocks]
            test_node.sync_with_ping()
            inv_node.sync_with_ping()
            # This block should not be announced to the inv node (since it also
            # broadcast it)
            assert "inv" not in inv_node.last_message
            assert "headers" not in inv_node.last_message
        tip = self.mine_blocks(1)
        assert_equal(inv_node.check_last_announcement(inv=[tip]), True)
        assert_equal(test_node.check_last_announcement(headers=[tip]), True)
        height += 1
        block_time += 1

    self.log.info("Part 2: success!")

    self.log.info(
        "Part 3: headers announcements can stop after large reorg, and resume after headers/inv from peer...")

    # PART 3. Headers announcements can stop after large reorg, and resume after
    # getheaders or inv from peer.
    for j in range(2):
        # First try mining a reorg that can propagate with header announcement
        new_block_hashes = self.mine_reorg(length=7)
        tip = new_block_hashes[-1]
        assert_equal(inv_node.check_last_announcement(inv=[tip]), True)
        assert_equal(test_node.check_last_announcement(headers=new_block_hashes), True)

        block_time += 8

        # Mine a too-large reorg, which should be announced with a single inv
        new_block_hashes = self.mine_reorg(length=8)
        tip = new_block_hashes[-1]
        assert_equal(inv_node.check_last_announcement(inv=[tip]), True)
        assert_equal(test_node.check_last_announcement(inv=[tip]), True)

        block_time += 9

        fork_point = self.nodes[0].getblock(
            "%064x" % new_block_hashes[0])["previousblockhash"]
        fork_point = int(fork_point, 16)

        # Use getblocks/getdata
        test_node.send_getblocks(locator=[fork_point])
        assert_equal(test_node.check_last_announcement(inv=new_block_hashes), True)
        test_node.get_data(new_block_hashes)
        test_node.wait_for_block(new_block_hashes[-1])

        for i in range(3):
            # Mine another block, still should get only an inv
            tip = self.mine_blocks(1)
            assert_equal(inv_node.check_last_announcement(inv=[tip]), True)
            assert_equal(test_node.check_last_announcement(inv=[tip]), True)
            if i == 0:
                # Just get the data -- shouldn't cause headers announcements to resume
                test_node.get_data([tip])
                test_node.wait_for_block(tip)
            elif i == 1:
                # Send a getheaders message that shouldn't trigger headers announcements
                # to resume (best header sent will be too old)
                test_node.get_headers(locator=[fork_point],
                                      hashstop=new_block_hashes[1])
                test_node.get_data([tip])
                test_node.wait_for_block(tip)
            elif i == 2:
                test_node.get_data([tip])
                test_node.wait_for_block(tip)
                # This time, try sending either a getheaders to trigger resumption
                # of headers announcements, or mine a new block and inv it, also
                # triggering resumption of headers announcements.
                if j == 0:
                    test_node.get_headers(locator=[tip], hashstop=0)
                    test_node.sync_with_ping()
                else:
                    test_node.send_block_inv(tip)
                    test_node.sync_with_ping()
        # New blocks should now be announced with header
        tip = self.mine_blocks(1)
        assert_equal(inv_node.check_last_announcement(inv=[tip]), True)
        assert_equal(test_node.check_last_announcement(headers=[tip]), True)

    self.log.info("Part 3: success!")

    self.log.info("Part 4: Testing direct fetch behavior...")
    tip = self.mine_blocks(1)
    height = self.nodes[0].getblockcount() + 1
    last_time = self.nodes[0].getblock(
        self.nodes[0].getbestblockhash())['time']
    block_time = last_time + 1

    # Create 2 blocks. Send the blocks, then send the headers.
    blocks = []
    for _ in range(2):
        blocks.append(create_block(tip, create_coinbase(height), block_time))
        blocks[-1].solve()
        tip = blocks[-1].sha256
        block_time += 1
        height += 1
        inv_node.send_message(MsgBlock(blocks[-1]))

    inv_node.sync_with_ping()  # Make sure blocks are processed
    test_node.last_message.pop("getdata", None)
    test_node.send_header_for_blocks(blocks)
    test_node.sync_with_ping()
    # should not have received any getdata messages
    with mininode_lock:
        assert "getdata" not in test_node.last_message

    # This time, direct fetch should work
    blocks = []
    for _ in range(3):
        blocks.append(create_block(tip, create_coinbase(height), block_time))
        blocks[-1].solve()
        tip = blocks[-1].sha256
        block_time += 1
        height += 1

    test_node.send_header_for_blocks(blocks)
    test_node.sync_with_ping()
    test_node.wait_for_getdata([x.sha256 for x in blocks],
                               timeout=int(direct_fetch_response_time))

    [test_node.send_message(MsgBlock(x)) for x in blocks]

    test_node.sync_with_ping()

    # Now announce a header that forks the last two blocks
    tip = blocks[0].sha256
    height -= 1
    blocks = []

    # Create extra blocks for later
    for _ in range(20):
        blocks.append(create_block(tip, create_coinbase(height), block_time))
        blocks[-1].solve()
        tip = blocks[-1].sha256
        block_time += 1
        height += 1

    # Announcing one block on fork should not trigger direct fetch
    # (less work than tip)
    test_node.last_message.pop("getdata", None)
    test_node.send_header_for_blocks(blocks[0:1])
    test_node.sync_with_ping()
    with mininode_lock:
        assert "getdata" not in test_node.last_message

    # Announcing one more block on fork should trigger direct fetch for
    # both blocks (same work as tip)
    test_node.send_header_for_blocks(blocks[1:2])
    test_node.sync_with_ping()
    test_node.wait_for_getdata([x.sha256 for x in blocks[0:2]],
                               timeout=int(direct_fetch_response_time))

    # Announcing 16 more headers should trigger direct fetch for 14 more
    # blocks
    test_node.send_header_for_blocks(blocks[2:18])
    test_node.sync_with_ping()
    test_node.wait_for_getdata([x.sha256 for x in blocks[2:16]],
                               timeout=int(direct_fetch_response_time))

    # Announcing 1 more header should not trigger any response
    test_node.last_message.pop("getdata", None)
    test_node.send_header_for_blocks(blocks[18:19])
    test_node.sync_with_ping()
    with mininode_lock:
        assert "getdata" not in test_node.last_message

    self.log.info("Part 4: success!")

    # Now deliver all those blocks we announced.
    [test_node.send_message(MsgBlock(x)) for x in blocks]

    self.log.info("Part 5: Testing handling of unconnecting headers")
    # First we test that receipt of an unconnecting header doesn't prevent
    # chain sync.
    for i in range(10):
        test_node.last_message.pop("getdata", None)
        blocks = []
        # Create two more blocks.
        for j in range(2):
            blocks.append(create_block(tip, create_coinbase(height), block_time))
            blocks[-1].solve()
            tip = blocks[-1].sha256
            block_time += 1
            height += 1
        # Send the header of the second block -> this won't connect.
        with mininode_lock:
            test_node.last_message.pop("getheaders", None)
        test_node.send_header_for_blocks([blocks[1]])
        test_node.wait_for_getheaders()
        test_node.send_header_for_blocks(blocks)
        test_node.wait_for_getdata([x.sha256 for x in blocks])
        [test_node.send_message(MsgBlock(x)) for x in blocks]
        test_node.sync_with_ping()
        assert_equal(int(self.nodes[0].getbestblockhash(), 16), blocks[1].sha256)

    blocks = []
    # Now we test that if we repeatedly don't send connecting headers, we
    # don't go into an infinite loop trying to get them to connect.
    MAX_UNCONNECTING_HEADERS = 10
    for j in range(MAX_UNCONNECTING_HEADERS + 1):
        blocks.append(create_block(tip, create_coinbase(height), block_time))
        blocks[-1].solve()
        tip = blocks[-1].sha256
        block_time += 1
        height += 1

    for i in range(1, MAX_UNCONNECTING_HEADERS):
        # Send a header that doesn't connect, check that we get a getheaders.
        with mininode_lock:
            test_node.last_message.pop("getheaders", None)
        test_node.send_header_for_blocks([blocks[i]])
        test_node.wait_for_getheaders()

    # Next header will connect, should re-set our count:
    test_node.send_header_for_blocks([blocks[0]])

    # Remove the first two entries (blocks[1] would connect):
    blocks = blocks[2:]

    # Now try to see how many unconnecting headers we can send
    # before we get disconnected. Should be 5*MAX_UNCONNECTING_HEADERS
    for i in range(5 * MAX_UNCONNECTING_HEADERS - 1):
        # Send a header that doesn't connect, check that we get a getheaders.
        with mininode_lock:
            test_node.last_message.pop("getheaders", None)
        test_node.send_header_for_blocks([blocks[i % len(blocks)]])
        test_node.wait_for_getheaders()

    # Eventually this stops working.
    test_node.send_header_for_blocks([blocks[-1]])

    # Should get disconnected
    test_node.wait_for_disconnect()

    self.log.info("Part 5: success!")

    # Finally, check that the inv node never received a getdata request,
    # throughout the test
    assert "getdata" not in inv_node.last_message
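
# The sendheaders test above leans on self.mine_blocks() and self.mine_reorg(),
# which are defined elsewhere in the test. A rough sketch of what they might
# look like, following the upstream sendheaders test; it assumes a second node
# (self.nodes[1]) and a TestNode.wait_for_block_announcement() helper, neither
# of which appears in this excerpt:

def mine_blocks(self, count):
    """Mine count blocks and return the new tip as an int (sketch)."""
    # Clear out last block announcement from each p2p listener
    [x.clear_last_announcement() for x in self.p2p_connections]
    self.nodes[0].generate(count)
    return int(self.nodes[0].getbestblockhash(), 16)

def mine_reorg(self, length):
    """Mine a reorg that invalidates `length` blocks, replacing them with
    `length + 1` blocks, and return the new block hashes as ints (sketch)."""
    self.nodes[0].generate(length)  # make sure all invalidated blocks are node0's
    sync_blocks(self.nodes, wait=0.1)
    for x in self.p2p_connections:
        x.wait_for_block_announcement(int(self.nodes[0].getbestblockhash(), 16))
        x.clear_last_announcement()

    tip_height = self.nodes[1].getblockcount()
    hash_to_invalidate = self.nodes[1].getblockhash(tip_height - (length - 1))
    self.nodes[1].invalidateblock(hash_to_invalidate)
    all_hashes = self.nodes[1].generate(length + 1)  # Must be longer than the original chain
    sync_blocks(self.nodes, wait=0.1)
    return [int(x, 16) for x in all_hashes]
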
def run_test(self):
    node0 = NodeConnCB()
    connections = [NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], node0)]
    node0.add_connection(connections[0])

    NetworkThread().start()
    node0.wait_for_verack()

    # Set node time to 60 days ago
    self.nodes[0].setmocktime(int(time.time()) - 60 * 24 * 60 * 60)

    # Generate a chain of 10 blocks
    block_hashes = self.nodes[0].generate(nblocks=10)

    # Create longer chain starting 2 blocks before current tip
    height = len(block_hashes) - 2
    block_hash = block_hashes[height - 1]
    block_time = self.nodes[0].getblockheader(block_hash)["mediantime"] + 1
    new_blocks = self.build_chain(5, block_hash, height, block_time)

    # Force reorg to a longer chain
    node0.send_message(MsgHeaders(new_blocks))
    node0.wait_for_getdata()
    for block in new_blocks:
        node0.send_and_ping(MsgBlock(block))

    # Check that reorg succeeded
    assert_equal(self.nodes[0].getblockcount(), 13)

    stale_hash = int(block_hashes[-1], 16)

    # Check that getdata request for stale block succeeds
    self.send_block_request(stale_hash, node0)
    test_function = lambda: self.last_block_equals(stale_hash, node0)
    wait_until(test_function, timeout=3, err_msg="test_function 1")

    # Check that getheaders request for stale block header succeeds
    self.send_header_request(stale_hash, node0)
    test_function = lambda: self.last_header_equals(stale_hash, node0)
    wait_until(test_function, timeout=3, err_msg="test_function 2")

    # Longest chain is extended so stale is much older than chain tip
    self.nodes[0].setmocktime(0)
    tip = self.nodes[0].generate(nblocks=1)[0]
    assert_equal(self.nodes[0].getblockcount(), 14)

    # Send getdata & getheaders to refresh last received getheaders message
    block_hash = int(tip, 16)
    self.send_block_request(block_hash, node0)
    self.send_header_request(block_hash, node0)
    node0.sync_with_ping()

    # Request for very old stale block should now fail
    self.send_block_request(stale_hash, node0)
    time.sleep(3)
    assert not self.last_block_equals(stale_hash, node0)

    # Request for very old stale block header should now fail
    self.send_header_request(stale_hash, node0)
    time.sleep(3)
    assert not self.last_header_equals(stale_hash, node0)

    # Verify we can fetch very old blocks and headers on the active chain
    block_hash = int(block_hashes[2], 16)
    self.send_block_request(block_hash, node0)
    self.send_header_request(block_hash, node0)
    node0.sync_with_ping()

    self.send_block_request(block_hash, node0)
    test_function = lambda: self.last_block_equals(block_hash, node0)
    wait_until(test_function, timeout=3, err_msg="test_function 3")

    self.send_header_request(block_hash, node0)
    test_function = lambda: self.last_header_equals(block_hash, node0)
    wait_until(test_function, timeout=3, err_msg="test_function 4")
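
# run_test above calls several small helpers (build_chain, send_block_request,
# last_block_equals, and their header counterparts) that live elsewhere in the
# test. A minimal sketch of three of them, modelled on the upstream
# p2p_fingerprint test; the MsgGetdata class name and the rehash() return value
# are assumptions about how this port renames the upstream framework:

def build_chain(self, nblocks, prev_hash, prev_height, prev_median_time):
    """Build and solve a chain of nblocks on top of prev_hash (sketch)."""
    blocks = []
    for _ in range(nblocks):
        coinbase = create_coinbase(prev_height + 1)
        block_time = prev_median_time + 1
        block = create_block(int(prev_hash, 16), coinbase, block_time)
        block.solve()
        blocks.append(block)
        prev_hash = block.hash
        prev_height += 1
        prev_median_time = block_time
    return blocks

def send_block_request(self, block_hash, node):
    """Ask the peer for a single block via getdata (sketch)."""
    msg = MsgGetdata()
    msg.inv.append(CInv(2, block_hash))  # type 2 == block
    node.send_message(msg)

def last_block_equals(self, expected_hash, node):
    """Return True if the last received block message matches expected_hash (sketch).

    Assumes rehash() returns the block's integer hash, as in the upstream framework."""
    block_msg = node.last_message.get("block")
    return block_msg and block_msg.block.rehash() == expected_hash
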
def test_incorrect_blocktxn_response(self, node, test_node, version):
    if len(self.utxos) == 0:
        self.make_utxos()
    utxo = self.utxos.pop(0)

    block = self.build_block_with_transactions(node, utxo, 10)
    self.utxos.append(
        [block.vtx[-1].sha256, 0, block.vtx[-1].vout[0].nValue])
    # Relay the first 5 transactions from the block in advance
    for tx in block.vtx[1:6]:
        test_node.send_message(MsgTx(tx))
    test_node.sync_with_ping()

    # Make sure all transactions were accepted.
    mempool = node.getrawmempool()
    for tx in block.vtx[1:6]:
        assert tx.hash in mempool

    # Send compact block
    comp_block = HeaderAndShortIDs()
    comp_block.initialize_from_block(block, prefill_list=[0],
                                     use_witness=(version == 2))
    test_node.send_and_ping(MsgCmpctBlock(comp_block.to_p2p()))
    with mininode_lock:
        assert "getblocktxn" in test_node.last_message
        absolute_indexes = test_node.last_message[
            "getblocktxn"].block_txn_request.to_absolute()
    assert_equal(absolute_indexes, [6, 7, 8, 9, 10])

    # Now give an incorrect response.
    # Note that it's possible for ravend to be smart enough to know we're
    # lying, since it could check to see if the shortid matches what we're
    # sending, and eg disconnect us for misbehavior. If that behavior
    # change were made, we could just modify this test by having a
    # different peer provide the block further down, so that we're still
    # verifying that the block isn't marked bad permanently. This is good
    # enough for now.
    msg = MsgBlockTxn()
    if version == 2:
        msg = MsgWitnessBlocktxn()
    msg.block_transactions = BlockTransactions(
        block.sha256, [block.vtx[5]] + block.vtx[7:])
    test_node.send_and_ping(msg)

    # Tip should not have updated
    assert_equal(int(node.getbestblockhash(), 16), block.hashPrevBlock)

    # We should receive a getdata request
    wait_until(lambda: "getdata" in test_node.last_message, timeout=10,
               lock=mininode_lock, err_msg="test_node.last_message getdata")
    assert_equal(len(test_node.last_message["getdata"].inv), 1)
    assert (test_node.last_message["getdata"].inv[0].type == 2 or
            test_node.last_message["getdata"].inv[0].type == 2 | MSG_WITNESS_FLAG)
    assert_equal(test_node.last_message["getdata"].inv[0].hash, block.sha256)

    # Deliver the block
    if version == 2:
        test_node.send_and_ping(MsgWitnessBlock(block))
    else:
        test_node.send_and_ping(MsgBlock(block))
    assert_equal(int(node.getbestblockhash(), 16), block.sha256)
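
# build_block_with_transactions() is defined elsewhere in the compact blocks
# test. A condensed sketch of what it does, based on the upstream
# p2p_compactblocks test (upstream splits out a build_block_on_tip helper; the
# inlined version below is an assumption, not taken from this file):

def build_block_with_transactions(self, node, utxo, num_transactions):
    """Build a solved block containing a chain of num_transactions spends of utxo (sketch)."""
    tip = node.getbestblockhash()
    height = node.getblockcount()
    mtp = node.getblockheader(tip)['mediantime']
    block = create_block(int(tip, 16), create_coinbase(height + 1), mtp + 1)
    block.nVersion = 4

    # utxo is [txid, vout_index, value]; each new tx spends the previous one's output.
    for _ in range(num_transactions):
        tx = CTransaction()
        tx.vin.append(CTxIn(COutPoint(utxo[0], utxo[1]), b''))
        tx.vout.append(CTxOut(utxo[2] - 1000, CScript([OP_TRUE])))
        tx.rehash()
        utxo = [tx.sha256, 0, tx.vout[0].nValue]
        block.vtx.append(tx)

    block.hashMerkleRoot = block.calc_merkle_root()
    block.solve()
    return block
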
def run_test(self):
    node0 = NodeConnCB()
    connections = [NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], node0)]
    node0.add_connection(connections[0])
    NetworkThread().start()  # Start up network handling in another thread

    # wait_for_verack ensures that the P2P connection is fully up.
    node0.wait_for_verack()

    self.log.info("Mining %d blocks", DERSIG_HEIGHT - 2)
    self.coinbase_blocks = self.nodes[0].generate(DERSIG_HEIGHT - 2)
    self.nodeaddress = self.nodes[0].getnewaddress()

    self.log.info(
        "Test that a transaction with non-DER signature can still appear in a block")

    spendtx = create_transaction(self.nodes[0], self.coinbase_blocks[0],
                                 self.nodeaddress, 1.0)
    un_der_ify(spendtx)
    spendtx.rehash()

    tip = self.nodes[0].getbestblockhash()
    block_time = self.nodes[0].getblockheader(tip)['mediantime'] + 1
    block = create_block(int(tip, 16), create_coinbase(DERSIG_HEIGHT - 1),
                         block_time)
    block.nVersion = 2
    block.vtx.append(spendtx)
    block.hashMerkleRoot = block.calc_merkle_root()
    block.rehash()
    block.solve()

    node0.send_and_ping(MsgBlock(block))
    assert_equal(self.nodes[0].getbestblockhash(), block.hash)

    self.log.info("Test that blocks must now be at least version 3")
    tip = block.sha256
    block_time += 1
    block = create_block(tip, create_coinbase(DERSIG_HEIGHT), block_time)
    block.nVersion = 2
    block.rehash()
    block.solve()
    node0.send_and_ping(MsgBlock(block))
    assert_equal(int(self.nodes[0].getbestblockhash(), 16), tip)

    wait_until(lambda: "reject" in node0.last_message.keys(),
               lock=mininode_lock, err_msg="last_message")
    with mininode_lock:
        assert_equal(node0.last_message["reject"].code, REJECT_OBSOLETE)
        assert_equal(node0.last_message["reject"].reason,
                     b'bad-version(0x00000002)')
        assert_equal(node0.last_message["reject"].data, block.sha256)
        del node0.last_message["reject"]

    self.log.info(
        "Test that transactions with non-DER signatures cannot appear in a block")
    block.nVersion = 3

    spendtx = create_transaction(self.nodes[0], self.coinbase_blocks[1],
                                 self.nodeaddress, 1.0)
    un_der_ify(spendtx)
    spendtx.rehash()

    # First we show that this tx is valid except for DERSIG by getting it
    # accepted to the mempool (which we can achieve with
    # -promiscuousmempoolflags).
    node0.send_and_ping(MsgTx(spendtx))
    assert spendtx.hash in self.nodes[0].getrawmempool()

    # Now we verify that a block with this transaction is invalid.
    block.vtx.append(spendtx)
    block.hashMerkleRoot = block.calc_merkle_root()
    block.rehash()
    block.solve()

    node0.send_and_ping(MsgBlock(block))
    assert_equal(int(self.nodes[0].getbestblockhash(), 16), tip)

    wait_until(lambda: "reject" in node0.last_message.keys(),
               lock=mininode_lock, err_msg="last_message")
    with mininode_lock:
        # We can receive different reject messages depending on whether
        # splendidd is running with multiple script check threads. If script
        # check threads are not in use, then transaction script validation
        # happens sequentially, and splendidd produces more specific reject
        # reasons.
        assert node0.last_message["reject"].code in [
            REJECT_INVALID, REJECT_NONSTANDARD]
        assert_equal(node0.last_message["reject"].data, block.sha256)
        if node0.last_message["reject"].code == REJECT_INVALID:
            # Generic rejection when a block is invalid
            assert_equal(node0.last_message["reject"].reason,
                         b'block-validation-failed')
        else:
            assert b'Non-canonical DER signature' in node0.last_message["reject"].reason

    self.log.info(
        "Test that a version 3 block with a DERSIG-compliant transaction is accepted")
    block.vtx[1] = create_transaction(self.nodes[0], self.coinbase_blocks[1],
                                      self.nodeaddress, 1.0)
    block.hashMerkleRoot = block.calc_merkle_root()
    block.rehash()
    block.solve()

    node0.send_and_ping(MsgBlock(block))
    assert_equal(int(self.nodes[0].getbestblockhash(), 16), block.sha256)
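
# un_der_ify() is the helper that breaks DER encoding in the transactions used
# above. A sketch modelled on the upstream BIP66 test helper; the version in
# this codebase may differ in detail:

def un_der_ify(tx):
    """Make the signature in vin 0 of a tx non-DER-compliant by padding the S-value (sketch)."""
    scriptSig = CScript(tx.vin[0].scriptSig)
    newscript = []
    for i in scriptSig:
        if len(newscript) == 0:
            # Insert an extra zero byte before the final sighash-type byte of the
            # signature push, which violates strict DER encoding.
            newscript.append(i[0:-1] + b'\0' + i[-1:])
        else:
            newscript.append(i)
    tx.vin[0].scriptSig = CScript(newscript)
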