def reconnect_p2p(self, **kwargs):
    """Tear down and bootstrap the P2P connection to the node.

    The node gets disconnected several times in this test. This helper
    method reconnects the p2p and restarts the network thread."""
    self.nodes[0].disconnect_p2ps()
    network_thread_join()
    self.bootstrap_p2p(**kwargs)
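# Usage sketch for the helper above (assumed to run inside the same test class):
# after a step that deliberately gets the peer disconnected, tear everything down
# and bootstrap a fresh connection before the next step. The bad_message value is
# hypothetical and only illustrates the pattern.
def example_step(self):
    self.nodes[0].p2p.send_message(bad_message)  # hypothetical message that makes the node drop us
    self.nodes[0].p2p.wait_for_disconnect()
    # re-establish the P2P connection and restart the network thread before continuing
    self.reconnect_p2p()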
def run_test(self):
    """Main test logic"""

    # Create P2P connections to two of the nodes
    self.nodes[0].add_p2p_connection(BaseNode())

    # Start up network handling in another thread. This needs to be called
    # after the P2P connections have been created.
    network_thread_start()

    # wait_for_verack ensures that the P2P connection is fully up.
    self.nodes[0].p2p.wait_for_verack()

    # Generating a block on one of the nodes will get us out of IBD
    blocks = [int(self.nodes[0].generate(1)[0], 16)]
    self.sync_all([self.nodes[0:1]])

    # Notice above how we called an RPC by calling a method with the same
    # name on the node object. Notice also how we used a keyword argument
    # to specify a named RPC argument. Neither of those are defined on the
    # node object. Instead there's some __getattr__() magic going on under
    # the covers to dispatch unrecognised attribute calls to the RPC
    # interface.

    # Logs are nice. Do plenty of them. They can be used in place of comments
    # for breaking the test into sub-sections.
    self.log.info("Starting test!")

    self.log.info("Calling a custom function")
    custom_function()

    self.log.info("Calling a custom method")
    self.custom_method()

    self.log.info("Create some blocks")
    self.tip = int(self.nodes[0].getbestblockhash(), 16)
    self.block_time = self.nodes[0].getblock(self.nodes[0].getbestblockhash())['time'] + 1

    height = 1

    for i in range(10):
        # Use the mininode and blocktools functionality to manually build a block.
        # Calling the generate() rpc is easier, but this allows us to exactly
        # control the blocks and transactions.
        block = create_block(self.tip, create_coinbase(height), self.block_time)
        block.solve()
        block_message = msg_block(block)
        # send_message is used to send a P2P message to the node over our P2PInterface
        self.nodes[0].p2p.send_message(block_message)
        self.tip = block.sha256
        blocks.append(self.tip)
        self.block_time += 1
        height += 1

    self.log.info("Wait for node1 to reach current tip (height 11) using RPC")
    self.nodes[1].waitforblockheight(11)

    self.log.info("Connect node2 and node1")
    connect_nodes(self.nodes[1], 2)

    self.log.info("Add P2P connection to node2")
    # We can't add additional P2P connections once the network thread has started.
    # Disconnect the connection to node0, wait for the network thread to terminate,
    # then connect to node2. This is specific to the current implementation of the
    # network thread and may be improved in future.
    self.nodes[0].disconnect_p2ps()
    network_thread_join()

    self.nodes[2].add_p2p_connection(BaseNode())
    network_thread_start()
    self.nodes[2].p2p.wait_for_verack()

    self.log.info("Wait for node2 to reach current tip. Test that it has propagated all the blocks to us")

    getdata_request = msg_getdata()
    for block in blocks:
        getdata_request.inv.append(CInv(2, block))
    self.nodes[2].p2p.send_message(getdata_request)

    # wait_until() will loop until a predicate condition is met. Use it to test
    # properties of the P2PInterface objects.
    wait_until(lambda: sorted(blocks) == sorted(list(self.nodes[2].p2p.block_receive_map.keys())), timeout=5, lock=mininode_lock)

    self.log.info("Check that each block was received only once")
    # The network thread uses a global lock on data access to the P2PConnection
    # objects when sending and receiving messages. The test thread should acquire
    # the global lock before accessing any P2PConnection data to avoid locking and
    # synchronization issues. Note wait_until() acquires this global lock when
    # testing the predicate.
    with mininode_lock:
        for block in self.nodes[2].p2p.block_receive_map.values():
            assert_equal(block, 1)
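# The __getattr__() dispatch mentioned in the comments above works roughly like the
# sketch below: attribute lookups that don't resolve on the node object are turned
# into RPC calls. This is a minimal illustration only, assuming an RPC backend object
# with a generic call(method, *args, **kwargs) API; it is not the test framework's
# actual TestNode implementation.
class NodeRPCProxySketch:
    def __init__(self, rpc_connection):
        self._rpc = rpc_connection  # assumed: object exposing call(method, *args, **kwargs)

    def __getattr__(self, name):
        # Only invoked when normal attribute lookup fails, so real attributes
        # and methods are never shadowed.
        def dispatch(*args, **kwargs):
            return self._rpc.call(name, *args, **kwargs)
        return dispatch

# Usage sketch: proxy.generate(1) or proxy.getblock(blockhash=h) become RPC calls
# named "generate" and "getblock" even though neither is defined on the class.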
def run_test(self):
    # Connect to node0
    p2p0 = self.nodes[0].add_p2p_connection(BaseNode())

    network_thread_start()
    self.nodes[0].p2p.wait_for_verack()

    # Build the blockchain
    self.tip = int(self.nodes[0].getbestblockhash(), 16)
    self.block_time = self.nodes[0].getblock(self.nodes[0].getbestblockhash())['time'] + 1

    self.blocks = []

    # Get a pubkey for the coinbase TXO
    coinbase_key = CECKey()
    coinbase_key.set_secretbytes(b"horsebattery")
    coinbase_pubkey = coinbase_key.get_pubkey()

    # Create the first block with a coinbase output to our key
    height = 1
    block = create_block(self.tip, create_coinbase(height, coinbase_pubkey), self.block_time)
    self.blocks.append(block)
    self.block_time += 1
    block.solve()
    # Save the coinbase for later
    self.block1 = block
    self.tip = block.sha256
    height += 1

    # Bury the block 100 deep so the coinbase output is spendable
    for i in range(100):
        block = create_block(self.tip, create_coinbase(height), self.block_time)
        block.solve()
        self.blocks.append(block)
        self.tip = block.sha256
        self.block_time += 1
        height += 1

    # Create a transaction spending the coinbase output with an invalid (null) signature
    tx = CTransaction()
    tx.vin.append(CTxIn(COutPoint(self.block1.vtx[0].sha256, 0), scriptSig=b""))
    tx.vout.append(CTxOut(49 * 100000000, CScript([OP_TRUE])))
    tx.calc_sha256()

    block102 = create_block(self.tip, create_coinbase(height), self.block_time)
    self.block_time += 1
    block102.vtx.extend([tx])
    block102.hashMerkleRoot = block102.calc_merkle_root()
    block102.rehash()
    block102.solve()
    self.blocks.append(block102)
    self.tip = block102.sha256
    self.block_time += 1
    height += 1

    # Bury the assumed valid block 2100 deep
    for i in range(2100):
        block = create_block(self.tip, create_coinbase(height), self.block_time)
        block.nVersion = 4
        block.solve()
        self.blocks.append(block)
        self.tip = block.sha256
        self.block_time += 1
        height += 1

    # We're adding new connections so terminate the network thread
    self.nodes[0].disconnect_p2ps()
    network_thread_join()

    # Start node1 and node2 with assumevalid so they accept a block with a bad signature.
    self.start_node(1, extra_args=["-assumevalid=" + hex(block102.sha256)])
    self.start_node(2, extra_args=["-assumevalid=" + hex(block102.sha256)])

    p2p0 = self.nodes[0].add_p2p_connection(BaseNode())
    p2p1 = self.nodes[1].add_p2p_connection(BaseNode())
    p2p2 = self.nodes[2].add_p2p_connection(BaseNode())

    network_thread_start()
    p2p0.wait_for_verack()
    p2p1.wait_for_verack()
    p2p2.wait_for_verack()

    # send header lists to all three nodes
    p2p0.send_header_for_blocks(self.blocks[0:2000])
    p2p0.send_header_for_blocks(self.blocks[2000:])
    p2p1.send_header_for_blocks(self.blocks[0:2000])
    p2p1.send_header_for_blocks(self.blocks[2000:])
    p2p2.send_header_for_blocks(self.blocks[0:200])

    # Send blocks to node0. Block 102 will be rejected.
    self.send_blocks_until_disconnected(p2p0)
    self.assert_blockchain_height(self.nodes[0], 101)

    # Send all blocks to node1. All blocks will be accepted.
    for i in range(2202):
        p2p1.send_message(msg_block(self.blocks[i]))
    # Syncing 2200 blocks can take a while on slow systems. Give it plenty of time to sync.
    p2p1.sync_with_ping(120)
    assert_equal(self.nodes[1].getblock(self.nodes[1].getbestblockhash())['height'], 2202)

    # Send blocks to node2. Block 102 will be rejected.
    self.send_blocks_until_disconnected(p2p2)
    self.assert_blockchain_height(self.nodes[2], 101)
def run_test(self):
    # Connect to node0
    p2p0 = self.nodes[0].add_p2p_connection(BaseNode())

    network_thread_start()
    self.nodes[0].p2p.wait_for_verack()

    # Build the blockchain
    self.tip = int(self.nodes[0].getbestblockhash(), 16)
    self.block_time = self.nodes[0].getblock(self.nodes[0].getbestblockhash())['time'] + 1

    self.blocks = []

    # Get a pubkey for the coinbase TXO
    coinbase_key = CECKey()
    coinbase_key.set_secretbytes(b"horsebattery")
    coinbase_pubkey = coinbase_key.get_pubkey()

    # Create the first block with a coinbase output to our key
    height = 1
    block = create_block(self.tip, create_coinbase(height, coinbase_pubkey), self.block_time)
    self.blocks.append(block)
    self.block_time += 1
    block.solve()
    # Save the coinbase for later
    self.block1 = block
    self.tip = block.sha256
    height += 1

    # Bury the block 100 deep so the coinbase output is spendable
    for i in range(100):
        block = create_block(self.tip, create_coinbase(height), self.block_time)
        block.solve()
        self.blocks.append(block)
        self.tip = block.sha256
        self.block_time += 1
        height += 1

    # Create a transaction spending the coinbase output with an invalid (null) signature
    tx = CTransaction()
    tx.vin.append(CTxIn(COutPoint(self.block1.vtx[0].sha256, 0), scriptSig=b""))
    tx.vout.append(CTxOut(49 * 100000000, CScript([OP_TRUE])))
    tx.calc_sha256()

    block102 = create_block(self.tip, create_coinbase(height), self.block_time)
    self.block_time += 1
    block102.vtx.extend([tx])
    block102.hashMerkleRoot = block102.calc_merkle_root()
    block102.rehash()
    block102.solve()
    self.blocks.append(block102)
    self.tip = block102.sha256
    self.block_time += 1
    height += 1

    # Bury the assumed valid block 8400 deep (Cosanta needs 4x as many blocks to allow -assumevalid to work)
    for i in range(8400):
        block = create_block(self.tip, create_coinbase(height), self.block_time)
        block.nVersion = 4
        block.solve()
        self.blocks.append(block)
        self.tip = block.sha256
        self.block_time += 1
        height += 1

    # We're adding new connections so terminate the network thread
    self.nodes[0].disconnect_p2ps()
    network_thread_join()

    # Start node1 and node2 with assumevalid so they accept a block with a bad signature.
    self.start_node(1, extra_args=self.extra_args + ["-assumevalid=" + hex(block102.sha256)])
    self.start_node(2, extra_args=self.extra_args + ["-assumevalid=" + hex(block102.sha256)])

    p2p0 = self.nodes[0].add_p2p_connection(BaseNode())
    p2p1 = self.nodes[1].add_p2p_connection(BaseNode())
    p2p2 = self.nodes[2].add_p2p_connection(BaseNode())

    network_thread_start()
    p2p0.wait_for_verack()
    p2p1.wait_for_verack()
    p2p2.wait_for_verack()

    # Make sure nodes actually accept the many headers
    self.mocktime = self.block_time
    set_node_times(self.nodes, self.mocktime)

    # send header lists to all three nodes.
    # node0 does not need to receive all headers
    # node1 must receive all headers as otherwise assumevalid is ignored in ConnectBlock
    # node2 should NOT receive all headers to force skipping of the assumevalid check in ConnectBlock
    p2p0.send_header_for_blocks(self.blocks[0:2000])
    p2p1.send_header_for_blocks(self.blocks[0:2000])
    p2p1.send_header_for_blocks(self.blocks[2000:4000])
    p2p1.send_header_for_blocks(self.blocks[4000:6000])
    p2p1.send_header_for_blocks(self.blocks[6000:8000])
    p2p1.send_header_for_blocks(self.blocks[8000:])
    p2p2.send_header_for_blocks(self.blocks[0:200])

    # Send blocks to node0. Block 102 will be rejected.
    self.send_blocks_until_disconnected(p2p0)
    self.assert_blockchain_height(self.nodes[0], 101)

    # Send 200 blocks to node1. All blocks, including block 102, will be accepted.
    for i in range(200):
        p2p1.send_message(msg_block(self.blocks[i]))
    # Syncing so many blocks can take a while on slow systems. Give it plenty of time to sync.
    p2p1.sync_with_ping(300)
    assert_equal(self.nodes[1].getblock(self.nodes[1].getbestblockhash())['height'], 200)

    # Send blocks to node2. Block 102 will be rejected.
    self.send_blocks_until_disconnected(p2p2)
    self.assert_blockchain_height(self.nodes[2], 101)
def run_test(self):
    # Connect to node0
    p2p0 = self.nodes[0].add_p2p_connection(BaseNode())

    network_thread_start()
    self.nodes[0].p2p.wait_for_verack()

    # Build the blockchain
    self.tip = int(self.nodes[0].getbestblockhash(), 16)
    self.block_time = self.nodes[0].getblock(self.nodes[0].getbestblockhash())['time'] + 1

    self.blocks = []

    # Get a pubkey for the coinbase TXO
    coinbase_key = CECKey()
    coinbase_key.set_secretbytes(b"horsebattery")
    coinbase_pubkey = coinbase_key.get_pubkey()

    # Create the first block with a coinbase output to our key
    height = 1
    block = create_block(self.tip, create_coinbase(height, coinbase_pubkey), height, self.block_time)
    self.blocks.append(block)
    self.block_time += 1
    block.solve()
    # Save the coinbase for later
    self.block1 = block
    self.tip = block.sha256
    height += 1

    # Bury the block 800 deep so the coinbase output is spendable
    for i in range(800):
        block = create_block(self.tip, create_coinbase(height), height, self.block_time)
        block.solve()
        self.blocks.append(block)
        self.tip = block.sha256
        self.block_time += 1
        height += 1

    # Create a transaction spending the coinbase output with an invalid (null) signature
    tx = CTransaction()
    tx.vin.append(CTxIn(COutPoint(self.block1.vtx[0].sha256, 0), scriptSig=b""))
    tx.vout.append(CTxOut(24 * 100000000, CScript([OP_TRUE])))
    tx.calc_sha256()

    block802 = create_block(self.tip, create_coinbase(height), height, self.block_time)
    self.block_time += 1
    block802.vtx.extend([tx])
    block802.hashMerkleRoot = block802.calc_merkle_root()
    block802.rehash()
    block802.solve()
    self.blocks.append(block802)
    self.tip = block802.sha256
    self.block_time += 1
    height += 1

    # Bury the assumed valid block 2100*8 deep
    for i in range(16800):
        block = create_block(self.tip, create_coinbase(height), height, self.block_time)
        block.nVersion = 4
        block.solve()
        self.blocks.append(block)
        self.tip = block.sha256
        self.block_time += 1
        height += 1

    # We're adding new connections so terminate the network thread
    self.nodes[0].disconnect_p2ps()
    network_thread_join()

    # Start node1 and node2 with assumevalid so they accept a block with a bad signature.
    self.start_node(1, extra_args=["-assumevalid=" + hex(block802.sha256)])
    self.start_node(2, extra_args=["-assumevalid=" + hex(block802.sha256)])

    p2p0 = self.nodes[0].add_p2p_connection(BaseNode())
    p2p1 = self.nodes[1].add_p2p_connection(BaseNode())
    p2p2 = self.nodes[2].add_p2p_connection(BaseNode())

    network_thread_start()
    p2p0.wait_for_verack()
    p2p1.wait_for_verack()
    p2p2.wait_for_verack()

    # send header lists to all three nodes
    p2p0.send_header_for_blocks(self.blocks[0:2000])
    p2p0.send_header_for_blocks(self.blocks[2000:])
    p2p1.send_header_for_blocks(self.blocks[0:2000])
    p2p1.send_header_for_blocks(self.blocks[2000:])
    p2p2.send_header_for_blocks(self.blocks[0:200])

    # Send blocks to node0. Block 102 will be rejected.
    self.send_blocks_until_disconnected(p2p0)
    self.assert_blockchain_height(self.nodes[0], 101)

    # Send all blocks to node1. All blocks will be accepted.
    for i in range(2202):
        p2p1.send_message(msg_block(self.blocks[i]))
    # Syncing 2200 blocks can take a while on slow systems. Give it plenty of time to sync.
    p2p1.sync_with_ping(120)
    assert_equal(self.nodes[1].getblock(self.nodes[1].getbestblockhash())['height'], 2202)

    # Send blocks to node2. Block 102 will be rejected.
    self.send_blocks_until_disconnected(p2p2)
    self.assert_blockchain_height(self.nodes[2], 101)
def run_test(self):
    node = self.nodes[0].add_p2p_connection(P2PIgnoreInv())
    network_thread_start()
    node.wait_for_verack()

    expected_services = NODE_BLOOM | NODE_WITNESS | NODE_NETWORK_LIMITED

    self.log.info("Check that node has signalled expected services.")
    assert_equal(node.nServices, expected_services)

    self.log.info("Check that the localservices is as expected.")
    assert_equal(int(self.nodes[0].getnetworkinfo()['localservices'], 16), expected_services)

    self.log.info("Mine enough blocks to reach the NODE_NETWORK_LIMITED range.")
    connect_nodes_bi(self.nodes, 0, 1)
    blocks = self.nodes[1].generate(292)
    sync_blocks([self.nodes[0], self.nodes[1]])

    self.log.info("Make sure we can retrieve the block at tip-288.")
    node.send_getdata_for_block(blocks[1])  # last block in valid range
    node.wait_for_block(int(blocks[1], 16), timeout=3)

    self.log.info("Requesting block at height 2 (tip-289) must fail (ignored).")
    node.send_getdata_for_block(blocks[0])  # first block outside of the 288+2 limit
    node.wait_for_disconnect(5)

    self.log.info("Check local address relay, do a fresh connection.")
    self.nodes[0].disconnect_p2ps()
    network_thread_join()
    node1 = self.nodes[0].add_p2p_connection(P2PIgnoreInv())
    network_thread_start()
    node1.wait_for_verack()
    node1.send_message(msg_verack())

    node1.wait_for_addr()
    # must relay address with NODE_NETWORK_LIMITED
    assert_equal(node1.firstAddrnServices, 1036)

    self.nodes[0].disconnect_p2ps()
    node1.wait_for_disconnect()

    # connect unsynced node 2 with pruned NODE_NETWORK_LIMITED peer
    # because node 2 is in IBD and node 0 is a NODE_NETWORK_LIMITED peer, sync must not be possible
    connect_nodes_bi(self.nodes, 0, 2)
    try:
        sync_blocks([self.nodes[0], self.nodes[2]], timeout=5)
    except:
        pass
    # node2 must remain at height 0
    assert_equal(self.nodes[2].getblockheader(self.nodes[2].getbestblockhash())['height'], 0)

    # now connect also to node 1 (non pruned)
    connect_nodes_bi(self.nodes, 1, 2)

    # sync must be possible
    sync_blocks(self.nodes)

    # disconnect all peers
    self.disconnect_all()

    # mine 10 blocks on node 0 (pruned node)
    self.nodes[0].generate(10)

    # connect node1 (non pruned) with node0 (pruned) and check if they can sync
    connect_nodes_bi(self.nodes, 0, 1)

    # sync must be possible, node 1 is no longer in IBD and should therefore connect to node 0 (NODE_NETWORK_LIMITED)
    sync_blocks([self.nodes[0], self.nodes[1]])
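# Sanity check of the literal 1036 asserted above: it is just the three advertised
# service bits combined, with NODE_NETWORK deliberately absent for a pruned peer.
# The bit positions below are the standard Bitcoin service-flag assignments; the
# *_BIT names are local to this sketch so they don't clash with imported constants.
NODE_NETWORK_BIT = 1 << 0           # full chain history; not set by a pruned node
NODE_BLOOM_BIT = 1 << 2             # bloom filter support
NODE_WITNESS_BIT = 1 << 3           # segwit support
NODE_NETWORK_LIMITED_BIT = 1 << 10  # serves only the most recent 288 blocks

assert NODE_BLOOM_BIT | NODE_WITNESS_BIT | NODE_NETWORK_LIMITED_BIT == 1036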
def run_test(self):

    # Create a block with 2500 stakeable outputs
    self.build_coins_to_stake()

    # Propagate it to nodes 1 and 2 and stop them for now
    self.sync_first_block()

    # Key Management for node 0
    keytool = KeyTool.for_node(self.nodes[0])

    # Connect to node0
    p2p0 = self.nodes[0].add_p2p_connection(BaseNode())

    network_thread_start()
    self.nodes[0].p2p.wait_for_verack()

    # Build the blockchain
    self.tip = int(self.nodes[0].getbestblockhash(), 16)
    self.block_time = self.nodes[0].getblock(self.nodes[0].getbestblockhash())['time'] + 1

    self.blocks = []

    # Get a pubkey for the coinbase TXO
    coinbase_key = keytool.make_privkey()
    coinbase_pubkey = bytes(coinbase_key.get_pubkey())

    keytool.upload_key(coinbase_key)

    self.log.info("Create the first block with a coinbase output to our key")
    height = 2
    snapshot_meta = get_tip_snapshot_meta(self.nodes[0])
    coin = self.get_coin_to_stake()
    coinbase = sign_coinbase(self.nodes[0], create_coinbase(height, coin, snapshot_meta.hash, coinbase_pubkey))
    block = create_block(self.tip, coinbase, self.block_time)
    self.blocks.append(block)
    self.block_time += 1
    block.solve()
    # Save the coinbase for later
    self.block1 = block
    self.tip = block.sha256

    utxo1 = UTXO(height, TxType.COINBASE, COutPoint(coinbase.sha256, 0), coinbase.vout[0])
    snapshot_meta = update_snapshot_with_tx(self.nodes[0], snapshot_meta, height, coinbase)

    height += 1

    self.log.info("Bury the block 100 deep so the coinbase output is spendable")
    for i in range(100):
        coin = self.get_coin_to_stake()
        coinbase = sign_coinbase(self.nodes[0], create_coinbase(height, coin, snapshot_meta.hash, coinbase_pubkey))
        block = create_block(self.tip, coinbase, self.block_time)
        block.solve()
        self.blocks.append(block)
        self.tip = block.sha256
        self.block_time += 1
        utxo = UTXO(height, TxType.COINBASE, COutPoint(coinbase.sha256, 0), coinbase.vout[0])
        snapshot_meta = update_snapshot_with_tx(self.nodes[0], snapshot_meta, height, coinbase)
        height += 1

    self.log.info("Create a transaction spending the coinbase output with an invalid (null) signature")
    tx = CTransaction()
    tx.vin.append(CTxIn(COutPoint(self.block1.vtx[0].sha256, 0), scriptSig=b""))
    tx.vout.append(CTxOut((PROPOSER_REWARD - 1) * 100000000, CScript([OP_TRUE])))
    tx.calc_sha256()

    coin = self.get_coin_to_stake()
    coinbase = sign_coinbase(self.nodes[0], create_coinbase(height, coin, snapshot_meta.hash, coinbase_pubkey))
    block102 = create_block(self.tip, coinbase, self.block_time)
    self.block_time += 1
    block102.vtx.extend([tx])
    block102.compute_merkle_trees()
    block102.rehash()
    block102.solve()
    self.blocks.append(block102)
    self.tip = block102.sha256
    self.block_time += 1

    snapshot_meta = update_snapshot_with_tx(self.nodes[0], snapshot_meta, height, coinbase)
    utxo2 = UTXO(height, tx.get_type(), COutPoint(tx.sha256, 0), tx.vout[0])
    snapshot_meta = calc_snapshot_hash(self.nodes[0], snapshot_meta, height, [utxo1], [utxo2])

    height += 1

    self.log.info("Bury the assumed valid block 2100 deep")
    for i in range(2100):
        coin = self.get_coin_to_stake()
        coinbase = sign_coinbase(self.nodes[0], create_coinbase(height, coin, snapshot_meta.hash, coinbase_pubkey))
        block = create_block(self.tip, coinbase, self.block_time)
        block.nVersion = 4
        block.solve()
        self.blocks.append(block)
        self.tip = block.sha256
        self.block_time += 1
        utxo = UTXO(height, TxType.COINBASE, COutPoint(coinbase.sha256, 0), coinbase.vout[0])
        snapshot_meta = update_snapshot_with_tx(self.nodes[0], snapshot_meta, height, coinbase)
        height += 1

    # We're adding new connections so terminate the network thread
    self.nodes[0].disconnect_p2ps()
    network_thread_join()

    self.log.info("Start node1 and node2 with assumevalid so they accept a block with a bad signature.")
    self.start_node(1, extra_args=["-assumevalid=" + hex(block102.sha256)])
    self.start_node(2, extra_args=["-assumevalid=" + hex(block102.sha256)])

    p2p0 = self.nodes[0].add_p2p_connection(BaseNode())
    p2p1 = self.nodes[1].add_p2p_connection(BaseNode())
    p2p2 = self.nodes[2].add_p2p_connection(BaseNode())

    network_thread_start()
    p2p0.wait_for_verack()
    p2p1.wait_for_verack()
    p2p2.wait_for_verack()

    self.log.info("send header lists to all three nodes")
    p2p0.send_header_for_blocks(self.blocks[0:2000])
    p2p0.send_header_for_blocks(self.blocks[2000:])
    p2p1.send_header_for_blocks(self.blocks[0:2000])
    p2p1.send_header_for_blocks(self.blocks[2000:])
    p2p2.send_header_for_blocks(self.blocks[0:200])

    self.log.info("Send blocks to node0. Block 103 will be rejected.")
    self.send_blocks_until_disconnected(p2p0)
    self.assert_blockchain_height(self.nodes[0], 102)

    self.log.info("Send all blocks to node1. All blocks will be accepted.")
    for i in range(2202):
        p2p1.send_message(msg_witness_block(self.blocks[i]))
    # Syncing 2200 blocks can take a while on slow systems. Give it plenty of time to sync.
    p2p1.sync_with_ping(120)
    assert_equal(self.nodes[1].getblock(self.nodes[1].getbestblockhash())['height'], 2203)

    self.log.info("Send blocks to node2. Block 103 will be rejected.")
    self.send_blocks_until_disconnected(p2p2)
    self.assert_blockchain_height(self.nodes[2], 102)
def run_test(self):
    # Setup the p2p connections and start up the network thread.
    # test_node connects to node0 (not whitelisted)
    test_node = self.nodes[0].add_p2p_connection(P2PInterface())
    # min_work_node connects to node1 (whitelisted)
    min_work_node = self.nodes[1].add_p2p_connection(P2PInterface())

    network_thread_start()

    # Test logic begins here
    test_node.wait_for_verack()
    min_work_node.wait_for_verack()

    # 1. Have nodes mine a block (leave IBD)
    [n.generate(1) for n in self.nodes]
    tips = [int("0x" + n.getbestblockhash(), 0) for n in self.nodes]

    # 2. Send one block that builds on each tip.
    # This should be accepted by node0
    blocks_h2 = []  # the height 2 blocks on each node's chain
    block_time = int(time.time()) + 1
    for i in range(2):
        blocks_h2.append(create_block(tips[i], create_coinbase(2), block_time))
        blocks_h2[i].solve()
        block_time += 1
    test_node.send_message(msg_block(blocks_h2[0]))
    min_work_node.send_message(msg_block(blocks_h2[1]))

    for x in [test_node, min_work_node]:
        x.sync_with_ping()
    assert_equal(self.nodes[0].getblockcount(), 2)
    assert_equal(self.nodes[1].getblockcount(), 1)
    self.log.info("First height 2 block accepted by node0; correctly rejected by node1")

    # 3. Send another block that builds on genesis.
    block_h1f = create_block(int("0x" + self.nodes[0].getblockhash(0), 0), create_coinbase(1), block_time)
    block_time += 1
    block_h1f.solve()
    test_node.send_message(msg_block(block_h1f))

    test_node.sync_with_ping()
    tip_entry_found = False
    for x in self.nodes[0].getchaintips():
        if x['hash'] == block_h1f.hash:
            assert_equal(x['status'], "headers-only")
            tip_entry_found = True
    assert tip_entry_found
    assert_raises_rpc_error(-1, "Block not found on disk", self.nodes[0].getblock, block_h1f.hash)

    # 4. Send another two blocks that build on the fork.
    block_h2f = create_block(block_h1f.sha256, create_coinbase(2), block_time)
    block_time += 1
    block_h2f.solve()
    test_node.send_message(msg_block(block_h2f))

    test_node.sync_with_ping()
    # Since the earlier block was not processed by node, the new block
    # can't be fully validated.
    tip_entry_found = False
    for x in self.nodes[0].getchaintips():
        if x['hash'] == block_h2f.hash:
            assert_equal(x['status'], "headers-only")
            tip_entry_found = True
    assert tip_entry_found

    # But this block should be accepted by node since it has equal work.
    self.nodes[0].getblock(block_h2f.hash)
    self.log.info("Second height 2 block accepted, but not reorg'ed to")

    # 4b. Now send another block that builds on the forking chain.
    block_h3 = create_block(block_h2f.sha256, create_coinbase(3), block_h2f.nTime + 1)
    block_h3.solve()
    test_node.send_message(msg_block(block_h3))

    test_node.sync_with_ping()
    # Since the earlier block was not processed by node, the new block
    # can't be fully validated.
    tip_entry_found = False
    for x in self.nodes[0].getchaintips():
        if x['hash'] == block_h3.hash:
            assert_equal(x['status'], "headers-only")
            tip_entry_found = True
    assert tip_entry_found
    self.nodes[0].getblock(block_h3.hash)

    # But this block should be accepted by node since it has more work.
    self.nodes[0].getblock(block_h3.hash)
    self.log.info("Unrequested more-work block accepted")

    # 4c. Now mine 288 more blocks and deliver; all should be processed but
    # the last (height-too-high) on node (as long as it's not missing any headers)
    tip = block_h3
    all_blocks = []
    for i in range(288):
        next_block = create_block(tip.sha256, create_coinbase(i + 4), tip.nTime + 1)
        next_block.solve()
        all_blocks.append(next_block)
        tip = next_block

    # Now send the block at height 5 and check that it wasn't accepted (missing header)
    test_node.send_message(msg_block(all_blocks[1]))
    test_node.sync_with_ping()
    assert_raises_rpc_error(-5, "Block not found", self.nodes[0].getblock, all_blocks[1].hash)
    assert_raises_rpc_error(-5, "Block not found", self.nodes[0].getblockheader, all_blocks[1].hash)

    # The block at height 5 should be accepted if we provide the missing header, though
    headers_message = msg_headers()
    headers_message.headers.append(CBlockHeader(all_blocks[0]))
    test_node.send_message(headers_message)
    test_node.send_message(msg_block(all_blocks[1]))
    test_node.sync_with_ping()
    self.nodes[0].getblock(all_blocks[1].hash)

    # Now send the blocks in all_blocks
    for i in range(288):
        test_node.send_message(msg_block(all_blocks[i]))
    test_node.sync_with_ping()

    # Blocks 1-287 should be accepted, block 288 should be ignored because it's too far ahead
    for x in all_blocks[:-1]:
        self.nodes[0].getblock(x.hash)
    assert_raises_rpc_error(-1, "Block not found on disk", self.nodes[0].getblock, all_blocks[-1].hash)

    # 5. Test handling of unrequested block on the node that didn't process
    # Should still not be processed (even though it has a child that has more
    # work).

    # The node should have requested the blocks at some point, so
    # disconnect/reconnect first
    self.nodes[0].disconnect_p2ps()
    self.nodes[1].disconnect_p2ps()
    network_thread_join()

    test_node = self.nodes[0].add_p2p_connection(P2PInterface())
    network_thread_start()
    test_node.wait_for_verack()

    test_node.send_message(msg_block(block_h1f))

    test_node.sync_with_ping()
    assert_equal(self.nodes[0].getblockcount(), 2)
    self.log.info("Unrequested block that would complete more-work chain was ignored")

    # 6. Try to get node to request the missing block.
    # Poke the node with an inv for block at height 3 and see if that
    # triggers a getdata on block 2 (it should if block 2 is missing).
    with mininode_lock:
        # Clear state so we can check the getdata request
        test_node.last_message.pop("getdata", None)
        test_node.send_message(msg_inv([CInv(2, block_h3.sha256)]))

    test_node.sync_with_ping()
    with mininode_lock:
        getdata = test_node.last_message["getdata"]

    # Check that the getdata includes the right block
    assert_equal(getdata.inv[0].hash, block_h1f.sha256)
    self.log.info("Inv at tip triggered getdata for unprocessed block")

    # 7. Send the missing block for the third time (now it is requested)
    test_node.send_message(msg_block(block_h1f))

    test_node.sync_with_ping()
    assert_equal(self.nodes[0].getblockcount(), 290)
    self.nodes[0].getblock(all_blocks[286].hash)
    assert_equal(self.nodes[0].getbestblockhash(), all_blocks[286].hash)
    assert_raises_rpc_error(-1, "Block not found on disk", self.nodes[0].getblock, all_blocks[287].hash)
    self.log.info("Successfully reorged to longer chain from non-whitelisted peer")

    # 8. Create a chain which is invalid at a height longer than the
    # current chain, but which has more blocks on top of that
    block_289f = create_block(all_blocks[284].sha256, create_coinbase(289), all_blocks[284].nTime + 1)
    block_289f.solve()
    block_290f = create_block(block_289f.sha256, create_coinbase(290), block_289f.nTime + 1)
    block_290f.solve()
    block_291 = create_block(block_290f.sha256, create_coinbase(291), block_290f.nTime + 1)
    # block_291 spends a coinbase below maturity!
    block_291.vtx.append(create_transaction(block_290f.vtx[0], 0, b"42", 1))
    block_291.hashMerkleRoot = block_291.calc_merkle_root()
    block_291.solve()
    block_292 = create_block(block_291.sha256, create_coinbase(292), block_291.nTime + 1)
    block_292.solve()

    # Now send all the headers on the chain and enough blocks to trigger reorg
    headers_message = msg_headers()
    headers_message.headers.append(CBlockHeader(block_289f))
    headers_message.headers.append(CBlockHeader(block_290f))
    headers_message.headers.append(CBlockHeader(block_291))
    headers_message.headers.append(CBlockHeader(block_292))
    test_node.send_message(headers_message)

    test_node.sync_with_ping()
    tip_entry_found = False
    for x in self.nodes[0].getchaintips():
        if x['hash'] == block_292.hash:
            assert_equal(x['status'], "headers-only")
            tip_entry_found = True
    assert tip_entry_found
    assert_raises_rpc_error(-1, "Block not found on disk", self.nodes[0].getblock, block_292.hash)

    test_node.send_message(msg_block(block_289f))
    test_node.send_message(msg_block(block_290f))

    test_node.sync_with_ping()
    self.nodes[0].getblock(block_289f.hash)
    self.nodes[0].getblock(block_290f.hash)

    test_node.send_message(msg_block(block_291))

    # At this point we've sent an obviously-bogus block, wait for full processing
    # without assuming whether we will be disconnected or not
    try:
        # Only wait a short while so the test doesn't take forever if we do get
        # disconnected
        test_node.sync_with_ping(timeout=1)
    except AssertionError:
        test_node.wait_for_disconnect()

        self.nodes[0].disconnect_p2ps()
        test_node = self.nodes[0].add_p2p_connection(P2PInterface())

        network_thread_start()
        test_node.wait_for_verack()

    # We should have failed reorg and switched back to 290 (but have block 291)
    assert_equal(self.nodes[0].getblockcount(), 290)
    assert_equal(self.nodes[0].getbestblockhash(), all_blocks[286].hash)
    assert_equal(self.nodes[0].getblock(block_291.hash)["confirmations"], -1)

    # Now send a new header on the invalid chain, indicating we're forked off, and expect to get disconnected
    block_293 = create_block(block_292.sha256, create_coinbase(293), block_292.nTime + 1)
    block_293.solve()
    headers_message = msg_headers()
    headers_message.headers.append(CBlockHeader(block_293))
    test_node.send_message(headers_message)
    test_node.wait_for_disconnect()

    # 9. Connect node1 to node0 and ensure it is able to sync
    connect_nodes(self.nodes[0], self.nodes[1])
    sync_blocks([self.nodes[0], self.nodes[1]])
    self.log.info("Successfully synced nodes 1 and 0")
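# The "headers-only" chain-tip check above is repeated for several blocks. A small
# helper along the lines of the sketch below could express it once; the name
# assert_headers_only_tip is made up for illustration and is not part of the framework.
def assert_headers_only_tip(node, block_hash):
    """Assert that block_hash is known to the node only as a headers-only chain tip."""
    matching = [t for t in node.getchaintips() if t['hash'] == block_hash]
    assert matching, "block %s not found among chain tips" % block_hash
    assert_equal(matching[0]['status'], "headers-only")

# e.g. instead of the tip_entry_found loop:
# assert_headers_only_tip(self.nodes[0], block_h1f.hash)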
def test_request_limit():

    def test_send_from_two_to_one(send_1, expected_score_1, send_2, expected_score_2, clear_requests=False):
        if clear_requests:
            force_request_expire()
        if send_1:
            p2p_mn3_1.test_qgetdata(qgetdata_vvec, 0, self.llmq_threshold, 0)
        if send_2:
            p2p_mn3_2.test_qgetdata(qgetdata_vvec, 0, self.llmq_threshold, 0)
        wait_for_banscore(mn3.node, id_p2p_mn3_1, expected_score_1)
        wait_for_banscore(mn3.node, id_p2p_mn3_2, expected_score_2)

    self.log.info("Test request limiting / banscore increases")

    p2p_mn1 = p2p_connection(mn1.node)
    network_thread_start()
    p2p_mn1.wait_for_verack()
    id_p2p_mn1 = get_mininode_id(mn1.node)
    mnauth(mn1.node, id_p2p_mn1, fake_mnauth_1[0], fake_mnauth_1[1])
    p2p_mn1.test_qgetdata(qgetdata_vvec, 0, self.llmq_threshold, 0)
    wait_for_banscore(mn1.node, id_p2p_mn1, 0)
    force_request_expire(299)  # This shouldn't clear requests, next request should bump score
    p2p_mn1.test_qgetdata(qgetdata_vvec, 0, self.llmq_threshold, 0)
    wait_for_banscore(mn1.node, id_p2p_mn1, 25)
    force_request_expire(1)  # This should clear the requests now, next request should not bump score
    p2p_mn1.test_qgetdata(qgetdata_vvec, 0, self.llmq_threshold, 0)
    wait_for_banscore(mn1.node, id_p2p_mn1, 25)
    mn1.node.disconnect_p2ps()
    network_thread_join()

    # Requesting one QDATA with mn1 and mn2 from mn3 should not result
    # in banscore increase for either of both.
    p2p_mn3_1 = p2p_connection(mn3.node, uacomment_m3_1)
    p2p_mn3_2 = p2p_connection(mn3.node, uacomment_m3_2)
    network_thread_start()
    p2p_mn3_1.wait_for_verack()
    p2p_mn3_2.wait_for_verack()
    id_p2p_mn3_1 = get_mininode_id(mn3.node, uacomment_m3_1)
    id_p2p_mn3_2 = get_mininode_id(mn3.node, uacomment_m3_2)
    assert id_p2p_mn3_1 != id_p2p_mn3_2
    mnauth(mn3.node, id_p2p_mn3_1, fake_mnauth_1[0], fake_mnauth_1[1])
    mnauth(mn3.node, id_p2p_mn3_2, fake_mnauth_2[0], fake_mnauth_2[1])
    # Now try some {mn1, mn2} - QGETDATA -> mn3 combinations to make
    # sure the request limit works per connection
    test_send_from_two_to_one(False, 0, True, 0, True)
    test_send_from_two_to_one(True, 0, True, 25)
    test_send_from_two_to_one(True, 25, False, 25)
    test_send_from_two_to_one(False, 25, True, 25, True)
    test_send_from_two_to_one(True, 25, True, 50)
    test_send_from_two_to_one(True, 50, True, 75)
    test_send_from_two_to_one(True, 50, True, 75, True)
    test_send_from_two_to_one(True, 75, False, 75)
    test_send_from_two_to_one(False, 75, True, None)
    # mn1 should still have a score of 75
    wait_for_banscore(mn3.node, id_p2p_mn3_1, 75)
    # mn2 should be "banned" now
    wait_until(lambda: not p2p_mn3_2.is_connected, timeout=10)
    mn3.node.disconnect_p2ps()
    network_thread_join()
def run_test(self):
    node = self.nodes[0].add_p2p_connection(P2PIgnoreInv())
    network_thread_start()
    node.wait_for_verack()

    expected_services = NODE_BLOOM | NODE_WITNESS | NODE_NETWORK_LIMITED

    self.log.info("Check that node has signalled expected services.")
    assert_equal(node.nServices, expected_services)

    self.log.info("Check that the localservices is as expected.")
    assert_equal(int(self.nodes[0].getnetworkinfo()['localservices'], 16), expected_services)

    self.log.info("Mine enough blocks to reach the NODE_NETWORK_LIMITED range.")
    connect_nodes_bi(self.nodes, 0, 1)
    blocks = self.nodes[1].generate(292)
    sync_blocks([self.nodes[0], self.nodes[1]])

    self.log.info("Make sure we can retrieve the block at tip-288.")
    node.send_getdata_for_block(blocks[1])  # last block in valid range
    node.wait_for_block(int(blocks[1], 16), timeout=3)

    self.log.info("Requesting block at height 2 (tip-289) must fail (ignored).")
    node.send_getdata_for_block(blocks[0])  # first block outside of the 288+2 limit
    node.wait_for_disconnect(5)

    self.log.info("Check local address relay, do a fresh connection.")
    self.nodes[0].disconnect_p2ps()
    network_thread_join()
    node1 = self.nodes[0].add_p2p_connection(P2PIgnoreInv())
    network_thread_start()
    node1.wait_for_verack()
    node1.send_message(msg_verack())

    node1.wait_for_addr()
    # must relay address with NODE_NETWORK_LIMITED
    assert_equal(node1.firstAddrnServices, 1036)

    self.nodes[0].disconnect_p2ps()
    node1.wait_for_disconnect()

    # connect unsynced node 2 with pruned NODE_NETWORK_LIMITED peer
    # because node 2 is in IBD and node 0 is a NODE_NETWORK_LIMITED peer, sync must not be possible
    connect_nodes_bi(self.nodes, 0, 2)
    try:
        sync_blocks([self.nodes[0], self.nodes[2]], timeout=5)
    except:
        pass
    # node2 must remain at height 0
    assert_equal(self.nodes[2].getblockheader(self.nodes[2].getbestblockhash())['height'], 0)

    # now connect also to node 1 (non pruned)
    connect_nodes_bi(self.nodes, 1, 2)

    # sync must be possible
    sync_blocks(self.nodes)

    # disconnect all peers
    self.disconnect_all()

    # mine 10 blocks on node 0 (pruned node)
    self.nodes[0].generate(10)

    # connect node1 (non pruned) with node0 (pruned) and check if they can sync
    connect_nodes_bi(self.nodes, 0, 1)

    # sync must be possible, node 1 is no longer in IBD and should therefore connect to node 0 (NODE_NETWORK_LIMITED)
    sync_blocks([self.nodes[0], self.nodes[1]])
def test_basics():
    self.log.info("Testing basics of QGETDATA/QDATA")
    p2p_node0 = p2p_connection(node0)
    p2p_mn1 = p2p_connection(mn1.node)
    network_thread_start()
    p2p_node0.wait_for_verack()
    p2p_mn1.wait_for_verack()
    id_p2p_node0 = get_mininode_id(node0)
    id_p2p_mn1 = get_mininode_id(mn1.node)

    # Ensure that both nodes start with zero ban score
    wait_for_banscore(node0, id_p2p_node0, 0)
    wait_for_banscore(mn1.node, id_p2p_mn1, 0)

    self.log.info("Check that normal node doesn't respond to qgetdata "
                  "and does bump our score")
    p2p_node0.test_qgetdata(qgetdata_all, response_expected=False)
    wait_for_banscore(node0, id_p2p_node0, 10)

    # The masternode should not respond to qgetdata for non-masternode connections
    self.log.info("Check that masternode doesn't respond to "
                  "non-masternode connection and does bump our score")
    p2p_mn1.test_qgetdata(qgetdata_all, response_expected=False)
    wait_for_banscore(mn1.node, id_p2p_mn1, 10)

    # Open a fake MNAUTH authenticated P2P connection to the masternode to allow qgetdata
    node0.disconnect_p2ps()
    mn1.node.disconnect_p2ps()
    network_thread_join()
    p2p_mn1 = p2p_connection(mn1.node)
    network_thread_start()
    p2p_mn1.wait_for_verack()
    id_p2p_mn1 = get_mininode_id(mn1.node)
    mnauth(mn1.node, id_p2p_mn1, fake_mnauth_1[0], fake_mnauth_1[1])

    # The masternode should now respond to qgetdata requests
    self.log.info("Request verification vector")
    p2p_mn1.test_qgetdata(qgetdata_vvec, 0, self.llmq_threshold, 0)
    wait_for_banscore(mn1.node, id_p2p_mn1, 0)

    # Note: our banscore is bumped as we are requesting too rapidly,
    # however the node still returns the data
    self.log.info("Request encrypted contributions")
    p2p_mn1.test_qgetdata(qgetdata_contributions, 0, 0, self.llmq_size)
    wait_for_banscore(mn1.node, id_p2p_mn1, 25)

    # Request both
    # Note: our banscore is bumped as we are requesting too rapidly,
    # however the node still returns the data
    self.log.info("Request both")
    p2p_mn1.test_qgetdata(qgetdata_all, 0, self.llmq_threshold, self.llmq_size)
    wait_for_banscore(mn1.node, id_p2p_mn1, 50)

    mn1.node.disconnect_p2ps()
    network_thread_join()

    self.log.info("Test ban score increase for invalid / unexpected QDATA")
    p2p_mn1 = p2p_connection(mn1.node)
    p2p_mn2 = p2p_connection(mn2.node)
    network_thread_start()
    p2p_mn1.wait_for_verack()
    p2p_mn2.wait_for_verack()
    id_p2p_mn1 = get_mininode_id(mn1.node)
    id_p2p_mn2 = get_mininode_id(mn2.node)
    mnauth(mn1.node, id_p2p_mn1, fake_mnauth_1[0], fake_mnauth_1[1])
    mnauth(mn2.node, id_p2p_mn2, fake_mnauth_2[0], fake_mnauth_2[1])
    wait_for_banscore(mn1.node, id_p2p_mn1, 0)
    p2p_mn2.test_qgetdata(qgetdata_all, 0, self.llmq_threshold, self.llmq_size)
    qdata_valid = p2p_mn2.get_qdata()

    # - Not requested
    p2p_mn1.send_message(qdata_valid)
    time.sleep(1)
    wait_for_banscore(mn1.node, id_p2p_mn1, 10)

    # - Already received
    force_request_expire()
    assert mn1.node.quorum("getdata", id_p2p_mn1, 100, quorum_hash, 0x03, mn1.proTxHash)
    p2p_mn1.wait_for_qgetdata()
    p2p_mn1.send_message(qdata_valid)
    time.sleep(1)
    p2p_mn1.send_message(qdata_valid)
    wait_for_banscore(mn1.node, id_p2p_mn1, 20)

    # - Not like requested
    force_request_expire()
    assert mn1.node.quorum("getdata", id_p2p_mn1, 100, quorum_hash, 0x03, mn1.proTxHash)
    p2p_mn1.wait_for_qgetdata()
    qdata_invalid_request = qdata_valid
    qdata_invalid_request.data_mask = 2
    p2p_mn1.send_message(qdata_invalid_request)
    wait_for_banscore(mn1.node, id_p2p_mn1, 30)

    # - Invalid verification vector
    force_request_expire()
    assert mn1.node.quorum("getdata", id_p2p_mn1, 100, quorum_hash, 0x03, mn1.proTxHash)
    p2p_mn1.wait_for_qgetdata()
    qdata_invalid_vvec = qdata_valid
    qdata_invalid_vvec.quorum_vvec.pop()
    p2p_mn1.send_message(qdata_invalid_vvec)
    wait_for_banscore(mn1.node, id_p2p_mn1, 40)

    # - Invalid contributions
    force_request_expire()
    assert mn1.node.quorum("getdata", id_p2p_mn1, 100, quorum_hash, 0x03, mn1.proTxHash)
    p2p_mn1.wait_for_qgetdata()
    qdata_invalid_contribution = qdata_valid
    qdata_invalid_contribution.enc_contributions.pop()
    p2p_mn1.send_message(qdata_invalid_contribution)
    wait_for_banscore(mn1.node, id_p2p_mn1, 50)

    mn1.node.disconnect_p2ps()
    mn2.node.disconnect_p2ps()
    network_thread_join()

    self.log.info("Test all available error codes")
    p2p_mn1 = p2p_connection(mn1.node)
    network_thread_start()
    p2p_mn1.wait_for_verack()
    id_p2p_mn1 = get_mininode_id(mn1.node)
    mnauth(mn1.node, id_p2p_mn1, fake_mnauth_1[0], fake_mnauth_1[1])
    qgetdata_invalid_type = msg_qgetdata(quorum_hash_int, 103, 0x01, protx_hash_int)
    qgetdata_invalid_block = msg_qgetdata(protx_hash_int, 100, 0x01, protx_hash_int)
    qgetdata_invalid_quorum = msg_qgetdata(int(mn1.node.getblockhash(0), 16), 100, 0x01, protx_hash_int)
    qgetdata_invalid_no_member = msg_qgetdata(quorum_hash_int, 100, 0x02, quorum_hash_int)
    p2p_mn1.test_qgetdata(qgetdata_invalid_type, QUORUM_TYPE_INVALID)
    p2p_mn1.test_qgetdata(qgetdata_invalid_block, QUORUM_BLOCK_NOT_FOUND)
    p2p_mn1.test_qgetdata(qgetdata_invalid_quorum, QUORUM_NOT_FOUND)
    p2p_mn1.test_qgetdata(qgetdata_invalid_no_member, MASTERNODE_IS_NO_MEMBER)
    # The last two error cases require the node to miss its DKG data, so we just reindex the node.
    mn1.node.disconnect_p2ps()
    network_thread_join()
    self.restart_mn(mn1, reindex=True)
    # Re-connect to the masternode
    p2p_mn1 = p2p_connection(mn1.node)
    p2p_mn2 = p2p_connection(mn2.node)
    network_thread_start()
    p2p_mn1.wait_for_verack()
    p2p_mn2.wait_for_verack()
    id_p2p_mn1 = get_mininode_id(mn1.node)
    id_p2p_mn2 = get_mininode_id(mn2.node)
    assert id_p2p_mn1 is not None
    assert id_p2p_mn2 is not None
    mnauth(mn1.node, id_p2p_mn1, fake_mnauth_1[0], fake_mnauth_1[1])
    mnauth(mn2.node, id_p2p_mn2, fake_mnauth_2[0], fake_mnauth_2[1])
    # Validate the DKG data is missing
    p2p_mn1.test_qgetdata(qgetdata_vvec, QUORUM_VERIFICATION_VECTOR_MISSING)
    p2p_mn1.test_qgetdata(qgetdata_contributions, ENCRYPTED_CONTRIBUTIONS_MISSING)

    self.log.info("Test DKG data recovery with QDATA")
    # Now that mn1 is missing its DKG data, try to recover it by querying the data from mn2 and then sending it
    # to mn1 with a direct QDATA message.
    #
    # mininode - QGETDATA -> mn2 - QDATA -> mininode - QDATA -> mn1
    #
    # However, mn1 only accepts self-requested QDATA messages, that's why we trigger mn1 - QGETDATA -> mininode
    # via the RPC command "quorum getdata".
    #
    # Get the required DKG data for mn1
    p2p_mn2.test_qgetdata(qgetdata_all, 0, self.llmq_threshold, self.llmq_size)
    # Trigger mn1 - QGETDATA -> p2p_mn1
    assert mn1.node.quorum("getdata", id_p2p_mn1, 100, quorum_hash, 0x03, mn1.proTxHash)
    # Wait until mn1 sent the QGETDATA to p2p_mn1
    p2p_mn1.wait_for_qgetdata()
    # Send the QDATA received from mn2 to mn1
    p2p_mn1.send_message(p2p_mn2.get_qdata())
    # Now mn1 should have its data back!
    self.wait_for_quorum_data([mn1], 100, quorum_hash, recover=False)
    # Restart one more time and make sure data gets saved to db
    mn1.node.disconnect_p2ps()
    mn2.node.disconnect_p2ps()
    network_thread_join()
    self.restart_mn(mn1)
    self.wait_for_quorum_data([mn1], 100, quorum_hash, recover=False)
def test_cannot_sync_with_snapshot(self):
    """
    This test creates the following nodes:
    1. snap_node - snapshot node that is used as a helper node to generate the snapshot
    2. helper_p2p - mini node that retrieves the content of the snapshot
    3. full_snap_p2p - mini node that has the full 2nd best snapshot
    4. half_snap_p2p - mini node that has half of the best snapshot
    5. no_snap_p2p - mini node that doesn't have a snapshot
    6. sync_node - the node which syncs with the snapshot
    """
    snap_node = self.nodes[6]
    sync_node = self.nodes[7]

    self.start_node(snap_node.index)
    self.start_node(sync_node.index)

    self.setup_stake_coins(snap_node)

    # add 2nd best snapshot to full_snap_p2p
    snap_node.generatetoaddress(5 + 5 + 1, snap_node.getnewaddress('', 'bech32'))
    assert_equal(snap_node.getblockcount(), 11)
    wait_until(lambda: has_valid_snapshot(snap_node, 4), timeout=10)
    full_snap_p2p = sync_node.add_p2p_connection(WaitNode(), services=SERVICE_FLAGS_WITH_SNAPSHOT)
    no_snap_p2p = sync_node.add_p2p_connection(WaitNode())
    for p2p in [full_snap_p2p, no_snap_p2p]:
        p2p.update_snapshot_from(snap_node)

    # add the best snapshot to half_snap_p2p
    snap_node.generatetoaddress(5, snap_node.getnewaddress('', 'bech32'))
    assert_equal(snap_node.getblockcount(), 16)
    wait_until(lambda: has_valid_snapshot(snap_node, 9), timeout=10)
    half_snap_p2p = sync_node.add_p2p_connection(WaitNode(), services=SERVICE_FLAGS_WITH_SNAPSHOT)
    half_snap_p2p.update_snapshot_from(snap_node)
    for p2p in [half_snap_p2p, full_snap_p2p, no_snap_p2p]:
        p2p.update_headers_and_blocks_from(snap_node)

    self.stop_node(snap_node.index)

    network_thread_start()

    # test 1. the node requests the snapshot from peers that have the service flag set
    full_snap_p2p.wait_for_verack()
    half_snap_p2p.wait_for_verack()
    no_snap_p2p.wait_for_verack()

    wait_until(lambda: full_snap_p2p.snapshot_header_requested, timeout=10)
    wait_until(lambda: half_snap_p2p.snapshot_header_requested, timeout=10)
    wait_until(lambda: half_snap_p2p.snapshot_chunk1_requested, timeout=10)
    assert full_snap_p2p.snapshot_header_requested is True
    assert half_snap_p2p.snapshot_header_requested is True
    assert no_snap_p2p.snapshot_header_requested is False
    assert full_snap_p2p.snapshot_chunk1_requested is False  # didn't start asking for the 2nd best
    self.log.info('Service flags are correctly recognized')

    # test 2. the node can't receive the 2nd part of the snapshot
    half_snap_p2p.return_snapshot_chunk1 = True
    half_snap_p2p.on_getsnapshot(half_snap_p2p.last_getsnapshot_message)
    wait_until(lambda: half_snap_p2p.snapshot_chunk2_requested, timeout=10)
    assert_has_snapshot_on_disk(sync_node, half_snap_p2p.snapshot_header.snapshot_hash)
    wait_until(lambda: full_snap_p2p.snapshot_chunk1_requested, timeout=10)  # fallback to 2nd best
    assert_no_snapshot_on_disk(sync_node, half_snap_p2p.snapshot_header.snapshot_hash)
    self.log.info('Node cannot receive 2nd half of the snapshot')

    # test 3. the node can't receive the parent block
    full_snap_p2p.return_snapshot_chunk1 = True
    full_snap_p2p.return_snapshot_chunk2 = True
    full_snap_p2p.on_getsnapshot(full_snap_p2p.last_getsnapshot_message)
    wait_until(lambda: full_snap_p2p.parent_block_requested, timeout=10)
    wait_until(lambda: no_snap_p2p.parent_block_requested, timeout=10)
    assert_has_snapshot_on_disk(sync_node, full_snap_p2p.snapshot_header.snapshot_hash)
    self.log.info('Node cannot receive parent block from already connected peers')

    # test 4. the node can't receive the parent block from new peers
    sync_node.disconnect_p2ps()
    network_thread_join()

    for p2p in [full_snap_p2p, no_snap_p2p]:
        p2p.snapshot_chunk1_requested = False
        p2p.snapshot_chunk2_requested = False
        p2p.parent_block_requested = False

    sync_node.add_p2p_connection(full_snap_p2p)
    sync_node.add_p2p_connection(no_snap_p2p)
    network_thread_start()
    full_snap_p2p.wait_for_verack()
    no_snap_p2p.wait_for_verack()

    wait_until(lambda: full_snap_p2p.parent_block_requested, timeout=10)
    wait_until(lambda: no_snap_p2p.parent_block_requested, timeout=10)
    assert full_snap_p2p.snapshot_chunk1_requested is False
    assert no_snap_p2p.snapshot_chunk1_requested is False
    assert_has_snapshot_on_disk(sync_node, full_snap_p2p.snapshot_header.snapshot_hash)
    self.log.info('Node cannot receive parent block from new peers')

    self.stop_node(sync_node.index)
    network_thread_join()

    self.log.info('test_cannot_sync_with_snapshot passed')
def test_sync_with_restarts(self):
    """
    This test creates the following nodes:
    1. snap_node - full node that has the snapshot
    2. snap_p2p - mini node that is used as a helper to retrieve the snapshot content
    3. node - the node which syncs the snapshot
    4. p2p - mini node that sends the snapshot in stages
    """
    snap_node = self.nodes[2]
    node = self.nodes[3]

    self.start_node(snap_node.index)
    self.start_node(node.index)

    self.setup_stake_coins(snap_node)

    # generate 2 epochs + 1 block to create the first finalized snapshot
    snap_node.generatetoaddress(5 + 5 + 1, snap_node.getnewaddress('', 'bech32'))
    assert_equal(snap_node.getblockcount(), 11)
    wait_until(lambda: has_valid_snapshot(snap_node, 4), timeout=10)

    # configure p2p to have snapshot header and parent block
    p2p = node.add_p2p_connection(WaitNode(), services=SERVICE_FLAGS_WITH_SNAPSHOT)
    p2p.update_snapshot_from(snap_node)
    p2p.update_headers_and_blocks_from(snap_node)

    network_thread_start()

    # test 1. the node can be restarted after it discovered the snapshot
    wait_until(lambda: p2p.snapshot_chunk1_requested, timeout=10)
    node.disconnect_p2ps()
    network_thread_join()
    self.restart_node(node.index)
    self.log.info('Node restarted successfully after it discovered the snapshot')

    # test 2. the node can be restarted after it downloaded half of the snapshot
    # and deletes its partially downloaded snapshot
    p2p.return_snapshot_chunk1 = True
    node.add_p2p_connection(p2p, services=SERVICE_FLAGS_WITH_SNAPSHOT)
    network_thread_start()
    wait_until(lambda: p2p.snapshot_chunk2_requested, timeout=10)
    node.disconnect_p2ps()
    network_thread_join()
    assert_has_snapshot_on_disk(node, p2p.snapshot_header.snapshot_hash)
    self.restart_node(node.index)
    assert_no_snapshot_on_disk(node, p2p.snapshot_header.snapshot_hash)
    assert_equal(len(os.listdir(os.path.join(node.datadir, "regtest", "snapshots"))), 0)
    self.log.info('Node restarted successfully after it downloaded half of the snapshot')

    # test 3. the node can be restarted after it downloaded the full snapshot
    # and doesn't delete it
    p2p.return_snapshot_chunk2 = True
    node.add_p2p_connection(p2p, services=SERVICE_FLAGS_WITH_SNAPSHOT)
    network_thread_start()
    wait_until(lambda: p2p.parent_block_requested, timeout=10)
    node.disconnect_p2ps()
    network_thread_join()
    assert_has_snapshot_on_disk(node, p2p.snapshot_header.snapshot_hash)
    self.restart_node(node.index)
    assert_has_snapshot_on_disk(node, p2p.snapshot_header.snapshot_hash)
    self.log.info('Node restarted successfully after it downloaded the full snapshot')

    # test 4. the node can be restarted after it downloaded the parent block
    p2p.snapshot_header_requested = False
    p2p.snapshot_chunk1_requested = False
    p2p.snapshot_chunk2_requested = False
    p2p.return_parent_block = True
    node.add_p2p_connection(p2p, services=SERVICE_FLAGS_WITH_SNAPSHOT)
    network_thread_start()
    wait_until(lambda: node.getblockcount() == snap_node.getblockcount(), timeout=10)
    assert_chainstate_equal(node, snap_node)

    # node didn't request a new snapshot as it has already downloaded one
    assert_equal(p2p.snapshot_header_requested, False)
    assert_equal(p2p.snapshot_chunk1_requested, False)
    assert_equal(p2p.snapshot_chunk2_requested, False)

    node.disconnect_p2ps()
    network_thread_join()
    self.restart_node(node.index)
    self.restart_node(snap_node.index)
    assert_chainstate_equal(node, snap_node)
    assert_equal(node.listsnapshots(), snap_node.listsnapshots())
    self.log.info('Node restarted successfully after it downloaded the parent block')

    # clean up test
    self.stop_node(snap_node.index)
    self.stop_node(node.index)
    self.log.info('test_sync_with_restarts passed')
def test_p2p_schema(self):
    """
    This test creates the following nodes:
    1. serving_node - full node that has the snapshot
    2. syncing_p2p - mini node that downloads the snapshot from serving_node and tests the protocol
    3. syncing_node - the node which starts with fast sync
    4. serving_p2p - mini node that sends the snapshot to syncing_node and tests the protocol
    """
    serving_node = self.nodes[0]
    syncing_node = self.nodes[1]

    self.start_node(serving_node.index)
    self.start_node(syncing_node.index)

    self.setup_stake_coins(serving_node)

    # generate 2 epochs + 1 block to create the first finalized snapshot
    serving_node.generatetoaddress(5 + 5 + 1, serving_node.getnewaddress('', 'bech32'))
    assert_equal(serving_node.getblockcount(), 11)
    wait_until(lambda: has_valid_snapshot(serving_node, 4), timeout=10)

    syncing_p2p = serving_node.add_p2p_connection(BaseNode())
    serving_p2p = syncing_node.add_p2p_connection(BaseNode(), services=SERVICE_FLAGS_WITH_SNAPSHOT)

    # configure serving_p2p to have snapshot header and parent block
    serving_p2p.update_snapshot_from(serving_node)
    serving_p2p.update_headers_and_blocks_from(serving_node)

    network_thread_start()
    syncing_p2p.wait_for_verack()

    # test snapshot downloading in chunks
    syncing_p2p.send_message(msg_getsnaphead())
    wait_until(lambda: syncing_p2p.snapshot_header.total_utxo_subsets > 0, timeout=10)
    chunks = math.ceil(syncing_p2p.snapshot_header.total_utxo_subsets / 2)
    for i in range(1, chunks + 1):
        getsnapshot = GetSnapshot(syncing_p2p.snapshot_header.snapshot_hash, len(syncing_p2p.snapshot_data), 2)
        syncing_p2p.send_message(msg_getsnapshot(getsnapshot))
        snapshot_size = min(i * 2, syncing_p2p.snapshot_header.total_utxo_subsets)
        wait_until(lambda: len(syncing_p2p.snapshot_data) == snapshot_size, timeout=10)
    assert_equal(len(syncing_p2p.snapshot_data), syncing_p2p.snapshot_header.total_utxo_subsets)
    self.log.info('Snapshot was downloaded successfully')

    # validate the snapshot hash
    utxos = []
    for subset in syncing_p2p.snapshot_data:
        for n in subset.outputs:
            out = COutPoint(subset.tx_id, n)
            utxo = UTXO(subset.height, subset.tx_type, out, subset.outputs[n])
            utxos.append(utxo)
    inputs = bytes_to_hex_str(ser_vector([]))
    outputs = bytes_to_hex_str(ser_vector(utxos))
    stake_modifier = "%064x" % syncing_p2p.snapshot_header.stake_modifier
    chain_work = bytes_to_hex_str(ser_uint256(syncing_p2p.snapshot_header.chain_work))
    res = self.nodes[0].calcsnapshothash(inputs, outputs, stake_modifier, chain_work)
    snapshot_hash = uint256_from_hex(res['hash'])
    assert_equal(snapshot_hash, syncing_p2p.snapshot_header.snapshot_hash)
    self.log.info('Snapshot was validated successfully')

    # test snapshot serving
    wait_until(lambda: serving_p2p.snapshot_requested, timeout=10)
    snapshot = Snapshot(
        snapshot_hash=serving_p2p.snapshot_header.snapshot_hash,
        utxo_subset_index=0,
        utxo_subsets=syncing_p2p.snapshot_data,
    )
    serving_p2p.send_message(msg_snapshot(snapshot))
    wait_until(lambda: syncing_node.getblockcount() == 11, timeout=10)
    assert_equal(serving_node.gettxoutsetinfo(), syncing_node.gettxoutsetinfo())
    self.log.info('Snapshot was sent successfully')

    # clean up test
    serving_node.disconnect_p2ps()
    syncing_node.disconnect_p2ps()
    network_thread_join()
    self.stop_node(serving_node.index)
    self.stop_node(syncing_node.index)
    self.log.info('test_p2p_schema passed')
def get_tests(self):
    self.genesis_hash = int(self.nodes[0].getbestblockhash(), 16)
    self.block_heights[self.genesis_hash] = 0
    spendable_outputs = []

    # save the current tip so it can be spent by a later block
    def save_spendable_output():
        spendable_outputs.append(self.tip)

    # get an output that we previously marked as spendable
    def get_spendable_output():
        return PreviousSpendableOutput(spendable_outputs.pop(0).vtx[0], 0)

    # returns a test case that asserts that the current tip was accepted
    def accepted():
        return TestInstance([[self.tip, True]])

    # returns a test case that asserts that the current tip was rejected
    def rejected(reject=None):
        if reject is None:
            return TestInstance([[self.tip, False]])
        else:
            return TestInstance([[self.tip, reject]])

    # move the tip back to a previous block
    def tip(number):
        self.tip = self.blocks[number]

    # shorthand for functions
    block = self.next_block

    # Create a new block
    block(0)
    save_spendable_output()
    yield accepted()

    # Now we need that block to mature so we can spend the coinbase.
    test = TestInstance(sync_every_block=False)
    for i in range(99):
        block(5000 + i)
        test.blocks_and_transactions.append([self.tip, True])
        save_spendable_output()

    # Get to one block of the May 15, 2018 HF activation
    for i in range(6):
        block(5100 + i)
        test.blocks_and_transactions.append([self.tip, True])

    # Send it all to the node at once.
    yield test

    # collect spendable outputs now to avoid cluttering the code later on
    out = []
    for i in range(100):
        out.append(get_spendable_output())

    # There can be only one network thread running at a time.
    # Adding a new P2P connection here will try to start the network thread
    # at init, which will throw an assertion because it's already running.
    # This requires a few steps to avoid this:
    # 1/ Disconnect all the TestManager nodes
    # 2/ Terminate the network thread
    # 3/ Add the new P2P connection
    # 4/ Reconnect all the TestManager nodes
    # 5/ Restart the network thread

    # Disconnect all the TestManager nodes
    [n.disconnect_node() for n in self.test.p2p_connections]
    self.test.wait_for_disconnections()
    self.test.clear_all_connections()

    # Wait for the network thread to terminate
    network_thread_join()

    # Add the new connection
    node = self.nodes[0]
    node.add_p2p_connection(TestNode())

    # Reconnect TestManager nodes
    self.test.add_all_connections(self.nodes)

    # Restart the network thread
    network_thread_start()

    # Wait for connection to be established
    peer = node.p2p
    peer.wait_for_verack()

    # Check that compact block also works for big blocks
    # Wait for SENDCMPCT
    def received_sendcmpct():
        return peer.last_sendcmpct is not None
    wait_until(received_sendcmpct, timeout=30)

    sendcmpct = msg_sendcmpct()
    sendcmpct.version = 1
    sendcmpct.announce = True
    peer.send_and_ping(sendcmpct)

    # Exchange headers
    def received_getheaders():
        return peer.last_getheaders is not None
    wait_until(received_getheaders, timeout=30)

    # Return the favor
    peer.send_message(peer.last_getheaders)

    # Wait for the header list
    def received_headers():
        return peer.last_headers is not None
    wait_until(received_headers, timeout=30)

    # It's like we know about the same headers!
    peer.send_message(peer.last_headers)

    # Send a block
    b1 = block(1, spend=out[0], block_size=ONE_MEGABYTE + 1)
    yield accepted()

    # Check that the node forwards it via compact block
    def received_block():
        return peer.last_cmpctblock is not None
    wait_until(received_block, timeout=30)

    # Was it our block?
    cmpctblk_header = peer.last_cmpctblock.header_and_shortids.header
    cmpctblk_header.calc_sha256()
    assert cmpctblk_header.sha256 == b1.sha256

    # Send a large block with numerous transactions.
    peer.clear_block_data()
    b2 = block(2, spend=out[1], extra_txns=70000, block_size=self.excessive_block_size - 1000)
    yield accepted()

    # Check that the node forwards it via compact block
    wait_until(received_block, timeout=30)

    # Was it our block?
    cmpctblk_header = peer.last_cmpctblock.header_and_shortids.header
    cmpctblk_header.calc_sha256()
    assert cmpctblk_header.sha256 == b2.sha256

    # In order to avoid having to resend a ton of transactions, we invalidate
    # b2, which will send all its transactions to the mempool.
    node.invalidateblock(node.getbestblockhash())

    # Let's send a compact block and see if the node accepts it.
    # Let's modify b2 and use it so that we can reuse the mempool.
    tx = b2.vtx[0]
    tx.vout.append(CTxOut(0, CScript([random.randint(0, 256), OP_RETURN])))
    tx.rehash()
    b2.vtx[0] = tx
    b2.hashMerkleRoot = b2.calc_merkle_root()
    b2.solve()

    # Now we create the compact block and send it
    comp_block = HeaderAndShortIDs()
    comp_block.initialize_from_block(b2)
    peer.send_and_ping(msg_cmpctblock(comp_block.to_p2p()))

    # Check that the compact block was received properly
    assert int(node.getbestblockhash(), 16) == b2.sha256