def send_block(self, node, txs, accept=False):
    """Build a v4 block on top of self.tip containing txs, send it to node
    via p2p, and assert that it was accepted (accept=True) or that the
    node's tip is unchanged (accept=False).

    On acceptance, advances the bookkeeping state (self.tip,
    self.lastblockhash, self.lastblocktime, self.lastblockheight).
    """
    tip_meta_hash = get_tip_snapshot_meta(self.nodes[0]).hash
    stake = get_unspent_coins(self.nodes[0], 1)[0]
    new_block = create_block(
        self.tip,
        create_coinbase(self.lastblockheight + 1, stake, tip_meta_hash),
        self.lastblocktime + 1)
    new_block.vtx[0] = sign_coinbase(self.nodes[0], new_block.vtx[0])
    new_block.nVersion = 4
    for transaction in txs:
        transaction.rehash()
        new_block.vtx.append(transaction)
    new_block.ensure_ltor()
    new_block.compute_merkle_trees()
    new_block.solve()
    node.p2p.send_and_ping(msg_block(new_block))
    if not accept:
        # The block must have been rejected: tip unchanged.
        assert_equal(node.getbestblockhash(), self.lastblockhash)
        return
    # The block must be the new best block; advance our local view.
    assert_equal(node.getbestblockhash(), new_block.hash)
    self.tip = new_block.sha256
    self.lastblockhash = new_block.hash
    self.lastblocktime += 1
    self.lastblockheight += 1
def solve_and_send_block(prevhash, height, time):
    """Mine a block at `height` on top of `prevhash` with timestamp
    `time`, relay it over p2p, and return it once the node has
    processed the message (confirmed by a ping round-trip).

    Uses `node` from the enclosing scope for RPC and p2p access.
    """
    meta_hash = get_tip_snapshot_meta(node).hash
    stake = get_unspent_coins(node, 1)[0]
    coinbase_tx = sign_coinbase(node, create_coinbase(height, stake, meta_hash))
    blk = create_block(prevhash, coinbase_tx, time)
    blk.solve()
    node.p2p.send_message(msg_block(blk))
    node.p2p.sync_with_ping()
    return blk
def build_block_on_tip(self, node, txs=None):
    """Create (but do not send) a solved v4 block extending node's tip.

    Args:
        node: the node whose chain state (tip, height, median time,
            snapshot meta, stakeable coin) seeds the new block.
        txs: optional list of transactions to include in the block.

    Returns:
        The solved CBlock.
    """
    # NOTE: the original signature used the mutable default `txs=[]`;
    # `None` avoids the shared-default pitfall and is backward compatible.
    txs = txs or []
    height = node.getblockcount()
    tip = node.getbestblockhash()
    mtp = node.getblockheader(tip)['mediantime']
    meta = get_tip_snapshot_meta(node)
    coin = get_unspent_coins(node, 1)[0]
    block = create_block(
        int(tip, 16),
        sign_coinbase(node, create_coinbase(height + 1, coin, meta.hash)),
        mtp + 1)
    block.nVersion = 4
    if txs:
        block.vtx.extend(txs)
        # Transactions (except the coinbase) must be in lexicographic order.
        block.ensure_ltor()
    block.compute_merkle_trees()
    block.solve()
    return block
def run_test(self):
    """Exercise acceptance rules for unrequested blocks.

    node0 is not whitelisted and must apply anti-DoS rules to unrequested
    blocks; node1 is whitelisted and only accepts blocks with sufficient
    work. Steps are numbered in the log messages below.
    """
    self.setup_stake_coins(*self.nodes)

    # Setup the p2p connections
    # test_node connects to node0 (not whitelisted)
    test_node = self.nodes[0].add_p2p_connection(P2PInterface())
    # min_work_node connects to node1 (whitelisted)
    min_work_node = self.nodes[1].add_p2p_connection(P2PInterface())

    # A UTXOManager tracks the stake/snapshot state of the fork chain we
    # will build by hand, seeded from node0's genesis coin.
    fork_snapshot_meta = get_tip_snapshot_meta(self.nodes[0])
    utxo_manager = UTXOManager(self.nodes[0], fork_snapshot_meta)
    genesis_coin = get_unspent_coins(self.nodes[0], 1)[0]
    genesis_txout = CTxOut(
        int(genesis_coin['amount'] * UNIT),
        CScript(hex_str_to_bytes(genesis_coin['scriptPubKey'])))
    genesis_utxo = [
        UTXO(
            0, TxType.COINBASE,
            COutPoint(int(genesis_coin['txid'], 16), genesis_coin['vout']),
            genesis_txout)
    ]
    utxo_manager.available_outputs = genesis_utxo

    self.log.info("1. Have nodes mine a block (leave IBD)")
    # A plain loop; a list comprehension for side effects only is an
    # anti-pattern (the built list was discarded).
    for n in self.nodes:
        n.generate(1)
    tips = [int("0x" + n.getbestblockhash(), 0) for n in self.nodes]
    tip_snapshot_meta = get_tip_snapshot_meta(self.nodes[0])

    self.log.info(
        "2. Send one block that builds on each tip. This should be accepted by node0."
    )
    blocks_h2 = []  # the height 2 blocks on each node's chain
    block_time = int(time.time()) + 1
    coin = get_unspent_coins(self.nodes[0], 1)[0]
    for i in range(2):
        coinbase = sign_coinbase(
            self.nodes[0], create_coinbase(2, coin, tip_snapshot_meta.hash))
        blocks_h2.append(create_block(tips[i], coinbase, block_time))
        blocks_h2[i].solve()
        block_time += 1
    test_node.send_message(msg_block(blocks_h2[0]))
    min_work_node.send_message(msg_block(blocks_h2[1]))

    for x in [test_node, min_work_node]:
        x.sync_with_ping()
    assert_equal(self.nodes[0].getblockcount(), 2)
    assert_equal(self.nodes[1].getblockcount(), 1)
    self.log.info(
        "First height 2 block accepted by node0; correctly rejected by node1"
    )

    self.log.info("3. Send another block that builds on genesis.")
    coinbase = utxo_manager.get_coinbase(1, n_pieces=300)
    block_h1f = create_block(int("0x" + self.nodes[0].getblockhash(0), 0),
                             coinbase, block_time)
    block_time += 1
    block_h1f.solve()
    test_node.send_message(msg_block(block_h1f))
    utxo_manager.process(coinbase, 1)

    test_node.sync_with_ping()
    tip_entry_found = False
    for x in self.nodes[0].getchaintips():
        if x['hash'] == block_h1f.hash:
            assert_equal(x['status'], "headers-only")
            tip_entry_found = True
    assert tip_entry_found
    assert_raises_rpc_error(-1, "Block not found on disk",
                            self.nodes[0].getblock, block_h1f.hash)

    self.log.info("4. Send another two block that build on the fork.")
    coinbase = utxo_manager.get_coinbase(2)
    block_h2f = create_block(block_h1f.sha256, coinbase, block_time)
    block_time += 1
    block_h2f.solve()
    test_node.send_message(msg_block(block_h2f))
    utxo_manager.process(coinbase, 2)

    test_node.sync_with_ping()
    # Since the earlier block was not processed by node, the new block
    # can't be fully validated.
    tip_entry_found = False
    for x in self.nodes[0].getchaintips():
        if x['hash'] == block_h2f.hash:
            assert_equal(x['status'], "headers-only")
            tip_entry_found = True
    assert tip_entry_found

    # But this block should be accepted by node since it has equal work.
    self.nodes[0].getblock(block_h2f.hash)
    self.log.info("Second height 2 block accepted, but not reorg'ed to")

    self.log.info(
        "4b. Now send another block that builds on the forking chain.")
    coinbase = utxo_manager.get_coinbase(3)
    block_h3 = create_block(block_h2f.sha256, coinbase, block_h2f.nTime + 1)
    block_h3.solve()
    test_node.send_message(msg_block(block_h3))
    utxo_manager.process(coinbase, 3)

    test_node.sync_with_ping()
    # Since the earlier block was not processed by node, the new block
    # can't be fully validated.
    tip_entry_found = False
    for x in self.nodes[0].getchaintips():
        if x['hash'] == block_h3.hash:
            assert_equal(x['status'], "headers-only")
            tip_entry_found = True
    assert tip_entry_found

    # But this block should be accepted by node since it has more work.
    # (The original called getblock twice here; once is sufficient.)
    self.nodes[0].getblock(block_h3.hash)
    self.log.info("Unrequested more-work block accepted")

    self.log.info("4c. Now mine 288 more blocks and deliver")
    # all should be processed but
    # the last (height-too-high) on node (as long as it is not missing any headers)
    tip = block_h3
    all_blocks = []
    for height in range(4, 292):
        coinbase = utxo_manager.get_coinbase(height)
        next_block = create_block(tip.sha256, coinbase, tip.nTime + 1)
        next_block.solve()
        all_blocks.append(next_block)
        tip = next_block
        utxo_manager.process(coinbase, height)

    # Now send the block at height 5 and check that it wasn't accepted (missing header)
    test_node.send_message(msg_block(all_blocks[1]))
    test_node.sync_with_ping()
    assert_raises_rpc_error(-5, "Block not found", self.nodes[0].getblock,
                            all_blocks[1].hash)
    assert_raises_rpc_error(-5, "Block not found",
                            self.nodes[0].getblockheader, all_blocks[1].hash)

    # The block at height 5 should be accepted if we provide the missing header, though
    headers_message = msg_headers()
    headers_message.headers.append(CBlockHeader(all_blocks[0]))
    test_node.send_message(headers_message)
    test_node.send_message(msg_block(all_blocks[1]))
    test_node.sync_with_ping()
    self.nodes[0].getblock(all_blocks[1].hash)

    # Now send the blocks in all_blocks
    for i in range(288):
        test_node.send_message(msg_block(all_blocks[i]))
    test_node.sync_with_ping()

    # Blocks 1-287 should be accepted, block 288 should be ignored because it's too far ahead
    for x in all_blocks[:-1]:
        self.nodes[0].getblock(x.hash)
    assert_raises_rpc_error(-1, "Block not found on disk",
                            self.nodes[0].getblock, all_blocks[-1].hash)

    self.log.info(
        "5. Test handling of unrequested block on the node that didn't process"
    )
    # Should still not be processed (even though it has a child that has more
    # work).

    # The node should have requested the blocks at some point, so
    # disconnect/reconnect first
    self.nodes[0].disconnect_p2ps()
    self.nodes[1].disconnect_p2ps()
    test_node = self.nodes[0].add_p2p_connection(P2PInterface())

    test_node.send_message(msg_block(block_h1f))
    test_node.sync_with_ping()
    assert_equal(self.nodes[0].getblockcount(), 2)
    self.log.info(
        "Unrequested block that would complete more-work chain was ignored"
    )

    self.log.info("6. Try to get node to request the missing block.")
    # Poke the node with an inv for block at height 3 and see if that
    # triggers a getdata on block 2 (it should if block 2 is missing).
    with mininode_lock:
        # Clear state so we can check the getdata request
        test_node.last_message.pop("getdata", None)
        test_node.send_message(msg_inv([CInv(2, block_h3.sha256)]))

    test_node.sync_with_ping()
    with mininode_lock:
        getdata = test_node.last_message["getdata"]

    # Check that the getdata includes the right block
    assert_equal(getdata.inv[0].hash, block_h1f.sha256)
    self.log.info("Inv at tip triggered getdata for unprocessed block")

    self.log.info(
        "7. Send the missing block for the third time (now it is requested)"
    )
    test_node.send_message(msg_block(block_h1f))
    test_node.sync_with_ping()
    assert_equal(self.nodes[0].getblockcount(), 290)
    self.nodes[0].getblock(all_blocks[286].hash)
    assert_equal(self.nodes[0].getbestblockhash(), all_blocks[286].hash)
    assert_raises_rpc_error(-1, "Block not found on disk",
                            self.nodes[0].getblock, all_blocks[287].hash)
    self.log.info(
        "Successfully reorged to longer chain from non-whitelisted peer")

    self.log.info(
        "8. Create a chain which is invalid at a height longer than the")
    # current chain, but which has more blocks on top of that

    # Reset utxo managers to current state
    utxo_fork_manager = UTXOManager(self.nodes[0],
                                    get_tip_snapshot_meta(self.nodes[0]))
    utxo_fork_manager.available_outputs = utxo_manager.available_outputs
    utxo_manager = UTXOManager(self.nodes[0],
                               get_tip_snapshot_meta(self.nodes[0]))
    utxo_manager.available_outputs = utxo_fork_manager.available_outputs

    # Create one block on top of the valid chain
    coinbase = utxo_manager.get_coinbase(291)
    valid_block = create_block(all_blocks[286].sha256, coinbase,
                               all_blocks[286].nTime + 1)
    valid_block.solve()
    test_node.send_and_ping(msg_block(valid_block))
    assert_equal(self.nodes[0].getblockcount(), 291)

    # Create three blocks on a fork, but make the second one invalid
    coinbase = utxo_fork_manager.get_coinbase(291)
    block_291f = create_block(all_blocks[286].sha256, coinbase,
                              all_blocks[286].nTime + 1)
    block_291f.solve()
    utxo_fork_manager.process(coinbase, 291)
    coinbase = utxo_fork_manager.get_coinbase(292)
    block_292f = create_block(block_291f.sha256, coinbase,
                              block_291f.nTime + 1)
    # block_292f spends a coinbase below maturity!
    block_292f.vtx.append(
        create_tx_with_script(block_291f.vtx[0], 0, script_sig=b"42",
                              amount=1))
    block_292f.compute_merkle_trees()
    block_292f.solve()
    utxo_fork_manager.process(coinbase, 292)
    utxo_fork_manager.process(block_292f.vtx[1], 292)
    coinbase = utxo_fork_manager.get_coinbase(293)
    block_293f = create_block(block_292f.sha256, coinbase,
                              block_292f.nTime + 1)
    block_293f.solve()
    utxo_fork_manager.process(coinbase, 293)

    # Now send all the headers on the chain and enough blocks to trigger reorg
    headers_message = msg_headers()
    headers_message.headers.append(CBlockHeader(block_291f))
    headers_message.headers.append(CBlockHeader(block_292f))
    headers_message.headers.append(CBlockHeader(block_293f))
    test_node.send_message(headers_message)

    test_node.sync_with_ping()
    tip_entry_found = False
    for x in self.nodes[0].getchaintips():
        if x['hash'] == block_293f.hash:
            assert_equal(x['status'], "headers-only")
            tip_entry_found = True
    assert tip_entry_found
    assert_raises_rpc_error(-1, "Block not found on disk",
                            self.nodes[0].getblock, block_293f.hash)

    test_node.send_message(msg_block(block_291f))

    test_node.sync_with_ping()
    self.nodes[0].getblock(block_291f.hash)

    test_node.send_message(msg_block(block_292f))

    # At this point we've sent an obviously-bogus block, wait for full processing
    # without assuming whether we will be disconnected or not
    try:
        # Only wait a short while so the test doesn't take forever if we do get
        # disconnected
        test_node.sync_with_ping(timeout=1)
    except AssertionError:
        test_node.wait_for_disconnect()
        self.nodes[0].disconnect_p2ps()
        test_node = self.nodes[0].add_p2p_connection(P2PInterface())

    # We should have failed reorg and switched back to 290 (but have block 291)
    assert_equal(self.nodes[0].getblockcount(), 291)
    assert_equal(self.nodes[0].getbestblockhash(), valid_block.hash)
    assert_equal(self.nodes[0].getblock(block_292f.hash)["confirmations"],
                 -1)

    # Now send a new header on the invalid chain, indicating we're forked off, and expect to get disconnected
    coinbase = utxo_fork_manager.get_coinbase(294)
    block_294f = create_block(block_293f.sha256, coinbase,
                              block_293f.nTime + 1)
    block_294f.solve()
    headers_message = msg_headers()
    headers_message.headers.append(CBlockHeader(block_294f))
    test_node.send_message(headers_message)
    test_node.wait_for_disconnect()

    self.log.info("9. Connect node1 to node0 and ensure it is able to sync")
    connect_nodes(self.nodes[0], 1)
    sync_blocks([self.nodes[0], self.nodes[1]])
    self.log.info("Successfully synced nodes 1 and 0")
def test_nonnull_locators(self, test_node, inv_node):
    """Test block announcements (inv vs. headers) with non-null locators.

    test_node sends a `sendheaders` message partway through; inv_node never
    does and must keep receiving inv announcements. Parts 1-5 are described
    by the log messages below.
    """
    tip = int(self.nodes[0].getbestblockhash(), 16)

    # PART 1
    # 1. Mine a block; expect inv announcements each time
    self.log.info(
        "Part 1: headers don't start before sendheaders message...")
    for i in range(4):
        self.log.debug("Part 1.{}: starting...".format(i))
        old_tip = tip
        tip = self.mine_blocks(1)
        inv_node.check_last_inv_announcement(inv=[tip])
        test_node.check_last_inv_announcement(inv=[tip])
        # Try a few different responses; none should affect next announcement
        if i == 0:
            # first request the block
            test_node.send_get_data([tip])
            test_node.wait_for_block(tip)
        elif i == 1:
            # next try requesting header and block
            test_node.send_get_headers(locator=[old_tip], hashstop=tip)
            test_node.send_get_data([tip])
            test_node.wait_for_block(tip)
            test_node.clear_block_announcements(
            )  # since we requested headers...
        elif i == 2:
            # this time announce own block via headers
            inv_node.clear_block_announcements()
            height = self.nodes[0].getblockcount()
            last_time = self.nodes[0].getblock(
                self.nodes[0].getbestblockhash())['time']
            block_time = last_time + 1
            snapshot_hash = get_tip_snapshot_meta(self.nodes[0]).hash
            new_block = create_block(
                tip, self.create_coinbase(height + 1, snapshot_hash),
                block_time)
            new_block.solve()
            test_node.send_header_for_blocks([new_block])
            test_node.wait_for_getdata([new_block.sha256])
            test_node.send_message(msg_block(new_block))
            test_node.sync_with_ping()  # make sure this block is processed
            wait_until(lambda: inv_node.block_announced,
                       timeout=60,
                       lock=mininode_lock)
            inv_node.clear_block_announcements()
            test_node.clear_block_announcements()

    self.log.info("Part 1: success!")
    self.log.info(
        "Part 2: announce blocks with headers after sendheaders message..."
    )
    # PART 2
    # 2. Send a sendheaders message and test that headers announcements
    # commence and keep working.
    test_node.send_message(msg_sendheaders())
    prev_tip = int(self.nodes[0].getbestblockhash(), 16)
    test_node.send_get_headers(locator=[prev_tip], hashstop=0)
    test_node.sync_with_ping()

    # Now that we've synced headers, headers announcements should work
    tip = self.mine_blocks(1)
    inv_node.check_last_inv_announcement(inv=[tip])
    test_node.check_last_headers_announcement(headers=[tip])

    height = self.nodes[0].getblockcount() + 1
    block_time += 10  # Advance far enough ahead
    snapshot_meta = get_tip_snapshot_meta(self.nodes[0])
    for i in range(10):
        self.log.debug("Part 2.{}: starting...".format(i))
        # Mine i blocks, and alternate announcing either via
        # inv (of tip) or via headers. After each, new blocks
        # mined by the node should successfully be announced
        # with block header, even though the blocks are never requested
        for j in range(2):
            self.log.debug("Part 2.{}.{}: starting...".format(i, j))
            blocks = []
            coins = get_unspent_coins(self.nodes[0], i + 1)
            for b in range(i + 1):
                coinbase = self.create_coinbase(height, snapshot_meta.hash,
                                                coins[b])
                blocks.append(create_block(tip, coinbase, block_time))
                blocks[-1].solve()
                tip = blocks[-1].sha256
                snapshot_meta = update_snapshot_with_tx(
                    self.nodes[0], snapshot_meta, height, coinbase)
                block_time += 1
                height += 1
            if j == 0:
                # Announce via inv
                test_node.send_block_inv(tip)
                test_node.wait_for_getheaders()
                # Should have received a getheaders now
                test_node.send_header_for_blocks(blocks)
                # Test that duplicate inv's won't result in duplicate
                # getdata requests, or duplicate headers announcements
                for x in blocks:
                    inv_node.send_block_inv(x.sha256)
                test_node.wait_for_getdata([x.sha256 for x in blocks])
                inv_node.sync_with_ping()
            else:
                # Announce via headers
                test_node.send_header_for_blocks(blocks)
                test_node.wait_for_getdata([x.sha256 for x in blocks])
                # Test that duplicate headers won't result in duplicate
                # getdata requests (the check is further down)
                inv_node.send_header_for_blocks(blocks)
                inv_node.sync_with_ping()
            for x in blocks:
                test_node.send_message(msg_block(x))
            test_node.sync_with_ping()
            inv_node.sync_with_ping()
            # This block should not be announced to the inv node (since it also
            # broadcast it)
            assert "inv" not in inv_node.last_message
            assert "headers" not in inv_node.last_message
        tip = self.mine_blocks(1)
        snapshot_meta = get_tip_snapshot_meta(self.nodes[0])
        inv_node.check_last_inv_announcement(inv=[tip])
        test_node.check_last_headers_announcement(headers=[tip])
        height += 1
        block_time += 1

    self.log.info("Part 2: success!")

    self.log.info(
        "Part 3: headers announcements can stop after large reorg, and resume after headers/inv from peer..."
    )

    # PART 3.  Headers announcements can stop after large reorg, and resume after
    # getheaders or inv from peer.
    for j in range(2):
        self.log.debug("Part 3.{}: starting...".format(j))
        # First try mining a reorg that can propagate with header announcement
        new_block_hashes = self.mine_reorg(length=7)
        tip = new_block_hashes[-1]
        inv_node.check_last_inv_announcement(inv=[tip])
        test_node.check_last_headers_announcement(headers=new_block_hashes)

        block_time += 8

        # Mine a too-large reorg, which should be announced with a single inv
        new_block_hashes = self.mine_reorg(length=8)
        tip = new_block_hashes[-1]
        inv_node.check_last_inv_announcement(inv=[tip])
        test_node.check_last_inv_announcement(inv=[tip])

        block_time += 9

        # Block hashes must be zero-padded to 64 hex digits for the RPC;
        # "%02x" dropped the leading zeros of the PoW hash.
        fork_point = self.nodes[0].getblock(
            "%064x" % new_block_hashes[0])["previousblockhash"]
        fork_point = int(fork_point, 16)

        # Use getblocks/getdata
        test_node.send_getblocks(locator=[fork_point])
        test_node.check_last_inv_announcement(inv=new_block_hashes)
        test_node.send_get_data(new_block_hashes)
        test_node.wait_for_block(new_block_hashes[-1])

        for i in range(3):
            self.log.debug("Part 3.{}.{}: starting...".format(j, i))
            # Mine another block, still should get only an inv
            tip = self.mine_blocks(1)
            inv_node.check_last_inv_announcement(inv=[tip])
            test_node.check_last_inv_announcement(inv=[tip])
            if i == 0:
                # Just get the data -- shouldn't cause headers announcements to resume
                test_node.send_get_data([tip])
                test_node.wait_for_block(tip)
            elif i == 1:
                # Send a getheaders message that shouldn't trigger headers announcements
                # to resume (best header sent will be too old)
                test_node.send_get_headers(locator=[fork_point],
                                           hashstop=new_block_hashes[1])
                test_node.send_get_data([tip])
                test_node.wait_for_block(tip)
            elif i == 2:
                # This time, try sending either a getheaders to trigger resumption
                # of headers announcements, or mine a new block and inv it, also
                # triggering resumption of headers announcements.
                test_node.send_get_data([tip])
                test_node.wait_for_block(tip)
                if j == 0:
                    test_node.send_get_headers(locator=[tip], hashstop=0)
                    test_node.sync_with_ping()
                else:
                    test_node.send_block_inv(tip)
                    test_node.sync_with_ping()
            # New blocks should now be announced with header
            tip = self.mine_blocks(1)
            inv_node.check_last_inv_announcement(inv=[tip])
            test_node.check_last_headers_announcement(headers=[tip])

    self.log.info("Part 3: success!")

    self.log.info("Part 4: Testing direct fetch behavior...")
    tip = self.mine_blocks(1)
    height = self.nodes[0].getblockcount() + 1
    last_time = self.nodes[0].getblock(
        self.nodes[0].getbestblockhash())['time']
    block_time = last_time + 1

    # Create 2 blocks.  Send the blocks, then send the headers.
    blocks = []
    snapshot_meta = get_tip_snapshot_meta(self.nodes[0])
    coins = get_unspent_coins(self.nodes[0], 2)
    for b in range(2):
        coinbase = self.create_coinbase(height, snapshot_meta.hash,
                                        coins[b])
        blocks.append(create_block(tip, coinbase, block_time))
        blocks[-1].solve()
        tip = blocks[-1].sha256
        snapshot_meta = update_snapshot_with_tx(self.nodes[0],
                                                snapshot_meta, height,
                                                coinbase)
        block_time += 1
        height += 1
        inv_node.send_message(msg_block(blocks[-1]))

    inv_node.sync_with_ping()  # Make sure blocks are processed
    test_node.last_message.pop("getdata", None)
    test_node.send_header_for_blocks(blocks)
    test_node.sync_with_ping()
    # should not have received any getdata messages
    with mininode_lock:
        assert "getdata" not in test_node.last_message

    # This time, direct fetch should work
    blocks = []
    snapshots = [get_tip_snapshot_meta(self.nodes[0])]
    coins = get_unspent_coins(self.nodes[0], 3)
    for b in range(3):
        coinbase = self.create_coinbase(height, snapshots[-1].hash,
                                        coins[b])
        blocks.append(create_block(tip, coinbase, block_time))
        blocks[-1].solve()
        tip = blocks[-1].sha256
        snapshots.append(
            update_snapshot_with_tx(self.nodes[0], snapshots[-1], height,
                                    coinbase))
        block_time += 1
        height += 1

    test_node.send_header_for_blocks(blocks)
    test_node.sync_with_ping()
    test_node.wait_for_getdata([x.sha256 for x in blocks],
                               timeout=DIRECT_FETCH_RESPONSE_TIME)

    for x in blocks:
        test_node.send_message(msg_block(x))
    test_node.sync_with_ping()

    # Now announce a header that forks the last two blocks
    tip = blocks[0].sha256
    snapshot_meta = snapshots[1]
    height -= 2
    blocks = []

    # Create extra blocks for later
    coins = get_unspent_coins(self.nodes[0], 20)
    for b in range(20):
        coinbase = self.create_coinbase(height, snapshot_meta.hash,
                                        coins[b])
        blocks.append(create_block(tip, coinbase, block_time))
        blocks[-1].solve()
        tip = blocks[-1].sha256
        snapshot_meta = update_snapshot_with_tx(self.nodes[0],
                                                snapshot_meta, height,
                                                coinbase)
        block_time += 1
        height += 1

    # Announcing one block on fork should not trigger direct fetch
    # (less work than tip)
    test_node.last_message.pop("getdata", None)
    test_node.send_header_for_blocks(blocks[0:1])
    test_node.sync_with_ping()
    with mininode_lock:
        assert "getdata" not in test_node.last_message

    # Announcing one more block on fork should trigger direct fetch for
    # both blocks (same work as tip)
    test_node.send_header_for_blocks(blocks[1:2])
    test_node.sync_with_ping()
    test_node.wait_for_getdata([x.sha256 for x in blocks[0:2]],
                               timeout=DIRECT_FETCH_RESPONSE_TIME)

    # Announcing 16 more headers should trigger direct fetch for 14 more
    # blocks
    test_node.send_header_for_blocks(blocks[2:18])
    test_node.sync_with_ping()
    test_node.wait_for_getdata([x.sha256 for x in blocks[2:16]],
                               timeout=DIRECT_FETCH_RESPONSE_TIME)

    # Announcing 1 more header should not trigger any response
    test_node.last_message.pop("getdata", None)
    test_node.send_header_for_blocks(blocks[18:19])
    test_node.sync_with_ping()
    with mininode_lock:
        assert "getdata" not in test_node.last_message

    self.log.info("Part 4: success!")

    # Now deliver all those blocks we announced.
    for x in blocks:
        test_node.send_message(msg_block(x))
    test_node.sync_with_ping()

    self.log.info("Part 5: Testing handling of unconnecting headers")
    # First we test that receipt of an unconnecting header doesn't prevent
    # chain sync.
    snapshot_meta = get_tip_snapshot_meta(self.nodes[0])
    for i in range(10):
        self.log.debug("Part 5.{}: starting...".format(i))
        test_node.last_message.pop("getdata", None)
        blocks = []
        # Create two more blocks.
        coins = get_unspent_coins(self.nodes[0], 2)
        for j in range(2):
            coinbase = self.create_coinbase(height, snapshot_meta.hash,
                                            coins[j])
            blocks.append(create_block(tip, coinbase, block_time))
            blocks[-1].solve()
            tip = blocks[-1].sha256
            snapshot_meta = update_snapshot_with_tx(
                self.nodes[0], snapshot_meta, height, coinbase)
            block_time += 1
            height += 1
        # Send the header of the second block -> this won't connect.
        with mininode_lock:
            test_node.last_message.pop("getheaders", None)
        test_node.send_header_for_blocks([blocks[1]])
        test_node.wait_for_getheaders()
        test_node.send_header_for_blocks(blocks)
        test_node.wait_for_getdata([x.sha256 for x in blocks])
        for x in blocks:
            test_node.send_message(msg_block(x))
        test_node.sync_with_ping()
        assert_equal(int(self.nodes[0].getbestblockhash(), 16),
                     blocks[1].sha256)

    blocks = []
    # Now we test that if we repeatedly don't send connecting headers, we
    # don't go into an infinite loop trying to get them to connect.
    MAX_UNCONNECTING_HEADERS = 10
    snapshot_meta = get_tip_snapshot_meta(self.nodes[0])
    coins = get_unspent_coins(self.nodes[0], MAX_UNCONNECTING_HEADERS + 1)
    for j in range(MAX_UNCONNECTING_HEADERS + 1):
        coinbase = self.create_coinbase(height, snapshot_meta.hash,
                                        coins[j])
        blocks.append(create_block(tip, coinbase, block_time))
        blocks[-1].solve()
        tip = blocks[-1].sha256
        snapshot_meta = update_snapshot_with_tx(self.nodes[0],
                                                snapshot_meta, height,
                                                coinbase)
        block_time += 1
        height += 1

    for i in range(1, MAX_UNCONNECTING_HEADERS):
        # Send a header that doesn't connect, check that we get a getheaders.
        with mininode_lock:
            test_node.last_message.pop("getheaders", None)
        test_node.send_header_for_blocks([blocks[i]])
        test_node.wait_for_getheaders()

    # Next header will connect, should re-set our count:
    test_node.send_header_for_blocks([blocks[0]])

    # Remove the first two entries (blocks[1] would connect):
    blocks = blocks[2:]

    # Now try to see how many unconnecting headers we can send
    # before we get disconnected.  Should be 5*MAX_UNCONNECTING_HEADERS
    for i in range(5 * MAX_UNCONNECTING_HEADERS - 1):
        # Send a header that doesn't connect, check that we get a getheaders.
        with mininode_lock:
            test_node.last_message.pop("getheaders", None)
        test_node.send_header_for_blocks([blocks[i % len(blocks)]])
        test_node.wait_for_getheaders()

    # Eventually this stops working.
    test_node.send_header_for_blocks([blocks[-1]])

    # Should get disconnected
    test_node.wait_for_disconnect()

    self.log.info("Part 5: success!")

    # Finally, check that the inv node never received a getdata request,
    # throughout the test
    assert "getdata" not in inv_node.last_message
def create_coinbase(self, height, snapshot_hash, coin=None):
    """Return a signed coinbase transaction for `height`.

    Stakes `coin` when given; otherwise fetches a fresh unspent coin
    from node0. Delegates to the module-level create_coinbase helper.
    """
    stake = coin if coin is not None else get_unspent_coins(self.nodes[0], 1)[0]
    coinbase_tx = create_coinbase(height, stake, snapshot_hash)
    return sign_coinbase(self.nodes[0], coinbase_tx)
def run_test(self):
    """Verify DERSIG (BIP66) enforcement: a transaction with a
    non-canonical DER signature is rejected both from the mempool and
    inside a block, while the DER-compliant version is accepted."""
    self.setup_stake_coins(self.nodes[0])
    self.nodes[0].add_p2p_connection(P2PInterface())

    self.log.info("Mining one block")
    # Remember the coinbase txid of the mined block; it funds spendtx below.
    self.coinbase_txids = [
        self.nodes[0].getblock(b)['tx'][0]
        for b in self.nodes[0].generate(1)
    ]
    self.nodeaddress = self.nodes[0].getnewaddress()

    self.log.info(
        "Test that transactions with non-DER signatures cannot appear in a block"
    )
    spendtx = create_transaction(self.nodes[0], self.coinbase_txids[0],
                                 self.nodeaddress, amount=1.0)
    # Mangle the signature encoding so it is no longer canonical DER.
    unDERify(spendtx)
    spendtx.rehash()

    # First we show that this tx is valid except for DERSIG by getting it
    # rejected from the mempool for exactly that reason.
    assert_equal([{
        'txid': spendtx.hash,
        'allowed': False,
        'reject-reason':
        '64: non-mandatory-script-verify-flag (Non-canonical DER signature)'
    }], self.nodes[0].testmempoolaccept(
        rawtxs=[bytes_to_hex_str(spendtx.serialize())], allowhighfees=True))

    # Now we verify that a block with this transaction is also invalid.
    tip = self.nodes[0].getbestblockhash()
    block_time = self.nodes[0].getblockheader(tip)['mediantime'] + 1
    snapshot_hash = get_tip_snapshot_meta(self.nodes[0]).hash
    coin = get_unspent_coins(self.nodes[0], 1)[0]
    coinbase = sign_coinbase(self.nodes[0],
                             create_coinbase(1, coin, snapshot_hash))
    block = create_block(int(tip, 16), coinbase, block_time)
    block.nVersion = 3
    block.vtx.append(spendtx)
    block.compute_merkle_trees()
    block.solve()

    self.nodes[0].p2p.send_and_ping(msg_block(block))
    # The invalid block must not have advanced the tip.
    assert_equal(int(self.nodes[0].getbestblockhash(), 16), int(tip, 16))

    wait_until(lambda: "reject" in self.nodes[0].p2p.last_message.keys(),
               lock=mininode_lock)
    with mininode_lock:
        # We can receive different reject messages depending on whether
        # unit-e is running with multiple script check threads. If script
        # check threads are not in use, then transaction script validation
        # happens sequentially, and unit-e produces more specific reject
        # reasons.
        assert self.nodes[0].p2p.last_message["reject"].code in [
            REJECT_INVALID, REJECT_NONSTANDARD
        ]
        assert_equal(self.nodes[0].p2p.last_message["reject"].data,
                     block.sha256)
        if self.nodes[0].p2p.last_message["reject"].code == REJECT_INVALID:
            # Generic rejection when a block is invalid
            reject_reason = self.nodes[0].p2p.last_message["reject"].reason
            assert_equal(reject_reason, b'block-validation-failed')
        else:
            assert b'Non-canonical DER signature' in self.nodes[
                0].p2p.last_message["reject"].reason

    self.log.info(
        "Test that a version 3 block with a DERSIG-compliant transaction is accepted"
    )
    # Replace the offending tx with a properly-signed one and re-solve.
    block.vtx[1] = create_transaction(self.nodes[0],
                                      self.coinbase_txids[0],
                                      self.nodeaddress, amount=1.0)
    block.compute_merkle_trees()
    block.solve()

    self.nodes[0].p2p.send_and_ping(msg_block(block))
    assert_equal(int(self.nodes[0].getbestblockhash(), 16), block.sha256)
def run_test(self):
    """Test p2p handling of invalid and orphan transactions.

    Builds a mature anyone-can-spend coinbase, then checks: (1) relaying
    an obviously invalid tx gets the peer disconnected; (2) orphan txs are
    cached and resolved when the withheld parent arrives, with the
    negative-fee orphan relayer disconnected; (3) with BIP61 disabled the
    node disconnects without sending a reject message.
    """
    node = self.nodes[0]  # convenience reference to the node
    self.setup_stake_coins(self.nodes[0])

    self.bootstrap_p2p()  # Add one p2p connection to the node

    best_block = self.nodes[0].getbestblockhash()
    tip = int(best_block, 16)
    best_block_time = self.nodes[0].getblock(best_block)['time']
    block_time = best_block_time + 1

    self.log.info("Create a new block with an anyone-can-spend coinbase.")
    height = 1
    snapshot_hash = get_tip_snapshot_meta(self.nodes[0]).hash
    coin = get_unspent_coins(self.nodes[0], 1)[0]
    coinbase = sign_coinbase(self.nodes[0],
                             create_coinbase(height, coin, snapshot_hash))
    block = create_block(tip, coinbase, block_time)
    block_time += 1
    block.solve()
    # Save the coinbase for later
    block1 = block
    tip = block.sha256
    node.p2p.send_blocks_and_test([block], node, success=True)

    self.log.info("Mature the block.")
    # Chain 100 blocks on top, each staking the previous coinbase's stake
    # output and updating the running snapshot hash.
    blocks = []
    snapshot_meta = get_tip_snapshot_meta(self.nodes[0])
    for i in range(100):
        prev_coinbase = coinbase
        height += 1
        stake = {
            'txid': prev_coinbase.hash,
            'vout': 1,
            'amount': prev_coinbase.vout[1].nValue / UNIT
        }
        coinbase = sign_coinbase(
            self.nodes[0], create_coinbase(height, stake,
                                           snapshot_meta.hash))
        block = create_block(tip, coinbase, block_time)
        block.solve()
        tip = block.sha256
        block_time += 1
        blocks.append(block)
        input_utxo = UTXO(height - 1, TxType.COINBASE,
                          coinbase.vin[1].prevout, prev_coinbase.vout[1])
        output_reward = UTXO(height, TxType.COINBASE,
                             COutPoint(coinbase.sha256, 0),
                             coinbase.vout[0])
        output_stake = UTXO(height, TxType.COINBASE,
                            COutPoint(coinbase.sha256, 1),
                            coinbase.vout[1])
        snapshot_meta = calc_snapshot_hash(self.nodes[0], snapshot_meta,
                                           height, [input_utxo],
                                           [output_reward, output_stake],
                                           coinbase)
    node.p2p.send_blocks_and_test(blocks, node, success=True)

    # b'\x64' is OP_NOTIF
    # Transaction will be rejected with code 16 (REJECT_INVALID)
    # and we get disconnected immediately
    self.log.info('Test a transaction that is rejected')
    tx1 = create_tx_with_script(coinbase, 1,
                                script_sig=b'\x64' * 35,
                                amount=50 * UNIT - 12000)
    node.p2p.send_txs_and_test([tx1], node, success=False,
                               expect_disconnect=True)

    # Make two p2p connections to provide the node with orphans
    # * p2ps[0] will send valid orphan txs (one with low fee)
    # * p2ps[1] will send an invalid orphan tx (and is later disconnected for that)
    self.reconnect_p2p(num_connections=2)

    self.log.info('Test orphan transaction handling ... ')
    # Create a root transaction that we withhold until all dependent
    # transactions are sent out and in the orphan cache
    SCRIPT_PUB_KEY_OP_TRUE = b'\x51\x75' * 15 + b'\x51'
    tx_withhold = CTransaction()
    tx_withhold.vin.append(
        CTxIn(outpoint=COutPoint(block1.vtx[0].sha256, 0)))
    tx_withhold.vout.append(
        CTxOut(nValue=PROPOSER_REWARD * UNIT - 12000,
               scriptPubKey=SCRIPT_PUB_KEY_OP_TRUE))
    tx_withhold.calc_sha256()

    # Our first orphan tx with some outputs to create further orphan txs
    tx_orphan_1 = CTransaction()
    tx_orphan_1.vin.append(
        CTxIn(outpoint=COutPoint(tx_withhold.sha256, 0)))
    tx_orphan_1.vout = [
        CTxOut(nValue=1 * UNIT, scriptPubKey=SCRIPT_PUB_KEY_OP_TRUE)
    ] * 3
    tx_orphan_1.calc_sha256()

    # A valid transaction with low fee
    tx_orphan_2_no_fee = CTransaction()
    tx_orphan_2_no_fee.vin.append(
        CTxIn(outpoint=COutPoint(tx_orphan_1.sha256, 0)))
    tx_orphan_2_no_fee.vout.append(
        CTxOut(nValue=1 * UNIT, scriptPubKey=SCRIPT_PUB_KEY_OP_TRUE))

    # A valid transaction with sufficient fee
    tx_orphan_2_valid = CTransaction()
    tx_orphan_2_valid.vin.append(
        CTxIn(outpoint=COutPoint(tx_orphan_1.sha256, 1)))
    tx_orphan_2_valid.vout.append(
        CTxOut(nValue=1 * UNIT - 12000,
               scriptPubKey=SCRIPT_PUB_KEY_OP_TRUE))
    tx_orphan_2_valid.calc_sha256()

    # An invalid transaction with negative fee
    tx_orphan_2_invalid = CTransaction()
    tx_orphan_2_invalid.vin.append(
        CTxIn(outpoint=COutPoint(tx_orphan_1.sha256, 2)))
    tx_orphan_2_invalid.vout.append(
        CTxOut(nValue=Decimal('1.1') * UNIT,
               scriptPubKey=SCRIPT_PUB_KEY_OP_TRUE))

    self.log.info('Send the orphans ... ')
    # Send valid orphan txs from p2ps[0]
    node.p2p.send_txs_and_test(
        [tx_orphan_1, tx_orphan_2_no_fee, tx_orphan_2_valid],
        node,
        success=False)
    # Send invalid tx from p2ps[1]
    node.p2ps[1].send_txs_and_test([tx_orphan_2_invalid], node,
                                   success=False)

    assert_equal(0, node.getmempoolinfo()['size'])  # Mempool should be empty
    assert_equal(2, len(node.getpeerinfo()))  # p2ps[1] is still connected

    self.log.info('Send the withhold tx ... ')
    node.p2p.send_txs_and_test([tx_withhold], node, success=True)

    # Transactions that should end up in the mempool
    expected_mempool = {
        t.hash
        for t in [
            tx_withhold,  # The transaction that is the root for all orphans
            tx_orphan_1,  # The orphan transaction that splits the coins
            tx_orphan_2_valid,  # The valid transaction (with sufficient fee)
        ]
    }
    # Transactions that do not end up in the mempool
    # tx_orphan_2_no_fee, because it has too low fee (p2ps[0] is not disconnected for relaying that tx)
    # tx_orphan_2_invalid, because it has negative fee (p2ps[1] is disconnected for relaying that tx)
    wait_until(lambda: 1 == len(node.getpeerinfo()),
               timeout=12)  # p2ps[1] is no longer connected
    assert_equal(expected_mempool, set(node.getrawmempool()))

    # restart node with sending BIP61 messages disabled, check that it disconnects without sending the reject message
    self.log.info(
        'Test a transaction that is rejected, with BIP61 disabled')
    self.restart_node(0, ['-enablebip61=0', '-persistmempool=0'])
    self.reconnect_p2p(num_connections=1)
    with node.assert_debug_log(expected_msgs=[
            "{} from peer=0 was not accepted: mandatory-script-verify-flag-failed (Invalid OP_IF construction) (code 16)"
            .format(tx1.hash),
            "disconnecting peer=0",
    ]):
        node.p2p.send_txs_and_test([tx1], node, success=False,
                                   expect_disconnect=True)
    # send_txs_and_test will have waited for disconnect, so we can safely
    # check that no reject has been received
    assert_equal(node.p2p.reject_code_received, None)
def run_test(self):
    """Verify consensus enforcement of CLTV (OP_CHECKLOCKTIMEVERIFY).

    First shows that a CLTV-violating transaction is rejected both from the
    mempool and from a block, then that the same transaction, once made
    CLTV-valid, is accepted inside a block.
    """
    self.setup_stake_coins(self.nodes[0])
    self.nodes[0].add_p2p_connection(P2PInterface())

    self.log.info("Mining one block")
    # Remember the coinbase txid of the mined block so we can spend it below.
    self.coinbase_txids = [
        self.nodes[0].getblock(b)['tx'][0]
        for b in self.nodes[0].generate(1)
    ]
    self.nodeaddress = self.nodes[0].getnewaddress()

    self.log.info(
        "Test that invalid-according-to-cltv transactions cannot appear in a block"
    )
    spendtx = create_transaction(self.nodes[0], self.coinbase_txids[0],
                                 self.nodeaddress, amount=1.0)
    # Mutate the tx so that it violates CLTV (helper defined elsewhere;
    # judging by the expected reject reason it produces a negative locktime).
    cltv_invalidate(spendtx)
    spendtx.rehash()

    # First we show that this tx is valid except for CLTV by getting it
    # rejected from the mempool for exactly that reason.
    assert_equal([{
        'txid': spendtx.hash,
        'allowed': False,
        'reject-reason': '64: non-mandatory-script-verify-flag (Negative locktime)'
    }], self.nodes[0].testmempoolaccept(
        rawtxs=[bytes_to_hex_str(spendtx.serialize())], allowhighfees=True))

    # Build a version-4 block on the current tip that includes the bad tx.
    tip = self.nodes[0].getbestblockhash()
    block_time = self.nodes[0].getblockheader(tip)['mediantime'] + 1
    snapshot_hash = get_tip_snapshot_meta(self.nodes[0]).hash
    coin = get_unspent_coins(self.nodes[0], 1)[0]
    coinbase = sign_coinbase(self.nodes[0],
                             create_coinbase(1, coin, snapshot_hash))
    block = create_block(int(tip, 16), coinbase, block_time)
    block.nVersion = 4
    block.vtx.append(spendtx)
    block.compute_merkle_trees()
    block.solve()

    self.nodes[0].p2p.send_and_ping(msg_block(block))
    # The block must NOT have been accepted: tip is unchanged.
    assert_equal(self.nodes[0].getbestblockhash(), tip)

    # Wait for the p2p "reject" message for the invalid block.
    wait_until(lambda: "reject" in self.nodes[0].p2p.last_message.keys(),
               lock=mininode_lock)
    with mininode_lock:
        # Depending on where validation fails, the node may report the block
        # as invalid or as non-standard.
        assert self.nodes[0].p2p.last_message["reject"].code in [
            REJECT_INVALID, REJECT_NONSTANDARD
        ]
        assert_equal(self.nodes[0].p2p.last_message["reject"].data,
                     block.sha256)
        if self.nodes[0].p2p.last_message["reject"].code == REJECT_INVALID:
            # Generic rejection when a block is invalid
            assert_equal(self.nodes[0].p2p.last_message["reject"].reason,
                         b'block-validation-failed')
        else:
            assert b'Negative locktime' in self.nodes[0].p2p.last_message[
                "reject"].reason

    self.log.info(
        "Test that a version 4 block with a valid-according-to-CLTV transaction is accepted"
    )
    # Make the same transaction CLTV-valid and swap it into the block.
    spendtx = cltv_validate(self.nodes[0], spendtx, 0)
    spendtx.rehash()

    block.vtx.pop(1)
    block.vtx.append(spendtx)
    block.compute_merkle_trees()
    block.solve()

    self.nodes[0].p2p.send_and_ping(msg_block(block))
    # Now the block must be accepted as the new tip.
    assert_equal(int(self.nodes[0].getbestblockhash(), 16), block.sha256)
def run_test(self):
    """Check that old stale blocks are not served to peers (fingerprinting
    protection), while blocks on the active chain remain fetchable at any
    depth.

    Strategy: mine a chain under mocktime 60 days in the past, reorg away
    from its tip, and verify the stale tip is served only while it is
    recent; after extending the active chain at current time, requests for
    the stale block/header must fail, while arbitrarily old active-chain
    blocks are still served.
    """
    self.setup_stake_coins(self.nodes[0])

    node0 = self.nodes[0].add_p2p_connection(P2PInterface())

    network_thread_start()
    node0.wait_for_verack()

    # Set node time to 60 days ago
    self.nodes[0].setmocktime(int(time.time()) - 60 * 24 * 60 * 60)

    # Generating a chain of 10 blocks
    block_hashes = self.nodes[0].generate(nblocks=8)
    snapshot_meta = get_tip_snapshot_meta(self.nodes[0])
    # Called for its side effect only (lock=True): presumably this locks
    # these coins so the next generate() does not stake them — the return
    # value was previously assigned but immediately overwritten below, so
    # the dead binding is dropped here. TODO(review): confirm lock semantics.
    get_unspent_coins(self.nodes[0], 5, lock=True)
    block_hashes += self.nodes[0].generate(nblocks=2)
    unspent_outputs = get_unspent_coins(self.nodes[0], 5)

    # Create longer chain starting 2 blocks before current tip
    height = len(block_hashes) - 2
    block_hash = block_hashes[height - 1]
    block_time = self.nodes[0].getblockheader(block_hash)["mediantime"] + 1
    new_blocks = self.build_chain(5, block_hash, height, block_time,
                                  unspent_outputs, snapshot_meta)

    # Force reorg to a longer chain
    node0.send_message(msg_headers(new_blocks))
    node0.wait_for_getdata()
    for block in new_blocks:
        node0.send_and_ping(msg_block(block))

    # Check that reorg succeeded
    assert_equal(self.nodes[0].getblockcount(), 13)

    stale_hash = int(block_hashes[-1], 16)

    # Check that getdata request for stale block succeeds
    self.send_block_request(stale_hash, node0)
    test_function = lambda: self.last_block_equals(stale_hash, node0)
    wait_until(test_function, timeout=3)

    # Check that getheader request for stale block header succeeds
    self.send_header_request(stale_hash, node0)
    test_function = lambda: self.last_header_equals(stale_hash, node0)
    wait_until(test_function, timeout=3)

    # Longest chain is extended so stale is much older than chain tip
    self.nodes[0].setmocktime(0)
    tip = self.nodes[0].generate(nblocks=1)[0]
    assert_equal(self.nodes[0].getblockcount(), 14)

    # Send getdata & getheaders to refresh last received getheader message
    block_hash = int(tip, 16)
    self.send_block_request(block_hash, node0)
    self.send_header_request(block_hash, node0)
    node0.sync_with_ping()

    # Request for very old stale block should now fail
    self.send_block_request(stale_hash, node0)
    time.sleep(3)
    assert not self.last_block_equals(stale_hash, node0)

    # Request for very old stale block header should now fail
    self.send_header_request(stale_hash, node0)
    time.sleep(3)
    assert not self.last_header_equals(stale_hash, node0)

    # Verify we can fetch very old blocks and headers on the active chain
    block_hash = int(block_hashes[2], 16)
    self.send_block_request(block_hash, node0)
    self.send_header_request(block_hash, node0)
    node0.sync_with_ping()

    self.send_block_request(block_hash, node0)
    test_function = lambda: self.last_block_equals(block_hash, node0)
    wait_until(test_function, timeout=3)

    self.send_header_request(block_hash, node0)
    test_function = lambda: self.last_header_equals(block_hash, node0)
    wait_until(test_function, timeout=3)
def get_tests(self): self.setup_stake_coins(self.nodes[0]) if self.tip is None: self.tip = int("0x" + self.nodes[0].getbestblockhash(), 0) self.block_time = int(time.time()) + 1 ''' Create a new block with an anyone-can-spend coinbase ''' height = 1 snapshot_hash = get_tip_snapshot_meta(self.nodes[0]).hash coin = get_unspent_coins(self.nodes[0], 1)[0] coinbase = sign_coinbase(self.nodes[0], create_coinbase(height, coin, snapshot_hash)) block = create_block(self.tip, coinbase, self.block_time) self.block_time += 1 block.solve() # Save the coinbase for later self.block1 = block self.tip = block.sha256 height += 1 yield TestInstance([[block, True]]) ''' Now we need that block to mature so we can spend the coinbase. ''' test = TestInstance(sync_every_block=False) snapshot_meta = get_tip_snapshot_meta(self.nodes[0]) for i in range(100): prev_coinbase = coinbase stake = { 'txid': prev_coinbase.hash, 'vout': 1, 'amount': prev_coinbase.vout[1].nValue / UNIT } coinbase = create_coinbase(height, stake, snapshot_meta.hash) block = create_block(self.tip, coinbase, self.block_time) block.solve() self.tip = block.sha256 self.block_time += 1 test.blocks_and_transactions.append([block, True]) input_utxo = UTXO(height - 1, TxType.COINBASE, coinbase.vin[1].prevout, prev_coinbase.vout[1]) output_reward = UTXO(height, TxType.COINBASE, COutPoint(coinbase.sha256, 0), coinbase.vout[0]) output_stake = UTXO(height, TxType.COINBASE, COutPoint(coinbase.sha256, 1), coinbase.vout[1]) snapshot_meta = calc_snapshot_hash(self.nodes[0], snapshot_meta, height, [input_utxo], [output_reward, output_stake], coinbase) height += 1 yield test # b'\x64' is OP_NOTIF # Transaction will be rejected with code 16 (REJECT_INVALID) tx1 = create_transaction(coinbase, 1, b'\x64' * 35, 50 * UNIT - 12000) yield TestInstance( [[tx1, RejectResult(16, b'mandatory-script-verify-flag-failed')]], sync_every_tx=True)