def build_chain(self, nblocks, prev_hash, prev_height, prev_median_time,
                unspent_outputs, snapshot_meta):
    blocks = []
    for i in range(nblocks):
        coinbase = sign_coinbase(
            self.nodes[0],
            create_coinbase(prev_height + 1, unspent_outputs[i],
                            snapshot_meta.hash))
        block_time = prev_median_time + 1
        block = create_block(int(prev_hash, 16), coinbase, block_time)
        block.solve()
        blocks.append(block)

        prev_hash = block.hash
        snapshot_meta = update_snapshot_with_tx(
            self.nodes[0], snapshot_meta, prev_height + 1, coinbase)
        prev_height += 1
        prev_median_time = block_time
    return blocks
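
# Usage sketch (hypothetical, not part of the original test): assuming the
# usual test_framework helpers (get_tip_snapshot_meta, get_unspent_coins,
# msg_block) are in scope, build_chain can extend the node's tip and the
# resulting blocks can then be delivered over P2P. Note that prev_hash is
# passed as a hex string, matching the int(prev_hash, 16) conversion above:
#
#     meta = get_tip_snapshot_meta(self.nodes[0])
#     coins = get_unspent_coins(self.nodes[0], 3)
#     median_time = self.nodes[0].getblockheader(
#         self.nodes[0].getbestblockhash())['mediantime']
#     new_blocks = self.build_chain(3, self.nodes[0].getbestblockhash(),
#                                   self.nodes[0].getblockcount(),
#                                   median_time, coins, meta)
#     for b in new_blocks:
#         self.nodes[0].p2p.send_and_ping(msg_block(b))
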
def create_test_block(self, coin, txs, version=536870912):
    coinbase = sign_coinbase(
        self.nodes[0],
        create_coinbase(self.tipheight + 1, coin,
                        self.tip_snapshot_meta.hash))
    block = create_block(self.tip, coinbase, self.last_block_time + 600)
    block.nVersion = version
    block.vtx.extend(txs)
    block.ensure_ltor()
    block.compute_merkle_trees()
    block.solve()
    self.tip_snapshot_meta = update_snapshot_with_tx(
        self.nodes[0], self.tip_snapshot_meta, self.tipheight + 1, coinbase)
    return block
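
# Usage sketch (hypothetical): given a stakeable coin and a list of signed
# transactions, create_test_block builds a solved block on top of self.tip.
# A caller would typically submit it over P2P and check the node's tip:
#
#     block = self.create_test_block(coin, [spend_tx])
#     self.nodes[0].p2p.send_and_ping(msg_block(block))
#     assert_equal(self.nodes[0].getbestblockhash(), block.hash)
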
def generate_blocks(self, coins, number, version, test_blocks=None):
    # Avoid a mutable default argument: a shared `[]` default would
    # accumulate blocks across calls.
    if test_blocks is None:
        test_blocks = []
    for i in range(number):
        coin = coins.pop()
        coinbase = sign_coinbase(
            self.nodes[0],
            create_coinbase(self.height, coin, self.snapshot_meta.hash))
        block = create_block(self.tip, coinbase, self.last_block_time + 1)
        block.nVersion = version
        block.solve()
        test_blocks.append([block, True])
        self.last_block_time += 1
        self.snapshot_meta = update_snapshot_with_tx(
            self.nodes[0], self.snapshot_meta, self.height, coinbase)
        self.tip = block.sha256
        self.height += 1
    return test_blocks
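
# Usage sketch (hypothetical): each returned entry is a
# [block, expected_acceptance] pair, so the result can drive a simple
# send-and-verify loop:
#
#     for block, should_accept in self.generate_blocks(coins, 5, version=4):
#         self.nodes[0].p2p.send_and_ping(msg_block(block))
#         assert_equal(self.nodes[0].getbestblockhash() == block.hash,
#                      should_accept)
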
def test_sequence_lock_unconfirmed_inputs(self):
    # Store height so we can easily reset the chain at the end of the test
    cur_height = self.nodes[0].getblockcount()

    # Create a mempool tx.
    txid = self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), 2)
    tx1 = FromHex(CTransaction(), self.nodes[0].getrawtransaction(txid))
    tx1.rehash()

    # Anyone-can-spend mempool tx.
    # Sequence lock of 0 should pass.
    tx2 = CTransaction()
    tx2.nVersion = 2
    tx2.vin = [CTxIn(COutPoint(tx1.sha256, 0), nSequence=0)]
    tx2.vout = [
        CTxOut(int(tx1.vout[0].nValue - self.relayfee * UNIT),
               CScript([b'a']))
    ]
    tx2_raw = self.nodes[0].signrawtransactionwithwallet(ToHex(tx2))["hex"]
    tx2 = FromHex(tx2, tx2_raw)
    tx2.rehash()

    self.nodes[0].sendrawtransaction(tx2_raw)

    # Create a spend of the 0th output of orig_tx with a sequence lock
    # of 1, and test what happens when submitting.
    # orig_tx.vout[0] must be an anyone-can-spend output.
    def test_nonzero_locks(orig_tx, node, relayfee, use_height_lock):
        sequence_value = 1
        if not use_height_lock:
            sequence_value |= SEQUENCE_LOCKTIME_TYPE_FLAG

        tx = CTransaction()
        tx.nVersion = 2
        tx.vin = [
            CTxIn(COutPoint(orig_tx.sha256, 0), nSequence=sequence_value)
        ]
        tx.vout = [
            CTxOut(int(orig_tx.vout[0].nValue - relayfee * UNIT),
                   CScript([b'a' * 35]))
        ]
        tx.rehash()

        if orig_tx.hash in node.getrawmempool():
            # sendrawtransaction should fail if the tx is in the mempool
            assert_raises_rpc_error(-26, NOT_FINAL_ERROR,
                                    node.sendrawtransaction, ToHex(tx))
        else:
            # sendrawtransaction should succeed if the tx is not in the mempool
            node.sendrawtransaction(ToHex(tx))

        return tx

    test_nonzero_locks(tx2, self.nodes[0], self.relayfee,
                       use_height_lock=True)
    test_nonzero_locks(tx2, self.nodes[0], self.relayfee,
                       use_height_lock=False)

    # Now mine some blocks, but make sure tx2 doesn't get mined.
    # Use prioritisetransaction to lower the effective feerate to 0.
    self.nodes[0].prioritisetransaction(
        txid=tx2.hash, fee_delta=int(-self.relayfee * UNIT))
    cur_time = int(time.time())
    for i in range(10):
        self.nodes[0].setmocktime(cur_time + 600)
        self.nodes[0].generate(1)
        cur_time += 600

    assert tx2.hash in self.nodes[0].getrawmempool()

    tip_snapshot_meta = get_tip_snapshot_meta(self.nodes[0])

    test_nonzero_locks(tx2, self.nodes[0], self.relayfee,
                       use_height_lock=True)
    test_nonzero_locks(tx2, self.nodes[0], self.relayfee,
                       use_height_lock=False)

    # Mine tx2, and then try again
    self.nodes[0].prioritisetransaction(
        txid=tx2.hash, fee_delta=int(self.relayfee * UNIT))

    # Advance the time on the node so that we can test timelocks
    self.nodes[0].setmocktime(cur_time + 600)
    self.nodes[0].generate(1)
    assert tx2.hash not in self.nodes[0].getrawmempool()

    # Now that tx2 is not in the mempool, a sequence locked spend should
    # succeed
    tx3 = test_nonzero_locks(tx2, self.nodes[0], self.relayfee,
                             use_height_lock=False)
    assert tx3.hash in self.nodes[0].getrawmempool()

    self.nodes[0].generate(1)
    assert tx3.hash not in self.nodes[0].getrawmempool()

    # One more test, this time using height locks
    tx4 = test_nonzero_locks(tx3, self.nodes[0], self.relayfee,
                             use_height_lock=True)
    assert tx4.hash in self.nodes[0].getrawmempool()

    # Now try combining confirmed and unconfirmed inputs
    tx5 = test_nonzero_locks(tx4, self.nodes[0], self.relayfee,
                             use_height_lock=True)
    assert tx5.hash not in self.nodes[0].getrawmempool()

    utxos = self.nodes[0].listunspent()
    tx5.vin.append(
        CTxIn(COutPoint(int(utxos[0]["txid"], 16), utxos[0]["vout"]),
              nSequence=1))
    tx5.vout[0].nValue += int(utxos[0]["amount"] * UNIT)
    raw_tx5 = self.nodes[0].signrawtransactionwithwallet(ToHex(tx5))["hex"]

    assert_raises_rpc_error(-26, NOT_FINAL_ERROR,
                            self.nodes[0].sendrawtransaction, raw_tx5)

    # Test mempool-BIP68 consistency after reorg
    #
    # State of the transactions in the last blocks:
    # ... -> [ tx2 ] -> [ tx3 ]
    #         tip-1       tip
    # And currently tx4 is in the mempool.
    #
    # If we invalidate the tip, tx3 should get added to the mempool, causing
    # tx4 to be removed (fails sequence-lock).
    self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash())
    assert tx4.hash not in self.nodes[0].getrawmempool()
    assert tx3.hash in self.nodes[0].getrawmempool()

    # Now mine 2 empty blocks to reorg out the current tip (labeled tip-1 in
    # the diagram above).
    # This would cause tx2 to be added back to the mempool, which in turn
    # causes tx3 to be removed.
    tip = int(
        self.nodes[0].getblockhash(self.nodes[0].getblockcount() - 1), 16)
    height = self.nodes[0].getblockcount()

    # Get the available stake that is not already used. We must exclude tx1's
    # outputs from the list, since tx2 (which is back in the mempool) spends
    # from tx1. We also require a minimum number of confirmations
    # (minconf = 2) so that we do not stake outputs created in the blocks
    # that are about to be reorged out.
    avail_stake = [
        x for x in self.nodes[0].listunspent(2) if x['txid'] != tx1.hash
    ]
    for i in range(2):
        stake = avail_stake.pop()
        coinbase = sign_coinbase(
            self.nodes[0],
            create_coinbase(height, stake, tip_snapshot_meta.hash))
        block = create_block(tip, coinbase, cur_time)
        block.nVersion = 3
        block.solve()
        tip = block.sha256
        tip_snapshot_meta = update_snapshot_with_tx(
            self.nodes[0], tip_snapshot_meta, height, coinbase)
        height += 1
        self.nodes[0].p2p.send_and_ping(msg_block(block))
        cur_time += 1

    # sync as the reorg is happening
    self.nodes[0].p2p.sync_with_ping()

    mempool = self.nodes[0].getrawmempool()
    assert tx3.hash not in mempool
    assert tx2.hash in mempool

    # Reset the chain and get rid of the mocktimed blocks
    self.nodes[0].setmocktime(0)
    self.nodes[0].invalidateblock(self.nodes[0].getblockhash(cur_height + 1))
    self.nodes[0].generate(10)
def test_nonnull_locators(self, test_node, inv_node):
    tip = int(self.nodes[0].getbestblockhash(), 16)

    # PART 1
    # 1. Mine a block; expect inv announcements each time
    self.log.info(
        "Part 1: headers don't start before sendheaders message...")
    for i in range(4):
        self.log.debug("Part 1.{}: starting...".format(i))
        old_tip = tip
        tip = self.mine_blocks(1)
        inv_node.check_last_inv_announcement(inv=[tip])
        test_node.check_last_inv_announcement(inv=[tip])
        # Try a few different responses; none should affect next announcement
        if i == 0:
            # first request the block
            test_node.send_get_data([tip])
            test_node.wait_for_block(tip)
        elif i == 1:
            # next try requesting header and block
            test_node.send_get_headers(locator=[old_tip], hashstop=tip)
            test_node.send_get_data([tip])
            test_node.wait_for_block(tip)
            # since we requested headers...
            test_node.clear_block_announcements()
        elif i == 2:
            # this time announce own block via headers
            inv_node.clear_block_announcements()
            height = self.nodes[0].getblockcount()
            last_time = self.nodes[0].getblock(
                self.nodes[0].getbestblockhash())['time']
            block_time = last_time + 1
            snapshot_hash = get_tip_snapshot_meta(self.nodes[0]).hash
            new_block = create_block(
                tip, self.create_coinbase(height + 1, snapshot_hash),
                block_time)
            new_block.solve()
            test_node.send_header_for_blocks([new_block])
            test_node.wait_for_getdata([new_block.sha256])
            test_node.send_message(msg_block(new_block))
            # make sure this block is processed
            test_node.sync_with_ping()
            wait_until(lambda: inv_node.block_announced,
                       timeout=60, lock=mininode_lock)
            inv_node.clear_block_announcements()
            test_node.clear_block_announcements()

    self.log.info("Part 1: success!")

    # PART 2
    # 2. Send a sendheaders message and test that headers announcements
    # commence and keep working.
    self.log.info(
        "Part 2: announce blocks with headers after sendheaders message...")
    test_node.send_message(msg_sendheaders())
    prev_tip = int(self.nodes[0].getbestblockhash(), 16)
    test_node.send_get_headers(locator=[prev_tip], hashstop=0)
    test_node.sync_with_ping()

    # Now that we've synced headers, headers announcements should work
    tip = self.mine_blocks(1)
    inv_node.check_last_inv_announcement(inv=[tip])
    test_node.check_last_headers_announcement(headers=[tip])

    height = self.nodes[0].getblockcount() + 1
    block_time += 10  # Advance far enough ahead
    snapshot_meta = get_tip_snapshot_meta(self.nodes[0])
    for i in range(10):
        self.log.debug("Part 2.{}: starting...".format(i))
        # Mine i+1 blocks, and alternate announcing either via
        # inv (of tip) or via headers. After each, new blocks
        # mined by the node should successfully be announced
        # with block header, even though the blocks are never requested.
        for j in range(2):
            self.log.debug("Part 2.{}.{}: starting...".format(i, j))
            blocks = []
            coins = get_unspent_coins(self.nodes[0], i + 1)
            for b in range(i + 1):
                coinbase = self.create_coinbase(height, snapshot_meta.hash,
                                                coins[b])
                blocks.append(create_block(tip, coinbase, block_time))
                blocks[-1].solve()
                tip = blocks[-1].sha256
                snapshot_meta = update_snapshot_with_tx(
                    self.nodes[0], snapshot_meta, height, coinbase)
                block_time += 1
                height += 1
            if j == 0:
                # Announce via inv
                test_node.send_block_inv(tip)
                # Should have received a getheaders now
                test_node.wait_for_getheaders()
                test_node.send_header_for_blocks(blocks)
                # Test that duplicate inv's won't result in duplicate
                # getdata requests, or duplicate headers announcements
                [inv_node.send_block_inv(x.sha256) for x in blocks]
                test_node.wait_for_getdata([x.sha256 for x in blocks])
                inv_node.sync_with_ping()
            else:
                # Announce via headers
                test_node.send_header_for_blocks(blocks)
                test_node.wait_for_getdata([x.sha256 for x in blocks])
                # Test that duplicate headers won't result in duplicate
                # getdata requests (the check is further down)
                inv_node.send_header_for_blocks(blocks)
                inv_node.sync_with_ping()
            [test_node.send_message(msg_block(x)) for x in blocks]
            test_node.sync_with_ping()
            inv_node.sync_with_ping()
            # These blocks should not be announced to the inv node (since it
            # also broadcast them)
            assert "inv" not in inv_node.last_message
            assert "headers" not in inv_node.last_message
        tip = self.mine_blocks(1)
        snapshot_meta = get_tip_snapshot_meta(self.nodes[0])
        inv_node.check_last_inv_announcement(inv=[tip])
        test_node.check_last_headers_announcement(headers=[tip])
        height += 1
        block_time += 1

    self.log.info("Part 2: success!")

    # PART 3. Headers announcements can stop after large reorg, and resume
    # after getheaders or inv from peer.
    self.log.info(
        "Part 3: headers announcements can stop after large reorg, and "
        "resume after headers/inv from peer...")
    for j in range(2):
        self.log.debug("Part 3.{}: starting...".format(j))
        # First try mining a reorg that can propagate with header announcement
        new_block_hashes = self.mine_reorg(length=7)
        tip = new_block_hashes[-1]
        inv_node.check_last_inv_announcement(inv=[tip])
        test_node.check_last_headers_announcement(headers=new_block_hashes)

        block_time += 8

        # Mine a too-large reorg, which should be announced with a single inv
        new_block_hashes = self.mine_reorg(length=8)
        tip = new_block_hashes[-1]
        inv_node.check_last_inv_announcement(inv=[tip])
        test_node.check_last_inv_announcement(inv=[tip])

        block_time += 9

        # Block hashes are 32 bytes, so pad the hex string to 64 characters
        fork_point = self.nodes[0].getblock(
            "%064x" % new_block_hashes[0])["previousblockhash"]
        fork_point = int(fork_point, 16)

        # Use getblocks/getdata
        test_node.send_getblocks(locator=[fork_point])
        test_node.check_last_inv_announcement(inv=new_block_hashes)
        test_node.send_get_data(new_block_hashes)
        test_node.wait_for_block(new_block_hashes[-1])

        for i in range(3):
            self.log.debug("Part 3.{}.{}: starting...".format(j, i))

            # Mine another block, still should get only an inv
            tip = self.mine_blocks(1)
            inv_node.check_last_inv_announcement(inv=[tip])
            test_node.check_last_inv_announcement(inv=[tip])
            if i == 0:
                # Just get the data -- shouldn't cause headers announcements
                # to resume
                test_node.send_get_data([tip])
                test_node.wait_for_block(tip)
            elif i == 1:
                # Send a getheaders message that shouldn't trigger headers
                # announcements to resume (best header sent will be too old)
                test_node.send_get_headers(locator=[fork_point],
                                           hashstop=new_block_hashes[1])
                test_node.send_get_data([tip])
                test_node.wait_for_block(tip)
            elif i == 2:
                # This time, try sending either a getheaders to trigger
                # resumption of headers announcements, or mine a new block
                # and inv it, also triggering resumption of headers
                # announcements.
                test_node.send_get_data([tip])
                test_node.wait_for_block(tip)
                if j == 0:
                    test_node.send_get_headers(locator=[tip], hashstop=0)
                    test_node.sync_with_ping()
                else:
                    test_node.send_block_inv(tip)
                    test_node.sync_with_ping()
        # New blocks should now be announced with header
        tip = self.mine_blocks(1)
        inv_node.check_last_inv_announcement(inv=[tip])
        test_node.check_last_headers_announcement(headers=[tip])

    self.log.info("Part 3: success!")

    self.log.info("Part 4: Testing direct fetch behavior...")
    tip = self.mine_blocks(1)
    height = self.nodes[0].getblockcount() + 1
    last_time = self.nodes[0].getblock(
        self.nodes[0].getbestblockhash())['time']
    block_time = last_time + 1

    # Create 2 blocks. Send the blocks, then send the headers.
    blocks = []
    snapshot_meta = get_tip_snapshot_meta(self.nodes[0])
    coins = get_unspent_coins(self.nodes[0], 2)
    for b in range(2):
        coinbase = self.create_coinbase(height, snapshot_meta.hash, coins[b])
        blocks.append(create_block(tip, coinbase, block_time))
        blocks[-1].solve()
        tip = blocks[-1].sha256
        snapshot_meta = update_snapshot_with_tx(self.nodes[0], snapshot_meta,
                                                height, coinbase)
        block_time += 1
        height += 1
    inv_node.send_message(msg_block(blocks[-1]))
    inv_node.sync_with_ping()  # Make sure blocks are processed

    test_node.last_message.pop("getdata", None)
    test_node.send_header_for_blocks(blocks)
    test_node.sync_with_ping()
    # should not have received any getdata messages
    with mininode_lock:
        assert "getdata" not in test_node.last_message

    # This time, direct fetch should work
    blocks = []
    snapshots = [get_tip_snapshot_meta(self.nodes[0])]
    coins = get_unspent_coins(self.nodes[0], 3)
    for b in range(3):
        coinbase = self.create_coinbase(height, snapshots[-1].hash, coins[b])
        blocks.append(create_block(tip, coinbase, block_time))
        blocks[-1].solve()
        tip = blocks[-1].sha256
        snapshots.append(
            update_snapshot_with_tx(self.nodes[0], snapshots[-1], height,
                                    coinbase))
        block_time += 1
        height += 1

    test_node.send_header_for_blocks(blocks)
    test_node.sync_with_ping()
    test_node.wait_for_getdata([x.sha256 for x in blocks],
                               timeout=DIRECT_FETCH_RESPONSE_TIME)

    [test_node.send_message(msg_block(x)) for x in blocks]
    test_node.sync_with_ping()

    # Now announce a header that forks the last two blocks
    tip = blocks[0].sha256
    snapshot_meta = snapshots[1]
    height -= 2
    blocks = []

    # Create extra blocks for later
    coins = get_unspent_coins(self.nodes[0], 20)
    for b in range(20):
        coinbase = self.create_coinbase(height, snapshot_meta.hash, coins[b])
        blocks.append(create_block(tip, coinbase, block_time))
        blocks[-1].solve()
        tip = blocks[-1].sha256
        snapshot_meta = update_snapshot_with_tx(self.nodes[0], snapshot_meta,
                                                height, coinbase)
        block_time += 1
        height += 1

    # Announcing one block on fork should not trigger direct fetch
    # (less work than tip)
    test_node.last_message.pop("getdata", None)
    test_node.send_header_for_blocks(blocks[0:1])
    test_node.sync_with_ping()
    with mininode_lock:
        assert "getdata" not in test_node.last_message

    # Announcing one more block on fork should trigger direct fetch for
    # both blocks (same work as tip)
    test_node.send_header_for_blocks(blocks[1:2])
    test_node.sync_with_ping()
    test_node.wait_for_getdata([x.sha256 for x in blocks[0:2]],
                               timeout=DIRECT_FETCH_RESPONSE_TIME)

    # Announcing 16 more headers should trigger direct fetch for 14 more
    # blocks
    test_node.send_header_for_blocks(blocks[2:18])
    test_node.sync_with_ping()
    test_node.wait_for_getdata([x.sha256 for x in blocks[2:16]],
                               timeout=DIRECT_FETCH_RESPONSE_TIME)

    # Announcing 1 more header should not trigger any response
    test_node.last_message.pop("getdata", None)
    test_node.send_header_for_blocks(blocks[18:19])
    test_node.sync_with_ping()
    with mininode_lock:
        assert "getdata" not in test_node.last_message

    self.log.info("Part 4: success!")

    # Now deliver all those blocks we announced.
    [test_node.send_message(msg_block(x)) for x in blocks]
    test_node.sync_with_ping()

    self.log.info("Part 5: Testing handling of unconnecting headers")
    # First we test that receipt of an unconnecting header doesn't prevent
    # chain sync.
    snapshot_meta = get_tip_snapshot_meta(self.nodes[0])
    for i in range(10):
        self.log.debug("Part 5.{}: starting...".format(i))
        test_node.last_message.pop("getdata", None)
        blocks = []
        # Create two more blocks.
        coins = get_unspent_coins(self.nodes[0], 2)
        for j in range(2):
            coinbase = self.create_coinbase(height, snapshot_meta.hash,
                                            coins[j])
            blocks.append(create_block(tip, coinbase, block_time))
            blocks[-1].solve()
            tip = blocks[-1].sha256
            snapshot_meta = update_snapshot_with_tx(
                self.nodes[0], snapshot_meta, height, coinbase)
            block_time += 1
            height += 1
        # Send the header of the second block -> this won't connect.
        with mininode_lock:
            test_node.last_message.pop("getheaders", None)
        test_node.send_header_for_blocks([blocks[1]])
        test_node.wait_for_getheaders()
        test_node.send_header_for_blocks(blocks)
        test_node.wait_for_getdata([x.sha256 for x in blocks])
        [test_node.send_message(msg_block(x)) for x in blocks]
        test_node.sync_with_ping()
        assert_equal(int(self.nodes[0].getbestblockhash(), 16),
                     blocks[1].sha256)

    blocks = []
    # Now we test that if we repeatedly don't send connecting headers, we
    # don't go into an infinite loop trying to get them to connect.
    MAX_UNCONNECTING_HEADERS = 10
    snapshot_meta = get_tip_snapshot_meta(self.nodes[0])
    coins = get_unspent_coins(self.nodes[0], MAX_UNCONNECTING_HEADERS + 1)
    for j in range(MAX_UNCONNECTING_HEADERS + 1):
        coinbase = self.create_coinbase(height, snapshot_meta.hash, coins[j])
        blocks.append(create_block(tip, coinbase, block_time))
        blocks[-1].solve()
        tip = blocks[-1].sha256
        snapshot_meta = update_snapshot_with_tx(self.nodes[0], snapshot_meta,
                                                height, coinbase)
        block_time += 1
        height += 1

    for i in range(1, MAX_UNCONNECTING_HEADERS):
        # Send a header that doesn't connect, check that we get a getheaders.
        with mininode_lock:
            test_node.last_message.pop("getheaders", None)
        test_node.send_header_for_blocks([blocks[i]])
        test_node.wait_for_getheaders()

    # Next header will connect, should re-set our count:
    test_node.send_header_for_blocks([blocks[0]])

    # Remove the first two entries (blocks[1] would connect):
    blocks = blocks[2:]

    # Now try to see how many unconnecting headers we can send
    # before we get disconnected. Should be 5*MAX_UNCONNECTING_HEADERS
    for i in range(5 * MAX_UNCONNECTING_HEADERS - 1):
        # Send a header that doesn't connect, check that we get a getheaders.
        with mininode_lock:
            test_node.last_message.pop("getheaders", None)
        test_node.send_header_for_blocks([blocks[i % len(blocks)]])
        test_node.wait_for_getheaders()

    # Eventually this stops working.
    test_node.send_header_for_blocks([blocks[-1]])

    # Should get disconnected
    test_node.wait_for_disconnect()

    self.log.info("Part 5: success!")

    # Finally, check that the inv node never received a getdata request,
    # throughout the test
    assert "getdata" not in inv_node.last_message
def run_test(self):

    # Create a block with 2500 stakeable outputs
    self.build_coins_to_stake()

    # Propagate it to nodes 1 and 2 and stop them for now
    self.sync_first_block()

    # Key management for node 0
    keytool = KeyTool.for_node(self.nodes[0])

    # Connect to node0
    p2p0 = self.nodes[0].add_p2p_connection(BaseNode())

    # Build the blockchain
    self.tip = int(self.nodes[0].getbestblockhash(), 16)
    self.block_time = self.nodes[0].getblock(
        self.nodes[0].getbestblockhash())['time'] + 1

    self.blocks = []

    # Get a pubkey for the coinbase TXO
    coinbase_key = keytool.make_privkey()
    coinbase_pubkey = bytes(coinbase_key.get_pubkey())
    keytool.upload_key(coinbase_key)

    self.log.info(
        "Create the first block with a coinbase output to our key")
    height = 2
    snapshot_meta = get_tip_snapshot_meta(self.nodes[0])
    coin = self.get_coin_to_stake()
    coinbase = sign_coinbase(
        self.nodes[0],
        create_coinbase(height, coin, snapshot_meta.hash, coinbase_pubkey))
    block = create_block(self.tip, coinbase, self.block_time)
    self.blocks.append(block)
    self.block_time += 1
    block.solve()
    # Save the coinbase for later
    self.block1 = block
    self.tip = block.sha256
    utxo1 = UTXO(height, TxType.COINBASE, COutPoint(coinbase.sha256, 0),
                 coinbase.vout[0])
    snapshot_meta = update_snapshot_with_tx(self.nodes[0], snapshot_meta,
                                            height, coinbase)
    height += 1

    self.log.info(
        "Bury the block 100 deep so the coinbase output is spendable")
    for i in range(100):
        coin = self.get_coin_to_stake()
        coinbase = sign_coinbase(
            self.nodes[0],
            create_coinbase(height, coin, snapshot_meta.hash,
                            coinbase_pubkey))
        block = create_block(self.tip, coinbase, self.block_time)
        block.solve()
        self.blocks.append(block)
        self.tip = block.sha256
        self.block_time += 1
        snapshot_meta = update_snapshot_with_tx(self.nodes[0], snapshot_meta,
                                                height, coinbase)
        height += 1

    self.log.info(
        "Create a transaction spending the coinbase output with an invalid "
        "(null) signature")
    tx = CTransaction()
    tx.vin.append(
        CTxIn(COutPoint(self.block1.vtx[0].sha256, 0), scriptSig=b""))
    tx.vout.append(
        CTxOut((PROPOSER_REWARD - 1) * 100000000, CScript([OP_TRUE])))
    tx.calc_sha256()

    coin = self.get_coin_to_stake()
    coinbase = sign_coinbase(
        self.nodes[0],
        create_coinbase(height, coin, snapshot_meta.hash, coinbase_pubkey))
    block102 = create_block(self.tip, coinbase, self.block_time)
    self.block_time += 1
    block102.vtx.extend([tx])
    block102.compute_merkle_trees()
    block102.rehash()
    block102.solve()
    self.blocks.append(block102)
    self.tip = block102.sha256
    self.block_time += 1

    snapshot_meta = update_snapshot_with_tx(self.nodes[0], snapshot_meta,
                                            height, coinbase)
    utxo2 = UTXO(height, tx.get_type(), COutPoint(tx.sha256, 0), tx.vout[0])
    snapshot_meta = calc_snapshot_hash(self.nodes[0], snapshot_meta, height,
                                       [utxo1], [utxo2])
    height += 1

    self.log.info("Bury the assumed valid block 2100 deep")
    for i in range(2100):
        coin = self.get_coin_to_stake()
        coinbase = sign_coinbase(
            self.nodes[0],
            create_coinbase(height, coin, snapshot_meta.hash,
                            coinbase_pubkey))
        block = create_block(self.tip, coinbase, self.block_time)
        block.nVersion = 4
        block.solve()
        self.blocks.append(block)
        self.tip = block.sha256
        self.block_time += 1
        snapshot_meta = update_snapshot_with_tx(self.nodes[0], snapshot_meta,
                                                height, coinbase)
        height += 1

    self.nodes[0].disconnect_p2ps()

    self.log.info(
        "Start node1 and node2 with assumevalid so they accept a block "
        "with a bad signature.")
    self.start_node(1, extra_args=["-assumevalid=" + hex(block102.sha256)])
    self.start_node(2, extra_args=["-assumevalid=" + hex(block102.sha256)])

    p2p0 = self.nodes[0].add_p2p_connection(BaseNode())
    p2p1 = self.nodes[1].add_p2p_connection(BaseNode())
    p2p2 = self.nodes[2].add_p2p_connection(BaseNode())

    # send header lists to all three nodes
    p2p0.send_header_for_blocks(self.blocks[0:2000])
    p2p0.send_header_for_blocks(self.blocks[2000:])
    p2p1.send_header_for_blocks(self.blocks[0:2000])
    p2p1.send_header_for_blocks(self.blocks[2000:])
    p2p2.send_header_for_blocks(self.blocks[0:200])

    self.log.info("Send blocks to node0. Block 103 will be rejected.")
    self.send_blocks_until_disconnected(p2p0)
    self.assert_blockchain_height(self.nodes[0], 102)

    self.log.info("Send all blocks to node1. All blocks will be accepted.")
    for i in range(2202):
        p2p1.send_message(msg_block(self.blocks[i]))
    # Syncing 2200 blocks can take a while on slow systems. Give it plenty
    # of time to sync.
    p2p1.sync_with_ping(120)
    assert_equal(
        self.nodes[1].getblock(self.nodes[1].getbestblockhash())['height'],
        2203)

    self.log.info("Send blocks to node2. Block 102 will be rejected.")
    self.send_blocks_until_disconnected(p2p2)
    self.assert_blockchain_height(self.nodes[2], 102)
def run_test(self):
    """Main test logic"""

    # Create a P2P connection to one of the nodes
    self.nodes[0].add_p2p_connection(BaseNode())

    # Start up network handling in another thread. This needs to be called
    # after the P2P connections have been created.
    network_thread_start()
    # wait_for_verack ensures that the P2P connection is fully up.
    self.nodes[0].p2p.wait_for_verack()

    self.setup_stake_coins(self.nodes[0])

    # Generating a block on one of the nodes will get us out of IBD
    blocks = [int(self.nodes[0].generate(nblocks=1)[0], 16)]
    self.sync_all([self.nodes[0:1]])

    # Notice above how we called an RPC by calling a method with the same
    # name on the node object. Notice also how we used a keyword argument
    # to specify a named RPC argument. Neither of those are defined on the
    # node object. Instead there's some __getattr__() magic going on under
    # the covers to dispatch unrecognised attribute calls to the RPC
    # interface.

    # Logs are nice. Do plenty of them. They can be used in place of comments
    # for breaking the test into sub-sections.
    self.log.info("Starting test!")

    self.log.info("Calling a custom function")
    custom_function()

    self.log.info("Calling a custom method")
    self.custom_method()

    self.log.info("Create some blocks")
    self.tip = int(self.nodes[0].getbestblockhash(), 16)
    self.block_time = self.nodes[0].getblock(
        self.nodes[0].getbestblockhash())['time'] + 1

    height = self.nodes[0].getblockcount()
    snapshot_meta = get_tip_snapshot_meta(self.nodes[0])

    stakes = self.nodes[0].listunspent()
    for stake in stakes:
        # Use the mininode and blocktools functionality to manually build a
        # block. Calling the generate() RPC is easier, but this allows us to
        # exactly control the blocks and transactions.
        coinbase = sign_coinbase(
            self.nodes[0],
            create_coinbase(height, stake, snapshot_meta.hash))
        block = create_block(self.tip, coinbase, self.block_time)

        # Wait until the active chain picks up the previous block
        wait_until(lambda: self.nodes[0].getblockcount() == height,
                   timeout=5)
        snapshot_meta = update_snapshot_with_tx(self.nodes[0], snapshot_meta,
                                                height + 1, coinbase)
        block.solve()
        block_message = msg_block(block)
        # send_message is used to send a P2P message to the node over our
        # P2PInterface
        self.nodes[0].p2p.send_message(block_message)
        self.tip = block.sha256
        blocks.append(self.tip)
        self.block_time += 1
        height += 1

    self.log.info(
        "Wait for node1 to reach current tip (height %d) using RPC" % height)
    self.nodes[1].waitforblockheight(height)

    self.log.info("Connect node2 and node1")
    connect_nodes(self.nodes[1], 2)

    self.log.info("Add P2P connection to node2")
    # We can't add additional P2P connections once the network thread has
    # started. Disconnect the connection to node0, wait for the network
    # thread to terminate, then connect to node2. This is specific to the
    # current implementation of the network thread and may be improved in
    # future.
    self.nodes[0].disconnect_p2ps()
    network_thread_join()

    self.nodes[2].add_p2p_connection(BaseNode())
    network_thread_start()
    self.nodes[2].p2p.wait_for_verack()

    self.log.info(
        "Wait for node2 to reach current tip. Test that it has propagated "
        "all the blocks to us")
    getdata_request = msg_getdata()
    for block in blocks:
        getdata_request.inv.append(CInv(2, block))
    self.nodes[2].p2p.send_message(getdata_request)

    # wait_until() will loop until a predicate condition is met. Use it to
    # test properties of the P2PInterface objects.
    wait_until(lambda: sorted(blocks) == sorted(
        list(self.nodes[2].p2p.block_receive_map.keys())),
               timeout=5, lock=mininode_lock)

    self.log.info("Check that each block was received only once")
    # The network thread uses a global lock on data access to the
    # P2PConnection objects when sending and receiving messages. The test
    # thread should acquire the global lock before accessing any
    # P2PConnection data to avoid locking and synchronization issues. Note
    # that wait_until() acquires this global lock when testing the predicate.
    with mininode_lock:
        for block in self.nodes[2].p2p.block_receive_map.values():
            assert_equal(block, 1)