def test_null_locators(self, test_node, inv_node):
    tip = self.nodes[0].getblockheader(self.nodes[0].generatetoaddress(
        1, self.nodes[0].get_deterministic_priv_key().address)[0])
    tip_hash = int(tip["hash"], 16)

    final_tx = get_final_tx_info(self.nodes[0])

    inv_node.check_last_inv_announcement(inv=[tip_hash])
    test_node.check_last_inv_announcement(inv=[tip_hash])

    self.log.info(
        "Verify getheaders with null locator and valid hashstop returns headers.")
    test_node.clear_block_announcements()
    test_node.send_get_headers(locator=[], hashstop=tip_hash)
    test_node.check_last_headers_announcement(headers=[tip_hash])

    self.log.info(
        "Verify getheaders with null locator and invalid hashstop does not return headers.")
    block = create_block(int(tip["hash"], 16),
                         create_coinbase(tip["height"] + 1),
                         tip["mediantime"] + 1)
    final_tx = add_final_tx(final_tx, block)
    block.solve()
    test_node.send_header_for_blocks([block])
    test_node.clear_block_announcements()
    test_node.send_get_headers(locator=[], hashstop=int(block.hash, 16))
    test_node.sync_with_ping()
    assert_equal(test_node.block_announced, False)
    inv_node.clear_block_announcements()
    test_node.send_message(msg_block(block))
    inv_node.check_last_inv_announcement(inv=[int(block.hash, 16)])
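# For reference, a minimal sketch of what a helper like send_get_headers
# presumably does under the hood, assuming the standard test-framework
# message types msg_getheaders and CBlockLocator (illustration only, not
# the helper's actual source):
def send_get_headers_sketch(peer, locator, hashstop):
    msg = msg_getheaders()
    msg.locator.vHave = locator  # an empty list is the "null locator"
    msg.hashstop = hashstop      # 0 means "no stopping hash"
    peer.send_message(msg)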
def test_bip68_not_consensus(self):
    assert (get_bip9_status(self.nodes[0], 'locktime')['status'] != 'active')
    txid = self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), 2)

    tx1 = FromHex(CTransaction(), self.nodes[0].getrawtransaction(txid))
    tx1.rehash()

    # Make an anyone-can-spend transaction
    tx2 = CTransaction()
    tx2.nVersion = 1
    tx2.vin = [CTxIn(COutPoint(tx1.sha256, 0), nSequence=0)]
    tx2.vout = [
        CTxOut(int(tx1.vout[0].nValue - self.relayfee * COIN),
               CScript([b'a']))
    ]

    # sign tx2
    tx2_raw = self.nodes[0].signrawtransactionwithwallet(ToHex(tx2))["hex"]
    tx2 = FromHex(tx2, tx2_raw)
    tx2.rehash()

    self.nodes[0].sendrawtransaction(ToHex(tx2))

    # Now make an invalid spend of tx2 according to BIP68
    sequence_value = 100  # 100 block relative locktime

    tx3 = CTransaction()
    tx3.nVersion = 2
    tx3.vin = [CTxIn(COutPoint(tx2.sha256, 0), nSequence=sequence_value)]
    tx3.vout = [
        CTxOut(int(tx2.vout[0].nValue - self.relayfee * COIN),
               CScript([b'a' * 35]))
    ]
    tx3.rehash()

    assert_raises_rpc_error(-26, NOT_FINAL_ERROR,
                            self.nodes[0].sendrawtransaction, ToHex(tx3))

    # make a block that violates bip68; ensure that the tip updates
    tip = int(self.nodes[0].getbestblockhash(), 16)
    final_tx = get_final_tx_info(self.nodes[0])
    block = create_block(
        tip, create_coinbase(self.nodes[0].getblockcount() + 1))
    block.nVersion = 3
    block.vtx.extend([tx1, tx2, tx3])
    final_tx = add_final_tx(final_tx, block)
    block.hashMerkleRoot = block.calc_merkle_root()
    block.rehash()
    add_witness_commitment(block)
    block.solve()

    self.nodes[0].submitblock(bytes_to_hex_str(block.serialize(True)))
    assert_equal(self.nodes[0].getbestblockhash(), block.hash)
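# Aside: the nSequence handling exercised above comes straight from BIP68.
# A minimal decoding sketch; the constants restate the BIP's values and
# are suffixed _SKETCH to avoid shadowing the framework's own names:
SEQUENCE_LOCKTIME_DISABLE_FLAG_SKETCH = 1 << 31  # lock ignored when set
SEQUENCE_LOCKTIME_TYPE_FLAG_SKETCH = 1 << 22     # set = time units, unset = blocks
SEQUENCE_LOCKTIME_MASK_SKETCH = 0x0000ffff       # low 16 bits carry the value

def describe_bip68_lock(n_sequence):
    """Describe the relative lock encoded in a version-2 tx input."""
    if n_sequence & SEQUENCE_LOCKTIME_DISABLE_FLAG_SKETCH:
        return "no relative lock (disable flag set)"
    value = n_sequence & SEQUENCE_LOCKTIME_MASK_SKETCH
    if n_sequence & SEQUENCE_LOCKTIME_TYPE_FLAG_SKETCH:
        return "time lock: {} x 512 seconds".format(value)
    return "height lock: {} blocks".format(value)

# e.g. describe_bip68_lock(100) -> "height lock: 100 blocks", which is why
# tx3 above is not final until 100 blocks have built on top of tx2.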
def submit_block_with_tx(node, tx):
    ctx = CTransaction()
    ctx.deserialize(io.BytesIO(hex_str_to_bytes(tx)))

    tip = node.getbestblockhash()
    height = node.getblockcount() + 1
    block_time = node.getblockheader(tip)["mediantime"] + 1
    block = create_block(int(tip, 16), create_coinbase(height), block_time)
    block.vtx.append(ctx)
    if height > 100:
        add_final_tx(get_final_tx_info(node), block)
    block.rehash()
    block.hashMerkleRoot = block.calc_merkle_root()
    add_witness_commitment(block)
    block.solve()
    node.submitblock(bytes_to_hex_str(block.serialize(True)))
    return block
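# The get_final_tx_info/add_final_tx pair used throughout these tests is
# Freicoin's block-final transaction plumbing. A rough sketch of the
# contract as inferred from usage (illustrative only, NOT the real
# implementation): the info is a list of spendable block-final outputs,
# and adding a final tx spends them in a new last-position transaction.
def add_final_tx_sketch(final_tx_info, block):
    tx = CTransaction()
    for utxo in final_tx_info:
        tx.vin.append(CTxIn(COutPoint(int(utxo['txid'], 16), utxo['vout']),
                            nSequence=0xffffffff))
    tx.vout.append(CTxOut(0, CScript([b'a'])))  # placeholder output
    tx.rehash()
    block.vtx.append(tx)  # block-final tx must be the last transaction
    block.hashMerkleRoot = block.calc_merkle_root()
    # Return the outputs the next block's final tx must spend.
    return [{'txid': tx.hash, 'vout': 0, 'amount': 0}]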
def run_test(self):
    node = self.nodes[0]  # alias
    node.add_p2p_connection(P2PStoreTxInvs())

    self.log.info("Create a new transaction and wait until it's broadcast")
    txid = int(node.sendtoaddress(node.getnewaddress(), 1), 16)

    # Can take a few seconds due to transaction trickling
    wait_until(lambda: node.p2p.tx_invs_received[txid] >= 1,
               lock=mininode_lock)

    # Add a second peer since txs aren't rebroadcast to the same peer (see filterInventoryKnown)
    node.add_p2p_connection(P2PStoreTxInvs())

    self.log.info("Create a block")
    # Create and submit a block without the transaction.
    # Transactions are only rebroadcast if there has been a block at least five minutes
    # after the last time we tried to broadcast. Use mocktime and give an extra minute to be sure.
    block_time = int(time.time()) + 6 * 60
    node.setmocktime(block_time)
    block = create_block(
        int(node.getbestblockhash(), 16),
        create_coinbase(node.getblockchaininfo()['blocks'] + 1),
        block_time)
    add_final_tx(get_final_tx_info(node), block)
    block.nVersion = 3
    block.rehash()
    block.solve()
    node.submitblock(ToHex(block))

    # Transaction should not be rebroadcast
    node.p2ps[1].sync_with_ping()
    assert_equal(node.p2ps[1].tx_invs_received[txid], 0)

    self.log.info("Transaction should be rebroadcast after 30 minutes")
    # Use mocktime and give an extra 5 minutes to be sure.
    rebroadcast_time = int(time.time()) + 41 * 60
    node.setmocktime(rebroadcast_time)
    wait_until(lambda: node.p2ps[1].tx_invs_received[txid] >= 1,
               lock=mininode_lock)
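# For context, P2PStoreTxInvs simply counts transaction invs per txid.
# A sketch of the idea, assuming the framework's P2PInterface base class
# and inv type 1 (MSG_TX):
from collections import defaultdict

class P2PStoreTxInvsSketch(P2PInterface):
    def __init__(self):
        super().__init__()
        self.tx_invs_received = defaultdict(int)

    def on_inv(self, message):
        for inv in message.inv:
            if inv.type == 1:  # MSG_TX
                self.tx_invs_received[inv.hash] += 1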
def send_blocks_with_version(self, peer, numblocks, version):
    """Send numblocks blocks to peer with version set"""
    tip = self.nodes[0].getbestblockhash()
    height = self.nodes[0].getblockcount()
    block_time = self.nodes[0].getblockheader(tip)["time"] + 1
    tip = int(tip, 16)

    final_tx = False
    for _ in range(numblocks):
        block = create_block(tip, create_coinbase(height + 1), block_time)
        block.nVersion = version
        if height >= 100:
            if not final_tx:
                final_tx = get_final_tx_info(self.nodes[0])
            final_tx = add_final_tx(final_tx, block)
        block.solve()
        peer.send_message(msg_block(block))
        block_time += 1
        height += 1
        tip = block.sha256
    peer.sync_with_ping()
def run_test(self):
    self.nodes[0].add_p2p_connection(P2PInterface())

    self.log.info("Mining %d blocks", DERSIG_HEIGHT - 2)
    self.coinbase_txids = [
        self.nodes[0].getblock(b)['tx'][0]
        for b in self.nodes[0].generate(DERSIG_HEIGHT - 2)
    ]
    self.nodeaddress = self.nodes[0].getnewaddress()

    self.log.info(
        "Test that a transaction with non-DER signature can still appear in a block")

    final_tx = get_final_tx_info(self.nodes[0])

    spendtx = create_transaction(self.nodes[0], self.coinbase_txids[0],
                                 self.nodeaddress, amount=1.0)
    unDERify(spendtx)
    spendtx.rehash()

    tip = self.nodes[0].getbestblockhash()
    block_time = self.nodes[0].getblockheader(tip)['mediantime'] + 1
    block = create_block(int(tip, 16), create_coinbase(DERSIG_HEIGHT - 1),
                         block_time)
    final_tx = add_final_tx(final_tx, block)
    block.nVersion = 2
    block.vtx.insert(-1, spendtx)
    block.hashMerkleRoot = block.calc_merkle_root()
    block.rehash()
    block.solve()

    self.nodes[0].p2p.send_and_ping(msg_block(block))
    assert_equal(self.nodes[0].getbestblockhash(), block.hash)

    self.log.info("Test that blocks must now be at least version 3")
    tip = block.sha256
    block_time += 1
    block = create_block(tip, create_coinbase(DERSIG_HEIGHT), block_time)
    final_tx = add_final_tx(final_tx, block)
    block.nVersion = 2
    block.rehash()
    block.solve()

    with self.nodes[0].assert_debug_log(expected_msgs=[
            '{}, bad-version(0x00000002)'.format(block.hash)]):
        self.nodes[0].p2p.send_and_ping(msg_block(block))
        assert_equal(int(self.nodes[0].getbestblockhash(), 16), tip)
        self.nodes[0].p2p.sync_with_ping()

    self.log.info(
        "Test that transactions with non-DER signatures cannot appear in a block")
    block.nVersion = 3

    spendtx = create_transaction(self.nodes[0], self.coinbase_txids[1],
                                 self.nodeaddress, amount=1.0)
    unDERify(spendtx)
    spendtx.rehash()

    # First we show that this tx is valid except for DERSIG by getting it
    # rejected from the mempool for exactly that reason.
    assert_equal([{
        'txid': spendtx.hash,
        'allowed': False,
        'reject-reason': '64: non-mandatory-script-verify-flag (Non-canonical DER signature)'
    }], self.nodes[0].testmempoolaccept(
        rawtxs=[bytes_to_hex_str(spendtx.serialize())], allowhighfees=True))

    # Now we verify that a block with this transaction is also invalid.
    block.vtx.insert(-1, spendtx)
    block.hashMerkleRoot = block.calc_merkle_root()
    block.rehash()
    block.solve()

    with self.nodes[0].assert_debug_log(expected_msgs=[
            'CheckInputs on {} failed with non-mandatory-script-verify-flag (Non-canonical DER signature)'.format(block.vtx[-2].hash)]):
        self.nodes[0].p2p.send_and_ping(msg_block(block))
        assert_equal(int(self.nodes[0].getbestblockhash(), 16), tip)
        self.nodes[0].p2p.sync_with_ping()

    wait_until(lambda: "reject" in self.nodes[0].p2p.last_message.keys(),
               lock=mininode_lock)
    with mininode_lock:
        assert self.nodes[0].p2p.last_message["reject"].code in [
            REJECT_INVALID, REJECT_NONSTANDARD]
        assert_equal(self.nodes[0].p2p.last_message["reject"].data,
                     block.sha256)
        assert b'Non-canonical DER signature' in self.nodes[0].p2p.last_message["reject"].reason

    self.log.info(
        "Test that a version 3 block with a DERSIG-compliant transaction is accepted")
    block.vtx[1] = create_transaction(self.nodes[0], self.coinbase_txids[1],
                                      self.nodeaddress, amount=1.0)
    block.hashMerkleRoot = block.calc_merkle_root()
    block.rehash()
    block.solve()

    self.nodes[0].p2p.send_and_ping(msg_block(block))
    assert_equal(int(self.nodes[0].getbestblockhash(), 16), block.sha256)
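# For reference, unDERify produces a signature that is valid but not
# strictly DER-encoded by padding the signature push in vin[0] just
# before its final byte (the sighash type). Roughly, as a sketch:
def unDERify_sketch(tx):
    scriptSig = CScript(tx.vin[0].scriptSig)
    newscript = []
    for i in scriptSig:
        if len(newscript) == 0:
            # stuff an extra zero byte into the signature push
            newscript.append(i[0:-1] + b'\0' + i[-1:])
        else:
            newscript.append(i)
    tx.vin[0].scriptSig = CScript(newscript)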
def test_nonnull_locators(self, test_node, inv_node):
    tip = int(self.nodes[0].getbestblockhash(), 16)

    # PART 1
    # 1. Mine a block; expect inv announcements each time
    self.log.info(
        "Part 1: headers don't start before sendheaders message...")
    for i in range(4):
        self.log.debug("Part 1.{}: starting...".format(i))
        old_tip = tip
        tip = self.mine_blocks(1)
        final_tx = get_final_tx_info(self.nodes[0])
        inv_node.check_last_inv_announcement(inv=[tip])
        test_node.check_last_inv_announcement(inv=[tip])
        # Try a few different responses; none should affect next announcement
        if i == 0:
            # first request the block
            test_node.send_get_data([tip])
            test_node.wait_for_block(tip)
        elif i == 1:
            # next try requesting header and block
            test_node.send_get_headers(locator=[old_tip], hashstop=tip)
            test_node.send_get_data([tip])
            test_node.wait_for_block(tip)
            test_node.clear_block_announcements()  # since we requested headers...
        elif i == 2:
            # this time announce own block via headers
            inv_node.clear_block_announcements()
            height = self.nodes[0].getblockcount()
            last_time = self.nodes[0].getblock(
                self.nodes[0].getbestblockhash())['time']
            block_time = last_time + 1
            new_block = create_block(tip, create_coinbase(height + 1),
                                     block_time)
            final_tx = add_final_tx(final_tx, new_block)
            new_block.solve()
            test_node.send_header_for_blocks([new_block])
            test_node.wait_for_getdata([new_block.sha256])
            test_node.send_message(msg_block(new_block))
            test_node.sync_with_ping()  # make sure this block is processed
            wait_until(lambda: inv_node.block_announced,
                       timeout=60, lock=mininode_lock)
            inv_node.clear_block_announcements()
            test_node.clear_block_announcements()

    self.log.info("Part 1: success!")
    self.log.info(
        "Part 2: announce blocks with headers after sendheaders message...")
    # PART 2
    # 2. Send a sendheaders message and test that headers announcements
    # commence and keep working.
    test_node.send_message(msg_sendheaders())
    prev_tip = int(self.nodes[0].getbestblockhash(), 16)
    test_node.send_get_headers(locator=[prev_tip], hashstop=0)
    test_node.sync_with_ping()

    # Now that we've synced headers, headers announcements should work
    tip = self.mine_blocks(1)
    final_tx = get_final_tx_info(self.nodes[0])
    inv_node.check_last_inv_announcement(inv=[tip])
    test_node.check_last_headers_announcement(headers=[tip])

    height = self.nodes[0].getblockcount() + 1
    block_time += 10  # Advance far enough ahead
    for i in range(10):
        self.log.debug("Part 2.{}: starting...".format(i))
        # Mine i blocks, and alternate announcing either via
        # inv (of tip) or via headers. After each, new blocks
        # mined by the node should successfully be announced
        # with block header, even though the blocks are never requested
        for j in range(2):
            self.log.debug("Part 2.{}.{}: starting...".format(i, j))
            blocks = []
            for b in range(i + 1):
                block = create_block(tip, create_coinbase(height),
                                     block_time)
                final_tx = add_final_tx(final_tx, block)
                blocks.append(block)
                blocks[-1].solve()
                tip = blocks[-1].sha256
                block_time += 1
                height += 1
            if j == 0:
                # Announce via inv
                test_node.send_block_inv(tip)
                test_node.wait_for_getheaders()
                # Should have received a getheaders now
                test_node.send_header_for_blocks(blocks)
                # Test that duplicate inv's won't result in duplicate
                # getdata requests, or duplicate headers announcements
                [inv_node.send_block_inv(x.sha256) for x in blocks]
                test_node.wait_for_getdata([x.sha256 for x in blocks])
                inv_node.sync_with_ping()
            else:
                # Announce via headers
                test_node.send_header_for_blocks(blocks)
                test_node.wait_for_getdata([x.sha256 for x in blocks])
                # Test that duplicate headers won't result in duplicate
                # getdata requests (the check is further down)
                inv_node.send_header_for_blocks(blocks)
                inv_node.sync_with_ping()
            [test_node.send_message(msg_block(x)) for x in blocks]
            test_node.sync_with_ping()
            inv_node.sync_with_ping()
            # This block should not be announced to the inv node (since it also
            # broadcast it)
            assert "inv" not in inv_node.last_message
            assert "headers" not in inv_node.last_message
        tip = self.mine_blocks(1)
        final_tx = get_final_tx_info(self.nodes[0])
        inv_node.check_last_inv_announcement(inv=[tip])
        test_node.check_last_headers_announcement(headers=[tip])
        height += 1
        block_time += 1

    self.log.info("Part 2: success!")

    self.log.info(
        "Part 3: headers announcements can stop after large reorg, and resume after headers/inv from peer...")
    # PART 3. Headers announcements can stop after large reorg, and resume after
    # getheaders or inv from peer.
    for j in range(2):
        self.log.debug("Part 3.{}: starting...".format(j))
        # First try mining a reorg that can propagate with header announcement
        new_block_hashes = self.mine_reorg(length=7)
        tip = new_block_hashes[-1]
        inv_node.check_last_inv_announcement(inv=[tip])
        test_node.check_last_headers_announcement(headers=new_block_hashes)

        block_time += 8

        # Mine a too-large reorg, which should be announced with a single inv
        new_block_hashes = self.mine_reorg(length=8)
        tip = new_block_hashes[-1]
        inv_node.check_last_inv_announcement(inv=[tip])
        test_node.check_last_inv_announcement(inv=[tip])

        block_time += 9

        fork_point = self.nodes[0].getblock(
            "%064x" % new_block_hashes[0])["previousblockhash"]
        fork_point = int(fork_point, 16)

        # Use getblocks/getdata
        test_node.send_getblocks(locator=[fork_point])
        test_node.check_last_inv_announcement(inv=new_block_hashes)
        test_node.send_get_data(new_block_hashes)
        test_node.wait_for_block(new_block_hashes[-1])

        for i in range(3):
            self.log.debug("Part 3.{}.{}: starting...".format(j, i))
            # Mine another block, still should get only an inv
            tip = self.mine_blocks(1)
            inv_node.check_last_inv_announcement(inv=[tip])
            test_node.check_last_inv_announcement(inv=[tip])
            if i == 0:
                # Just get the data -- shouldn't cause headers announcements to resume
                test_node.send_get_data([tip])
                test_node.wait_for_block(tip)
            elif i == 1:
                # Send a getheaders message that shouldn't trigger headers announcements
                # to resume (best header sent will be too old)
                test_node.send_get_headers(locator=[fork_point],
                                           hashstop=new_block_hashes[1])
                test_node.send_get_data([tip])
                test_node.wait_for_block(tip)
            elif i == 2:
                # This time, try sending either a getheaders to trigger resumption
                # of headers announcements, or mine a new block and inv it, also
                # triggering resumption of headers announcements.
                test_node.send_get_data([tip])
                test_node.wait_for_block(tip)
                if j == 0:
                    test_node.send_get_headers(locator=[tip], hashstop=0)
                    test_node.sync_with_ping()
                else:
                    test_node.send_block_inv(tip)
                    test_node.sync_with_ping()
        # New blocks should now be announced with header
        tip = self.mine_blocks(1)
        inv_node.check_last_inv_announcement(inv=[tip])
        test_node.check_last_headers_announcement(headers=[tip])

    self.log.info("Part 3: success!")

    self.log.info("Part 4: Testing direct fetch behavior...")
    tip = self.mine_blocks(1)
    height = self.nodes[0].getblockcount() + 1
    last_time = self.nodes[0].getblock(
        self.nodes[0].getbestblockhash())['time']
    block_time = last_time + 1

    final_tx = get_final_tx_info(self.nodes[0])

    # Create 2 blocks. Send the blocks, then send the headers.
    blocks = []
    for b in range(2):
        block = create_block(tip, create_coinbase(height), block_time)
        final_tx = add_final_tx(final_tx, block)
        blocks.append(block)
        blocks[-1].solve()
        tip = blocks[-1].sha256
        block_time += 1
        height += 1
        inv_node.send_message(msg_block(blocks[-1]))

    inv_node.sync_with_ping()  # Make sure blocks are processed
    test_node.last_message.pop("getdata", None)
    test_node.send_header_for_blocks(blocks)
    test_node.sync_with_ping()
    # should not have received any getdata messages
    with mininode_lock:
        assert "getdata" not in test_node.last_message

    # This time, direct fetch should work
    blocks = []
    for b in range(3):
        block = create_block(tip, create_coinbase(height), block_time)
        final_tx = add_final_tx(final_tx, block)
        if not blocks:
            old_final_tx = final_tx
        blocks.append(block)
        blocks[-1].solve()
        tip = blocks[-1].sha256
        block_time += 1
        height += 1

    test_node.send_header_for_blocks(blocks)
    test_node.sync_with_ping()
    test_node.wait_for_getdata([x.sha256 for x in blocks],
                               timeout=DIRECT_FETCH_RESPONSE_TIME)

    [test_node.send_message(msg_block(x)) for x in blocks]
    test_node.sync_with_ping()

    # Now announce a header that forks the last two blocks
    tip = blocks[0].sha256
    height -= 2
    blocks = []
    final_tx = old_final_tx

    # Create extra blocks for later
    for b in range(20):
        block = create_block(tip, create_coinbase(height), block_time)
        final_tx = add_final_tx(final_tx, block)
        blocks.append(block)
        blocks[-1].solve()
        tip = blocks[-1].sha256
        block_time += 1
        height += 1

    # Announcing one block on fork should not trigger direct fetch
    # (less work than tip)
    test_node.last_message.pop("getdata", None)
    test_node.send_header_for_blocks(blocks[0:1])
    test_node.sync_with_ping()
    with mininode_lock:
        assert "getdata" not in test_node.last_message

    # Announcing one more block on fork should trigger direct fetch for
    # both blocks (same work as tip)
    test_node.send_header_for_blocks(blocks[1:2])
    test_node.sync_with_ping()
    test_node.wait_for_getdata([x.sha256 for x in blocks[0:2]],
                               timeout=DIRECT_FETCH_RESPONSE_TIME)

    # Announcing 16 more headers should trigger direct fetch for 14 more
    # blocks
    test_node.send_header_for_blocks(blocks[2:18])
    test_node.sync_with_ping()
    test_node.wait_for_getdata([x.sha256 for x in blocks[2:16]],
                               timeout=DIRECT_FETCH_RESPONSE_TIME)

    # Announcing 1 more header should not trigger any response
    test_node.last_message.pop("getdata", None)
    test_node.send_header_for_blocks(blocks[18:19])
    test_node.sync_with_ping()
    with mininode_lock:
        assert "getdata" not in test_node.last_message

    self.log.info("Part 4: success!")

    # Now deliver all those blocks we announced.
    [test_node.send_message(msg_block(x)) for x in blocks]

    self.log.info("Part 5: Testing handling of unconnecting headers")
    # First we test that receipt of an unconnecting header doesn't prevent
    # chain sync.
    for i in range(10):
        self.log.debug("Part 5.{}: starting...".format(i))
        test_node.last_message.pop("getdata", None)
        blocks = []
        # Create two more blocks.
        for j in range(2):
            block = create_block(tip, create_coinbase(height), block_time)
            final_tx = add_final_tx(final_tx, block)
            blocks.append(block)
            blocks[-1].solve()
            tip = blocks[-1].sha256
            block_time += 1
            height += 1
        # Send the header of the second block -> this won't connect.
        with mininode_lock:
            test_node.last_message.pop("getheaders", None)
        test_node.send_header_for_blocks([blocks[1]])
        test_node.wait_for_getheaders()
        test_node.send_header_for_blocks(blocks)
        test_node.wait_for_getdata([x.sha256 for x in blocks])
        [test_node.send_message(msg_block(x)) for x in blocks]
        test_node.sync_with_ping()
        assert_equal(int(self.nodes[0].getbestblockhash(), 16),
                     blocks[1].sha256)

    blocks = []
    # Now we test that if we repeatedly don't send connecting headers, we
    # don't go into an infinite loop trying to get them to connect.
    MAX_UNCONNECTING_HEADERS = 10
    for j in range(MAX_UNCONNECTING_HEADERS + 1):
        block = create_block(tip, create_coinbase(height), block_time)
        final_tx = add_final_tx(final_tx, block)
        blocks.append(block)
        blocks[-1].solve()
        tip = blocks[-1].sha256
        block_time += 1
        height += 1

    for i in range(1, MAX_UNCONNECTING_HEADERS):
        # Send a header that doesn't connect, check that we get a getheaders.
        with mininode_lock:
            test_node.last_message.pop("getheaders", None)
        test_node.send_header_for_blocks([blocks[i]])
        test_node.wait_for_getheaders()

    # Next header will connect, should re-set our count:
    test_node.send_header_for_blocks([blocks[0]])

    # Remove the first two entries (blocks[1] would connect):
    blocks = blocks[2:]

    # Now try to see how many unconnecting headers we can send
    # before we get disconnected. Should be 5*MAX_UNCONNECTING_HEADERS
    for i in range(5 * MAX_UNCONNECTING_HEADERS - 1):
        # Send a header that doesn't connect, check that we get a getheaders.
        with mininode_lock:
            test_node.last_message.pop("getheaders", None)
        test_node.send_header_for_blocks([blocks[i % len(blocks)]])
        test_node.wait_for_getheaders()

    # Eventually this stops working.
    test_node.send_header_for_blocks([blocks[-1]])

    # Should get disconnected
    test_node.wait_for_disconnect()

    self.log.info("Part 5: success!")

    # Finally, check that the inv node never received a getdata request,
    # throughout the test
    assert "getdata" not in inv_node.last_message
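# The 5 * MAX_UNCONNECTING_HEADERS budget above mirrors the node-side
# accounting (sketched here in Python; the real logic lives in the C++
# net-processing code): every MAX_UNCONNECTING_HEADERS'th unconnecting
# headers message adds 20 misbehavior points, and 100 points disconnects.
def unconnecting_headers_budget_sketch(max_unconnecting=10,
                                       points_per_offense=20,
                                       ban_threshold=100):
    """How many unconnecting headers messages a peer can send before
    disconnection, under the sketched policy."""
    offenses_needed = ban_threshold // points_per_offense  # 5
    return offenses_needed * max_unconnecting              # 50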
def run_test(self):
    # Add p2p connection to node0
    node = self.nodes[0]  # convenience reference to the node
    node.add_p2p_connection(P2PDataStore())

    # Let freicoind handle the block-final initial output logic
    self.nodes[0].generate(1)

    best_block = node.getblock(node.getbestblockhash())
    tip = int(node.getbestblockhash(), 16)
    height = best_block["height"] + 1
    block_time = best_block["time"] + 1

    self.log.info("Create a new block with an anyone-can-spend coinbase")

    height = self.nodes[0].getblockcount() + 1
    block = create_block(tip, create_coinbase(height), block_time)
    block.solve()
    # Save the coinbase for later
    block1 = block
    tip = block.sha256
    node.p2p.send_blocks_and_test([block1], node, success=True)

    self.log.info("Mature the block.")
    node.generatetoaddress(100, node.get_deterministic_priv_key().address)

    best_block = node.getblock(node.getbestblockhash())
    tip = int(node.getbestblockhash(), 16)
    height = best_block["height"] + 1
    block_time = best_block["time"] + 1

    final_tx = get_final_tx_info(node)

    # Use merkle-root malleability to generate an invalid block with
    # same blockheader.
    # Manufacture a block with 3 transactions (coinbase, spend of prior
    # coinbase, spend of that spend). Duplicate the 3rd transaction to
    # leave merkle root and blockheader unchanged but invalidate the block.
    self.log.info("Test merkle root malleability.")

    block2 = create_block(tip, create_coinbase(height), block_time)
    block_time += 1

    # b'\x51' is OP_TRUE
    tx1 = create_tx_with_script(block1.vtx[0], 0, script_sig=b'\x51',
                                amount=50 * COIN)
    tx2 = create_tx_with_script(tx1, 0, script_sig=b'\x51',
                                amount=50 * COIN)

    block2.vtx.extend([tx1])
    block2.hashMerkleRoot = block2.calc_merkle_root()
    add_final_tx(final_tx, block2)
    block2.rehash()
    block2.solve()
    orig_hash = block2.sha256
    block2_orig = copy.deepcopy(block2)

    # Mutate block 2
    block2.vtx.append(block2.vtx[-1])
    assert_equal(block2.hashMerkleRoot, block2.calc_merkle_root())
    assert_equal(orig_hash, block2.rehash())
    assert block2_orig.vtx != block2.vtx

    node.p2p.send_blocks_and_test([block2], node, success=False,
                                  reject_reason='bad-txns-duplicate')

    # Check transactions for duplicate inputs
    self.log.info("Test duplicate input block.")

    block2_orig.vtx[1].vin.append(block2_orig.vtx[1].vin[0])
    block2_orig.vtx[1].rehash()
    block2_orig.hashMerkleRoot = block2_orig.calc_merkle_root()
    block2_orig.rehash()
    block2_orig.solve()
    node.p2p.send_blocks_and_test([block2_orig], node, success=False,
                                  reject_reason='bad-txns-inputs-duplicate')

    self.log.info("Test very broken block.")

    block3 = create_block(tip, create_coinbase(height), block_time)
    block_time += 1
    block3.vtx[0].vout[0].nValue = 1000000 * COIN  # Too high!
    block3.vtx[0].sha256 = None
    block3.vtx[0].calc_sha256()
    block3.hashMerkleRoot = block3.calc_merkle_root()
    final_tx = add_final_tx(final_tx, block3)
    block3.rehash()
    block3.solve()

    node.p2p.send_blocks_and_test([block3], node, success=False,
                                  reject_reason='bad-cb-amount')
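# Why duplicating the last transaction leaves the merkle root unchanged
# (CVE-2012-2459): when a level of the tree has an odd number of hashes,
# the last hash is paired with itself, so [a, b, c] and [a, b, c, c]
# produce the same root. A minimal sketch, assuming the framework's
# hash256 (double-SHA256) helper:
def merkle_root_sketch(hashes):
    hashes = list(hashes)
    while len(hashes) > 1:
        if len(hashes) % 2:
            hashes.append(hashes[-1])  # odd count: pair the last with itself
        hashes = [hash256(hashes[i] + hashes[i + 1])
                  for i in range(0, len(hashes), 2)]
    return hashes[0]

# merkle_root_sketch([a, b, c]) == merkle_root_sketch([a, b, c, c]) for any
# three leaf hashes, which is exactly the malleation block2 exploits.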
def run_test(self):
    self.nodes[0].add_p2p_connection(P2PDataStore())

    self.log.info("Generate blocks in the past for coinbase outputs.")
    long_past_time = int(time.time()) - 600 * 1000  # enough to build up to 1000 blocks 10 minutes apart without worrying about getting into the future
    self.nodes[0].setmocktime(long_past_time - 100)  # enough so that the generated blocks will still all be before long_past_time
    self.coinbase_blocks = self.nodes[0].generate(1 + 16 + 2 * 32 + 1)  # 82 blocks generated for inputs
    self.nodes[0].setmocktime(0)  # set time back to present so yielded blocks aren't in the future as we advance last_block_time
    self.tipheight = 82  # height of the next block to build
    self.last_block_time = long_past_time
    self.tip = int(self.nodes[0].getbestblockhash(), 16)
    self.nodeaddress = self.nodes[0].getnewaddress()

    self.initial_coinbase_hash = self.nodes[0].getblock(
        self.coinbase_blocks[0])['tx'][0]
    self.final_tx = [{
        'txid': self.initial_coinbase_hash,
        'vout': 0,
        'amount': 0,
    }]

    self.log.info("Test that the csv softfork is DEFINED")
    assert_equal(
        get_bip9_status(self.nodes[0], 'locktime')['status'], 'defined')
    test_blocks = self.generate_blocks(61, 4)
    self.sync_blocks(test_blocks)

    self.log.info("Advance from DEFINED to STARTED, height = 143")
    assert_equal(
        get_bip9_status(self.nodes[0], 'locktime')['status'], 'started')

    self.log.info("Fail to achieve LOCKED_IN")
    # 100 out of 144 signal bit 0. Use a variety of bits to simulate multiple parallel softforks
    test_blocks = self.generate_blocks(50, 536870913)  # 0x20000001 (signalling ready)
    test_blocks = self.generate_blocks(20, 4, test_blocks)  # 0x00000004 (signalling not)
    test_blocks = self.generate_blocks(50, 536871169, test_blocks)  # 0x20000101 (signalling ready)
    test_blocks = self.generate_blocks(24, 536936448, test_blocks)  # 0x20010000 (signalling not)
    self.sync_blocks(test_blocks)

    self.log.info("Failed to advance past STARTED, height = 287")
    assert_equal(
        get_bip9_status(self.nodes[0], 'locktime')['status'], 'started')

    self.log.info("Generate blocks to achieve LOCK-IN")
    # 108 out of 144 signal bit 0 to achieve lock-in
    # using a variety of bits to simulate multiple parallel softforks
    test_blocks = self.generate_blocks(58, 536870913)  # 0x20000001 (signalling ready)
    test_blocks = self.generate_blocks(26, 4, test_blocks)  # 0x00000004 (signalling not)
    test_blocks = self.generate_blocks(50, 536871169, test_blocks)  # 0x20000101 (signalling ready)
    test_blocks = self.generate_blocks(10, 536936448, test_blocks)  # 0x20010000 (signalling not)
    self.sync_blocks(test_blocks)

    self.log.info("Advanced from STARTED to LOCKED_IN, height = 431")
    assert_equal(
        get_bip9_status(self.nodes[0], 'locktime')['status'], 'locked_in')

    # Generate 140 more version 4 blocks
    test_blocks = self.generate_blocks(140, 4)
    self.sync_blocks(test_blocks)

    # Inputs at height = 572
    #
    # Put inputs for all tests in the chain at height 572 (tip now = 571) (time increases by 600s per block)
    # Note we reuse inputs for v1 and v2 txs so must test these separately
    # 16 normal inputs
    bip68inputs = []
    for i in range(16):
        bip68inputs.append(
            send_generic_input_tx(self.nodes[0], self.coinbase_blocks,
                                  self.nodeaddress))

    # 2 sets of 16 inputs with 10 OP_CSV OP_DROP (actually will be prepended to spending scriptSig)
    bip112basicinputs = []
    for j in range(2):
        inputs = []
        for i in range(16):
            inputs.append(
                send_generic_input_tx(self.nodes[0], self.coinbase_blocks,
                                      self.nodeaddress))
        bip112basicinputs.append(inputs)

    # 2 sets of 16 varied inputs with (relative_lock_time) OP_CSV OP_DROP (actually will be prepended to spending scriptSig)
    bip112diverseinputs = []
    for j in range(2):
        inputs = []
        for i in range(16):
            inputs.append(
                send_generic_input_tx(self.nodes[0], self.coinbase_blocks,
                                      self.nodeaddress))
        bip112diverseinputs.append(inputs)

    # 1 special input with -1 OP_CSV OP_DROP (actually will be prepended to spending scriptSig)
    bip112specialinput = send_generic_input_tx(self.nodes[0],
                                               self.coinbase_blocks,
                                               self.nodeaddress)

    # 1 normal input
    bip113input = send_generic_input_tx(self.nodes[0], self.coinbase_blocks,
                                        self.nodeaddress)

    self.nodes[0].setmocktime(self.last_block_time + 600)
    inputblockhash = self.nodes[0].generate(1)[0]  # 1 block generated for inputs to be in chain at height 572
    self.nodes[0].setmocktime(0)
    self.tip = int(inputblockhash, 16)
    self.tipheight += 1
    self.last_block_time += 600
    self.final_tx = get_final_tx_info(self.nodes[0])
    assert_equal(len(self.nodes[0].getblock(inputblockhash, True)["tx"]),
                 1 + 82 + 1)

    # 2 more version 4 blocks
    test_blocks = self.generate_blocks(2, 4)
    self.sync_blocks(test_blocks)

    self.log.info(
        "Not yet advanced to ACTIVE, height = 574 (will activate for block 576, not 575)")
    assert_equal(
        get_bip9_status(self.nodes[0], 'locktime')['status'], 'locked_in')

    # Test both version 1 and version 2 transactions for all tests
    # BIP113 test transaction will be modified before each use to put in appropriate block time
    bip113tx_v1 = create_transaction(self.nodes[0], bip113input,
                                     self.nodeaddress,
                                     amount=Decimal("49.98"))
    bip113tx_v1.vin[0].nSequence = 0xFFFFFFFE
    bip113tx_v1.nVersion = 1
    bip113tx_v2 = create_transaction(self.nodes[0], bip113input,
                                     self.nodeaddress,
                                     amount=Decimal("49.98"))
    bip113tx_v2.vin[0].nSequence = 0xFFFFFFFE
    bip113tx_v2.nVersion = 2

    # For BIP68 test all 16 relative sequence locktimes
    bip68txs_v1 = create_bip68txs(self.nodes[0], bip68inputs, 1,
                                  self.nodeaddress)
    bip68txs_v2 = create_bip68txs(self.nodes[0], bip68inputs, 2,
                                  self.nodeaddress)

    # For BIP112 test:
    # 16 relative sequence locktimes of 10 against 10 OP_CSV OP_DROP inputs
    bip112txs_vary_nSequence_v1 = create_bip112txs(self.nodes[0],
                                                   bip112basicinputs[0],
                                                   False, 1,
                                                   self.nodeaddress)
    bip112txs_vary_nSequence_v2 = create_bip112txs(self.nodes[0],
                                                   bip112basicinputs[0],
                                                   False, 2,
                                                   self.nodeaddress)
    # 16 relative sequence locktimes of 9 against 10 OP_CSV OP_DROP inputs
    bip112txs_vary_nSequence_9_v1 = create_bip112txs(
        self.nodes[0], bip112basicinputs[1], False, 1, self.nodeaddress, -1)
    bip112txs_vary_nSequence_9_v2 = create_bip112txs(
        self.nodes[0], bip112basicinputs[1], False, 2, self.nodeaddress, -1)
    # sequence lock time of 10 against 16 (relative_lock_time) OP_CSV OP_DROP inputs
    bip112txs_vary_OP_CSV_v1 = create_bip112txs(self.nodes[0],
                                                bip112diverseinputs[0],
                                                True, 1, self.nodeaddress)
    bip112txs_vary_OP_CSV_v2 = create_bip112txs(self.nodes[0],
                                                bip112diverseinputs[0],
                                                True, 2, self.nodeaddress)
    # sequence lock time of 9 against 16 (relative_lock_time) OP_CSV OP_DROP inputs
    bip112txs_vary_OP_CSV_9_v1 = create_bip112txs(self.nodes[0],
                                                  bip112diverseinputs[1],
                                                  True, 1,
                                                  self.nodeaddress, -1)
    bip112txs_vary_OP_CSV_9_v2 = create_bip112txs(self.nodes[0],
                                                  bip112diverseinputs[1],
                                                  True, 2,
                                                  self.nodeaddress, -1)
    # -1 OP_CSV OP_DROP input
    bip112tx_special_v1 = create_bip112special(self.nodes[0],
                                               bip112specialinput, 1,
                                               self.nodeaddress)
    bip112tx_special_v2 = create_bip112special(self.nodes[0],
                                               bip112specialinput, 2,
                                               self.nodeaddress)

    self.log.info("TESTING")

    self.log.info("Pre-Soft Fork Tests. All txs should pass.")
All txs should pass.") self.log.info("Test version 1 txs") success_txs = [] # add BIP113 tx and -1 CSV tx bip113tx_v1.nLockTime = self.last_block_time - 600 * 5 # = MTP of prior block (not <) but < time put on current block bip113signed1 = sign_transaction(self.nodes[0], bip113tx_v1) success_txs.append(bip113signed1) success_txs.append(bip112tx_special_v1) # add BIP 68 txs success_txs.extend(all_rlt_txs(bip68txs_v1)) # add BIP 112 with seq=10 txs success_txs.extend(all_rlt_txs(bip112txs_vary_nSequence_v1)) success_txs.extend(all_rlt_txs(bip112txs_vary_OP_CSV_v1)) # try BIP 112 with seq=9 txs success_txs.extend(all_rlt_txs(bip112txs_vary_nSequence_9_v1)) success_txs.extend(all_rlt_txs(bip112txs_vary_OP_CSV_9_v1)) self.sync_blocks([self.create_test_block(success_txs)]) self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash()) self.log.info("Test version 2 txs") success_txs = [] # add BIP113 tx and -1 CSV tx bip113tx_v2.nLockTime = self.last_block_time - 600 * 5 # = MTP of prior block (not <) but < time put on current block bip113signed2 = sign_transaction(self.nodes[0], bip113tx_v2) success_txs.append(bip113signed2) success_txs.append(bip112tx_special_v2) # add BIP 68 txs success_txs.extend(all_rlt_txs(bip68txs_v2)) # add BIP 112 with seq=10 txs success_txs.extend(all_rlt_txs(bip112txs_vary_nSequence_v2)) success_txs.extend(all_rlt_txs(bip112txs_vary_OP_CSV_v2)) # try BIP 112 with seq=9 txs success_txs.extend(all_rlt_txs(bip112txs_vary_nSequence_9_v2)) success_txs.extend(all_rlt_txs(bip112txs_vary_OP_CSV_9_v2)) self.sync_blocks([self.create_test_block(success_txs)]) self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash()) # 1 more version 4 block to get us to height 575 so the fork should now be active for the next block test_blocks = self.generate_blocks(1, 4) self.sync_blocks(test_blocks) assert_equal( get_bip9_status(self.nodes[0], 'locktime')['status'], 'active') self.log.info("Post-Soft Fork Tests.") self.log.info("BIP 113 tests") # BIP 113 tests should now fail regardless of version number if nLockTime isn't satisfied by new rules bip113tx_v1.nLockTime = self.last_block_time - 600 * 5 # = MTP of prior block (not <) but < time put on current block bip113signed1 = sign_transaction(self.nodes[0], bip113tx_v1) bip113tx_v2.nLockTime = self.last_block_time - 600 * 5 # = MTP of prior block (not <) but < time put on current block bip113signed2 = sign_transaction(self.nodes[0], bip113tx_v2) for bip113tx in [bip113signed1, bip113signed2]: self.sync_blocks([self.create_test_block([bip113tx])], success=False) # BIP 113 tests should now pass if the locktime is < MTP bip113tx_v1.nLockTime = self.last_block_time - 600 * 5 - 1 # < MTP of prior block bip113signed1 = sign_transaction(self.nodes[0], bip113tx_v1) bip113tx_v2.nLockTime = self.last_block_time - 600 * 5 - 1 # < MTP of prior block bip113signed2 = sign_transaction(self.nodes[0], bip113tx_v2) for bip113tx in [bip113signed1, bip113signed2]: self.sync_blocks([self.create_test_block([bip113tx])]) self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash()) # Next block height = 580 after 4 blocks of random version test_blocks = self.generate_blocks(4, 1234) self.sync_blocks(test_blocks) self.log.info("BIP 68 tests") self.log.info("Test version 2 txs") # All txs with SEQUENCE_LOCKTIME_DISABLE_FLAG set pass bip68success_txs = [tx['tx'] for tx in bip68txs_v2 if tx['sdf']] self.sync_blocks([self.create_test_block(bip68success_txs)]) self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash()) # All txs without flag fail as we are at 
    bip68timetxs = [
        tx['tx'] for tx in bip68txs_v2 if not tx['sdf'] and tx['stf']
    ]
    for tx in bip68timetxs:
        self.sync_blocks([self.create_test_block([tx])], success=False)

    bip68heighttxs = [
        tx['tx'] for tx in bip68txs_v2 if not tx['sdf'] and not tx['stf']
    ]
    for tx in bip68heighttxs:
        self.sync_blocks([self.create_test_block([tx])], success=False)

    # Advance one block to 581
    test_blocks = self.generate_blocks(1, 1234)
    self.sync_blocks(test_blocks)

    # Height txs should fail and time txs should now pass 9 * 600 > 10 * 512
    bip68success_txs.extend(bip68timetxs)
    self.sync_blocks([self.create_test_block(bip68success_txs)])
    self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash())
    for tx in bip68heighttxs:
        self.sync_blocks([self.create_test_block([tx])], success=False)

    # Advance one block to 582
    test_blocks = self.generate_blocks(1, 1234)
    self.sync_blocks(test_blocks)

    # All BIP 68 txs should pass
    bip68success_txs.extend(bip68heighttxs)
    self.sync_blocks([self.create_test_block(bip68success_txs)])
    self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash())

    ## Note: The following tests of BIP 112 (checksequenceverify)
    ## have been modified to assume that BIP 112 has NOT
    ## activated. This should be reverted back to the
    ## original behaviour and the tests modified to use
    ## segwit script when CHECKSEQUENCEVERIFY is finally
    ## deployed as part of the segwit script update.

    self.log.info("BIP 112 tests")
    self.log.info("Test version 1 txs")

    # -1 OP_CSV tx should fail
    self.sync_blocks([self.create_test_block([bip112tx_special_v1])])
    # If SEQUENCE_LOCKTIME_DISABLE_FLAG is set in argument to OP_CSV, version 1 txs should still pass
    success_txs = [
        tx['tx'] for tx in bip112txs_vary_OP_CSV_v1 if tx['sdf']
    ]
    success_txs += [
        tx['tx'] for tx in bip112txs_vary_OP_CSV_9_v1 if tx['sdf']
    ]
    self.sync_blocks([self.create_test_block(success_txs)], success=False)
    self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash())

    # If SEQUENCE_LOCKTIME_DISABLE_FLAG is unset in argument to OP_CSV, version 1 txs should now fail
    fail_txs = all_rlt_txs(bip112txs_vary_nSequence_v1)
    fail_txs += all_rlt_txs(bip112txs_vary_nSequence_9_v1)
    fail_txs += [
        tx['tx'] for tx in bip112txs_vary_OP_CSV_v1 if not tx['sdf']
    ]
    fail_txs += [
        tx['tx'] for tx in bip112txs_vary_OP_CSV_9_v1 if not tx['sdf']
    ]
    for tx in fail_txs:
        self.sync_blocks([self.create_test_block([tx])], success=False)

    self.log.info("Test version 2 txs")

    # -1 OP_CSV tx should fail
    self.sync_blocks([self.create_test_block([bip112tx_special_v2])],
                     success=False)

    # If SEQUENCE_LOCKTIME_DISABLE_FLAG is set in argument to OP_CSV, version 2 txs should pass (all sequence locks are met)
    success_txs = [
        tx['tx'] for tx in bip112txs_vary_OP_CSV_v2 if tx['sdf']
    ]
    success_txs += [
        tx['tx'] for tx in bip112txs_vary_OP_CSV_9_v2 if tx['sdf']
    ]
    self.sync_blocks([self.create_test_block(success_txs)], success=False)
    self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash())

    # SEQUENCE_LOCKTIME_DISABLE_FLAG is unset in argument to OP_CSV for all remaining txs ##

    # All txs with nSequence 9 should fail either due to earlier mismatch or failing the CSV check
    fail_txs = all_rlt_txs(bip112txs_vary_nSequence_9_v2)
    fail_txs += [
        tx['tx'] for tx in bip112txs_vary_OP_CSV_9_v2 if not tx['sdf']
    ]
    for tx in fail_txs:
        self.sync_blocks([self.create_test_block([tx])], success=False)

    # If SEQUENCE_LOCKTIME_DISABLE_FLAG is set in nSequence, tx should fail
    fail_txs = [
        tx['tx'] for tx in bip112txs_vary_nSequence_v2 if tx['sdf']
    ]
    for tx in fail_txs:
        self.sync_blocks([self.create_test_block([tx])], success=False)

    # If sequencelock types mismatch, tx should fail
    fail_txs = [
        tx['tx'] for tx in bip112txs_vary_nSequence_v2
        if not tx['sdf'] and tx['stf']
    ]
    fail_txs += [
        tx['tx'] for tx in bip112txs_vary_OP_CSV_v2
        if not tx['sdf'] and tx['stf']
    ]
    for tx in fail_txs:
        self.sync_blocks([self.create_test_block([tx])], success=False)

    # Remaining txs should pass, just test masking works properly
    success_txs = [
        tx['tx'] for tx in bip112txs_vary_nSequence_v2
        if not tx['sdf'] and not tx['stf']
    ]
    success_txs += [
        tx['tx'] for tx in bip112txs_vary_OP_CSV_v2
        if not tx['sdf'] and not tx['stf']
    ]
    self.sync_blocks([self.create_test_block(success_txs)], success=False)
    self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash())

    # Additional test, of checking that comparison of two time types works properly
    time_txs = []
    for tx in [
            tx['tx'] for tx in bip112txs_vary_OP_CSV_v2
            if not tx['sdf'] and tx['stf']
    ]:
        tx.vin[0].nSequence = BASE_RELATIVE_LOCKTIME | SEQ_TYPE_FLAG
        signtx = sign_transaction(self.nodes[0], tx)
        time_txs.append(signtx)

    self.sync_blocks([self.create_test_block(time_txs)], success=False)
    self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash())
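# The "- 600 * 5" arithmetic above relies on median-time-past: the median
# of the last 11 block timestamps, which with a constant 600 s spacing
# sits exactly 5 blocks (3000 s) behind the tip. A sketch of computing it
# over RPC (illustrative helper; getblockheader also reports it directly
# as "mediantime"):
def median_time_past_sketch(node):
    tip = node.getbestblockhash()
    times = []
    for _ in range(11):
        header = node.getblockheader(tip)
        times.append(header["time"])
        tip = header["previousblockhash"]
    return sorted(times)[5]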
def test_sequence_lock_unconfirmed_inputs(self):
    # Store height so we can easily reset the chain at the end of the test
    cur_height = self.nodes[0].getblockcount()

    # Create a mempool tx.
    txid = self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), 2)
    tx1 = FromHex(CTransaction(), self.nodes[0].getrawtransaction(txid))
    tx1.rehash()

    # Anyone-can-spend mempool tx.
    # Sequence lock of 0 should pass.
    tx2 = CTransaction()
    tx2.nVersion = 2
    tx2.vin = [CTxIn(COutPoint(tx1.sha256, 0), nSequence=0)]
    tx2.vout = [
        CTxOut(int(tx1.vout[0].nValue - self.relayfee * COIN),
               CScript([b'a']))
    ]
    tx2_raw = self.nodes[0].signrawtransactionwithwallet(ToHex(tx2))["hex"]
    tx2 = FromHex(tx2, tx2_raw)
    tx2.rehash()

    self.nodes[0].sendrawtransaction(tx2_raw)

    # Create a spend of the 0th output of orig_tx with a sequence lock
    # of 1, and test what happens when submitting.
    # orig_tx.vout[0] must be an anyone-can-spend output
    def test_nonzero_locks(orig_tx, node, relayfee, use_height_lock):
        sequence_value = 1
        if not use_height_lock:
            sequence_value |= SEQUENCE_LOCKTIME_TYPE_FLAG

        tx = CTransaction()
        tx.nVersion = 2
        tx.vin = [
            CTxIn(COutPoint(orig_tx.sha256, 0), nSequence=sequence_value)
        ]
        tx.vout = [
            CTxOut(int(orig_tx.vout[0].nValue - relayfee * COIN),
                   CScript([b'a' * 35]))
        ]
        tx.rehash()

        if (orig_tx.hash in node.getrawmempool()):
            # sendrawtransaction should fail if the tx is in the mempool
            assert_raises_rpc_error(-26, NOT_FINAL_ERROR,
                                    node.sendrawtransaction, ToHex(tx))
        else:
            # sendrawtransaction should succeed if the tx is not in the mempool
            node.sendrawtransaction(ToHex(tx))

        return tx

    test_nonzero_locks(tx2, self.nodes[0], self.relayfee,
                       use_height_lock=True)
    test_nonzero_locks(tx2, self.nodes[0], self.relayfee,
                       use_height_lock=False)

    # Now mine some blocks, but make sure tx2 doesn't get mined.
    # Use prioritisetransaction to lower the effective feerate to 0
    self.nodes[0].prioritisetransaction(txid=tx2.hash,
                                        fee_delta=int(-self.relayfee * COIN))
    cur_time = int(time.time())
    for i in range(10):
        self.nodes[0].setmocktime(cur_time + 600)
        self.nodes[0].generate(1)
        cur_time += 600

    assert (tx2.hash in self.nodes[0].getrawmempool())

    test_nonzero_locks(tx2, self.nodes[0], self.relayfee,
                       use_height_lock=True)
    test_nonzero_locks(tx2, self.nodes[0], self.relayfee,
                       use_height_lock=False)

    # Save the block-final tx info for later reorg
    final_tx = get_final_tx_info(self.nodes[0])

    # Mine tx2, and then try again
    self.nodes[0].prioritisetransaction(txid=tx2.hash,
                                        fee_delta=int(self.relayfee * COIN))

    # Advance the time on the node so that we can test timelocks
    self.nodes[0].setmocktime(cur_time + 600)
    self.nodes[0].generate(1)
    assert (tx2.hash not in self.nodes[0].getrawmempool())

    # Now that tx2 is not in the mempool, a sequence locked spend should
    # succeed
    tx3 = test_nonzero_locks(tx2, self.nodes[0], self.relayfee,
                             use_height_lock=False)
    assert (tx3.hash in self.nodes[0].getrawmempool())

    self.nodes[0].generate(1)
    assert (tx3.hash not in self.nodes[0].getrawmempool())

    # One more test, this time using height locks
    tx4 = test_nonzero_locks(tx3, self.nodes[0], self.relayfee,
                             use_height_lock=True)
    assert (tx4.hash in self.nodes[0].getrawmempool())

    # Now try combining confirmed and unconfirmed inputs
    tx5 = test_nonzero_locks(tx4, self.nodes[0], self.relayfee,
                             use_height_lock=True)
    assert (tx5.hash not in self.nodes[0].getrawmempool())

    utxos = self.nodes[0].listunspent()
    tx5.vin.append(
        CTxIn(COutPoint(int(utxos[0]["txid"], 16), utxos[0]["vout"]),
              nSequence=1))
    tx5.vout[0].nValue += int(utxos[0]["amount"] * COIN)
    raw_tx5 = self.nodes[0].signrawtransactionwithwallet(ToHex(tx5))["hex"]

    assert_raises_rpc_error(-26, NOT_FINAL_ERROR,
                            self.nodes[0].sendrawtransaction, raw_tx5)

    # Test mempool-BIP68 consistency after reorg
    #
    # State of the transactions in the last blocks:
    # ... -> [ tx2 ] ->  [ tx3 ]
    #         tip-1        tip
    # And currently tx4 is in the mempool.
    #
    # If we invalidate the tip, tx3 should get added to the mempool, causing
    # tx4 to be removed (fails sequence-lock).
    self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash())
    assert (tx4.hash not in self.nodes[0].getrawmempool())
    assert (tx3.hash in self.nodes[0].getrawmempool())

    # Now mine 2 empty blocks to reorg out the current tip (labeled tip-1 in
    # diagram above).
    # This would cause tx2 to be added back to the mempool, which in turn causes
    # tx3 to be removed.
    tip = int(
        self.nodes[0].getblockhash(self.nodes[0].getblockcount() - 1), 16)
    height = self.nodes[0].getblockcount()
    for i in range(2):
        block = create_block(tip, create_coinbase(height), cur_time)
        final_tx = add_final_tx(final_tx, block)
        block.nVersion = 3
        block.rehash()
        block.solve()
        tip = block.sha256
        height += 1
        self.nodes[0].submitblock(ToHex(block))
        cur_time += 1

    mempool = self.nodes[0].getrawmempool()
    assert (tx3.hash not in mempool)
    assert (tx2.hash in mempool)

    # Reset the chain and get rid of the mocktimed-blocks
    self.nodes[0].setmocktime(0)
    self.nodes[0].invalidateblock(self.nodes[0].getblockhash(cur_height + 1))
    self.nodes[0].generate(10)