def test_resource_exhaustion(self):
    self.log.info("Test node stays up despite many large junk messages")
    conn = self.nodes[0].add_p2p_connection(P2PDataStore())
    conn2 = self.nodes[0].add_p2p_connection(P2PDataStore())
    msg_at_size = msg_unrecognized(str_data="b" * VALID_DATA_LIMIT)
    assert len(msg_at_size.serialize()) == MAX_PROTOCOL_MESSAGE_LENGTH

    self.log.info("(a) Send 80 messages, each of maximum valid data size (4MB)")
    for _ in range(80):
        conn.send_message(msg_at_size)

    # Check that, even though the node is being hammered by nonsense from one
    # connection, it can still service other peers in a timely way.
    self.log.info("(b) Check node still services peers in a timely way")
    for _ in range(20):
        conn2.sync_with_ping(timeout=2)

    self.log.info("(c) Wait for node to drop junk messages, while remaining connected")
    conn.sync_with_ping(timeout=400)

    # Despite being served up a bunch of nonsense, the peers should still be connected.
    assert conn.is_connected
    assert conn2.is_connected
    self.nodes[0].disconnect_p2ps()
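# The tests in this file rely on msg_unrecognized, a nonsense message type
# defined in the test itself. A minimal sketch consistent with its use above,
# assuming the framework's messages module is imported: VALID_DATA_LIMIT is
# MAX_PROTOCOL_MESSAGE_LENGTH minus the compact-size length prefix that
# ser_string prepends (5 bytes for a ~4MB payload), which is why the
# serialized-size assertion above holds exactly.
class msg_unrecognized:
    """Nonsensical message, modeled after the types in test_framework.messages."""
    msgtype = b'badmsg'

    def __init__(self, *, str_data):
        self.str_data = str_data.encode() if not isinstance(str_data, bytes) else str_data

    def serialize(self):
        return messages.ser_string(self.str_data)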
def reconnect_p2p(self):
    """Tear down and bootstrap the P2P connection to the node.

    The node gets disconnected several times in this test. This helper
    method reconnects the p2p and restarts the network thread."""
    self.nodes[0].disconnect_p2ps()
    self.nodes[0].add_p2p_connection(P2PDataStore())
def test_duplicate_version_msg(self):
    self.log.info("Test duplicate version message is ignored")
    conn = self.nodes[0].add_p2p_connection(P2PDataStore())
    with self.nodes[0].assert_debug_log(['redundant version message from peer']):
        conn.send_and_ping(msg_version())
    self.nodes[0].disconnect_p2ps()
def bootstrap_p2p(self, *, num_connections=1):
    """Add a P2P connection to the node.

    Helper to connect and wait for version handshake."""
    for _ in range(num_connections):
        self.nodes[0].add_p2p_connection(P2PDataStore(), node_outgoing=True)
def run_test(self): node = self.nodes[0] node.add_p2p_connection(P2PDataStore()) self.log.info("Sending block with invalid auxpow over P2P...") tip = node.getbestblockhash() blk, blkHash = self.createBlock() blk = self.addAuxpow(blk, blkHash, False) node.p2p.send_blocks_and_test([blk], node, force_send=True, success=False, reject_reason="high-hash") assert_equal(node.getbestblockhash(), tip) self.log.info("Sending the same block with valid auxpow...") blk = self.addAuxpow(blk, blkHash, True) node.p2p.send_blocks_and_test([blk], node, success=True) assert_equal(node.getbestblockhash(), blkHash) self.log.info("Submitting block with invalid auxpow...") tip = node.getbestblockhash() blk, blkHash = self.createBlock() blk = self.addAuxpow(blk, blkHash, False) assert_equal(node.submitblock(blk.serialize().hex()), "high-hash") assert_equal(node.getbestblockhash(), tip) self.log.info("Submitting block with valid auxpow...") blk = self.addAuxpow(blk, blkHash, True) assert_equal(node.submitblock(blk.serialize().hex()), None) assert_equal(node.getbestblockhash(), blkHash)
def bootstrap_p2p(self, *, num_connections=1):
    """Add a P2P connection to the node.

    Helper to connect and wait for version handshake."""
    for node in self.nodes:
        for _ in range(num_connections):
            node.add_p2p_connection(P2PDataStore())
def bootstrap_p2p(self, *, num_connections=1):
    """Add a P2P connection to the node.

    Helper to connect and wait for version handshake."""
    for _ in range(num_connections):
        self.nodes[0].add_p2p_connection(P2PDataStore())
    for p2p in self.nodes[0].p2ps:
        p2p.wait_for_getheaders()
def check_tx_relay(self):
    block_op_true = self.nodes[0].getblock(
        self.nodes[0].generatetoaddress(100, ADDRESS_ECREG_P2SH_OP_TRUE)[0])
    self.sync_all()

    self.log.debug(
        "Create a connection from a forcerelay peer that rebroadcasts raw txs")
    # A python mininode is needed to send the raw transaction directly.
    # If a full node was used, it could only rebroadcast via the inv-getdata
    # mechanism. However, even for forcerelay connections, a full node would
    # currently not request a txid that is already in the mempool.
    self.restart_node(1, extra_args=["-whitelist=forcerelay@127.0.0.1"])
    p2p_rebroadcast_wallet = self.nodes[1].add_p2p_connection(P2PDataStore())

    self.log.debug("Send a tx from the wallet initially")
    tx = FromHex(CTransaction(),
                 self.nodes[0].createrawtransaction(
                     inputs=[{'txid': block_op_true['tx'][0], 'vout': 0}],
                     outputs=[{ADDRESS_ECREG_P2SH_OP_TRUE: 50}]))
    # push the one byte script to the stack
    tx.vin[0].scriptSig = SCRIPTSIG_OP_TRUE
    pad_tx(tx)
    txid = tx.rehash()

    self.log.debug("Wait until tx is in node[1]'s mempool")
    p2p_rebroadcast_wallet.send_txs_and_test([tx], self.nodes[1])

    self.log.debug(
        "Check that node[1] will send the tx to node[0] even though it"
        " is already in the mempool")
    self.connect_nodes(1, 0)
    with self.nodes[1].assert_debug_log(
            ["Force relaying tx {} from peer=0".format(txid)]):
        p2p_rebroadcast_wallet.send_txs_and_test([tx], self.nodes[1])
        self.wait_until(lambda: txid in self.nodes[0].getrawmempool())

    self.log.debug("Check that node[1] will not send an invalid tx to node[0]")
    tx.vout[0].nValue += 1
    txid = tx.rehash()
    # Send the transaction twice. The first time, it'll be rejected by ATMP
    # because it conflicts with a mempool transaction. The second time,
    # it'll be in the recentRejects filter.
    p2p_rebroadcast_wallet.send_txs_and_test(
        [tx],
        self.nodes[1],
        success=False,
        reject_reason=f'{txid} from peer=0 was not accepted: txn-mempool-conflict',
    )
    p2p_rebroadcast_wallet.send_txs_and_test(
        [tx],
        self.nodes[1],
        success=False,
        reject_reason='Not relaying non-mempool transaction '
                      '{} from forcerelay peer=0'.format(txid),
    )
def check_for_ban_on_rejected_block(node, block, reject_reason=None):
    """Check we are disconnected when sending a block that the node rejects,
    then reconnect after.

    (Can't actually get banned, since bitcoind won't ban local peers.)"""
    node.p2p.send_blocks_and_test(
        [block], node, success=False, reject_reason=reject_reason,
        expect_disconnect=True)
    node.disconnect_p2ps()
    node.add_p2p_connection(P2PDataStore())
def run_test(self):
    capturedir = os.path.join(self.nodes[0].datadir, "regtest/message_capture")
    # Connect a node so that the handshake occurs
    self.nodes[0].add_p2p_connection(P2PDataStore())
    self.nodes[0].disconnect_p2ps()
    recv_file = glob.glob(os.path.join(capturedir, "*/msgs_recv.dat"))[0]
    mini_parser(recv_file)
    sent_file = glob.glob(os.path.join(capturedir, "*/msgs_sent.dat"))[0]
    mini_parser(sent_file)
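# A hedged sketch of the capture-file layout that mini_parser presumably walks,
# assuming the message-capture record format of a microsecond timestamp, a
# zero-padded message type, and a payload length before each raw payload
# (parse_capture is an illustrative name, not framework code):
import struct

def parse_capture(path):
    with open(path, 'rb') as f:
        # Each record: 8-byte time (us), 12-byte msgtype, 4-byte LE length, payload.
        while header := f.read(8 + 12 + 4):
            time_us, = struct.unpack('<q', header[:8])
            msgtype = header[8:20].rstrip(b'\x00')
            length, = struct.unpack('<I', header[20:24])
            payload = f.read(length)
            yield time_us, msgtype, payload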
def test_size(self):
    self.log.info("Test message with oversized payload disconnects peer")
    conn = self.nodes[0].add_p2p_connection(P2PDataStore())
    with self.nodes[0].assert_debug_log(['Header error: Size too large (badmsg, 33554433 bytes)']):
        msg = msg_unrecognized(str_data="d" * (VALID_DATA_LIMIT + 1))
        msg = conn.build_message(msg)
        conn.send_raw_message(msg)
    conn.wait_for_disconnect(timeout=1)
    self.nodes[0].disconnect_p2ps()
def check_tx_relay(self):
    block_op_true = self.nodes[0].getblock(
        self.nodes[0].generatetoaddress(100, ADDRESS_BCRT1_P2WSH_OP_TRUE)[0])
    self.sync_all()

    self.log.debug("Create a connection from a forcerelay peer that rebroadcasts raw txs")
    # A test framework p2p connection is needed to send the raw transaction
    # directly. If a full node was used, it could only rebroadcast via the
    # inv-getdata mechanism. However, even for forcerelay connections, a full
    # node would currently not request a txid that is already in the mempool.
    self.restart_node(1, extra_args=["-whitelist=forcerelay@127.0.0.1"])
    p2p_rebroadcast_wallet = self.nodes[1].add_p2p_connection(P2PDataStore())

    self.log.debug("Send a tx from the wallet initially")
    tx = FromHex(
        CTransaction(),
        self.nodes[0].createrawtransaction(
            inputs=[{'txid': block_op_true['tx'][0], 'vout': 0}],
            outputs=[{ADDRESS_BCRT1_P2WSH_OP_TRUE: 5}]),
    )
    tx.wit.vtxinwit = [CTxInWitness()]
    tx.wit.vtxinwit[0].scriptWitness.stack = [CScript([OP_TRUE])]
    txid = tx.rehash()

    self.log.debug("Wait until tx is in node[1]'s mempool")
    p2p_rebroadcast_wallet.send_txs_and_test([tx], self.nodes[1])

    self.log.debug("Check that node[1] will send the tx to node[0] even though it is already in the mempool")
    connect_nodes(self.nodes[1], 0)
    with self.nodes[1].assert_debug_log(["Force relaying tx {} from peer=0".format(txid)]):
        p2p_rebroadcast_wallet.send_txs_and_test([tx], self.nodes[1])
        self.wait_until(lambda: txid in self.nodes[0].getrawmempool())

    self.log.debug("Check that node[1] will not send an invalid tx to node[0]")
    tx.vout[0].nValue += 1
    txid = tx.rehash()
    p2p_rebroadcast_wallet.send_txs_and_test(
        [tx],
        self.nodes[1],
        success=False,
        reject_reason='Not relaying non-mempool transaction {} from forcerelay peer=0'.format(txid),
    )
def test_magic_bytes(self):
    self.log.info("Test message with invalid magic bytes disconnects peer")
    conn = self.nodes[0].add_p2p_connection(P2PDataStore())
    with self.nodes[0].assert_debug_log(['Header error: Wrong MessageStart ffffffff received']):
        msg = conn.build_message(msg_unrecognized(str_data="d"))
        # modify magic bytes
        msg = b'\xff' * 4 + msg[4:]
        conn.send_raw_message(msg)
        conn.wait_for_disconnect(timeout=1)
    self.nodes[0].disconnect_p2ps()
def test_magic_bytes(self):
    self.log.info("Test message with invalid magic bytes disconnects peer")
    conn = self.nodes[0].add_p2p_connection(P2PDataStore())
    with self.nodes[0].assert_debug_log(['HEADER ERROR - MESSAGESTART (badmsg, 2 bytes), received ffffffff']):
        msg = conn.build_message(msg_unrecognized(str_data="d"))
        # modify magic bytes
        msg = b'\xff' * 4 + msg[4:]
        conn.send_raw_message(msg)
        conn.wait_for_disconnect(timeout=1)
    self.nodes[0].disconnect_p2ps()
def test_msgtype(self):
    self.log.info("Test message with invalid message type logs an error")
    conn = self.nodes[0].add_p2p_connection(P2PDataStore())
    with self.nodes[0].assert_debug_log(['PROCESSMESSAGE: ERRORS IN HEADER']):
        msg = msg_unrecognized(str_data="d")
        msg.msgtype = b'\xff' * 12
        msg = conn.build_message(msg)
        # Modify msgtype
        msg = msg[:7] + b'\x00' + msg[7 + 1:]
        conn.send_raw_message(msg)
        conn.sync_with_ping(timeout=1)
    self.nodes[0].disconnect_p2ps()
def bootstrap_p2p(self):
    """Add a P2P connection to the node.

    Helper to connect and wait for version handshake."""
    self.nodes[0].add_p2p_connection(P2PDataStore())
    # We need to wait for the initial getheaders from the peer before we
    # start populating our blockstore. If we don't, then we may run ahead
    # to the next subtest before we receive the getheaders. We'd then send
    # an INV for the next block and receive two getheaders - one for the
    # IBD and one for the INV. We'd respond to both and could get
    # unexpectedly disconnected if the DoS score for that error is 50.
    self.nodes[0].p2p.wait_for_getheaders(timeout=5)
def test_checksum(self):
    self.log.info("Test message with invalid checksum logs an error")
    conn = self.nodes[0].add_p2p_connection(P2PDataStore())
    with self.nodes[0].assert_debug_log(['CHECKSUM ERROR (badmsg, 2 bytes), expected 78df0a04 was ffffffff']):
        msg = conn.build_message(msg_unrecognized(str_data="d"))
        # Checksum is after start bytes (4B), message type (12B), len (4B)
        cut_len = 4 + 12 + 4
        # modify checksum
        msg = msg[:cut_len] + b'\xff' * 4 + msg[cut_len + 4:]
        conn.send_raw_message(msg)
        conn.sync_with_ping(timeout=1)
    self.nodes[0].disconnect_p2ps()
def test_msgtype(self):
    self.log.info("Test message with invalid message type logs an error")
    conn = self.nodes[0].add_p2p_connection(P2PDataStore())
    with self.nodes[0].assert_debug_log(['Header error: Invalid message type']):
        msg = msg_unrecognized(str_data="d")
        msg = conn.build_message(msg)
        # Modify msgtype
        msg = msg[:7] + b'\x00' + msg[7 + 1:]
        conn.send_raw_message(msg)
        conn.sync_with_ping(timeout=1)
    # Check that traffic is accounted for (24 bytes header + 2 bytes payload)
    assert_equal(self.nodes[0].getpeerinfo()[0]['bytesrecv_per_msg']['*other*'], 26)
    self.nodes[0].disconnect_p2ps()
def test_checksum(self):
    self.log.info("Test message with invalid checksum logs an error")
    conn = self.nodes[0].add_p2p_connection(P2PDataStore())
    with self.nodes[0].assert_debug_log(['Header error: Wrong checksum (badmsg, 2 bytes), expected 78df0a04 was ffffffff']):
        msg = conn.build_message(msg_unrecognized(str_data="d"))
        # Checksum is after start bytes (4B), message type (12B), len (4B)
        cut_len = 4 + 12 + 4
        # modify checksum
        msg = msg[:cut_len] + b'\xff' * 4 + msg[cut_len + 4:]
        conn.send_raw_message(msg)
        conn.sync_with_ping(timeout=1)
    # Check that traffic is accounted for (24 bytes header + 2 bytes payload)
    assert_equal(self.nodes[0].getpeerinfo()[0]['bytesrecv_per_msg']['*other*'], 26)
    self.nodes[0].disconnect_p2ps()
def test_checksum(self):
    self.log.info("Test message with invalid checksum logs an error")
    conn = self.nodes[0].add_p2p_connection(P2PDataStore())
    with self.nodes[0].assert_debug_log(['ProcessMessages(badmsg, 2 bytes): CHECKSUM ERROR expected 78df0a04 was ffffffff']):
        msg = conn.build_message(msg_unrecognized(str_data="d"))
        # Checksum is after start bytes (4B), message type (12B), len (4B)
        cut_len = 4 + 12 + 4
        # modify checksum
        msg = msg[:cut_len] + b'\xff' * 4 + msg[cut_len + 4:]
        conn.send_raw_message(msg)
        conn.wait_for_disconnect()
    # Check that after disconnect there is no peer
    assert_equal(len(self.nodes[0].getpeerinfo()), 0)
    self.nodes[0].disconnect_p2ps()
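# The offsets used in the checksum tests (cut_len = 4 + 12 + 4) come from the
# fixed v1 P2P header layout: magic(4) | command(12, zero-padded) | payload
# length(4, little-endian) | checksum(4), where the checksum is the first four
# bytes of double-SHA256 of the payload. A minimal sketch of how build_message
# assembles a header (build_header is an illustrative name, not framework code):
import hashlib
import struct

def build_header(magic: bytes, command: bytes, payload: bytes) -> bytes:
    checksum = hashlib.sha256(hashlib.sha256(payload).digest()).digest()[:4]
    return magic + command.ljust(12, b'\x00') + struct.pack('<I', len(payload)) + checksum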
def run_test (self):
    node = self.nodes[0]
    p2pStore = node.add_p2p_connection (P2PDataStore ())
    p2pGetter = node.add_p2p_connection (P2PBlockGetter ())

    self.log.info ("Adding a block with non-zero hash in the auxpow...")
    blk, blkHash = self.createBlock ()
    blk.auxpow.hashBlock = 12345678
    blkHex = blk.serialize ().hex ()
    assert_equal (node.submitblock (blkHex), None)
    assert_equal (node.getbestblockhash (), blkHash)

    self.log.info ("Retrieving block through RPC...")
    gotHex = node.getblock (blkHash, 0)
    assert gotHex != blkHex
    gotBlk = CBlock ()
    gotBlk.deserialize (BytesIO (hex_str_to_bytes (gotHex)))
    assert_equal (gotBlk.auxpow.hashBlock, 0)

    self.log.info ("Retrieving block through P2P...")
    gotBlk = p2pGetter.getBlock (blkHash)
    assert_equal (gotBlk.auxpow.hashBlock, 0)

    self.log.info ("Sending zero-hash auxpow through RPC...")
    blk, blkHash = self.createBlock ()
    blk.auxpow.hashBlock = 0
    assert_equal (node.submitblock (blk.serialize ().hex ()), None)
    assert_equal (node.getbestblockhash (), blkHash)

    self.log.info ("Sending zero-hash auxpow through P2P...")
    blk, blkHash = self.createBlock ()
    blk.auxpow.hashBlock = 0
    p2pStore.send_blocks_and_test ([blk], node, success=True)
    assert_equal (node.getbestblockhash (), blkHash)

    self.log.info ("Sending non-zero nIndex auxpow through RPC...")
    blk, blkHash = self.createBlock ()
    blk.auxpow.nIndex = 42
    assert_equal (node.submitblock (blk.serialize ().hex ()), None)
    assert_equal (node.getbestblockhash (), blkHash)

    self.log.info ("Sending non-zero nIndex auxpow through P2P...")
    blk, blkHash = self.createBlock ()
    blk.auxpow.nIndex = 42
    p2pStore.send_blocks_and_test ([blk], node, success=True)
    assert_equal (node.getbestblockhash (), blkHash)
def test_buffer(self):
    self.log.info("Test message with header split across two buffers is received")
    conn = self.nodes[0].add_p2p_connection(P2PDataStore())
    # Create valid message
    msg = conn.build_message(msg_ping(nonce=12345))
    cut_pos = 12  # Chosen at an arbitrary position within the header
    # Send message in two pieces
    before = self.nodes[0].getnettotals()['totalbytesrecv']
    conn.send_raw_message(msg[:cut_pos])
    # Wait until node has processed the first half of the message
    self.wait_until(lambda: self.nodes[0].getnettotals()['totalbytesrecv'] != before)
    middle = self.nodes[0].getnettotals()['totalbytesrecv']
    # If this assert fails, we've hit an unlikely race
    # where the test framework sent a message in between the two halves
    assert_equal(middle, before + cut_pos)
    conn.send_raw_message(msg[cut_pos:])
    conn.sync_with_ping(timeout=1)
    self.nodes[0].disconnect_p2ps()
def run_test(self): self.log.info("Check that nodes set minfilter to MAX_MONEY while still in IBD") for node in self.nodes: assert node.getblockchaininfo()['initialblockdownload'] self.wait_until(lambda: all(peer['minfeefilter'] == MAX_FEE_FILTER for peer in node.getpeerinfo())) self.log.info("Check that nodes don't send getdatas for transactions while still in IBD") peer_inver = self.nodes[0].add_p2p_connection(P2PDataStore()) txid = 0xdeadbeef peer_inver.send_and_ping(msg_inv([CInv(t=MSG_WTX, h=txid)])) # The node should not send a getdata, but if it did, it would first delay 2 seconds self.nodes[0].setmocktime(int(time.time() + NONPREF_PEER_TX_DELAY)) peer_inver.sync_send_with_ping() with p2p_lock: assert txid not in peer_inver.getdata_requests self.nodes[0].disconnect_p2ps() self.log.info("Check that nodes don't process unsolicited transactions while still in IBD") # A transaction hex pulled from tx_valid.json. There are no valid transactions since no UTXOs # exist yet, but it should be a well-formed transaction. rawhex = "0100000001b14bdcbc3e01bdaad36cc08e81e69c82e1060bc14e518db2b49aa43ad90ba260000000004a01ff473" + \ "04402203f16c6f40162ab686621ef3000b04e75418a0c0cb2d8aebeac894ae360ac1e780220ddc15ecdfc3507ac48e168" + \ "1a33eb60996631bf6bf5bc0a0682c4db743ce7ca2b01ffffffff0140420f00000000001976a914660d4ef3a743e3e696a" + \ "d990364e555c271ad504b88ac00000000" assert self.nodes[1].decoderawtransaction(rawhex) # returns a dict, should not throw tx = from_hex(CTransaction(), rawhex) peer_txer = self.nodes[0].add_p2p_connection(P2PInterface()) with self.nodes[0].assert_debug_log(expected_msgs=["received: tx"], unexpected_msgs=["was not accepted"]): peer_txer.send_and_ping(msg_tx(tx)) self.nodes[0].disconnect_p2ps() # Come out of IBD by generating a block self.generate(self.nodes[0], 1) self.log.info("Check that nodes reset minfilter after coming out of IBD") for node in self.nodes: assert not node.getblockchaininfo()['initialblockdownload'] self.wait_until(lambda: all(peer['minfeefilter'] == NORMAL_FEE_FILTER for peer in node.getpeerinfo())) self.log.info("Check that nodes process the same transaction, even when unsolicited, when no longer in IBD") peer_txer = self.nodes[0].add_p2p_connection(P2PInterface()) with self.nodes[0].assert_debug_log(expected_msgs=["was not accepted"]): peer_txer.send_and_ping(msg_tx(tx))
def run_test(self):
    # Add p2p connection to node0
    node = self.nodes[0]  # convenience reference to the node
    peer = node.add_p2p_connection(P2PDataStore())

    best_block = node.getblock(node.getbestblockhash())
    tip = int(node.getbestblockhash(), 16)
    height = best_block["height"] + 1
    block_time = best_block["time"] + 1

    self.log.info("Create a new block with an anyone-can-spend coinbase")
    height = 1
    block = create_block(tip, create_coinbase(height), block_time)
    block.solve()
    # Save the coinbase for later
    block1 = block
    tip = block.sha256
    peer.send_blocks_and_test([block1], node, success=True)

    self.log.info("Mature the block.")
    node.generatetoaddress(100, node.get_deterministic_priv_key().address)

    best_block = node.getblock(node.getbestblockhash())
    tip = int(node.getbestblockhash(), 16)
    height = best_block["height"] + 1
    block_time = best_block["time"] + 1

    # Use merkle-root malleability to generate an invalid block with
    # same blockheader (CVE-2012-2459).
    # Manufacture a block with 3 transactions (coinbase, spend of prior
    # coinbase, spend of that spend). Duplicate the 3rd transaction to
    # leave merkle root and blockheader unchanged but invalidate the block.
    # For more information on merkle-root malleability see src/consensus/merkle.cpp.
    self.log.info("Test merkle root malleability.")

    block2 = create_block(tip, create_coinbase(height), block_time)
    block_time += 1

    # b'\x51' is OP_TRUE
    tx1 = create_tx_with_script(block1.vtx[0], 0, script_sig=b'\x51', amount=50 * COIN)
    tx2 = create_tx_with_script(tx1, 0, script_sig=b'\x51', amount=50 * COIN)

    block2.vtx.extend([tx1, tx2])
    block2.hashMerkleRoot = block2.calc_merkle_root()
    block2.rehash()
    block2.solve()
    orig_hash = block2.sha256
    block2_orig = copy.deepcopy(block2)

    # Mutate block 2
    block2.vtx.append(tx2)
    assert_equal(block2.hashMerkleRoot, block2.calc_merkle_root())
    assert_equal(orig_hash, block2.rehash())
    assert block2_orig.vtx != block2.vtx

    peer.send_blocks_and_test([block2], node, success=False,
                              reject_reason='bad-txns-duplicate')

    # Check transactions for duplicate inputs (CVE-2018-17144)
    self.log.info("Test duplicate input block.")

    block2_dup = copy.deepcopy(block2_orig)
    block2_dup.vtx[2].vin.append(block2_dup.vtx[2].vin[0])
    block2_dup.vtx[2].rehash()
    block2_dup.hashMerkleRoot = block2_dup.calc_merkle_root()
    block2_dup.rehash()
    block2_dup.solve()
    peer.send_blocks_and_test([block2_dup], node, success=False,
                              reject_reason='bad-txns-inputs-duplicate')

    self.log.info("Test very broken block.")

    block3 = create_block(tip, create_coinbase(height), block_time)
    block_time += 1
    block3.vtx[0].vout[0].nValue = 100 * COIN  # Too high!
    block3.vtx[0].sha256 = None
    block3.vtx[0].calc_sha256()
    block3.hashMerkleRoot = block3.calc_merkle_root()
    block3.rehash()
    block3.solve()

    peer.send_blocks_and_test([block3], node, success=False,
                              reject_reason='bad-cb-amount')

    # Complete testing of CVE-2012-2459 by sending the original block.
    # It should be accepted even though it has the same hash as the mutated one.
    self.log.info("Test accepting original block after rejecting its mutated version.")
    peer.send_blocks_and_test([block2_orig], node, success=True, timeout=5)

    # Update tip info
    height += 1
    block_time += 1
    tip = int(block2_orig.hash, 16)

    # Complete testing of CVE-2018-17144, by checking for the inflation bug.
    # Create a block that spends the output of a tx in a previous block.
    block4 = create_block(tip, create_coinbase(height), block_time)
    tx3 = create_tx_with_script(tx2, 0, script_sig=b'\x51', amount=50 * COIN)

    # Duplicates input
    tx3.vin.append(tx3.vin[0])
    tx3.rehash()
    block4.vtx.append(tx3)
    block4.hashMerkleRoot = block4.calc_merkle_root()
    block4.rehash()
    block4.solve()
    self.log.info("Test inflation by duplicating input")
    peer.send_blocks_and_test([block4], node, success=False,
                              reject_reason='bad-txns-inputs-duplicate')
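# Why duplicating the last transaction leaves the merkle root unchanged
# (CVE-2012-2459): with an odd number of entries, each level hashes the last
# entry with itself, so [a, b, c] and [a, b, c, c] produce identical roots.
# A small self-contained demonstration (dsha256/merkle_root are illustrative
# helpers, not framework code):
import hashlib

def dsha256(b):
    return hashlib.sha256(hashlib.sha256(b).digest()).digest()

def merkle_root(hashes):
    while len(hashes) > 1:
        if len(hashes) % 2:
            hashes.append(hashes[-1])  # duplicate the last entry on odd counts
        hashes = [dsha256(hashes[i] + hashes[i + 1]) for i in range(0, len(hashes), 2)]
    return hashes[0]

leaves = [dsha256(bytes([i])) for i in range(3)]
assert merkle_root(list(leaves)) == merkle_root(leaves + [leaves[-1]])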
def run_test(self):
    node, = self.nodes
    self.nodes[0].add_p2p_connection(P2PDataStore())

    tip = self.getbestblock(node)

    self.log.info("Create some blocks with OP_1 coinbase for spending.")
    blocks = []
    for _ in range(10):
        tip = self.build_block(tip)
        blocks.append(tip)
    node.p2ps[0].send_blocks_and_test(blocks, node, success=True)
    spendable_outputs = [block.vtx[0] for block in blocks]

    self.log.info("Mature the blocks and get out of IBD.")
    node.generatetoaddress(100, node.get_deterministic_priv_key().address)

    tip = self.getbestblock(node)

    self.log.info("Setting up spends to test and mining the fundings.")
    fundings = []

    # Generate a key pair
    private_key = ECKey()
    private_key.set(b"Schnorr!" * 4, True)
    # get uncompressed public key serialization
    public_key = private_key.get_pubkey().get_bytes()

    def create_fund_and_spend_tx(dummy=OP_0, sigtype='ecdsa'):
        spendfrom = spendable_outputs.pop()

        script = CScript([OP_1, public_key, OP_1, OP_CHECKMULTISIG])

        value = spendfrom.vout[0].nValue

        # Fund transaction
        txfund = create_tx_with_script(
            spendfrom, 0, b'', amount=value, script_pub_key=script)
        txfund.rehash()
        fundings.append(txfund)

        # Spend transaction
        txspend = CTransaction()
        txspend.vout.append(CTxOut(value - 1000, CScript([OP_TRUE])))
        txspend.vin.append(CTxIn(COutPoint(txfund.sha256, 0), b''))

        # Sign the transaction
        sighashtype = SIGHASH_ALL | SIGHASH_FORKID
        hashbyte = bytes([sighashtype & 0xff])
        sighash = SignatureHashForkId(script, txspend, 0, sighashtype, value)
        if sigtype == 'schnorr':
            txsig = private_key.sign_schnorr(sighash) + hashbyte
        elif sigtype == 'ecdsa':
            txsig = private_key.sign_ecdsa(sighash) + hashbyte
        txspend.vin[0].scriptSig = CScript([dummy, txsig])
        txspend.rehash()

        return txspend

    # This is valid.
    ecdsa0tx = create_fund_and_spend_tx(OP_0, 'ecdsa')
    # This is invalid.
    ecdsa1tx = create_fund_and_spend_tx(OP_1, 'ecdsa')
    # This is invalid.
    schnorr0tx = create_fund_and_spend_tx(OP_0, 'schnorr')
    # This is valid.
    schnorr1tx = create_fund_and_spend_tx(OP_1, 'schnorr')

    tip = self.build_block(tip, fundings)
    node.p2ps[0].send_blocks_and_test([tip], node)

    self.log.info("Send a legacy ECDSA multisig into mempool.")
    node.p2ps[0].send_txs_and_test([ecdsa0tx], node)
    assert_equal(node.getrawmempool(), [ecdsa0tx.hash])

    self.log.info("Trying to mine a non-null-dummy ECDSA.")
    self.check_for_ban_on_rejected_block(
        self.build_block(tip, [ecdsa1tx]), BADINPUTS_ERROR)
    self.log.info("If we try to submit it by mempool or RPC, it is rejected and we are banned")
    assert_raises_rpc_error(-26, ECDSA_NULLDUMMY_ERROR,
                            node.sendrawtransaction, ToHex(ecdsa1tx))
    self.check_for_ban_on_rejected_tx(ecdsa1tx, ECDSA_NULLDUMMY_ERROR)

    self.log.info("Submitting a Schnorr-multisig via net, and mining it in a block")
    node.p2ps[0].send_txs_and_test([schnorr1tx], node)
    assert_equal(set(node.getrawmempool()), {ecdsa0tx.hash, schnorr1tx.hash})
    tip = self.build_block(tip, [schnorr1tx])
    node.p2ps[0].send_blocks_and_test([tip], node)

    self.log.info("That legacy ECDSA multisig is still in mempool, let's mine it")
    assert_equal(node.getrawmempool(), [ecdsa0tx.hash])
    tip = self.build_block(tip, [ecdsa0tx])
    node.p2ps[0].send_blocks_and_test([tip], node)
    assert_equal(node.getrawmempool(), [])

    self.log.info("Trying Schnorr in legacy multisig is invalid and banworthy.")
    self.check_for_ban_on_rejected_tx(schnorr0tx, SCHNORR_LEGACY_MULTISIG_ERROR)
    self.check_for_ban_on_rejected_block(
        self.build_block(tip, [schnorr0tx]), BADINPUTS_ERROR)
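# A sketch (not framework code) of the stack layout behind the four cases
# above: legacy OP_CHECKMULTISIG pops one extra ("dummy") item due to the
# original off-by-one, so a 1-of-1 spend pushes [dummy, sig]; post-upgrade a
# non-null dummy selects Schnorr mode, which yields:
#
#   scriptSig:    <dummy> <sig>       scriptPubKey: OP_1 <pubkey> OP_1 OP_CHECKMULTISIG
#
#   OP_0 dummy + ECDSA sig    -> valid   (legacy mode, null dummy)
#   OP_1 dummy + ECDSA sig    -> invalid
#   OP_0 dummy + Schnorr sig  -> invalid
#   OP_1 dummy + Schnorr sig  -> valid   (Schnorr mode)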
def run_test(self):
    node = self.nodes[0]
    node.add_p2p_connection(P2PDataStore())

    self.genesis_hash = int(node.getbestblockhash(), 16)
    self.block_heights[self.genesis_hash] = 0
    spendable_outputs = []

    # save the current tip so it can be spent by a later block
    def save_spendable_output():
        spendable_outputs.append(self.tip)

    # get an output that we previously marked as spendable
    def get_spendable_output():
        return PreviousSpendableOutput(spendable_outputs.pop(0).vtx[0], 0)

    # move the tip back to a previous block
    def tip(number):
        self.tip = self.blocks[number]

    # adds transactions to the block and updates state
    def update_block(block_number, new_transactions=None):
        if new_transactions is None:
            new_transactions = []
        block = self.blocks[block_number]
        self.add_transactions_to_block(block, new_transactions)
        old_sha256 = block.sha256
        block.hashMerkleRoot = block.calc_merkle_root()
        block.solve()
        # Update the internal state just like in next_block
        self.tip = block
        if block.sha256 != old_sha256:
            self.block_heights[block.sha256] = self.block_heights[old_sha256]
            del self.block_heights[old_sha256]
        self.blocks[block_number] = block
        return block

    # shorthand for functions
    block = self.next_block

    # Create a new block
    block(0)
    save_spendable_output()
    node.p2p.send_blocks_and_test([self.tip], node)

    # Now we need that block to mature so we can spend the coinbase.
    maturity_blocks = []
    for i in range(99):
        block(5000 + i)
        maturity_blocks.append(self.tip)
        save_spendable_output()
    node.p2p.send_blocks_and_test(maturity_blocks, node)

    # collect spendable outputs now to avoid cluttering the code later on
    out = []
    for i in range(100):
        out.append(get_spendable_output())

    # Let's build some blocks and test them.
    for i in range(17):
        n = i + 1
        node.p2p.send_blocks_and_test([block(n)], node)

    node.p2p.send_blocks_and_test([block(5556)], node)

    # Blocks with regular ordering are now rejected.
    node.p2p.send_blocks_and_test([block(5557, out[17], tx_count=16)], node,
                                  success=False, reject_reason='tx-ordering')

    # Rewind bad block.
    tip(5556)

    # After we activate the Nov 15, 2018 HF, transaction order is enforced.
    def ordered_block(block_number, spend):
        b = block(block_number, spend=spend, tx_count=16)
        make_conform_to_ctor(b)
        update_block(block_number)
        return b

    # Now that the fork activated, we need to order transactions by txid.
    node.p2p.send_blocks_and_test([ordered_block(4445, out[17])], node)
    node.p2p.send_blocks_and_test([ordered_block(4446, out[18])], node)

    # Generate a block with a duplicated transaction.
    double_tx_block = ordered_block(4447, out[19])
    assert_equal(len(double_tx_block.vtx), 16)
    double_tx_block.vtx = double_tx_block.vtx[:8] + \
        [double_tx_block.vtx[8]] + double_tx_block.vtx[8:]
    update_block(4447)
    node.p2p.send_blocks_and_test([self.tip], node, success=False,
                                  reject_reason='bad-txns-duplicate')

    # Rewind bad block.
    tip(4446)

    # Check over two blocks.
    proper_block = ordered_block(4448, out[20])
    node.p2p.send_blocks_and_test([self.tip], node)

    replay_tx_block = ordered_block(4449, out[21])
    assert_equal(len(replay_tx_block.vtx), 16)
    replay_tx_block.vtx.append(proper_block.vtx[5])
    replay_tx_block.vtx = [replay_tx_block.vtx[0]] + \
        sorted(replay_tx_block.vtx[1:], key=lambda tx: tx.get_id())
    update_block(4449)
    node.p2p.send_blocks_and_test([self.tip], node, success=False,
                                  reject_reason='bad-txns-BIP30')
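# The ordered_block helper above depends on make_conform_to_ctor from the
# framework's blocktools. A minimal sketch consistent with its use here:
# canonical transaction ordering (CTOR) keeps the coinbase first and sorts
# every other transaction by its txid.
def make_conform_to_ctor(block):
    for tx in block.vtx:
        tx.rehash()
    block.vtx = [block.vtx[0]] + sorted(block.vtx[1:], key=lambda tx: tx.get_id())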
def run_test(self):
    node = self.nodes[0]
    node.add_p2p_connection(P2PDataStore())
    node.setmocktime(REPLAY_PROTECTION_START_TIME)

    self.genesis_hash = int(node.getbestblockhash(), 16)
    self.block_heights[self.genesis_hash] = 0
    spendable_outputs = []

    # save the current tip so it can be spent by a later block
    def save_spendable_output():
        spendable_outputs.append(self.tip)

    # get an output that we previously marked as spendable
    def get_spendable_output():
        return PreviousSpendableOutput(spendable_outputs.pop(0).vtx[0], 0)

    # move the tip back to a previous block
    def tip(number):
        self.tip = self.blocks[number]

    # adds transactions to the block and updates state
    def update_block(block_number, new_transactions):
        block = self.blocks[block_number]
        block.vtx.extend(new_transactions)
        old_sha256 = block.sha256
        make_conform_to_ctor(block)
        block.hashMerkleRoot = block.calc_merkle_root()
        block.solve()
        # Update the internal state just like in next_block
        self.tip = block
        if block.sha256 != old_sha256:
            self.block_heights[block.sha256] = self.block_heights[old_sha256]
            del self.block_heights[old_sha256]
        self.blocks[block_number] = block
        return block

    # shorthand
    block = self.next_block

    # Create a new block
    block(0)
    save_spendable_output()
    node.p2p.send_blocks_and_test([self.tip], node)

    # Now we need that block to mature so we can spend the coinbase.
    maturity_blocks = []
    for i in range(99):
        block(5000 + i)
        maturity_blocks.append(self.tip)
        save_spendable_output()
    node.p2p.send_blocks_and_test(maturity_blocks, node)

    # collect spendable outputs now to avoid cluttering the code later on
    out = []
    for i in range(100):
        out.append(get_spendable_output())

    # Generate a key pair to test P2SH sigops count
    private_key = ECKey()
    private_key.generate()
    public_key = private_key.get_pubkey().get_bytes()

    # This is a little handier to use than the version in blocktools.py
    def create_fund_and_spend_tx(spend, forkvalue=0):
        # Fund transaction
        script = CScript([public_key, OP_CHECKSIG])
        txfund = create_tx_with_script(
            spend.tx, spend.n, b'', amount=50 * COIN - 1000, script_pub_key=script)
        txfund.rehash()

        # Spend transaction
        txspend = CTransaction()
        txspend.vout.append(CTxOut(50 * COIN - 2000, CScript([OP_TRUE])))
        txspend.vin.append(CTxIn(COutPoint(txfund.sha256, 0), b''))

        # Sign the transaction
        sighashtype = (forkvalue << 8) | SIGHASH_ALL | SIGHASH_FORKID
        sighash = SignatureHashForkId(script, txspend, 0, sighashtype, 50 * COIN - 1000)
        sig = private_key.sign_ecdsa(sighash) + \
            bytes(bytearray([SIGHASH_ALL | SIGHASH_FORKID]))
        txspend.vin[0].scriptSig = CScript([sig])
        txspend.rehash()

        return [txfund, txspend]

    def send_transaction_to_mempool(tx):
        tx_id = node.sendrawtransaction(ToHex(tx))
        assert tx_id in set(node.getrawmempool())
        return tx_id

    # Before the fork, no replay protection is required to get into the mempool.
    txns = create_fund_and_spend_tx(out[0])
    send_transaction_to_mempool(txns[0])
    send_transaction_to_mempool(txns[1])

    # And txns get mined in a block properly.
    block(1)
    update_block(1, txns)
    node.p2p.send_blocks_and_test([self.tip], node)

    # Replay protected transactions are rejected.
    replay_txns = create_fund_and_spend_tx(out[1], 0xffdead)
    send_transaction_to_mempool(replay_txns[0])
    assert_raises_rpc_error(-26, RPC_INVALID_SIGNATURE_ERROR,
                            node.sendrawtransaction, ToHex(replay_txns[1]))

    # And blocks containing them are rejected as well.
    block(2)
    update_block(2, replay_txns)
    node.p2p.send_blocks_and_test([self.tip], node, success=False,
                                  reject_reason='blk-bad-inputs')

    # Rewind bad block
    tip(1)

    # Create a block that would activate the replay protection.
    bfork = block(5555)
    bfork.nTime = REPLAY_PROTECTION_START_TIME - 1
    update_block(5555, [])
    node.p2p.send_blocks_and_test([self.tip], node)

    activation_blocks = []
    for i in range(5):
        block(5100 + i)
        activation_blocks.append(self.tip)
    node.p2p.send_blocks_and_test(activation_blocks, node)

    # Check we are just before the activation time
    assert_equal(node.getblockchaininfo()['mediantime'],
                 REPLAY_PROTECTION_START_TIME - 1)

    # We are just before the fork, so replay protected txns are still rejected.
    assert_raises_rpc_error(-26, RPC_INVALID_SIGNATURE_ERROR,
                            node.sendrawtransaction, ToHex(replay_txns[1]))

    block(3)
    update_block(3, replay_txns)
    node.p2p.send_blocks_and_test([self.tip], node, success=False,
                                  reject_reason='blk-bad-inputs')

    # Rewind bad block
    tip(5104)

    # Send some non replay protected txns in the mempool to check
    # they get cleaned at activation.
    txns = create_fund_and_spend_tx(out[2])
    send_transaction_to_mempool(txns[0])
    tx_id = send_transaction_to_mempool(txns[1])

    # Activate the replay protection
    block(5556)
    node.p2p.send_blocks_and_test([self.tip], node)

    # Check we just activated the replay protection
    assert_equal(node.getblockchaininfo()['mediantime'],
                 REPLAY_PROTECTION_START_TIME)

    # Non replay protected transactions are not valid anymore,
    # so they should be removed from the mempool.
    assert tx_id not in set(node.getrawmempool())

    # Good old transactions are now invalid.
    send_transaction_to_mempool(txns[0])
    assert_raises_rpc_error(-26, RPC_INVALID_SIGNATURE_ERROR,
                            node.sendrawtransaction, ToHex(txns[1]))

    # They also cannot be mined
    block(4)
    update_block(4, txns)
    node.p2p.send_blocks_and_test([self.tip], node, success=False,
                                  reject_reason='blk-bad-inputs')

    # Rewind bad block
    tip(5556)

    # The replay protected transaction is now valid
    replay_tx0_id = send_transaction_to_mempool(replay_txns[0])
    replay_tx1_id = send_transaction_to_mempool(replay_txns[1])

    # Make sure the transactions are ready to be mined.
    tmpl = node.getblocktemplate()
    found_id0 = False
    found_id1 = False
    for txn in tmpl['transactions']:
        txid = txn['txid']
        if txid == replay_tx0_id:
            found_id0 = True
        elif txid == replay_tx1_id:
            found_id1 = True
    assert found_id0 and found_id1

    # And the mempool is still in good shape.
    assert replay_tx0_id in set(node.getrawmempool())
    assert replay_tx1_id in set(node.getrawmempool())

    # They can also be mined
    block(5)
    update_block(5, replay_txns)
    node.p2p.send_blocks_and_test([self.tip], node)

    # Ok, now we check if a reorg works properly across the activation.
    postforkblockid = node.getbestblockhash()
    node.invalidateblock(postforkblockid)
    assert replay_tx0_id in set(node.getrawmempool())
    assert replay_tx1_id in set(node.getrawmempool())

    # Deactivating replay protection.
    forkblockid = node.getbestblockhash()
    node.invalidateblock(forkblockid)
    # The funding tx is not evicted from the mempool, since it's valid on
    # both sides of the fork
    assert replay_tx0_id in set(node.getrawmempool())
    assert replay_tx1_id not in set(node.getrawmempool())

    # Check that we also do it properly on deeper reorg.
    node.reconsiderblock(forkblockid)
    node.reconsiderblock(postforkblockid)
    node.invalidateblock(forkblockid)
    assert replay_tx0_id in set(node.getrawmempool())
    assert replay_tx1_id not in set(node.getrawmempool())
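# A small worked example of how the replay-protected sighash type used above
# is composed (constant values per the BCH fork-id scheme; shown only for
# illustration): the fork value occupies the bits above the low byte.
SIGHASH_ALL = 0x01
SIGHASH_FORKID = 0x40
forkvalue = 0xffdead
sighashtype = (forkvalue << 8) | SIGHASH_ALL | SIGHASH_FORKID
assert sighashtype == 0xffdead41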
def run_test(self):
    self.mine_chain()
    node = self.nodes[0]

    def assert_submitblock(block, result_str_1, result_str_2=None):
        block.solve()
        result_str_2 = result_str_2 or 'duplicate-invalid'
        assert_equal(result_str_1, node.submitblock(hexdata=block.serialize().hex()))
        assert_equal(result_str_2, node.submitblock(hexdata=block.serialize().hex()))

    self.log.info('getmininginfo')
    mining_info = node.getmininginfo()
    assert_equal(mining_info['blocks'], 200)
    assert_equal(mining_info['chain'], self.chain)
    assert 'currentblocktx' not in mining_info
    assert 'currentblockweight' not in mining_info
    assert_equal(mining_info['difficulty'], Decimal('4.656542373906925E-10'))
    assert_equal(mining_info['networkhashps'], Decimal('0.003333333333333334'))
    assert_equal(mining_info['pooledtx'], 0)

    # Mine a block to leave initial block download
    node.generatetoaddress(1, node.get_deterministic_priv_key().address)
    tmpl = node.getblocktemplate(NORMAL_GBT_REQUEST_PARAMS)
    self.log.info("getblocktemplate: Test capability advertised")
    assert 'proposal' in tmpl['capabilities']
    assert 'coinbasetxn' not in tmpl

    next_height = int(tmpl["height"])
    coinbase_tx = create_coinbase(height=next_height)
    # sequence numbers must not be max for nLockTime to have effect
    coinbase_tx.vin[0].nSequence = 2**32 - 2
    coinbase_tx.rehash()

    block = CBlock()
    block.nVersion = tmpl["version"]
    block.hashPrevBlock = int(tmpl["previousblockhash"], 16)
    block.nTime = tmpl["curtime"]
    block.nBits = int(tmpl["bits"], 16)
    block.nNonce = 0
    block.vtx = [coinbase_tx]

    self.log.info("getblocktemplate: segwit rule must be set")
    assert_raises_rpc_error(
        -8, "getblocktemplate must be called with the segwit rule set",
        node.getblocktemplate)

    self.log.info("getblocktemplate: Test valid block")
    assert_template(node, block, None)

    self.log.info("submitblock: Test block decode failure")
    assert_raises_rpc_error(-22, "Block decode failed", node.submitblock,
                            block.serialize()[:-15].hex())

    self.log.info("getblocktemplate: Test bad input hash for coinbase transaction")
    bad_block = copy.deepcopy(block)
    bad_block.vtx[0].vin[0].prevout.hash += 1
    bad_block.vtx[0].rehash()
    assert_template(node, bad_block, 'bad-cb-missing')

    self.log.info("submitblock: Test invalid coinbase transaction")
    assert_raises_rpc_error(-22, "Block does not start with a coinbase",
                            node.submitblock, bad_block.serialize().hex())

    self.log.info("getblocktemplate: Test truncated final transaction")
    assert_raises_rpc_error(-22, "Block decode failed", node.getblocktemplate, {
        'data': block.serialize()[:-1].hex(),
        'mode': 'proposal',
        'rules': ['segwit'],
    })

    self.log.info("getblocktemplate: Test duplicate transaction")
    bad_block = copy.deepcopy(block)
    bad_block.vtx.append(bad_block.vtx[0])
    assert_template(node, bad_block, 'bad-txns-duplicate')
    assert_submitblock(bad_block, 'bad-txns-duplicate', 'bad-txns-duplicate')

    self.log.info("getblocktemplate: Test invalid transaction")
    bad_block = copy.deepcopy(block)
    bad_tx = copy.deepcopy(bad_block.vtx[0])
    bad_tx.vin[0].prevout.hash = 255
    bad_tx.rehash()
    bad_block.vtx.append(bad_tx)
    assert_template(node, bad_block, 'bad-txns-inputs-missingorspent')
    assert_submitblock(bad_block, 'bad-txns-inputs-missingorspent')

    self.log.info("getblocktemplate: Test nonfinal transaction")
    bad_block = copy.deepcopy(block)
    bad_block.vtx[0].nLockTime = 2**32 - 1
    bad_block.vtx[0].rehash()
    assert_template(node, bad_block, 'bad-txns-nonfinal')
    assert_submitblock(bad_block, 'bad-txns-nonfinal')

    self.log.info("getblocktemplate: Test bad tx count")
    # The tx count is immediately after the block header
    bad_block_sn = bytearray(block.serialize())
    assert_equal(bad_block_sn[BLOCK_HEADER_SIZE], 1)
    bad_block_sn[BLOCK_HEADER_SIZE] += 1
    assert_raises_rpc_error(-22, "Block decode failed", node.getblocktemplate, {
        'data': bad_block_sn.hex(),
        'mode': 'proposal',
        'rules': ['segwit'],
    })

    self.log.info("getblocktemplate: Test bad bits")
    bad_block = copy.deepcopy(block)
    bad_block.nBits = 469762303  # impossible in the real world
    assert_template(node, bad_block, 'bad-diffbits')

    self.log.info("getblocktemplate: Test bad merkle root")
    bad_block = copy.deepcopy(block)
    bad_block.hashMerkleRoot += 1
    assert_template(node, bad_block, 'bad-txnmrklroot', False)
    assert_submitblock(bad_block, 'bad-txnmrklroot', 'bad-txnmrklroot')

    self.log.info("getblocktemplate: Test bad timestamps")
    bad_block = copy.deepcopy(block)
    bad_block.nTime = 2**31 - 1
    assert_template(node, bad_block, 'time-too-new')
    assert_submitblock(bad_block, 'time-too-new', 'time-too-new')
    bad_block.nTime = 0
    assert_template(node, bad_block, 'time-too-old')
    assert_submitblock(bad_block, 'time-too-old', 'time-too-old')

    self.log.info("getblocktemplate: Test not best block")
    bad_block = copy.deepcopy(block)
    bad_block.hashPrevBlock = 123
    assert_template(node, bad_block, 'inconclusive-not-best-prevblk')
    assert_submitblock(bad_block, 'prev-blk-not-found', 'prev-blk-not-found')

    self.log.info('submitheader tests')
    assert_raises_rpc_error(
        -22, 'Block header decode failed',
        lambda: node.submitheader(hexdata='xx' * BLOCK_HEADER_SIZE))
    assert_raises_rpc_error(
        -22, 'Block header decode failed',
        lambda: node.submitheader(hexdata='ff' * (BLOCK_HEADER_SIZE - 2)))
    assert_raises_rpc_error(
        -25, 'Must submit previous header',
        lambda: node.submitheader(hexdata=super(CBlock, bad_block).serialize().hex()))

    block.nTime += 1
    block.solve()

    def chain_tip(b_hash, *, status='headers-only', branchlen=1):
        return {'hash': b_hash, 'height': 202, 'branchlen': branchlen, 'status': status}

    assert chain_tip(block.hash) not in node.getchaintips()
    node.submitheader(hexdata=block.serialize().hex())
    assert chain_tip(block.hash) in node.getchaintips()
    node.submitheader(hexdata=CBlockHeader(block).serialize().hex())  # Noop
    assert chain_tip(block.hash) in node.getchaintips()

    bad_block_root = copy.deepcopy(block)
    bad_block_root.hashMerkleRoot += 2
    bad_block_root.solve()
    assert chain_tip(bad_block_root.hash) not in node.getchaintips()
    node.submitheader(hexdata=CBlockHeader(bad_block_root).serialize().hex())
    assert chain_tip(bad_block_root.hash) in node.getchaintips()
    # Should still reject invalid blocks, even if we have the header:
    assert_equal(node.submitblock(hexdata=bad_block_root.serialize().hex()),
                 'bad-txnmrklroot')
    assert_equal(node.submitblock(hexdata=bad_block_root.serialize().hex()),
                 'bad-txnmrklroot')
    assert chain_tip(bad_block_root.hash) in node.getchaintips()
    # We know the header for this invalid block, so should just return early without error:
    node.submitheader(hexdata=CBlockHeader(bad_block_root).serialize().hex())
    assert chain_tip(bad_block_root.hash) in node.getchaintips()

    bad_block_lock = copy.deepcopy(block)
    bad_block_lock.vtx[0].nLockTime = 2**32 - 1
    bad_block_lock.vtx[0].rehash()
    bad_block_lock.hashMerkleRoot = bad_block_lock.calc_merkle_root()
    bad_block_lock.solve()
    assert_equal(node.submitblock(hexdata=bad_block_lock.serialize().hex()),
                 'bad-txns-nonfinal')
    assert_equal(node.submitblock(hexdata=bad_block_lock.serialize().hex()),
                 'duplicate-invalid')
    # Build a "good" block on top of the submitted bad block
    bad_block2 = copy.deepcopy(block)
    bad_block2.hashPrevBlock = bad_block_lock.sha256
    bad_block2.solve()
    assert_raises_rpc_error(
        -25, 'bad-prevblk',
        lambda: node.submitheader(hexdata=CBlockHeader(bad_block2).serialize().hex()))

    # Should reject invalid header right away
    bad_block_time = copy.deepcopy(block)
    bad_block_time.nTime = 1
    bad_block_time.solve()
    assert_raises_rpc_error(
        -25, 'time-too-old',
        lambda: node.submitheader(hexdata=CBlockHeader(bad_block_time).serialize().hex()))

    # Should ask for the block from a p2p node, if they announce the header as well:
    peer = node.add_p2p_connection(P2PDataStore())
    peer.wait_for_getheaders(timeout=5)  # Drop the first getheaders
    peer.send_blocks_and_test(blocks=[block], node=node)
    # Must be active now:
    assert chain_tip(block.hash, status='active', branchlen=0) in node.getchaintips()

    # Building a few blocks should give the same results
    node.generatetoaddress(10, node.get_deterministic_priv_key().address)
    assert_raises_rpc_error(
        -25, 'time-too-old',
        lambda: node.submitheader(hexdata=CBlockHeader(bad_block_time).serialize().hex()))
    assert_raises_rpc_error(
        -25, 'bad-prevblk',
        lambda: node.submitheader(hexdata=CBlockHeader(bad_block2).serialize().hex()))
    node.submitheader(hexdata=CBlockHeader(block).serialize().hex())
    node.submitheader(hexdata=CBlockHeader(bad_block_root).serialize().hex())
    assert_equal(node.submitblock(hexdata=block.serialize().hex()), 'duplicate')  # valid
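# A hedged sketch of decoding the compact nBits encoding that the bad-diffbits
# case above exercises: one exponent byte followed by a 3-byte mantissa
# (bits_to_target is an illustrative name, not framework code).
def bits_to_target(bits: int) -> int:
    exponent = bits >> 24
    mantissa = bits & 0x007fffff
    return mantissa << (8 * (exponent - 3))

assert bits_to_target(0x1d00ffff) == 0xffff << 208  # the original mainnet target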
def run_test(self):
    self.helper_peer = self.nodes[0].add_p2p_connection(P2PDataStore())
    self.miniwallet = MiniWallet(self.nodes[0], mode=MiniWalletMode.RAW_P2PK)

    self.log.info("Generate blocks in the past for coinbase outputs.")
    # enough to build up to 1000 blocks 10 minutes apart without worrying
    # about getting into the future
    long_past_time = int(time.time()) - 600 * 1000
    # enough so that the generated blocks will still all be before long_past_time
    self.nodes[0].setmocktime(long_past_time - 100)
    # blocks generated for inputs
    self.coinbase_blocks = self.miniwallet.generate(COINBASE_BLOCK_COUNT)
    # set time back to present so yielded blocks aren't in the future as
    # we advance last_block_time
    self.nodes[0].setmocktime(0)
    # height of the next block to build
    self.tipheight = COINBASE_BLOCK_COUNT
    self.last_block_time = long_past_time
    self.tip = int(self.nodes[0].getbestblockhash(), 16)

    # Activation height is hardcoded
    # We advance to block height five below BIP112 activation for the following tests
    test_blocks = self.generate_blocks(CSV_ACTIVATION_HEIGHT - 5 - COINBASE_BLOCK_COUNT)
    self.send_blocks(test_blocks)
    assert not softfork_active(self.nodes[0], 'csv')

    # Inputs at height = 431
    #
    # Put inputs for all tests in the chain at height 431 (tip now = 430)
    # (time increases by 600s per block)
    # Note we reuse inputs for v1 and v2 txs so must test these separately
    # 16 normal inputs
    bip68inputs = []
    for _ in range(16):
        bip68inputs.append(self.send_generic_input_tx(self.coinbase_blocks))

    # 2 sets of 16 inputs with 10 OP_CSV OP_DROP (actually will be
    # prepended to spending scriptSig)
    bip112basicinputs = []
    for _ in range(2):
        inputs = []
        for _ in range(16):
            inputs.append(self.send_generic_input_tx(self.coinbase_blocks))
        bip112basicinputs.append(inputs)

    # 2 sets of 16 varied inputs with (relative_lock_time) OP_CSV OP_DROP
    # (actually will be prepended to spending scriptSig)
    bip112diverseinputs = []
    for _ in range(2):
        inputs = []
        for _ in range(16):
            inputs.append(self.send_generic_input_tx(self.coinbase_blocks))
        bip112diverseinputs.append(inputs)

    # 1 special input with -1 OP_CSV OP_DROP (actually will be prepended to spending scriptSig)
    bip112specialinput = self.send_generic_input_tx(self.coinbase_blocks)
    # 1 special input with (empty stack) OP_CSV (actually will be prepended to spending scriptSig)
    bip112emptystackinput = self.send_generic_input_tx(self.coinbase_blocks)

    # 1 normal input
    bip113input = self.send_generic_input_tx(self.coinbase_blocks)

    self.nodes[0].setmocktime(self.last_block_time + 600)
    # 1 block generated for inputs to be in chain at height 431
    inputblockhash = self.nodes[0].generate(1)[0]
    self.nodes[0].setmocktime(0)
    self.tip = int(inputblockhash, 16)
    self.tipheight += 1
    self.last_block_time += 600
    assert_equal(len(self.nodes[0].getblock(inputblockhash, True)["tx"]),
                 TESTING_TX_COUNT + 1)

    # 2 more version 4 blocks
    test_blocks = self.generate_blocks(2)
    self.send_blocks(test_blocks)

    assert_equal(self.tipheight, CSV_ACTIVATION_HEIGHT - 2)
    self.log.info(
        "Height = {}, CSV not yet active (will activate for block {}, not {})".format(
            self.tipheight, CSV_ACTIVATION_HEIGHT, CSV_ACTIVATION_HEIGHT - 1))
    assert not softfork_active(self.nodes[0], 'csv')

    # Test both version 1 and version 2 transactions for all tests
    # BIP113 test transaction will be modified before each use to put in appropriate block time
    bip113tx_v1 = self.create_self_transfer_from_utxo(bip113input)
    bip113tx_v1.vin[0].nSequence = 0xFFFFFFFE
    bip113tx_v1.nVersion = 1
    bip113tx_v2 = self.create_self_transfer_from_utxo(bip113input)
    bip113tx_v2.vin[0].nSequence = 0xFFFFFFFE
    bip113tx_v2.nVersion = 2

    # For BIP68 test all 16 relative sequence locktimes
    bip68txs_v1 = self.create_bip68txs(bip68inputs, 1)
    bip68txs_v2 = self.create_bip68txs(bip68inputs, 2)

    # For BIP112 test:
    # 16 relative sequence locktimes of 10 against 10 OP_CSV OP_DROP inputs
    bip112txs_vary_nSequence_v1 = self.create_bip112txs(bip112basicinputs[0], False, 1)
    bip112txs_vary_nSequence_v2 = self.create_bip112txs(bip112basicinputs[0], False, 2)
    # 16 relative sequence locktimes of 9 against 10 OP_CSV OP_DROP inputs
    bip112txs_vary_nSequence_9_v1 = self.create_bip112txs(bip112basicinputs[1], False, 1, -1)
    bip112txs_vary_nSequence_9_v2 = self.create_bip112txs(bip112basicinputs[1], False, 2, -1)
    # sequence lock time of 10 against 16 (relative_lock_time) OP_CSV OP_DROP inputs
    bip112txs_vary_OP_CSV_v1 = self.create_bip112txs(bip112diverseinputs[0], True, 1)
    bip112txs_vary_OP_CSV_v2 = self.create_bip112txs(bip112diverseinputs[0], True, 2)
    # sequence lock time of 9 against 16 (relative_lock_time) OP_CSV OP_DROP inputs
    bip112txs_vary_OP_CSV_9_v1 = self.create_bip112txs(bip112diverseinputs[1], True, 1, -1)
    bip112txs_vary_OP_CSV_9_v2 = self.create_bip112txs(bip112diverseinputs[1], True, 2, -1)
    # -1 OP_CSV OP_DROP input
    bip112tx_special_v1 = self.create_bip112special(bip112specialinput, 1)
    bip112tx_special_v2 = self.create_bip112special(bip112specialinput, 2)
    # (empty stack) OP_CSV input
    bip112tx_emptystack_v1 = self.create_bip112emptystack(bip112emptystackinput, 1)
    bip112tx_emptystack_v2 = self.create_bip112emptystack(bip112emptystackinput, 2)

    self.log.info("TESTING")

    self.log.info("Pre-Soft Fork Tests. All txs should pass.")
    self.log.info("Test version 1 txs")

    success_txs = []
    # BIP113 tx, -1 CSV tx and empty stack CSV tx should succeed
    bip113tx_v1.nLockTime = self.last_block_time - 600 * 5  # = MTP of prior block (not <) but < time put on current block
    self.miniwallet.sign_tx(bip113tx_v1)
    success_txs.append(bip113tx_v1)
    success_txs.append(bip112tx_special_v1)
    success_txs.append(bip112tx_emptystack_v1)
    # add BIP 68 txs
    success_txs.extend(all_rlt_txs(bip68txs_v1))
    # add BIP 112 with seq=10 txs
    success_txs.extend(all_rlt_txs(bip112txs_vary_nSequence_v1))
    success_txs.extend(all_rlt_txs(bip112txs_vary_OP_CSV_v1))
    # try BIP 112 with seq=9 txs
    success_txs.extend(all_rlt_txs(bip112txs_vary_nSequence_9_v1))
    success_txs.extend(all_rlt_txs(bip112txs_vary_OP_CSV_9_v1))
    self.send_blocks([self.create_test_block(success_txs)])
    self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash())

    self.log.info("Test version 2 txs")

    success_txs = []
    # BIP113 tx, -1 CSV tx and empty stack CSV tx should succeed
    bip113tx_v2.nLockTime = self.last_block_time - 600 * 5  # = MTP of prior block (not <) but < time put on current block
    self.miniwallet.sign_tx(bip113tx_v2)
    success_txs.append(bip113tx_v2)
    success_txs.append(bip112tx_special_v2)
    success_txs.append(bip112tx_emptystack_v2)
    # add BIP 68 txs
    success_txs.extend(all_rlt_txs(bip68txs_v2))
    # add BIP 112 with seq=10 txs
    success_txs.extend(all_rlt_txs(bip112txs_vary_nSequence_v2))
    success_txs.extend(all_rlt_txs(bip112txs_vary_OP_CSV_v2))
    # try BIP 112 with seq=9 txs
    success_txs.extend(all_rlt_txs(bip112txs_vary_nSequence_9_v2))
    success_txs.extend(all_rlt_txs(bip112txs_vary_OP_CSV_9_v2))
    self.send_blocks([self.create_test_block(success_txs)])
    self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash())

    # 1 more version 4 block to get us to height 432 so the fork should
    # now be active for the next block
    assert not softfork_active(self.nodes[0], 'csv')
    test_blocks = self.generate_blocks(1)
    self.send_blocks(test_blocks)
    assert softfork_active(self.nodes[0], 'csv')

    self.log.info("Post-Soft Fork Tests.")

    self.log.info("BIP 113 tests")
    # BIP 113 tests should now fail regardless of version number if
    # nLockTime isn't satisfied by new rules
    bip113tx_v1.nLockTime = self.last_block_time - 600 * 5  # = MTP of prior block (not <) but < time put on current block
    self.miniwallet.sign_tx(bip113tx_v1)
    bip113tx_v1.rehash()
    bip113tx_v2.nLockTime = self.last_block_time - 600 * 5  # = MTP of prior block (not <) but < time put on current block
    self.miniwallet.sign_tx(bip113tx_v2)
    bip113tx_v2.rehash()
    for bip113tx in [bip113tx_v1, bip113tx_v2]:
        self.send_blocks([self.create_test_block([bip113tx])], success=False,
                         reject_reason='bad-txns-nonfinal')

    # BIP 113 tests should now pass if the locktime is < MTP
    bip113tx_v1.nLockTime = self.last_block_time - 600 * 5 - 1  # < MTP of prior block
    self.miniwallet.sign_tx(bip113tx_v1)
    bip113tx_v1.rehash()
    bip113tx_v2.nLockTime = self.last_block_time - 600 * 5 - 1  # < MTP of prior block
    self.miniwallet.sign_tx(bip113tx_v2)
    bip113tx_v2.rehash()
    for bip113tx in [bip113tx_v1, bip113tx_v2]:
        self.send_blocks([self.create_test_block([bip113tx])])
        self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash())

    # Next block height = 437 after 4 blocks of random version
    test_blocks = self.generate_blocks(4)
    self.send_blocks(test_blocks)

    self.log.info("BIP 68 tests")
    self.log.info("Test version 1 txs - all should still pass")

    success_txs = []
    success_txs.extend(all_rlt_txs(bip68txs_v1))
    self.send_blocks([self.create_test_block(success_txs)])
    self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash())

    self.log.info("Test version 2 txs")

    # All txs with SEQUENCE_LOCKTIME_DISABLE_FLAG set pass
    bip68success_txs = [tx['tx'] for tx in bip68txs_v2 if tx['sdf']]
    self.send_blocks([self.create_test_block(bip68success_txs)])
    self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash())

    # All txs without flag fail as we are at delta height = 8 < 10 and
    # delta time = 8 * 600 < 10 * 512
    bip68timetxs = [tx['tx'] for tx in bip68txs_v2 if not tx['sdf'] and tx['stf']]
    for tx in bip68timetxs:
        self.send_blocks([self.create_test_block([tx])], success=False,
                         reject_reason='bad-txns-nonfinal')

    bip68heighttxs = [tx['tx'] for tx in bip68txs_v2 if not tx['sdf'] and not tx['stf']]
    for tx in bip68heighttxs:
        self.send_blocks([self.create_test_block([tx])], success=False,
                         reject_reason='bad-txns-nonfinal')

    # Advance one block to 438
    test_blocks = self.generate_blocks(1)
    self.send_blocks(test_blocks)

    # Height txs should fail and time txs should now pass 9 * 600 > 10 * 512
    bip68success_txs.extend(bip68timetxs)
    self.send_blocks([self.create_test_block(bip68success_txs)])
    self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash())
    for tx in bip68heighttxs:
        self.send_blocks([self.create_test_block([tx])], success=False,
                         reject_reason='bad-txns-nonfinal')

    # Advance one block to 439
    test_blocks = self.generate_blocks(1)
    self.send_blocks(test_blocks)

    # All BIP 68 txs should pass
    bip68success_txs.extend(bip68heighttxs)
    self.send_blocks([self.create_test_block(bip68success_txs)])
    self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash())

    self.log.info("BIP 112 tests")
    self.log.info("Test version 1 txs")

    # -1 OP_CSV tx and (empty stack) OP_CSV tx should fail
    self.send_blocks([self.create_test_block([bip112tx_special_v1])], success=False,
                     reject_reason='non-mandatory-script-verify-flag (Negative locktime)')
    self.send_blocks([self.create_test_block([bip112tx_emptystack_v1])], success=False,
                     reject_reason='non-mandatory-script-verify-flag (Operation not valid with the current stack size)')

    # If SEQUENCE_LOCKTIME_DISABLE_FLAG is set in argument to OP_CSV,
    # version 1 txs should still pass
    success_txs = [tx['tx'] for tx in bip112txs_vary_OP_CSV_v1 if tx['sdf']]
    success_txs += [tx['tx'] for tx in bip112txs_vary_OP_CSV_9_v1 if tx['sdf']]
    self.send_blocks([self.create_test_block(success_txs)])
    self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash())

    # If SEQUENCE_LOCKTIME_DISABLE_FLAG is unset in argument to OP_CSV,
    # version 1 txs should now fail
    fail_txs = all_rlt_txs(bip112txs_vary_nSequence_v1)
    fail_txs += all_rlt_txs(bip112txs_vary_nSequence_9_v1)
    fail_txs += [tx['tx'] for tx in bip112txs_vary_OP_CSV_v1 if not tx['sdf']]
    fail_txs += [tx['tx'] for tx in bip112txs_vary_OP_CSV_9_v1 if not tx['sdf']]
    for tx in fail_txs:
        self.send_blocks([self.create_test_block([tx])], success=False,
                         reject_reason='non-mandatory-script-verify-flag (Locktime requirement not satisfied)')

    self.log.info("Test version 2 txs")

    # -1 OP_CSV tx and (empty stack) OP_CSV tx should fail
    self.send_blocks([self.create_test_block([bip112tx_special_v2])], success=False,
                     reject_reason='non-mandatory-script-verify-flag (Negative locktime)')
    self.send_blocks([self.create_test_block([bip112tx_emptystack_v2])], success=False,
                     reject_reason='non-mandatory-script-verify-flag (Operation not valid with the current stack size)')

    # If SEQUENCE_LOCKTIME_DISABLE_FLAG is set in argument to OP_CSV,
    # version 2 txs should pass (all sequence locks are met)
    success_txs = [tx['tx'] for tx in bip112txs_vary_OP_CSV_v2 if tx['sdf']]
    success_txs += [tx['tx'] for tx in bip112txs_vary_OP_CSV_9_v2 if tx['sdf']]
    self.send_blocks([self.create_test_block(success_txs)])
    self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash())

    # SEQUENCE_LOCKTIME_DISABLE_FLAG is unset in argument to OP_CSV for all remaining txs

    # All txs with nSequence 9 should fail either due to earlier mismatch
    # or failing the CSV check
    fail_txs = all_rlt_txs(bip112txs_vary_nSequence_9_v2)
    fail_txs += [tx['tx'] for tx in bip112txs_vary_OP_CSV_9_v2 if not tx['sdf']]
    for tx in fail_txs:
        self.send_blocks([self.create_test_block([tx])], success=False,
                         reject_reason='non-mandatory-script-verify-flag (Locktime requirement not satisfied)')

    # If SEQUENCE_LOCKTIME_DISABLE_FLAG is set in nSequence, tx should fail
    fail_txs = [tx['tx'] for tx in bip112txs_vary_nSequence_v2 if tx['sdf']]
    for tx in fail_txs:
        self.send_blocks([self.create_test_block([tx])], success=False,
                         reject_reason='non-mandatory-script-verify-flag (Locktime requirement not satisfied)')

    # If sequencelock types mismatch, tx should fail
    fail_txs = [tx['tx'] for tx in bip112txs_vary_nSequence_v2 if not tx['sdf'] and tx['stf']]
    fail_txs += [tx['tx'] for tx in bip112txs_vary_OP_CSV_v2 if not tx['sdf'] and tx['stf']]
    for tx in fail_txs:
        self.send_blocks([self.create_test_block([tx])], success=False,
                         reject_reason='non-mandatory-script-verify-flag (Locktime requirement not satisfied)')

    # Remaining txs should pass, just test masking works properly
    success_txs = [tx['tx'] for tx in bip112txs_vary_nSequence_v2 if not tx['sdf'] and not tx['stf']]
    success_txs += [tx['tx'] for tx in bip112txs_vary_OP_CSV_v2 if not tx['sdf'] and not tx['stf']]
    self.send_blocks([self.create_test_block(success_txs)])
    self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash())

    # Additional test, of checking that comparison of two time types works properly
    time_txs = []
    for tx in [tx['tx'] for tx in bip112txs_vary_OP_CSV_v2 if not tx['sdf'] and tx['stf']]:
        tx.vin[0].nSequence = BASE_RELATIVE_LOCKTIME | SEQ_TYPE_FLAG
        self.miniwallet.sign_tx(tx)
        tx.rehash()
        time_txs.append(tx)

    self.send_blocks([self.create_test_block(time_txs)])
    self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash())
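# The BIP 68 and BIP 112 checks above all hinge on how nSequence packs a
# relative lock-time. A minimal illustrative sketch of that encoding follows;
# it is not part of the tests above, and the constant and helper names
# (decode_relative_locktime) are local to this sketch, though the bit layout
# matches BIP 68: bit 31 disables the lock, bit 22 selects time-based locks
# in 512-second units, and the low 16 bits carry the value.

SEQUENCE_LOCKTIME_DISABLE_FLAG = 1 << 31  # bit 31: relative lock ignored
SEQUENCE_LOCKTIME_TYPE_FLAG = 1 << 22     # bit 22: time-based vs height-based
SEQUENCE_LOCKTIME_MASK = 0x0000FFFF       # low 16 bits: the lock value


def decode_relative_locktime(n_sequence):
    """Hypothetical helper: interpret an nSequence as a BIP 68 relative lock.

    Returns None when the disable flag is set, otherwise a (type, value)
    pair where type is 'time' (value * 512 seconds) or 'height' (value
    blocks)."""
    if n_sequence & SEQUENCE_LOCKTIME_DISABLE_FLAG:
        return None  # consensus ignores the lock entirely
    value = n_sequence & SEQUENCE_LOCKTIME_MASK
    if n_sequence & SEQUENCE_LOCKTIME_TYPE_FLAG:
        return ('time', value)  # value * 512 seconds must have elapsed
    return ('height', value)    # value blocks must have been mined


# Worked example matching the arithmetic in the comments above: with a lock
# value of 10 and 600-second block spacing, eight blocks give 8 * 600 = 4800
# seconds, less than 10 * 512 = 5120, so a time-based lock is still unmet,
# while nine blocks (9 * 600 = 5400 > 5120) satisfy it.
assert decode_relative_locktime(SEQUENCE_LOCKTIME_TYPE_FLAG | 10) == ('time', 10)
assert decode_relative_locktime(10) == ('height', 10)
assert decode_relative_locktime(SEQUENCE_LOCKTIME_DISABLE_FLAG | 10) is None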
def run_test(self):
    node = self.nodes[0]
    peer = node.add_p2p_connection(P2PDataStore())
    node.setmocktime(ACTIVATION_TIME)

    self.genesis_hash = int(node.getbestblockhash(), 16)
    self.block_heights[self.genesis_hash] = 0
    spendable_outputs = []

    # save the current tip so it can be spent by a later block
    def save_spendable_output():
        spendable_outputs.append(self.tip)

    # get an output that we previously marked as spendable
    def get_spendable_output():
        return PreviousSpendableOutput(spendable_outputs.pop(0).vtx[0], 0)

    # adds transactions to the block and updates state
    def update_block(block_number, new_transactions):
        block = self.blocks[block_number]
        block.vtx.extend(new_transactions)
        old_sha256 = block.sha256
        make_conform_to_ctor(block)
        block.hashMerkleRoot = block.calc_merkle_root()
        block.solve()
        # Update the internal state just like in next_block
        self.tip = block
        if block.sha256 != old_sha256:
            self.block_heights[block.sha256] = self.block_heights[old_sha256]
            del self.block_heights[old_sha256]
        self.blocks[block_number] = block
        return block

    # send a txn to the mempool and check it was accepted
    def send_transaction_to_mempool(tx):
        tx_id = node.sendrawtransaction(ToHex(tx))
        assert tx_id in node.getrawmempool()

    # checks the mempool has exactly the same txns as in the provided list
    def check_mempool_equal(txns):
        assert set(node.getrawmempool()) == set(tx.hash for tx in txns)

    # Create an always-valid chained transaction. It spends a
    # scriptPub=OP_TRUE coin into another. Returns the transaction and its
    # spendable output for further chaining.
    def create_always_valid_chained_tx(spend):
        tx = create_tx_with_script(
            spend.tx, spend.n, b'',
            amount=spend.tx.vout[0].nValue - 1000,
            script_pub_key=CScript([OP_TRUE]))
        tx.rehash()
        return tx, PreviousSpendableOutput(tx, 0)

    # shorthand
    block = self.next_block

    # Create a new block
    block(0)
    save_spendable_output()
    peer.send_blocks_and_test([self.tip], node)

    # Now we need that block to mature so we can spend the coinbase.
    maturity_blocks = []
    for i in range(110):
        block(5000 + i)
        maturity_blocks.append(self.tip)
        save_spendable_output()
    peer.send_blocks_and_test(maturity_blocks, node)

    # collect spendable outputs now to avoid cluttering the code later on
    out = []
    for i in range(100):
        out.append(get_spendable_output())

    # Create 2 pre-fork-only txns (tx_pre0, tx_pre1). Fund txns are valid
    # pre-fork, so we can mine them right away.
    txfund0, tx_pre0 = create_fund_and_pre_fork_only_tx(out[0])
    txfund1, tx_pre1 = create_fund_and_pre_fork_only_tx(out[1])

    # Create 2 post-fork-only txns (tx_post0, tx_post1). Fund txns are
    # valid pre-fork, so we can mine them right away.
    txfund2, tx_post0 = create_fund_and_post_fork_only_tx(out[2])
    txfund3, tx_post1 = create_fund_and_post_fork_only_tx(out[3])

    # Create blocks to activate the fork. Mine all funding transactions.
    bfork = block(5555)
    bfork.nTime = ACTIVATION_TIME - 1
    update_block(5555, [txfund0, txfund1, txfund2, txfund3])
    peer.send_blocks_and_test([self.tip], node)

    for i in range(5):
        peer.send_blocks_and_test([block(5200 + i)], node)

    # Check we are just before the activation time
    assert_equal(node.getblockchaininfo()['mediantime'],
                 ACTIVATION_TIME - 1)

    # We are just before the fork. Pre-fork-only and always-valid chained
    # txns (tx_chain0, tx_chain1) are valid, post-fork-only txns are
    # rejected.
    send_transaction_to_mempool(tx_pre0)
    send_transaction_to_mempool(tx_pre1)
    tx_chain0, last_chained_output = create_always_valid_chained_tx(out[4])
    tx_chain1, last_chained_output = create_always_valid_chained_tx(
        last_chained_output)
    send_transaction_to_mempool(tx_chain0)
    send_transaction_to_mempool(tx_chain1)
    assert_raises_rpc_error(-26, RPC_EXPECTED_ERROR,
                            node.sendrawtransaction, ToHex(tx_post0))
    assert_raises_rpc_error(-26, RPC_EXPECTED_ERROR,
                            node.sendrawtransaction, ToHex(tx_post1))
    check_mempool_equal([tx_chain0, tx_chain1, tx_pre0, tx_pre1])

    # Activate the fork. Mine the 1st always-valid chained txn and a
    # pre-fork-only txn.
    block(5556)
    update_block(5556, [tx_chain0, tx_pre0])
    peer.send_blocks_and_test([self.tip], node)
    forkblockid = node.getbestblockhash()

    # Check we just activated the fork
    assert_equal(node.getblockheader(forkblockid)['mediantime'],
                 ACTIVATION_TIME)

    # Check mempool coherence when activating the fork. Pre-fork-only txns
    # were evicted from the mempool, while always-valid txns remain.
    # Evicted: tx_pre1
    check_mempool_equal([tx_chain1])

    # Post-fork-only and always-valid txns are accepted, pre-fork-only txns
    # are rejected.
    send_transaction_to_mempool(tx_post0)
    send_transaction_to_mempool(tx_post1)
    tx_chain2, _ = create_always_valid_chained_tx(last_chained_output)
    send_transaction_to_mempool(tx_chain2)
    assert_raises_rpc_error(-26, RPC_EXPECTED_ERROR,
                            node.sendrawtransaction, ToHex(tx_pre1))
    check_mempool_equal([tx_chain1, tx_chain2, tx_post0, tx_post1])

    # Mine the 2nd always-valid chained txn and a post-fork-only txn.
    block(5557)
    update_block(5557, [tx_chain1, tx_post0])
    peer.send_blocks_and_test([self.tip], node)
    postforkblockid = node.getbestblockhash()
    # The mempool contains the 3rd chained txn and a post-fork-only txn.
    check_mempool_equal([tx_chain2, tx_post1])

    # In the following we will be testing block disconnections and reorgs.
    # - tx_chain2 will always be retained in the mempool since it is always
    #   valid. Its continued presence shows that we are never simply
    #   clearing the entire mempool.
    # - tx_post1 may be evicted from the mempool if we land before the fork.
    # - tx_post0 is in a block and, if 'de-mined', it will either be evicted
    #   or end up in the mempool depending on whether we land before or
    #   after the fork.
    # - tx_pre0 is in a block and, if 'de-mined', it will either be evicted
    #   or end up in the mempool depending on whether we land after or
    #   before the fork.

    # First we do a disconnection of the post-fork block, which is a
    # normal disconnection that merely returns the block contents into
    # the mempool -- nothing is lost.
    node.invalidateblock(postforkblockid)

    # In old mempool: tx_chain2, tx_post1
    # Recovered from blocks: tx_chain1 and tx_post0
    # Lost from blocks: NONE
    # Retained from old mempool: tx_chain2, tx_post1
    # Evicted from old mempool: NONE
    check_mempool_equal([tx_chain1, tx_chain2, tx_post0, tx_post1])

    # Now, disconnect the fork block. This is a special disconnection
    # that requires reprocessing the mempool due to the change in rules.
    node.invalidateblock(forkblockid)

    # In old mempool: tx_chain1, tx_chain2, tx_post0, tx_post1
    # Recovered from blocks: tx_chain0, tx_pre0
    # Lost from blocks: NONE
    # Retained from old mempool: tx_chain1, tx_chain2
    # Evicted from old mempool: tx_post0, tx_post1
    check_mempool_equal([tx_chain0, tx_chain1, tx_chain2, tx_pre0])

    # Restore state
    node.reconsiderblock(postforkblockid)
    node.reconsiderblock(forkblockid)
    send_transaction_to_mempool(tx_post1)
    check_mempool_equal([tx_chain2, tx_post1])

    # Test a reorg that crosses the fork.
    # If such a reorg happens, most likely it will both start *and end*
    # after the fork. We will test such a case here and make sure that
    # post-fork-only transactions are not unnecessarily discarded from
    # the mempool in such a reorg. Pre-fork-only transactions, however,
    # can get lost.

    # Set up a longer competing chain that doesn't confirm any of our txns.
    # This starts after 5204, so it contains neither the forkblockid nor
    # the postforkblockid from above.
    self.tip = self.blocks[5204]
    reorg_blocks = []
    for i in range(3):
        reorg_blocks.append(block(5900 + i))

    # Perform the reorg
    peer.send_blocks_and_test(reorg_blocks, node)
    # The reorg finishes after the fork
    assert_equal(node.getblockchaininfo()['mediantime'],
                 ACTIVATION_TIME + 2)

    # In old mempool: tx_chain2, tx_post1
    # Recovered from blocks: tx_chain0, tx_chain1, tx_post0
    # Lost from blocks: tx_pre0
    # Retained from old mempool: tx_chain2, tx_post1
    # Evicted from old mempool: NONE
    check_mempool_equal(
        [tx_chain0, tx_chain1, tx_chain2, tx_post0, tx_post1])
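# The "recovered / retained / evicted / lost" bookkeeping in the comments
# above can be summarised by a small model: on a reorg the node returns
# disconnected-block txns to the mempool and then re-validates everything
# under the rules in force at the new tip. A hypothetical sketch, not part
# of the test and not the node's actual implementation; the helper name
# (expected_mempool_after_reorg) is invented here for illustration.

def expected_mempool_after_reorg(old_mempool, disconnected_txs,
                                 is_valid_at_new_tip):
    """Return the txns expected to survive a reorg: the union of the old
    mempool and the txns returned by disconnected blocks, filtered by the
    validity rules at the new tip (pre-fork-only txns are lost when the new
    tip is post-fork, and vice versa)."""
    return {tx for tx in set(old_mempool) | set(disconnected_txs)
            if is_valid_at_new_tip(tx)}


# For the fork-crossing reorg above: the old mempool held tx_chain2 and
# tx_post1, the abandoned chain returned tx_chain0, tx_chain1, tx_post0 and
# tx_pre0, and the new tip is post-fork, so only the pre-fork-only tx_pre0
# is lost -- matching the final check_mempool_equal call.
assert expected_mempool_after_reorg(
    old_mempool={'tx_chain2', 'tx_post1'},
    disconnected_txs={'tx_chain0', 'tx_chain1', 'tx_post0', 'tx_pre0'},
    is_valid_at_new_tip=lambda tx: not tx.startswith('tx_pre'),
) == {'tx_chain0', 'tx_chain1', 'tx_chain2', 'tx_post0', 'tx_post1'}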