def SegwitVersion1SignatureHash_legacy(script, txTo, inIdx, hashtype, amount):
    """
    This method is identical to the regular `SegwitVersion1SignatureHash`
    method, but without support for SIGHASH_RANGEPROOF. So basically it's the
    old version of the method from before the new sighash flag was added.
    """
    hashPrevouts = 0
    hashSequence = 0
    hashIssuance = 0
    hashOutputs = 0

    if not (hashtype & SIGHASH_ANYONECANPAY):
        serialize_prevouts = bytes()
        for i in txTo.vin:
            serialize_prevouts += i.prevout.serialize()
        hashPrevouts = uint256_from_str(hash256(serialize_prevouts))

    if (not (hashtype & SIGHASH_ANYONECANPAY) and (hashtype & 0x1f) != SIGHASH_SINGLE and (hashtype & 0x1f) != SIGHASH_NONE):
        serialize_sequence = bytes()
        for i in txTo.vin:
            serialize_sequence += struct.pack("<I", i.nSequence)
        hashSequence = uint256_from_str(hash256(serialize_sequence))

    if not (hashtype & SIGHASH_ANYONECANPAY):
        serialize_issuance = bytes()
        # TODO actually serialize issuances
        for _ in txTo.vin:
            serialize_issuance += b'\x00'
        hashIssuance = uint256_from_str(hash256(serialize_issuance))

    if ((hashtype & 0x1f) != SIGHASH_SINGLE and (hashtype & 0x1f) != SIGHASH_NONE):
        serialize_outputs = bytes()
        for o in txTo.vout:
            serialize_outputs += o.serialize()
        hashOutputs = uint256_from_str(hash256(serialize_outputs))
    elif ((hashtype & 0x1f) == SIGHASH_SINGLE and inIdx < len(txTo.vout)):
        serialize_outputs = txTo.vout[inIdx].serialize()
        hashOutputs = uint256_from_str(hash256(serialize_outputs))

    ss = bytes()
    ss += struct.pack("<i", txTo.nVersion)
    ss += ser_uint256(hashPrevouts)
    ss += ser_uint256(hashSequence)
    ss += ser_uint256(hashIssuance)
    ss += txTo.vin[inIdx].prevout.serialize()
    ss += ser_string(script)
    ss += amount.serialize()
    ss += struct.pack("<I", txTo.vin[inIdx].nSequence)
    ss += ser_uint256(hashOutputs)
    ss += struct.pack("<i", txTo.nLockTime)
    ss += struct.pack("<I", hashtype)
    return hash256(ss)
def mine_msg_txn_incorrectly(self, tip_height, tip_hash):
    nonce = 0
    op_return_data = self.create_op_return_data(tip_height, tip_hash, nonce)

    tx = CTransaction()
    tx.vin.append(CTxIn(COutPoint(0, 0xfffffffe), b"", 0xffffffff))
    tx.vout.append(CTxOut(0, CScript([OP_RETURN, op_return_data])))
    tx.nLockTime = self.tx_time
    tx.mine()
    tx.rehash()
    self.tx_time += 1

    lower_bound = get_target(tx)
    upper_bound = uint256_from_str(
        hex_str_to_bytes(
            "00FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF"
        )[::-1])

    while tx.sha256s ^ 0x8000000000000000000000000000000000000000000000000000000000000000 <= lower_bound or \
            tx.sha256s ^ 0x8000000000000000000000000000000000000000000000000000000000000000 > upper_bound:
        nonce += 1
        op_return_data[-4:] = struct.pack("<I", nonce)
        tx.vout[0] = CTxOut(0, CScript([OP_RETURN, op_return_data]))
        tx.mine()
        tx.rehash()

    return tx
def merkle_root_from_cb_and_branch(cbhash, branch):
    """
    Given a coinbase tx hash (bytes) and a merkle branch (list of bytes),
    calculate the merkle root. The merkle root is returned as a uint256
    suitable for setting a CBlock.hashMerkleRoot
    """
    hashes = [cbhash] + branch
    while len(hashes) > 1:
        hashes[0] = hash256(hashes[0] + hashes[1])
        del hashes[1]
    return messages.uint256_from_str(hashes[0])  # this is now the root
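# A minimal usage sketch (not from the original tests), assuming the same
# hash256/messages helpers are in scope: with a one-element branch the root is
# simply hash256 of the concatenated pair. The leaf values below are
# hypothetical placeholders, not data from any real block.
coinbase_hash = hash256(b'coinbase-tx-bytes')   # placeholder leaf
sibling_hash = hash256(b'sibling-tx-bytes')     # placeholder branch entry
root = merkle_root_from_cb_and_branch(coinbase_hash, [sibling_hash])
assert root == messages.uint256_from_str(hash256(coinbase_hash + sibling_hash))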
def is_block_hash_in_inv_predicate(self, block_hash):
    mininode = self.nodes[0].p2p
    hash_uint256 = uint256_from_str(unhexlify(block_hash)[::-1])

    def is_block_hash_in_inv():
        msg = mininode.last_message.get('getdata', None)
        if msg is None:
            return False
        return hash_uint256 in [cinv.hash for cinv in msg.inv]

    return is_block_hash_in_inv
def pegin_test(self, sighash_ty):
    # Peg-in prep:
    # Hack: since we're not validating peg-ins in parent chain, just make
    # both the funding and claim tx on same chain (printing money)
    fund_info = self.nodes[0].getpeginaddress()
    peg_id = self.nodes[0].sendtoaddress(fund_info["mainchain_address"], 1)
    raw_peg_tx = self.nodes[0].gettransaction(peg_id)["hex"]
    peg_txid = self.nodes[0].sendrawtransaction(raw_peg_tx)
    self.nodes[0].generate(101)
    peg_prf = self.nodes[0].gettxoutproof([peg_txid])
    claim_script = fund_info["claim_script"]

    # Create a pegin transaction
    # We have to manually supply claim script, otherwise the wallet will pick
    raw_claim = self.nodes[0].createrawpegin(raw_peg_tx, peg_prf, claim_script)
    raw_claim = FromHex(CTransaction(), raw_claim['hex'])

    # Create a taproot utxo
    tx, prev_vout, spk, sec, pub, tweak = self.create_taproot_utxo()

    # Spend the pegin and taproot tx together
    raw_claim.vin.append(CTxIn(COutPoint(tx.sha256, prev_vout)))
    raw_claim.vout.append(
        CTxOut(nValue=CTxOutValue(12 * 10**7), scriptPubKey=spk))  # send back to self
    signed = self.nodes[0].signrawtransactionwithwallet(
        raw_claim.serialize().hex())
    raw_claim = FromHex(CTransaction(), signed['hex'])

    genesis_hash = uint256_from_str(
        bytes.fromhex(self.nodes[0].getblockhash(0))[::-1])
    peg_utxo = CTxOut()
    peg_utxo.from_pegin_witness_data(raw_claim.wit.vtxinwit[0].peginWitness)
    msg = TaprootSignatureHash(raw_claim, [peg_utxo, tx.vout[prev_vout]],
                               sighash_ty, genesis_hash, 1)

    # compute the tweak
    tweak_sk = tweak_add_privkey(sec, tweak)
    sig = sign_schnorr(tweak_sk, msg)
    raw_claim.wit.vtxinwit[1].scriptWitness.stack = [
        taproot_pad_sighash_ty(sig, sighash_ty)
    ]
    pub_tweak = tweak_add_pubkey(pub, tweak)[0]
    assert verify_schnorr(pub_tweak, sig, msg)

    # Since we add in/outputs the min feerate is no longer maintained.
    self.nodes[0].sendrawtransaction(hexstring=raw_claim.serialize().hex())
    self.nodes[0].generate(1)
    last_blk = self.nodes[0].getblock(self.nodes[0].getbestblockhash())
    raw_claim.rehash()
    assert raw_claim.hash in last_blk['tx']
def get_target(txn):
    block_subsidy = 5000000000
    txn_cost = get_txn_cost(txn)
    ratio = block_subsidy // txn_cost
    block_target = uint256_from_str(
        hex_str_to_bytes(
            "0000000000ffff00000000000000000000000000000000000000000000000000"
        )[::-1])
    target = block_target * ratio * TARGET_MULTIPLIER
    return target
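# A small sketch (not from the original suite) of how the helpers above
# compose: uint256_from_str() reads its input little-endian, so reversing a
# big-endian hex string first yields exactly the number the hex spells out.
# The cost value is a hypothetical stand-in for get_txn_cost().
base = uint256_from_str(hex_str_to_bytes(
    "0000000000ffff00000000000000000000000000000000000000000000000000")[::-1])
assert base == 0x0000000000ffff00000000000000000000000000000000000000000000000000
hypothetical_cost = 1000
ratio = 5000000000 // hypothetical_cost  # 5_000_000: cheaper txs get a looser target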
def issuance_test(self, sighash_ty):
    tx, prev_vout, spk, sec, pub, tweak = self.create_taproot_utxo()

    blind_addr = self.nodes[0].getnewaddress()
    nonblind_addr = self.nodes[0].validateaddress(blind_addr)['unconfidential']
    raw_tx = self.nodes[0].createrawtransaction([], [{nonblind_addr: 1}])
    raw_tx = FromHex(CTransaction(), raw_tx)

    # Need to taproot outputs later because fundrawtransaction cannot estimate fees
    # prev out has value 1.2 btc
    in_total = tx.vout[prev_vout].nValue.getAmount()
    fees = 100
    raw_tx.vin.append(CTxIn(COutPoint(tx.sha256, prev_vout)))
    raw_tx.vout.append(
        CTxOut(nValue=CTxOutValue(in_total - fees - 10**8),
               scriptPubKey=spk))  # send back to self
    raw_tx.vout.append(CTxOut(nValue=CTxOutValue(fees)))

    # issued_tx = raw_tx.serialize().hex()
    blind_addr = self.nodes[0].getnewaddress()
    issue_addr = self.nodes[0].validateaddress(blind_addr)['unconfidential']
    issued_tx = self.nodes[0].rawissueasset(
        raw_tx.serialize().hex(), [{
            "asset_amount": 2,
            "asset_address": issue_addr,
            "blind": False
        }])[0]["hex"]
    # blind_tx = self.nodes[0].blindrawtransaction(issued_tx)  # This is a no-op

    genesis_hash = uint256_from_str(
        bytes.fromhex(self.nodes[0].getblockhash(0))[::-1])
    issued_tx = FromHex(CTransaction(), issued_tx)
    issued_tx.wit.vtxoutwit = [CTxOutWitness()] * len(issued_tx.vout)
    issued_tx.wit.vtxinwit = [CTxInWitness()] * len(issued_tx.vin)
    msg = TaprootSignatureHash(issued_tx, [tx.vout[prev_vout]], sighash_ty,
                               genesis_hash, 0)

    # compute the tweak
    tweak_sk = tweak_add_privkey(sec, tweak)
    sig = sign_schnorr(tweak_sk, msg)
    issued_tx.wit.vtxinwit[0].scriptWitness.stack = [
        taproot_pad_sighash_ty(sig, sighash_ty)
    ]
    pub_tweak = tweak_add_pubkey(pub, tweak)[0]
    assert verify_schnorr(pub_tweak, sig, msg)

    # Since we add in/outputs the min feerate is no longer maintained.
    self.nodes[0].sendrawtransaction(hexstring=issued_tx.serialize().hex())
    self.nodes[0].generate(1)
    last_blk = self.nodes[0].getblock(self.nodes[0].getbestblockhash())
    issued_tx.rehash()
    assert issued_tx.hash in last_blk['tx']
def receive_thread_nevm(self, idx, subscriber):
    while True:
        try:
            self.log.info('receive_thread_nevm waiting to receive... idx {}'.format(idx))
            data = subscriber.receive()
            if data[0] == b"nevmcomms":
                subscriber.send([b"nevmcomms", b"ack"])
            elif data[0] == b"nevmblock":
                hashStr = hash256(str(random.randint(-0x80000000, 0x7fffffff)).encode())
                hashTopic = uint256_from_str(hashStr)
                nevmBlock = CNEVMBlock(hashTopic, hashTopic, hashTopic, b"nevmblock")
                subscriber.send([b"nevmblock", nevmBlock.serialize()])
            elif data[0] == b"nevmconnect":
                evmBlockConnect = CNEVMBlockConnect()
                evmBlockConnect.deserialize(BytesIO(data[1]))
                resBlock = subscriber.addBlock(evmBlockConnect)
                res = b""
                if resBlock:
                    res = b"connected"
                else:
                    res = b"not connected"
                # stay paused during delay test
                while subscriber.artificialDelay == True:
                    sleep(0.1)
                subscriber.send([b"nevmconnect", res])
            elif data[0] == b"nevmdisconnect":
                evmBlockDisconnect = CNEVMBlockDisconnect()
                evmBlockDisconnect.deserialize(BytesIO(data[1]))
                resBlock = subscriber.deleteBlock(evmBlockDisconnect)
                res = b""
                if resBlock:
                    res = b"disconnected"
                else:
                    res = b"not disconnected"
                subscriber.send([b"nevmdisconnect", res])
            else:
                self.log.info("Unknown topic in REQ {}".format(data))
        except zmq.ContextTerminated:
            sleep(1)
            break
        except zmq.ZMQError:
            self.log.warning('zmq error, socket closed unexpectedly.')
            sleep(1)
            break
def get_empty_block(self, sync_height):
    sync_blocks(self.nodes, height=sync_height)
    node0 = self.nodes[0]
    hashprev = uint256_from_str(unhexlify(node0.getbestblockhash())[::-1])
    snapshot_hash = get_tip_snapshot_meta(node0).hash

    if len(self.spendable_outputs) > 0:
        block_time = self.spendable_outputs[-1].nTime + 1
    else:
        block_time = int(time_time()) + 2

    block = create_block(
        hashprev=hashprev,
        coinbase=sign_coinbase(
            self.nodes[0],
            create_coinbase(height=sync_height + 1,
                            stake=node0.listunspent()[0],
                            snapshot_hash=snapshot_hash)),
        nTime=block_time)
    block.solve()
    return block
def _check_algorithm_sanity(self):
    ctx = ContextInfoContainer()
    ctx.height = 1337
    ctx.keystone1 = "010203"
    ctx.keystone2 = "040506"
    assert_equal(
        ctx.getHash().hex(),
        "db35aad09a65b667a6c9e09cbd47b8d6b378b9ec705db604a4d5cd489afd2bc6")

    txroot = uint256_from_str(
        bytes.fromhex(
            "bf9fb4901a0d8fc9b0d3bf38546191f77a3f2ea5d543546aac0574290c0a9e83"
        ))
    poproot = EMPTY_POPDATA_ROOT_V1
    tlmr = _calculateTopLevelMerkleRoot(txRoot=txroot,
                                        popDataRoot=poproot,
                                        ctx=ctx)
    tlmr_hex = ser_uint256(tlmr).hex()
    assert_equal(
        tlmr_hex,
        "700c1abb69dd1899796b4cafa81c0eefa7b7d0c5aaa4b2bcb67713b2918edb52")
def run_test(self): node = self.nodes[0] # convenience reference to the node self.bootstrap_p2p() # Add one p2p connection to the node self.block_heights = {} self.coinbase_key = ECKey() self.coinbase_key.generate() self.coinbase_pubkey = self.coinbase_key.get_pubkey().get_bytes() self.tip = None self.blocks = {} self.genesis_hash = int(self.nodes[0].getbestblockhash(), 16) self.block_heights[self.genesis_hash] = 0 self.spendable_outputs = [] # Create a new block b0 = self.next_block(0) self.save_spendable_output() self.sync_blocks([b0]) # Allow the block to mature blocks = [] for i in range(99): blocks.append(self.next_block(5000 + i)) self.save_spendable_output() self.sync_blocks(blocks) # collect spendable outputs now to avoid cluttering the code later on out = [] for i in range(33): out.append(self.get_spendable_output()) # Start by building a couple of blocks on top (which output is spent is # in parentheses): # genesis -> b1 (0) -> b2 (1) b1 = self.next_block(1, spend=out[0]) self.save_spendable_output() b2 = self.next_block(2, spend=out[1]) self.save_spendable_output() self.sync_blocks([b1, b2]) # Fork like this: # # genesis -> b1 (0) -> b2 (1) # \-> b3 (1) # # Nothing should happen at this point. We saw b2 first so it takes # priority. self.log.info("Don't reorg to a chain of the same length") self.move_tip(1) b3 = self.next_block(3, spend=out[1]) txout_b3 = b3.vtx[1] self.sync_blocks([b3], False) # Now we add another block to make the alternative chain longer. # # genesis -> b1 (0) -> b2 (1) # \-> b3 (1) -> b4 (2) self.log.info("Reorg to a longer chain") b4 = self.next_block(4, spend=out[2]) self.sync_blocks([b4]) # ... and back to the first chain. # genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3) # \-> b3 (1) -> b4 (2) self.move_tip(2) b5 = self.next_block(5, spend=out[2]) self.save_spendable_output() self.sync_blocks([b5], False) self.log.info("Reorg back to the original chain") b6 = self.next_block(6, spend=out[3]) self.sync_blocks([b6], True) # Try to create a fork that double-spends # genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3) # \-> b7 (2) -> b8 (4) # \-> b3 (1) -> b4 (2) self.log.info( "Reject a chain with a double spend, even if it is longer") self.move_tip(5) b7 = self.next_block(7, spend=out[2]) self.sync_blocks([b7], False) b8 = self.next_block(8, spend=out[4]) self.sync_blocks([b8], False, reconnect=True) # Try to create a block that has too much fee # genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3) # \-> b9 (4) # \-> b3 (1) -> b4 (2) self.log.info( "Reject a block where the miner creates too much coinbase reward") self.move_tip(6) b9 = self.next_block(9, spend=out[4], additional_coinbase_value=1) self.sync_blocks([b9], success=False, reject_reason='bad-cb-amount', reconnect=True) # Create a fork that ends in a block with too much fee (the one that causes the reorg) # genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3) # \-> b10 (3) -> b11 (4) # \-> b3 (1) -> b4 (2) self.log.info( "Reject a chain where the miner creates too much coinbase reward, even if the chain is longer") self.move_tip(5) b10 = self.next_block(10, spend=out[3]) self.sync_blocks([b10], False) b11 = self.next_block(11, spend=out[4], additional_coinbase_value=1) self.sync_blocks([b11], success=False, reject_reason='bad-cb-amount', reconnect=True) # Try again, but with a valid fork first # genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3) # \-> b12 (3) -> b13 (4) -> b14 (5) # \-> b3 (1) -> b4 (2) self.log.info( "Reject a chain where the miner creates too much coinbase reward, even if the chain is longer 
(on a forked chain)") self.move_tip(5) b12 = self.next_block(12, spend=out[3]) self.save_spendable_output() b13 = self.next_block(13, spend=out[4]) self.save_spendable_output() b14 = self.next_block(14, spend=out[5], additional_coinbase_value=1) self.sync_blocks([b12, b13, b14], success=False, reject_reason='bad-cb-amount', reconnect=True) # New tip should be b13. assert_equal(node.getbestblockhash(), b13.hash) self.log.info("Skipped sigops tests") # tests were moved to feature_block_sigops.py self.move_tip(13) b15 = self.next_block(15) self.save_spendable_output() self.sync_blocks([b15], True) # Attempt to spend a transaction created on a different fork # genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3) # \-> b12 (3) -> b13 (4) -> b15 (5) -> b17 (b3.vtx[1]) # \-> b3 (1) -> b4 (2) self.log.info("Reject a block with a spend from a re-org'ed out tx") self.move_tip(15) b17 = self.next_block(17, spend=txout_b3) self.sync_blocks([b17], success=False, reject_reason='bad-txns-inputs-missingorspent', reconnect=True) # Attempt to spend a transaction created on a different fork (on a fork this time) # genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3) # \-> b12 (3) -> b13 (4) -> b15 (5) # \-> b18 (b3.vtx[1]) -> b19 (6) # \-> b3 (1) -> b4 (2) self.log.info( "Reject a block with a spend from a re-org'ed out tx (on a forked chain)") self.move_tip(13) b18 = self.next_block(18, spend=txout_b3) self.sync_blocks([b18], False) b19 = self.next_block(19, spend=out[6]) self.sync_blocks([b19], success=False, reject_reason='bad-txns-inputs-missingorspent', reconnect=True) # Attempt to spend a coinbase at depth too low # genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3) # \-> b12 (3) -> b13 (4) -> b15 (5) -> b20 (7) # \-> b3 (1) -> b4 (2) self.log.info("Reject a block spending an immature coinbase.") self.move_tip(15) b20 = self.next_block(20, spend=out[7]) self.sync_blocks([b20], success=False, reject_reason='bad-txns-premature-spend-of-coinbase') # Attempt to spend a coinbase at depth too low (on a fork this time) # genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3) # \-> b12 (3) -> b13 (4) -> b15 (5) # \-> b21 (6) -> b22 (5) # \-> b3 (1) -> b4 (2) self.log.info( "Reject a block spending an immature coinbase (on a forked chain)") self.move_tip(13) b21 = self.next_block(21, spend=out[6]) self.sync_blocks([b21], False) b22 = self.next_block(22, spend=out[5]) self.sync_blocks([b22], success=False, reject_reason='bad-txns-premature-spend-of-coinbase') # Create a block on either side of LEGACY_MAX_BLOCK_SIZE and make sure its accepted/rejected # genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3) # \-> b12 (3) -> b13 (4) -> b15 (5) -> b23 (6) # \-> b24 (6) -> b25 (7) # \-> b3 (1) -> b4 (2) self.log.info("Accept a block of size LEGACY_MAX_BLOCK_SIZE") self.move_tip(15) b23 = self.next_block(23, spend=out[6]) tx = CTransaction() script_length = LEGACY_MAX_BLOCK_SIZE - len(b23.serialize()) - 69 script_output = CScript([b'\x00' * script_length]) tx.vout.append(CTxOut(0, script_output)) tx.vin.append(CTxIn(COutPoint(b23.vtx[1].sha256, 0))) b23 = self.update_block(23, [tx]) # Make sure the math above worked out to produce a max-sized block assert_equal(len(b23.serialize()), LEGACY_MAX_BLOCK_SIZE) self.sync_blocks([b23], True) self.save_spendable_output() # Create blocks with a coinbase input script size out of range # genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3) # \-> b12 (3) -> b13 (4) -> b15 (5) -> b23 (6) -> b30 (7) # \-> ... (6) -> ... 
(7) # \-> b3 (1) -> b4 (2) self.log.info( "Reject a block with coinbase input script size out of range") self.move_tip(15) b26 = self.next_block(26, spend=out[6]) b26.vtx[0].vin[0].scriptSig = b'\x00' b26.vtx[0].rehash() # update_block causes the merkle root to get updated, even with no new # transactions, and updates the required state. b26 = self.update_block(26, []) self.sync_blocks([b26], success=False, reject_reason='bad-cb-length', reconnect=True) # Extend the b26 chain to make sure bitcoind isn't accepting b26 b27 = self.next_block(27, spend=out[7]) self.sync_blocks([b27], False) # Now try a too-large-coinbase script self.move_tip(15) b28 = self.next_block(28, spend=out[6]) b28.vtx[0].vin[0].scriptSig = b'\x00' * 101 b28.vtx[0].rehash() b28 = self.update_block(28, []) self.sync_blocks([b28], success=False, reject_reason='bad-cb-length', reconnect=True) # Extend the b28 chain to make sure bitcoind isn't accepting b28 b29 = self.next_block(29, spend=out[7]) self.sync_blocks([b29], False) # b30 has a max-sized coinbase scriptSig. self.move_tip(23) b30 = self.next_block(30) b30.vtx[0].vin[0].scriptSig = b'\x00' * 100 b30.vtx[0].rehash() b30 = self.update_block(30, []) self.sync_blocks([b30], True) self.save_spendable_output() self.log.info("Skipped sigops tests") # tests were moved to feature_block_sigops.py b31 = self.next_block(31) self.save_spendable_output() b33 = self.next_block(33) self.save_spendable_output() b35 = self.next_block(35) self.save_spendable_output() self.sync_blocks([b31, b33, b35], True) # Check spending of a transaction in a block which failed to connect # # b6 (3) # b12 (3) -> b13 (4) -> b15 (5) -> b23 (6) -> b30 (7) -> b31 (8) -> b33 (9) -> b35 (10) # \-> b37 (11) # \-> b38 (11/37) # # save 37's spendable output, but then double-spend out11 to invalidate # the block self.log.info( "Reject a block spending transaction from a block which failed to connect") self.move_tip(35) b37 = self.next_block(37, spend=out[11]) txout_b37 = b37.vtx[1] tx = self.create_and_sign_transaction(out[11], 0) b37 = self.update_block(37, [tx]) self.sync_blocks([b37], success=False, reject_reason='bad-txns-inputs-missingorspent', reconnect=True) # attempt to spend b37's first non-coinbase tx, at which point b37 was # still considered valid self.move_tip(35) b38 = self.next_block(38, spend=txout_b37) self.sync_blocks([b38], success=False, reject_reason='bad-txns-inputs-missingorspent', reconnect=True) self.log.info("Skipped sigops tests") # tests were moved to feature_block_sigops.py self.move_tip(35) b39 = self.next_block(39) self.save_spendable_output() b41 = self.next_block(41) self.sync_blocks([b39, b41], True) # Fork off of b39 to create a constant base again # # b23 (6) -> b30 (7) -> b31 (8) -> b33 (9) -> b35 (10) -> b39 (11) -> b42 (12) -> b43 (13) # \-> b41 (12) # self.move_tip(39) b42 = self.next_block(42, spend=out[12]) self.save_spendable_output() b43 = self.next_block(43, spend=out[13]) self.save_spendable_output() self.sync_blocks([b42, b43], True) # Test a number of really invalid scenarios # # -> b31 (8) -> b33 (9) -> b35 (10) -> b39 (11) -> b42 (12) -> b43 (13) -> b44 (14) # \-> ??? (15) # The next few blocks are going to be created "by hand" since they'll do funky things, such as having # the first transaction be non-coinbase, etc. The purpose of b44 is to # make sure this works. 
self.log.info("Build block 44 manually") height = self.block_heights[self.tip.sha256] + 1 coinbase = create_coinbase(height, self.coinbase_pubkey) b44 = CBlock() b44.nTime = self.tip.nTime + 1 b44.hashPrevBlock = self.tip.sha256 b44.nBits = 0x207fffff b44.vtx.append(coinbase) b44.hashMerkleRoot = b44.calc_merkle_root() b44.solve() self.tip = b44 self.block_heights[b44.sha256] = height self.blocks[44] = b44 self.sync_blocks([b44], True) self.log.info("Reject a block with a non-coinbase as the first tx") non_coinbase = self.create_tx(out[15], 0, 1) b45 = CBlock() b45.nTime = self.tip.nTime + 1 b45.hashPrevBlock = self.tip.sha256 b45.nBits = 0x207fffff b45.vtx.append(non_coinbase) b45.hashMerkleRoot = b45.calc_merkle_root() b45.calc_sha256() b45.solve() self.block_heights[b45.sha256] = self.block_heights[ self.tip.sha256] + 1 self.tip = b45 self.blocks[45] = b45 self.sync_blocks([b45], success=False, reject_reason='bad-cb-missing', reconnect=True) self.log.info("Reject a block with no transactions") self.move_tip(44) b46 = CBlock() b46.nTime = b44.nTime + 1 b46.hashPrevBlock = b44.sha256 b46.nBits = 0x207fffff b46.vtx = [] b46.hashMerkleRoot = 0 b46.solve() self.block_heights[b46.sha256] = self.block_heights[b44.sha256] + 1 self.tip = b46 assert 46 not in self.blocks self.blocks[46] = b46 self.sync_blocks([b46], success=False, reject_reason='bad-cb-missing', reconnect=True) self.log.info("Reject a block with invalid work") self.move_tip(44) b47 = self.next_block(47, solve=False) target = uint256_from_compact(b47.nBits) while b47.sha256 < target: b47.nNonce += 1 b47.rehash() self.sync_blocks([b47], False, request_block=False) self.log.info("Reject a block with a timestamp >2 hours in the future") self.move_tip(44) b48 = self.next_block(48, solve=False) b48.nTime = int(time.time()) + 60 * 60 * 3 b48.solve() self.sync_blocks([b48], False, request_block=False) self.log.info("Reject a block with invalid merkle hash") self.move_tip(44) b49 = self.next_block(49) b49.hashMerkleRoot += 1 b49.solve() self.sync_blocks([b49], success=False, reject_reason='bad-txnmrklroot', reconnect=True) self.log.info("Reject a block with incorrect POW limit") self.move_tip(44) b50 = self.next_block(50) b50.nBits = b50.nBits - 1 b50.solve() self.sync_blocks([b50], False, request_block=False, reconnect=True) self.log.info("Reject a block with two coinbase transactions") self.move_tip(44) b51 = self.next_block(51) cb2 = create_coinbase(51, self.coinbase_pubkey) b51 = self.update_block(51, [cb2]) self.sync_blocks([b51], success=False, reject_reason='bad-tx-coinbase', reconnect=True) self.log.info("Reject a block with duplicate transactions") self.move_tip(44) b52 = self.next_block(52, spend=out[15]) b52 = self.update_block(52, [b52.vtx[1]]) self.sync_blocks([b52], success=False, reject_reason='tx-duplicate', reconnect=True) # Test block timestamps # -> b31 (8) -> b33 (9) -> b35 (10) -> b39 (11) -> b42 (12) -> b43 (13) -> b53 (14) -> b55 (15) # \-> b54 (15) # self.move_tip(43) b53 = self.next_block(53, spend=out[14]) self.sync_blocks([b53], False) self.save_spendable_output() self.log.info("Reject a block with timestamp before MedianTimePast") b54 = self.next_block(54, spend=out[15]) b54.nTime = b35.nTime - 1 b54.solve() self.sync_blocks([b54], False, request_block=False) # valid timestamp self.move_tip(53) b55 = self.next_block(55, spend=out[15]) b55.nTime = b35.nTime self.update_block(55, []) self.sync_blocks([b55], True) self.save_spendable_output() # Test Merkle tree malleability # # -> b42 (12) -> b43 (13) -> b53 (14) 
-> b55 (15) -> b57p2 (16) # \-> b57 (16) # \-> b56p2 (16) # \-> b56 (16) # # Merkle tree malleability (CVE-2012-2459): repeating sequences of transactions in a block without # affecting the merkle root of a block, while still invalidating it. # See: src/consensus/merkle.h # # b57 has three txns: coinbase, tx, tx1. The merkle root computation will duplicate tx. # Result: OK # # b56 copies b57 but duplicates tx1 and does not recalculate the block hash. So it has a valid merkle # root but duplicate transactions. # Result: Fails # # b57p2 has six transactions in its merkle tree: # - coinbase, tx, tx1, tx2, tx3, tx4 # Merkle root calculation will duplicate as necessary. # Result: OK. # # b56p2 copies b57p2 but adds both tx3 and tx4. The purpose of the test is to make sure the code catches # duplicate txns that are not next to one another with the "bad-txns-duplicate" error (which indicates # that the error was caught early, avoiding a DOS vulnerability.) # b57 - a good block with 2 txs, don't submit until end self.move_tip(55) b57 = self.next_block(57) tx = self.create_and_sign_transaction(out[16], 1) tx1 = self.create_tx(tx, 0, 1) b57 = self.update_block(57, [tx, tx1]) # b56 - copy b57, add a duplicate tx self.log.info( "Reject a block with a duplicate transaction in the Merkle Tree (but with a valid Merkle Root)") self.move_tip(55) b56 = copy.deepcopy(b57) self.blocks[56] = b56 assert_equal(len(b56.vtx), 3) b56 = self.update_block(56, [b57.vtx[2]]) assert_equal(b56.hash, b57.hash) self.sync_blocks([b56], success=False, reject_reason='bad-txns-duplicate', reconnect=True) # b57p2 - a good block with 6 tx'es, don't submit until end self.move_tip(55) b57p2 = self.next_block("57p2") tx = self.create_and_sign_transaction(out[16], 1) tx1 = self.create_tx(tx, 0, 1) tx2 = self.create_tx(tx1, 0, 1) tx3 = self.create_tx(tx2, 0, 1) tx4 = self.create_tx(tx3, 0, 1) b57p2 = self.update_block("57p2", [tx, tx1, tx2, tx3, tx4]) # b56p2 - copy b57p2, duplicate two non-consecutive tx's self.log.info( "Reject a block with two duplicate transactions in the Merkle Tree (but with a valid Merkle Root)") self.move_tip(55) b56p2 = copy.deepcopy(b57p2) self.blocks["b56p2"] = b56p2 assert_equal(len(b56p2.vtx), 6) b56p2 = self.update_block("b56p2", b56p2.vtx[4:6], reorder=False) assert_equal(b56p2.hash, b57p2.hash) self.sync_blocks([b56p2], success=False, reject_reason='bad-txns-duplicate', reconnect=True) self.move_tip("57p2") self.sync_blocks([b57p2], True) self.move_tip(57) # The tip is not updated because 57p2 seen first self.sync_blocks([b57], False) self.save_spendable_output() # Test a few invalid tx types # # -> b35 (10) -> b39 (11) -> b42 (12) -> b43 (13) -> b53 (14) -> b55 (15) -> b57 (16) -> b60 (17) # \-> ??? 
(17) # # tx with prevout.n out of range self.log.info( "Reject a block with a transaction with prevout.n out of range") self.move_tip(57) b58 = self.next_block(58, spend=out[17]) tx = CTransaction() assert(len(out[17].vout) < 42) tx.vin.append( CTxIn(COutPoint(out[17].sha256, 42), CScript([OP_TRUE]), 0xffffffff)) tx.vout.append(CTxOut(0, b"")) pad_tx(tx) tx.calc_sha256() b58 = self.update_block(58, [tx]) self.sync_blocks([b58], success=False, reject_reason='bad-txns-inputs-missingorspent', reconnect=True) # tx with output value > input value self.log.info( "Reject a block with a transaction with outputs > inputs") self.move_tip(57) b59 = self.next_block(59) tx = self.create_and_sign_transaction(out[17], 51 * COIN) b59 = self.update_block(59, [tx]) self.sync_blocks([b59], success=False, reject_reason='bad-txns-in-belowout', reconnect=True) # reset to good chain self.move_tip(57) b60 = self.next_block(60, spend=out[17]) self.sync_blocks([b60], True) self.save_spendable_output() # Test BIP30 # # -> b39 (11) -> b42 (12) -> b43 (13) -> b53 (14) -> b55 (15) -> b57 (16) -> b60 (17) # \-> b61 (18) # # Blocks are not allowed to contain a transaction whose id matches that of an earlier, # not-fully-spent transaction in the same chain. To test, make identical coinbases; # the second one should be rejected. # self.log.info( "Reject a block with a transaction with a duplicate hash of a previous transaction (BIP30)") self.move_tip(60) b61 = self.next_block(61, spend=out[18]) # Equalize the coinbases b61.vtx[0].vin[0].scriptSig = b60.vtx[0].vin[0].scriptSig b61.vtx[0].rehash() b61 = self.update_block(61, []) assert_equal(b60.vtx[0].serialize(), b61.vtx[0].serialize()) self.sync_blocks([b61], success=False, reject_reason='bad-txns-BIP30', reconnect=True) # Test tx.isFinal is properly rejected (not an exhaustive tx.isFinal test, that should be in data-driven transaction tests) # # -> b39 (11) -> b42 (12) -> b43 (13) -> b53 (14) -> b55 (15) -> b57 (16) -> b60 (17) # \-> b62 (18) # self.log.info( "Reject a block with a transaction with a nonfinal locktime") self.move_tip(60) b62 = self.next_block(62) tx = CTransaction() tx.nLockTime = 0xffffffff # this locktime is non-final # don't set nSequence tx.vin.append(CTxIn(COutPoint(out[18].sha256, 0))) tx.vout.append(CTxOut(0, CScript([OP_TRUE]))) assert tx.vin[0].nSequence < 0xffffffff tx.calc_sha256() b62 = self.update_block(62, [tx]) self.sync_blocks([b62], success=False, reject_reason='bad-txns-nonfinal') # Test a non-final coinbase is also rejected # # -> b39 (11) -> b42 (12) -> b43 (13) -> b53 (14) -> b55 (15) -> b57 (16) -> b60 (17) # \-> b63 (-) # self.log.info( "Reject a block with a coinbase transaction with a nonfinal locktime") self.move_tip(60) b63 = self.next_block(63) b63.vtx[0].nLockTime = 0xffffffff b63.vtx[0].vin[0].nSequence = 0xDEADBEEF b63.vtx[0].rehash() b63 = self.update_block(63, []) self.sync_blocks([b63], success=False, reject_reason='bad-txns-nonfinal') # This checks that a block with a bloated VARINT between the block_header and the array of tx such that # the block is > LEGACY_MAX_BLOCK_SIZE with the bloated varint, but <= LEGACY_MAX_BLOCK_SIZE without the bloated varint, # does not cause a subsequent, identical block with canonical encoding to be rejected. The test does not # care whether the bloated block is accepted or rejected; it only cares that the second block is accepted. 
# # What matters is that the receiving node should not reject the bloated block, and then reject the canonical # block on the basis that it's the same as an already-rejected block (which would be a consensus failure.) # # -> b39 (11) -> b42 (12) -> b43 (13) -> b53 (14) -> b55 (15) -> b57 (16) -> b60 (17) -> b64 (18) # \ # b64a (18) # b64a is a bloated block (non-canonical varint) # b64 is a good block (same as b64 but w/ canonical varint) # self.log.info( "Accept a valid block even if a bloated version of the block has previously been sent") self.move_tip(60) regular_block = self.next_block("64a", spend=out[18]) # make it a "broken_block," with non-canonical serialization b64a = CBrokenBlock(regular_block) b64a.initialize(regular_block) self.blocks["64a"] = b64a self.tip = b64a tx = CTransaction() # use canonical serialization to calculate size script_length = LEGACY_MAX_BLOCK_SIZE - \ len(b64a.normal_serialize()) - 69 script_output = CScript([b'\x00' * script_length]) tx.vout.append(CTxOut(0, script_output)) tx.vin.append(CTxIn(COutPoint(b64a.vtx[1].sha256, 0))) b64a = self.update_block("64a", [tx]) assert_equal(len(b64a.serialize()), LEGACY_MAX_BLOCK_SIZE + 8) self.sync_blocks([b64a], success=False, reject_reason='non-canonical ReadCompactSize()') # bitcoind doesn't disconnect us for sending a bloated block, but if we subsequently # resend the header message, it won't send us the getdata message again. Just # disconnect and reconnect and then call sync_blocks. # TODO: improve this test to be less dependent on P2P DOS behaviour. node.disconnect_p2ps() self.reconnect_p2p() self.move_tip(60) b64 = CBlock(b64a) b64.vtx = copy.deepcopy(b64a.vtx) assert_equal(b64.hash, b64a.hash) assert_equal(len(b64.serialize()), LEGACY_MAX_BLOCK_SIZE) self.blocks[64] = b64 b64 = self.update_block(64, []) self.sync_blocks([b64], True) self.save_spendable_output() # Spend an output created in the block itself # # -> b42 (12) -> b43 (13) -> b53 (14) -> b55 (15) -> b57 (16) -> b60 (17) -> b64 (18) -> b65 (19) # self.log.info( "Accept a block with a transaction spending an output created in the same block") self.move_tip(64) b65 = self.next_block(65) tx1 = self.create_and_sign_transaction(out[19], out[19].vout[0].nValue) tx2 = self.create_and_sign_transaction(tx1, 0) b65 = self.update_block(65, [tx1, tx2]) self.sync_blocks([b65], True) self.save_spendable_output() # Attempt to double-spend a transaction created in a block # # -> b43 (13) -> b53 (14) -> b55 (15) -> b57 (16) -> b60 (17) -> b64 (18) -> b65 (19) # \-> b67 (20) # # self.log.info( "Reject a block with a transaction double spending a transaction created in the same block") self.move_tip(65) b67 = self.next_block(67) tx1 = self.create_and_sign_transaction(out[20], out[20].vout[0].nValue) tx2 = self.create_and_sign_transaction(tx1, 1) tx3 = self.create_and_sign_transaction(tx1, 2) b67 = self.update_block(67, [tx1, tx2, tx3]) self.sync_blocks([b67], success=False, reject_reason='bad-txns-inputs-missingorspent', reconnect=True) # More tests of block subsidy # # -> b43 (13) -> b53 (14) -> b55 (15) -> b57 (16) -> b60 (17) -> b64 (18) -> b65 (19) -> b69 (20) # \-> b68 (20) # # b68 - coinbase with an extra 10 satoshis, # creates a tx that has 9 satoshis from out[20] go to fees # this fails because the coinbase is trying to claim 1 satoshi too much in fees # # b69 - coinbase with extra 10 satoshis, and a tx that gives a 10 satoshi fee # this succeeds # self.log.info( "Reject a block trying to claim too much subsidy in the coinbase transaction") self.move_tip(65) 
b68 = self.next_block(68, additional_coinbase_value=10) tx = self.create_and_sign_transaction( out[20], out[20].vout[0].nValue - 9) b68 = self.update_block(68, [tx]) self.sync_blocks([b68], success=False, reject_reason='bad-cb-amount', reconnect=True) self.log.info( "Accept a block claiming the correct subsidy in the coinbase transaction") self.move_tip(65) b69 = self.next_block(69, additional_coinbase_value=10) tx = self.create_and_sign_transaction( out[20], out[20].vout[0].nValue - 10) self.update_block(69, [tx]) self.sync_blocks([b69], True) self.save_spendable_output() # Test spending the outpoint of a non-existent transaction # # -> b53 (14) -> b55 (15) -> b57 (16) -> b60 (17) -> b64 (18) -> b65 (19) -> b69 (20) # \-> b70 (21) # self.log.info( "Reject a block containing a transaction spending from a non-existent input") self.move_tip(69) b70 = self.next_block(70, spend=out[21]) bogus_tx = CTransaction() bogus_tx.sha256 = uint256_from_str( b"23c70ed7c0506e9178fc1a987f40a33946d4ad4c962b5ae3a52546da53af0c5c") tx = CTransaction() tx.vin.append(CTxIn(COutPoint(bogus_tx.sha256, 0), b"", 0xffffffff)) tx.vout.append(CTxOut(1, b"")) pad_tx(tx) b70 = self.update_block(70, [tx]) self.sync_blocks([b70], success=False, reject_reason='bad-txns-inputs-missingorspent', reconnect=True) # Test accepting an invalid block which has the same hash as a valid one (via merkle tree tricks) # # -> b53 (14) -> b55 (15) -> b57 (16) -> b60 (17) -> b64 (18) -> b65 (19) -> b69 (20) -> b72 (21) # \-> b71 (21) # # b72 is a good block. # b71 is a copy of 72, but re-adds one of its transactions. However, # it has the same hash as b72. self.log.info( "Reject a block containing a duplicate transaction but with the same Merkle root (Merkle tree malleability") self.move_tip(69) b72 = self.next_block(72) tx1 = self.create_and_sign_transaction(out[21], 2) tx2 = self.create_and_sign_transaction(tx1, 1) b72 = self.update_block(72, [tx1, tx2]) # now tip is 72 b71 = copy.deepcopy(b72) # add duplicate last transaction b71.vtx.append(b72.vtx[-1]) # b71 builds off b69 self.block_heights[b71.sha256] = self.block_heights[b69.sha256] + 1 self.blocks[71] = b71 assert_equal(len(b71.vtx), 4) assert_equal(len(b72.vtx), 3) assert_equal(b72.sha256, b71.sha256) self.move_tip(71) self.sync_blocks([b71], success=False, reject_reason='bad-txns-duplicate', reconnect=True) self.move_tip(72) self.sync_blocks([b72], True) self.save_spendable_output() self.log.info("Skipped sigops tests") # tests were moved to feature_block_sigops.py b75 = self.next_block(75) self.save_spendable_output() b76 = self.next_block(76) self.save_spendable_output() self.sync_blocks([b75, b76], True) # Test transaction resurrection # # -> b77 (24) -> b78 (25) -> b79 (26) # \-> b80 (25) -> b81 (26) -> b82 (27) # # b78 creates a tx, which is spent in b79. After b82, both should be in mempool # # The tx'es must be unsigned and pass the node's mempool policy. It is unsigned for the # rather obscure reason that the Python signature code does not distinguish between # Low-S and High-S values (whereas the bitcoin code has custom code which does so); # as a result of which, the odds are 50% that the python code will use the right # value and the transaction will be accepted into the mempool. Until we modify the # test framework to support low-S signing, we are out of luck. # # To get around this issue, we construct transactions which are not signed and which # spend to OP_TRUE. If the standard-ness rules change, this test would need to be # updated. 
(Perhaps to spend to a P2SH OP_TRUE script) self.log.info("Test transaction resurrection during a re-org") self.move_tip(76) b77 = self.next_block(77) tx77 = self.create_and_sign_transaction(out[24], 10 * COIN) b77 = self.update_block(77, [tx77]) self.sync_blocks([b77], True) self.save_spendable_output() b78 = self.next_block(78) tx78 = self.create_tx(tx77, 0, 9 * COIN) b78 = self.update_block(78, [tx78]) self.sync_blocks([b78], True) b79 = self.next_block(79) tx79 = self.create_tx(tx78, 0, 8 * COIN) b79 = self.update_block(79, [tx79]) self.sync_blocks([b79], True) # mempool should be empty assert_equal(len(self.nodes[0].getrawmempool()), 0) self.move_tip(77) b80 = self.next_block(80, spend=out[25]) self.sync_blocks([b80], False, request_block=False) self.save_spendable_output() b81 = self.next_block(81, spend=out[26]) # other chain is same length self.sync_blocks([b81], False, request_block=False) self.save_spendable_output() b82 = self.next_block(82, spend=out[27]) # now this chain is longer, triggers re-org self.sync_blocks([b82], True) self.save_spendable_output() # now check that tx78 and tx79 have been put back into the peer's # mempool mempool = self.nodes[0].getrawmempool() assert_equal(len(mempool), 2) assert tx78.hash in mempool assert tx79.hash in mempool # Test invalid opcodes in dead execution paths. # # -> b81 (26) -> b82 (27) -> b83 (28) # self.log.info( "Accept a block with invalid opcodes in dead execution paths") b83 = self.next_block(83) op_codes = [OP_IF, INVALIDOPCODE, OP_ELSE, OP_TRUE, OP_ENDIF] script = CScript(op_codes) tx1 = self.create_and_sign_transaction( out[28], out[28].vout[0].nValue, script) tx2 = self.create_and_sign_transaction(tx1, 0, CScript([OP_TRUE])) tx2.vin[0].scriptSig = CScript([OP_FALSE]) tx2.rehash() b83 = self.update_block(83, [tx1, tx2]) self.sync_blocks([b83], True) self.save_spendable_output() # Reorg on/off blocks that have OP_RETURN in them (and try to spend them) # # -> b81 (26) -> b82 (27) -> b83 (28) -> b84 (29) -> b87 (30) -> b88 (31) # \-> b85 (29) -> b86 (30) \-> b89a (32) # self.log.info("Test re-orging blocks with OP_RETURN in them") b84 = self.next_block(84) tx1 = self.create_tx(out[29], 0, 0, CScript([OP_RETURN])) vout_offset = len(tx1.vout) tx1.vout.append(CTxOut(0, CScript([OP_TRUE]))) tx1.vout.append(CTxOut(0, CScript([OP_TRUE]))) tx1.vout.append(CTxOut(0, CScript([OP_TRUE]))) tx1.vout.append(CTxOut(0, CScript([OP_TRUE]))) tx1.calc_sha256() self.sign_tx(tx1, out[29]) tx1.rehash() tx2 = self.create_tx(tx1, vout_offset, 0, CScript([OP_RETURN])) tx2.vout.append(CTxOut(0, CScript([OP_RETURN]))) tx3 = self.create_tx(tx1, vout_offset + 1, 0, CScript([OP_RETURN])) tx3.vout.append(CTxOut(0, CScript([OP_TRUE]))) tx4 = self.create_tx(tx1, vout_offset + 2, 0, CScript([OP_TRUE])) tx4.vout.append(CTxOut(0, CScript([OP_RETURN]))) tx5 = self.create_tx(tx1, vout_offset + 3, 0, CScript([OP_RETURN])) b84 = self.update_block(84, [tx1, tx2, tx3, tx4, tx5]) self.sync_blocks([b84], True) self.save_spendable_output() self.move_tip(83) b85 = self.next_block(85, spend=out[29]) self.sync_blocks([b85], False) # other chain is same length b86 = self.next_block(86, spend=out[30]) self.sync_blocks([b86], True) self.move_tip(84) b87 = self.next_block(87, spend=out[30]) self.sync_blocks([b87], False) # other chain is same length self.save_spendable_output() b88 = self.next_block(88, spend=out[31]) self.sync_blocks([b88], True) self.save_spendable_output() # trying to spend the OP_RETURN output is rejected b89a = self.next_block("89a", spend=out[32]) tx = 
self.create_tx(tx1, 0, 0, CScript([OP_TRUE])) b89a = self.update_block("89a", [tx]) self.sync_blocks([b89a], success=False, reject_reason='bad-txns-inputs-missingorspent', reconnect=True) self.log.info( "Test a re-org of one week's worth of blocks (1088 blocks)") self.move_tip(88) LARGE_REORG_SIZE = 1088 blocks = [] spend = out[32] for i in range(89, LARGE_REORG_SIZE + 89): b = self.next_block(i, spend) tx = CTransaction() script_length = LEGACY_MAX_BLOCK_SIZE - len(b.serialize()) - 69 script_output = CScript([b'\x00' * script_length]) tx.vout.append(CTxOut(0, script_output)) tx.vin.append(CTxIn(COutPoint(b.vtx[1].sha256, 0))) b = self.update_block(i, [tx]) assert_equal(len(b.serialize()), LEGACY_MAX_BLOCK_SIZE) blocks.append(b) self.save_spendable_output() spend = self.get_spendable_output() self.sync_blocks(blocks, True, timeout=960) chain1_tip = i # now create alt chain of same length self.move_tip(88) blocks2 = [] for i in range(89, LARGE_REORG_SIZE + 89): blocks2.append(self.next_block("alt" + str(i))) self.sync_blocks(blocks2, False, request_block=False) # extend alt chain to trigger re-org block = self.next_block("alt" + str(chain1_tip + 1)) self.sync_blocks([block], True, timeout=960) # ... and re-org back to the first chain self.move_tip(chain1_tip) block = self.next_block(chain1_tip + 1) self.sync_blocks([block], False, request_block=False) block = self.next_block(chain1_tip + 2) self.sync_blocks([block], True, timeout=960)
def test_witness_block_size(self):
    # TODO: Test that non-witness carrying blocks can't exceed 1MB
    # Skipping this test for now; this is covered in p2p-fullblocktest.py

    # Test that witness-bearing blocks are limited at ceil(base + wit/4) <= 1MB.
    block = self.build_next_block()

    assert len(self.utxo) > 0

    # Create a P2WSH transaction.
    # The witness program will be a bunch of OP_2DROP's, followed by OP_TRUE.
    # This should give us plenty of room to tweak the spending tx's
    # virtual size.
    NUM_DROPS = 200  # 201 max ops per script!
    NUM_OUTPUTS = 50

    witness_program = CScript([OP_2DROP] * NUM_DROPS + [OP_TRUE])
    witness_hash = uint256_from_str(sha256(witness_program))
    script_pubkey = CScript([OP_0, ser_uint256(witness_hash)])

    prevout = COutPoint(self.utxo[0].sha256, self.utxo[0].n)
    value = self.utxo[0].nValue

    parent_tx = CTransaction()
    parent_tx.vin.append(CTxIn(prevout, b""))
    child_value = int(value / NUM_OUTPUTS)
    for i in range(NUM_OUTPUTS):
        parent_tx.vout.append(CTxOut(child_value, script_pubkey))
    parent_tx.vout[0].nValue -= 50000
    assert parent_tx.vout[0].nValue > 0
    parent_tx.rehash()

    filler_size = 3150
    child_tx = CTransaction()
    for i in range(NUM_OUTPUTS):
        child_tx.vin.append(CTxIn(COutPoint(parent_tx.sha256, i), b""))
    child_tx.vout = [CTxOut(value - 100000, CScript([OP_TRUE]))]
    for i in range(NUM_OUTPUTS):
        child_tx.wit.vtxinwit.append(CTxInWitness())
        child_tx.wit.vtxinwit[-1].scriptWitness.stack = [
            b'a' * filler_size
        ] * (2 * NUM_DROPS) + [witness_program]
    child_tx.rehash()
    self.update_witness_block_with_transactions(block, [parent_tx, child_tx])

    vsize = get_virtual_size(block)
    assert_greater_than(MAX_BLOCK_BASE_SIZE, vsize)
    additional_bytes = (MAX_BLOCK_BASE_SIZE - vsize) * 4
    i = 0
    while additional_bytes > 0:
        # Add some more bytes to each input until we hit MAX_BLOCK_BASE_SIZE+1
        extra_bytes = min(additional_bytes + 1, 55)
        block.vtx[-1].wit.vtxinwit[int(i / (2 * NUM_DROPS))].scriptWitness.stack[
            i % (2 * NUM_DROPS)] = b'a' * (filler_size + extra_bytes)
        additional_bytes -= extra_bytes
        i += 1

    block.vtx[0].vout.pop()  # Remove old commitment
    add_witness_commitment(block)
    block.solve()
    vsize = get_virtual_size(block)
    assert_equal(vsize, MAX_BLOCK_BASE_SIZE + 1)
    # Make sure that our test case would exceed the old max-network-message
    # limit
    assert len(block.serialize()) > 2 * 1024 * 1024

    test_witness_block(self.nodes[0], self.test_node, block, accepted=False)

    # Now resize the second transaction to make the block fit.
    cur_length = len(block.vtx[-1].wit.vtxinwit[0].scriptWitness.stack[0])
    block.vtx[-1].wit.vtxinwit[0].scriptWitness.stack[0] = b'a' * (cur_length - 1)
    block.vtx[0].vout.pop()
    add_witness_commitment(block)
    block.solve()
    assert get_virtual_size(block) == MAX_BLOCK_BASE_SIZE
    test_witness_block(self.nodes[0], self.test_node, block, accepted=True)

    # Update available utxo's
    self.utxo.pop(0)
    self.utxo.append(
        UTXO(block.vtx[-1].sha256, 0, block.vtx[-1].vout[0].nValue))
def send_clsig(self, clsig):
    clhash = uint256_from_str(hash256(clsig.serialize()))
    self.clsigs[clhash] = clsig

    inv = msg_inv([CInv(20, clhash)])
    self.send_message(inv)
def def_utxo(height):
    hex_id = hex_str_to_bytes('0' * 64)
    uint256 = uint256_from_str(hex_id)
    return UTXO(height, TxType.REGULAR, COutPoint(uint256, 0), CTxOut(0, b""))
def run_test(self):
    # Node 0 supports COMPACT_FILTERS, node 1 does not.
    peer_0 = self.nodes[0].add_p2p_connection(FiltersClient())
    peer_1 = self.nodes[1].add_p2p_connection(FiltersClient())

    # Nodes 0 & 1 share the same first 999 blocks in the chain.
    self.generate(self.nodes[0], 999)

    # Stale blocks by disconnecting nodes 0 & 1, mining, then reconnecting
    self.disconnect_nodes(0, 1)

    stale_block_hash = self.generate(self.nodes[0], 1, sync_fun=self.no_op)[0]
    self.nodes[0].syncwithvalidationinterfacequeue()
    assert_equal(self.nodes[0].getblockcount(), 1000)

    self.generate(self.nodes[1], 1001, sync_fun=self.no_op)
    assert_equal(self.nodes[1].getblockcount(), 2000)

    # Check that nodes have signalled NODE_COMPACT_FILTERS correctly.
    assert peer_0.nServices & NODE_COMPACT_FILTERS != 0
    assert peer_1.nServices & NODE_COMPACT_FILTERS == 0

    # Check that the localservices is as expected.
    assert int(self.nodes[0].getnetworkinfo()['localservices'], 16) & NODE_COMPACT_FILTERS != 0
    assert int(self.nodes[1].getnetworkinfo()['localservices'], 16) & NODE_COMPACT_FILTERS == 0

    self.log.info("get cfcheckpt on chain to be re-orged out.")
    request = msg_getcfcheckpt(
        filter_type=FILTER_TYPE_BASIC,
        stop_hash=int(stale_block_hash, 16),
    )
    peer_0.send_and_ping(message=request)
    response = peer_0.last_message['cfcheckpt']
    assert_equal(response.filter_type, request.filter_type)
    assert_equal(response.stop_hash, request.stop_hash)
    assert_equal(len(response.headers), 1)

    self.log.info("Reorg node 0 to a new chain.")
    self.connect_nodes(0, 1)
    self.sync_blocks(timeout=600)
    self.nodes[0].syncwithvalidationinterfacequeue()

    main_block_hash = self.nodes[0].getblockhash(1000)
    assert main_block_hash != stale_block_hash, "node 0 chain did not reorganize"

    self.log.info("Check that peers can fetch cfcheckpt on active chain.")
    tip_hash = self.nodes[0].getbestblockhash()
    request = msg_getcfcheckpt(
        filter_type=FILTER_TYPE_BASIC,
        stop_hash=int(tip_hash, 16),
    )
    peer_0.send_and_ping(request)
    response = peer_0.last_message['cfcheckpt']
    assert_equal(response.filter_type, request.filter_type)
    assert_equal(response.stop_hash, request.stop_hash)

    main_cfcheckpt = self.nodes[0].getblockfilter(main_block_hash, 'basic')['header']
    tip_cfcheckpt = self.nodes[0].getblockfilter(tip_hash, 'basic')['header']
    assert_equal(
        response.headers,
        [int(header, 16) for header in (main_cfcheckpt, tip_cfcheckpt)],
    )

    self.log.info("Check that peers can fetch cfcheckpt on stale chain.")
    request = msg_getcfcheckpt(
        filter_type=FILTER_TYPE_BASIC,
        stop_hash=int(stale_block_hash, 16),
    )
    peer_0.send_and_ping(request)
    response = peer_0.last_message['cfcheckpt']

    stale_cfcheckpt = self.nodes[0].getblockfilter(stale_block_hash, 'basic')['header']
    assert_equal(
        response.headers,
        [int(header, 16) for header in (stale_cfcheckpt, )],
    )

    self.log.info("Check that peers can fetch cfheaders on active chain.")
    request = msg_getcfheaders(
        filter_type=FILTER_TYPE_BASIC,
        start_height=1,
        stop_hash=int(main_block_hash, 16),
    )
    peer_0.send_and_ping(request)
    response = peer_0.last_message['cfheaders']
    main_cfhashes = response.hashes
    assert_equal(len(main_cfhashes), 1000)
    assert_equal(
        compute_last_header(response.prev_header, response.hashes),
        int(main_cfcheckpt, 16),
    )

    self.log.info("Check that peers can fetch cfheaders on stale chain.")
    request = msg_getcfheaders(
        filter_type=FILTER_TYPE_BASIC,
        start_height=1,
        stop_hash=int(stale_block_hash, 16),
    )
    peer_0.send_and_ping(request)
    response = peer_0.last_message['cfheaders']
    stale_cfhashes = response.hashes
    assert_equal(len(stale_cfhashes), 1000)
    assert_equal(
        compute_last_header(response.prev_header, response.hashes),
        int(stale_cfcheckpt, 16),
    )

    self.log.info("Check that peers can fetch cfilters.")
    stop_hash = self.nodes[0].getblockhash(10)
    request = msg_getcfilters(
        filter_type=FILTER_TYPE_BASIC,
        start_height=1,
        stop_hash=int(stop_hash, 16),
    )
    peer_0.send_and_ping(request)
    response = peer_0.pop_cfilters()
    assert_equal(len(response), 10)

    self.log.info("Check that cfilter responses are correct.")
    for cfilter, cfhash, height in zip(response, main_cfhashes, range(1, 11)):
        block_hash = self.nodes[0].getblockhash(height)
        assert_equal(cfilter.filter_type, FILTER_TYPE_BASIC)
        assert_equal(cfilter.block_hash, int(block_hash, 16))
        computed_cfhash = uint256_from_str(hash256(cfilter.filter_data))
        assert_equal(computed_cfhash, cfhash)

    self.log.info("Check that peers can fetch cfilters for stale blocks.")
    request = msg_getcfilters(
        filter_type=FILTER_TYPE_BASIC,
        start_height=1000,
        stop_hash=int(stale_block_hash, 16),
    )
    peer_0.send_and_ping(request)
    response = peer_0.pop_cfilters()
    assert_equal(len(response), 1)

    cfilter = response[0]
    assert_equal(cfilter.filter_type, FILTER_TYPE_BASIC)
    assert_equal(cfilter.block_hash, int(stale_block_hash, 16))
    computed_cfhash = uint256_from_str(hash256(cfilter.filter_data))
    assert_equal(computed_cfhash, stale_cfhashes[999])

    self.log.info(
        "Requests to node 1 without NODE_COMPACT_FILTERS results in disconnection."
    )
    requests = [
        msg_getcfcheckpt(
            filter_type=FILTER_TYPE_BASIC,
            stop_hash=int(main_block_hash, 16),
        ),
        msg_getcfheaders(
            filter_type=FILTER_TYPE_BASIC,
            start_height=1000,
            stop_hash=int(main_block_hash, 16),
        ),
        msg_getcfilters(
            filter_type=FILTER_TYPE_BASIC,
            start_height=1000,
            stop_hash=int(main_block_hash, 16),
        ),
    ]
    for request in requests:
        peer_1 = self.nodes[1].add_p2p_connection(P2PInterface())
        peer_1.send_message(request)
        peer_1.wait_for_disconnect()

    self.log.info("Check that invalid requests result in disconnection.")
    requests = [
        # Requesting too many filters results in disconnection.
        msg_getcfilters(
            filter_type=FILTER_TYPE_BASIC,
            start_height=0,
            stop_hash=int(main_block_hash, 16),
        ),
        # Requesting too many filter headers results in disconnection.
        msg_getcfheaders(
            filter_type=FILTER_TYPE_BASIC,
            start_height=0,
            stop_hash=int(tip_hash, 16),
        ),
        # Requesting unknown filter type results in disconnection.
        msg_getcfcheckpt(
            filter_type=255,
            stop_hash=int(main_block_hash, 16),
        ),
        # Requesting unknown hash results in disconnection.
        msg_getcfcheckpt(
            filter_type=FILTER_TYPE_BASIC,
            stop_hash=123456789,
        ),
    ]
    for request in requests:
        peer_0 = self.nodes[0].add_p2p_connection(P2PInterface())
        peer_0.send_message(request)
        peer_0.wait_for_disconnect()

    self.log.info("Test -peerblockfilters without -blockfilterindex raises an error")
    self.stop_node(0)
    self.nodes[0].extra_args = ["-peerblockfilters"]
    msg = "Error: Cannot set -peerblockfilters without -blockfilterindex."
    self.nodes[0].assert_start_raises_init_error(expected_msg=msg)

    self.log.info("Test -blockfilterindex with -reindex-chainstate raises an error")
    self.nodes[0].assert_start_raises_init_error(
        expected_msg='Error: -reindex-chainstate option is not compatible with -blockfilterindex. '
        'Please temporarily disable blockfilterindex while using -reindex-chainstate, or replace -reindex-chainstate with -reindex to fully rebuild all indexes.',
        extra_args=['-blockfilterindex', '-reindex-chainstate'],
    )
def test_coinbase_witness(self):

    def WitToHex(obj):
        return bytes_to_hex_str(obj.serialize(with_witness=True))

    block = self.nodes[0].getnewblockhex()
    block_struct = FromHex(CBlock(), block)

    # Test vanilla block round-trip
    self.nodes[0].testproposedblock(WitToHex(block_struct))

    # Assert there's scriptWitness in the coinbase input that is the witness nonce and nothing else
    assert_equal(block_struct.vtx[0].wit.vtxinwit[0].scriptWitness.stack, [b'\x00'*32])
    assert_equal(block_struct.vtx[0].wit.vtxinwit[0].vchIssuanceAmountRangeproof, b'')
    assert_equal(block_struct.vtx[0].wit.vtxinwit[0].vchInflationKeysRangeproof, b'')
    assert_equal(block_struct.vtx[0].wit.vtxinwit[0].peginWitness.stack, [])

    # Add extra witness that isn't covered by witness merkle root, make sure blocks are still valid
    block_witness_stuffed = copy.deepcopy(block_struct)
    block_witness_stuffed.vtx[0].wit.vtxinwit[0].vchIssuanceAmountRangeproof = b'\x00'
    assert_raises_rpc_error(-25, "bad-cb-witness", self.nodes[0].testproposedblock, WitToHex(block_witness_stuffed))
    block_witness_stuffed = copy.deepcopy(block_struct)
    block_witness_stuffed.vtx[0].wit.vtxinwit[0].vchInflationKeysRangeproof = b'\x00'
    assert_raises_rpc_error(-25, "bad-cb-witness", self.nodes[0].testproposedblock, WitToHex(block_witness_stuffed))
    block_witness_stuffed = copy.deepcopy(block_struct)

    # Let's blow out block weight limit by adding 4MW here
    block_witness_stuffed.vtx[0].wit.vtxinwit[0].peginWitness.stack = [b'\x00'*4000000]
    assert_raises_rpc_error(-25, "bad-cb-witness", self.nodes[0].testproposedblock, WitToHex(block_witness_stuffed))

    # Test that node isn't blinded to the block
    # Previously an over-stuffed block >4MW would have been marked permanently bad
    # as it already passes witness merkle and regular merkle root checks
    block_height = self.nodes[0].getblockcount()
    assert_equal(self.nodes[0].submitblock(WitToHex(block_witness_stuffed)), "bad-cb-witness")
    assert_equal(block_height, self.nodes[0].getblockcount())
    assert_equal(self.nodes[0].submitblock(WitToHex(block_struct)), None)
    assert_equal(block_height+1, self.nodes[0].getblockcount())

    # New block since we used the first one
    block_struct = FromHex(CBlock(), self.nodes[0].getnewblockhex())
    block_witness_stuffed = copy.deepcopy(block_struct)

    # Add extra witness data that is covered by witness merkle root, make sure invalid
    assert_equal(block_witness_stuffed.vtx[0].wit.vtxoutwit[0].vchSurjectionproof, b'')
    assert_equal(block_witness_stuffed.vtx[0].wit.vtxoutwit[0].vchRangeproof, b'')
    block_witness_stuffed.vtx[0].wit.vtxoutwit[0].vchRangeproof = b'\x00'*100000
    block_witness_stuffed.vtx[0].wit.vtxoutwit[0].vchSurjectionproof = b'\x00'*100000
    assert_raises_rpc_error(-25, "bad-witness-merkle-match", self.nodes[0].testproposedblock, WitToHex(block_witness_stuffed))
    witness_root_hex = block_witness_stuffed.calc_witness_merkle_root()
    witness_root = uint256_from_str(hex_str_to_bytes(witness_root_hex)[::-1])
    block_witness_stuffed.vtx[0].vout[-1] = CTxOut(0, get_witness_script(witness_root, 0))
    block_witness_stuffed.vtx[0].rehash()
    block_witness_stuffed.hashMerkleRoot = block_witness_stuffed.calc_merkle_root()
    block_witness_stuffed.rehash()
    assert_raises_rpc_error(-25, "bad-cb-amount", self.nodes[0].testproposedblock, WitToHex(block_witness_stuffed))
    assert_greater_than(len(WitToHex(block_witness_stuffed)), 100000*4)  # Make sure the witness data is actually serialized

    # A CTxIn that always serializes the asset issuance, even for coinbases.
    class AlwaysIssuanceCTxIn(CTxIn):
        def serialize(self):
            r = b''
            outpoint = COutPoint()
            outpoint.hash = self.prevout.hash
            outpoint.n = self.prevout.n
            outpoint.n |= OUTPOINT_ISSUANCE_FLAG
            r += outpoint.serialize()
            r += ser_string(self.scriptSig)
            r += struct.pack("<I", self.nSequence)
            r += self.assetIssuance.serialize()
            return r

    # Test that issuance inputs in coinbase don't survive a serialization round-trip
    # (even though this can't cause issuance to occur either way due to VerifyCoinbaseAmount semantics)
    block_witness_stuffed = copy.deepcopy(block_struct)
    coinbase_orig = copy.deepcopy(block_witness_stuffed.vtx[0].vin[0])
    coinbase_ser_size = len(block_witness_stuffed.vtx[0].vin[0].serialize())
    block_witness_stuffed.vtx[0].vin[0] = AlwaysIssuanceCTxIn()
    block_witness_stuffed.vtx[0].vin[0].prevout = coinbase_orig.prevout
    block_witness_stuffed.vtx[0].vin[0].scriptSig = coinbase_orig.scriptSig
    block_witness_stuffed.vtx[0].vin[0].nSequence = coinbase_orig.nSequence
    block_witness_stuffed.vtx[0].vin[0].assetIssuance.nAmount.setToAmount(1)
    bad_coinbase_ser_size = len(block_witness_stuffed.vtx[0].vin[0].serialize())
    # 32+32+9+1 should be serialized for each assetIssuance field
    assert_equal(bad_coinbase_ser_size, coinbase_ser_size+32+32+9+1)
    assert not block_witness_stuffed.vtx[0].vin[0].assetIssuance.isNull()
    assert_raises_rpc_error(-22, "TX decode failed", self.nodes[0].decoderawtransaction, ToHex(block_witness_stuffed.vtx[0]))
def send_islock(self, islock): hash = uint256_from_str(hash256(islock.serialize())) self.islocks[hash] = islock inv = msg_inv([CInv(30, hash)]) self.send_message(inv)
def compute_last_header(prev_header, hashes): """Compute the last filter header from a starting header and a sequence of filter hashes.""" header = ser_uint256(prev_header) for filter_hash in hashes: header = hash256(ser_uint256(filter_hash) + header) return uint256_from_str(header)
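A minimal usage sketch for compute_last_header, assuming hash256, ser_uint256 and uint256_from_str can be imported from the framework's messages module as elsewhere in these tests; the filter hashes below are placeholders, not real BIP 157 data:

# Illustration only: fold two placeholder filter hashes onto a zero previous
# header, mirroring header_n = hash256(filter_hash_n || header_{n-1}).
from test_framework.messages import hash256, ser_uint256, uint256_from_str

prev_header = 0
filter_hashes = [
    uint256_from_str(hash256(b'filter-data-1')),  # placeholder value
    uint256_from_str(hash256(b'filter-data-2')),  # placeholder value
]
last_header = compute_last_header(prev_header, filter_hashes)

# Folding by hand gives the same result.
manual = ser_uint256(prev_header)
for fh in filter_hashes:
    manual = hash256(ser_uint256(fh) + manual)
assert last_header == uint256_from_str(manual)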
def send_tx(self, tx): hash = uint256_from_str(hash256(tx.serialize())) self.txes[hash] = tx inv = msg_inv([CInv(30, hash)]) self.send_message(inv)
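send_islock and send_tx only advertise a hash via an inv; the peer is expected to fetch the object with getdata. A hedged sketch of a matching responder, assuming both dictionaries live on one P2PInterface subclass, that the stored islock is already a p2p message object, and that a msg_tx wrapper exists in the host framework:

def on_getdata(self, message):
    # Serve objects previously announced by send_tx / send_islock.
    for inv in message.inv:
        if inv.hash in self.txes:
            # Wrap the bare CTransaction in a tx message before sending.
            self.send_message(msg_tx(self.txes[inv.hash]))
        if inv.hash in self.islocks:
            # Assumed to already be a sendable message object.
            self.send_message(self.islocks[inv.hash])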
def test_coinbase_witness(self): block = self.nodes[0].getnewblockhex() block_struct = FromHex(CBlock(), block) # Test vanilla block round-trip self.nodes[0].testproposedblock(WitToHex(block_struct)) # Assert there's scriptWitness in the coinbase input that is the witness nonce and nothing else assert_equal(block_struct.vtx[0].wit.vtxinwit[0].scriptWitness.stack, [b'\x00'*32]) assert_equal(block_struct.vtx[0].wit.vtxinwit[0].vchIssuanceAmountRangeproof, b'') assert_equal(block_struct.vtx[0].wit.vtxinwit[0].vchInflationKeysRangeproof, b'') assert_equal(block_struct.vtx[0].wit.vtxinwit[0].peginWitness.stack, []) # Add extra witness that isn't covered by witness merkle root, make sure blocks are still valid block_witness_stuffed = copy.deepcopy(block_struct) block_witness_stuffed.vtx[0].wit.vtxinwit[0].vchIssuanceAmountRangeproof = b'\x00' assert_raises_rpc_error(-25, "bad-cb-witness", self.nodes[0].testproposedblock, WitToHex(block_witness_stuffed)) block_witness_stuffed = copy.deepcopy(block_struct) block_witness_stuffed.vtx[0].wit.vtxinwit[0].vchInflationKeysRangeproof = b'\x00' assert_raises_rpc_error(-25, "bad-cb-witness", self.nodes[0].testproposedblock, WitToHex(block_witness_stuffed)) block_witness_stuffed = copy.deepcopy(block_struct) # Let's blow out block weight limit by adding 4MW here block_witness_stuffed.vtx[0].wit.vtxinwit[0].peginWitness.stack = [b'\x00'*4000000] assert_raises_rpc_error(-25, "bad-cb-witness", self.nodes[0].testproposedblock, WitToHex(block_witness_stuffed)) # Test that node isn't blinded to the block # Previously an over-stuffed block >4MW would have been marked permanently bad # as it already passes witness merkle and regular merkle root checks block_height = self.nodes[0].getblockcount() assert_equal(self.nodes[0].submitblock(WitToHex(block_witness_stuffed)), "bad-cb-witness") assert_equal(block_height, self.nodes[0].getblockcount()) assert_equal(self.nodes[0].submitblock(WitToHex(block_struct)), None) assert_equal(block_height+1, self.nodes[0].getblockcount()) # New block since we used the first one block_struct = FromHex(CBlock(), self.nodes[0].getnewblockhex()) block_witness_stuffed = copy.deepcopy(block_struct) # Add extra witness data that is covered by witness merkle root, make sure invalid assert_equal(block_witness_stuffed.vtx[0].wit.vtxoutwit[0].vchSurjectionproof, b'') assert_equal(block_witness_stuffed.vtx[0].wit.vtxoutwit[0].vchRangeproof, b'') block_witness_stuffed.vtx[0].wit.vtxoutwit[0].vchRangeproof = b'\x00'*100000 block_witness_stuffed.vtx[0].wit.vtxoutwit[0].vchSurjectionproof = b'\x00'*100000 assert_raises_rpc_error(-25, "bad-witness-merkle-match", self.nodes[0].testproposedblock, WitToHex(block_witness_stuffed)) witness_root_hex = block_witness_stuffed.calc_witness_merkle_root() witness_root = uint256_from_str(hex_str_to_bytes(witness_root_hex)[::-1]) block_witness_stuffed.vtx[0].vout[-1] = CTxOut(0, get_witness_script(witness_root, 0)) block_witness_stuffed.vtx[0].rehash() block_witness_stuffed.hashMerkleRoot = block_witness_stuffed.calc_merkle_root() block_witness_stuffed.rehash() assert_raises_rpc_error(-25, "bad-cb-amount", self.nodes[0].testproposedblock, WitToHex(block_witness_stuffed)) assert_greater_than(len(WitToHex(block_witness_stuffed)), 100000*4) # Make sure the witness data is actually serialized # A CTxIn that always serializes the asset issuance, even for coinbases. 
class AlwaysIssuanceCTxIn(CTxIn): def serialize(self): r = b'' outpoint = COutPoint() outpoint.hash = self.prevout.hash outpoint.n = self.prevout.n outpoint.n |= OUTPOINT_ISSUANCE_FLAG r += outpoint.serialize() r += ser_string(self.scriptSig) r += struct.pack("<I", self.nSequence) r += self.assetIssuance.serialize() return r # Test that issuance inputs in coinbase don't survive a serialization round-trip # (even though this can't cause issuance to occur either way due to VerifyCoinbaseAmount semantics) block_witness_stuffed = copy.deepcopy(block_struct) coinbase_orig = copy.deepcopy(block_witness_stuffed.vtx[0].vin[0]) coinbase_ser_size = len(block_witness_stuffed.vtx[0].vin[0].serialize()) block_witness_stuffed.vtx[0].vin[0] = AlwaysIssuanceCTxIn() block_witness_stuffed.vtx[0].vin[0].prevout = coinbase_orig.prevout block_witness_stuffed.vtx[0].vin[0].scriptSig = coinbase_orig.scriptSig block_witness_stuffed.vtx[0].vin[0].nSequence = coinbase_orig.nSequence block_witness_stuffed.vtx[0].vin[0].assetIssuance.nAmount.setToAmount(1) bad_coinbase_ser_size = len(block_witness_stuffed.vtx[0].vin[0].serialize()) # 32+32+9+1 should be serialized for each assetIssuance field assert_equal(bad_coinbase_ser_size, coinbase_ser_size+32+32+9+1) assert not block_witness_stuffed.vtx[0].vin[0].assetIssuance.isNull() assert_raises_rpc_error(-22, "TX decode failed", self.nodes[0].decoderawtransaction, ToHex(block_witness_stuffed.vtx[0]))
def run_test(self): bitno = 1 activated_version = 0x20000000 | (1 << bitno) node = self.nodes[0] # convenience reference to the node self.bootstrap_p2p() # Add one p2p connection to the node assert_equal(self.get_bip9_status('finaltx')['status'], 'defined') assert_equal(self.get_bip9_status('finaltx')['since'], 0) self.log.info( "Generate some blocks to get the chain going and un-stick the mining RPCs" ) node.generate(2) assert_equal(node.getblockcount(), 2) self.height = 3 # height of the next block to build self.tip = int("0x" + node.getbestblockhash(), 0) self.nodeaddress = node.getnewaddress() self.last_block_time = int(time.time()) self.log.info("\'finaltx\' begins in DEFINED state") assert_equal(self.get_bip9_status('finaltx')['status'], 'defined') assert_equal(self.get_bip9_status('finaltx')['since'], 0) tmpl = node.getblocktemplate( {'rules': ['segwit', 'finaltx', 'auxpow']}) assert ('finaltx' not in tmpl['rules']) assert ('finaltx' not in tmpl['vbavailable']) assert ('finaltx' not in tmpl) assert_equal(tmpl['vbrequired'], 0) assert_equal(tmpl['version'] & activated_version, 0x20000000) self.log.info("Test 1: Advance from DEFINED to STARTED") test_blocks = self.generate_blocks(141, 4) # height = 143 assert_equal(self.get_bip9_status('finaltx')['status'], 'started') assert_equal(self.get_bip9_status('finaltx')['since'], 144) assert_equal( self.get_bip9_status('finaltx')['statistics']['elapsed'], 0) assert_equal(self.get_bip9_status('finaltx')['statistics']['count'], 0) tmpl = node.getblocktemplate( {'rules': ['segwit', 'finaltx', 'auxpow']}) assert ('finaltx' not in tmpl['rules']) assert_equal(tmpl['vbavailable']['finaltx'], bitno) assert_equal(tmpl['vbrequired'], 0) assert ('finaltx' not in tmpl) assert_equal(tmpl['version'] & activated_version, activated_version) self.log.info( "Save one of the anyone-can-spend coinbase outputs for later.") assert_equal(test_blocks[-1][0].vtx[0].vout[0].nValue, 5000000000) assert_equal(test_blocks[-1][0].vtx[0].vout[0].scriptPubKey, CScript([OP_TRUE])) early_coin = COutPoint(test_blocks[-1][0].vtx[0].sha256, 0) self.log.info( "Test 2: Check stats after max number of \"not signalling\" blocks such that LOCKED_IN still possible this period" ) self.generate_blocks(36, 4) # 0x00000004 (not signalling) self.generate_blocks(10, activated_version) # 0x20000001 (not signalling) assert_equal( self.get_bip9_status('finaltx')['statistics']['elapsed'], 46) assert_equal( self.get_bip9_status('finaltx')['statistics']['count'], 10) assert_equal( self.get_bip9_status('finaltx')['statistics']['possible'], True) self.log.info( "Test 3: Check stats after one additional \"not signalling\" block -- LOCKED_IN no longer possible this period" ) self.generate_blocks(1, 4) # 0x00000004 (not signalling) assert_equal( self.get_bip9_status('finaltx')['statistics']['elapsed'], 47) assert_equal( self.get_bip9_status('finaltx')['statistics']['count'], 10) assert_equal( self.get_bip9_status('finaltx')['statistics']['possible'], False) self.log.info( "Test 4: Finish period with \"ready\" blocks, but soft fork will still fail to advance to LOCKED_IN" ) self.generate_blocks( 97, activated_version) # 0x20000001 (signalling ready) assert_equal( self.get_bip9_status('finaltx')['statistics']['elapsed'], 0) assert_equal(self.get_bip9_status('finaltx')['statistics']['count'], 0) assert_equal( self.get_bip9_status('finaltx')['statistics']['possible'], True) assert_equal(self.get_bip9_status('finaltx')['status'], 'started') self.log.info( "Test 5: Fail to achieve LOCKED_IN 100 out of 144 
signal bit 1 using a variety of bits to simulate multiple parallel softforks" ) self.generate_blocks( 50, activated_version) # 0x20000001 (signalling ready) self.generate_blocks(20, 4) # 0x00000004 (not signalling) self.generate_blocks( 50, activated_version) # 0x20000101 (signalling ready) self.generate_blocks(24, 4) # 0x20010000 (not signalling) assert_equal(self.get_bip9_status('finaltx')['status'], 'started') assert_equal(self.get_bip9_status('finaltx')['since'], 144) assert_equal( self.get_bip9_status('finaltx')['statistics']['elapsed'], 0) assert_equal(self.get_bip9_status('finaltx')['statistics']['count'], 0) tmpl = node.getblocktemplate( {'rules': ['segwit', 'finaltx', 'auxpow']}) assert ('finaltx' not in tmpl['rules']) assert_equal(tmpl['vbavailable']['finaltx'], bitno) assert_equal(tmpl['vbrequired'], 0) assert_equal(tmpl['version'] & activated_version, activated_version) self.log.info( "Test 6: 108 out of 144 signal bit 1 to achieve LOCKED_IN using a variety of bits to simulate multiple parallel softforks" ) self.generate_blocks( 57, activated_version) # 0x20000001 (signalling ready) self.generate_blocks(26, 4) # 0x00000004 (not signalling) self.generate_blocks( 50, activated_version) # 0x20000101 (signalling ready) self.generate_blocks(10, 4) # 0x20010000 (not signalling) self.log.info( "check counting stats and \"possible\" flag before last block of this period achieves LOCKED_IN..." ) assert_equal( self.get_bip9_status('finaltx')['statistics']['elapsed'], 143) assert_equal( self.get_bip9_status('finaltx')['statistics']['count'], 107) assert_equal( self.get_bip9_status('finaltx')['statistics']['possible'], True) assert_equal(self.get_bip9_status('finaltx')['status'], 'started') self.log.info("Test 7: ...continue with Test 6") self.generate_blocks( 1, activated_version) # 0x20000001 (signalling ready) assert_equal(self.get_bip9_status('finaltx')['status'], 'locked_in') assert_equal(self.get_bip9_status('finaltx')['since'], 576) tmpl = node.getblocktemplate( {'rules': ['segwit', 'finaltx', 'auxpow']}) assert ('finaltx' not in tmpl['rules']) self.log.info( "Test 8: 143 more version 536870913 blocks (waiting period-1)") self.generate_blocks(143, 4) assert_equal(self.get_bip9_status('finaltx')['status'], 'locked_in') assert_equal(self.get_bip9_status('finaltx')['since'], 576) tmpl = node.getblocktemplate( {'rules': ['segwit', 'finaltx', 'auxpow']}) assert ('finaltx' not in tmpl['rules']) assert ('finaltx' in tmpl['vbavailable']) assert_equal(tmpl['vbrequired'], 0) assert_equal(tmpl['version'] & activated_version, activated_version) self.log.info( "Test 9: Generate a block without any spendable outputs, which should be allowed under normal circumstances." 
) test_blocks = self.generate_blocks(1, 4, sync=False) for txout in test_blocks[-1][0].vtx[0].vout: txout.scriptPubKey = CScript([OP_FALSE]) test_blocks[-1][0].vtx[0].rehash() test_blocks[-1][0].hashMerkleRoot = test_blocks[-1][ 0].calc_merkle_root() test_blocks[-1][0].rehash() test_blocks[-1][0].solve() node.submitblock(ToHex(test_blocks[-1][0])) assert_equal(node.getbestblockhash(), test_blocks[-1][0].hash) self.tip = test_blocks[-1][0].sha256 # Hash has changed assert_equal(self.get_bip9_status('finaltx')['status'], 'active') tmpl = node.getblocktemplate( {'rules': ['segwit', 'finaltx', 'auxpow']}) assert ('finaltx' in tmpl['rules']) assert ('finaltx' not in tmpl['vbavailable']) assert_equal(tmpl['vbrequired'], 0) assert (not (tmpl['version'] & (1 << bitno))) self.log.info( "Test 10: Attempt to do the same thing: generate a block with no spendable outputs in the coinbase. This fails because the next block needs at least one trivially spendable output to start the block-final transaction chain." ) block = create_block(self.tip, create_coinbase(self.height), self.last_block_time + 1) block.nVersion = 5 for txout in block.vtx[0].vout: txout.scriptPubKey = CScript([OP_FALSE]) block.vtx[0].rehash() block.hashMerkleRoot = block.calc_merkle_root() block.rehash() block.solve() node.submitblock(ToHex(block)) assert_equal(node.getbestblockhash(), ser_uint256(self.tip)[::-1].hex()) self.log.info( "Test 11: Generate the first block with block-final-tx rules enforced, which requires the coinbase to have a trivially-spendable output." ) self.generate_blocks(1, 4) assert (any(out.scriptPubKey == CScript([OP_TRUE]) for out in test_blocks[-1][0].vtx[0].vout)) for n, txout in enumerate(test_blocks[-1][0].vtx[0].vout): non_protected_output = COutPoint(test_blocks[-1][0].vtx[0].sha256, n) assert_equal(txout.nValue, 312500000) self.log.info("Test 12: Generate 98 blocks (maturity period - 2)") self.generate_blocks(98, 4) tmpl = node.getblocktemplate( {'rules': ['segwit', 'finaltx', 'auxpow']}) assert ('finaltx' not in tmpl) self.log.info( "Test 13: Generate one more block to allow non_protected_output to mature, which causes the block-final transaction to be required in the next block." ) self.generate_blocks(1, 4) tmpl = node.getblocktemplate( {'rules': ['segwit', 'finaltx', 'auxpow']}) assert ('finaltx' in tmpl) assert_equal(len(tmpl['finaltx']['prevout']), 1) assert_equal( tmpl['finaltx']['prevout'][0]['txid'], encode(ser_uint256(non_protected_output.hash)[::-1], 'hex_codec').decode('ascii')) assert_equal(tmpl['finaltx']['prevout'][0]['vout'], non_protected_output.n) assert_equal(tmpl['finaltx']['prevout'][0]['amount'], 312470199) self.log.info( "Extra pass-through value is not included in the coinbasevalue field." ) assert_equal(tmpl['coinbasevalue'], 5000000000 // 2**(self.height // 150)) self.log.info( "The transactions field does not contain the block-final transaction." ) assert_equal(len(tmpl['transactions']), 0) self.log.info( "Test 14: Attempt to create a block without the block-final transaction, which fails."
) block = create_block(self.tip, create_coinbase(self.height), self.last_block_time + 1) block.nVersion = 4 block.rehash() block.solve() node.submitblock(ToHex(block)) assert_equal(node.getbestblockhash(), ser_uint256(self.tip)[::-1].hex()) self.log.info( "Test 15: Add the block-final transaction, and it passes.") tx_final = CTransaction() tx_final.nVersion = 2 tx_final.vin.append( CTxIn(non_protected_output, CScript([]), 0xffffffff)) tx_final.vout.append(CTxOut(312470199, CScript([OP_TRUE]))) tx_final.nLockTime = block.vtx[0].nLockTime tx_final.lock_height = block.vtx[0].lock_height tx_final.rehash() block.vtx.append(tx_final) block.hashMerkleRoot = block.calc_merkle_root() block.rehash() block.solve() node.submitblock(ToHex(block)) assert_equal(node.getbestblockhash(), block.hash) prev_final_tx = block.vtx[-1] self.last_block_time += 1 self.tip = block.sha256 self.height += 1 tmpl = node.getblocktemplate( {'rules': ['segwit', 'finaltx', 'auxpow']}) assert ('finaltx' in tmpl) assert_equal(len(tmpl['finaltx']['prevout']), 1) assert_equal( tmpl['finaltx']['prevout'][0]['txid'], encode(ser_uint256(tx_final.sha256)[::-1], 'hex_codec').decode('ascii')) assert_equal(tmpl['finaltx']['prevout'][0]['vout'], 0) assert_equal(tmpl['finaltx']['prevout'][0]['amount'], 312469901) self.log.info( "Test 16: Create a block-final transaction with multiple outputs, which doesn't work because the number of outputs is restricted to be no greater than the number of inputs." ) block = create_block(self.tip, create_coinbase(self.height), self.last_block_time + 1) block.nVersion = 4 tx_final = CTransaction() tx_final.nVersion = 2 tx_final.vin.append( CTxIn(COutPoint(prev_final_tx.sha256, 0), CScript([]), 0xffffffff)) tx_final.vout.append(CTxOut(156234951, CScript([OP_TRUE]))) tx_final.vout.append(CTxOut(156234950, CScript([OP_TRUE]))) tx_final.nLockTime = block.vtx[0].nLockTime tx_final.lock_height = block.vtx[0].lock_height tx_final.rehash() block.vtx.append(tx_final) block.hashMerkleRoot = block.calc_merkle_root() block.rehash() block.solve() node.submitblock(ToHex(block)) assert_equal(node.getbestblockhash(), ser_uint256(self.tip)[::-1].hex()) self.log.info( "Test 17: Trying to increase the number of inputs by using an old one doesn't work, because the block-final transaction must source its user inputs from the same block." ) utxo = node.gettxout( encode(ser_uint256(early_coin.hash)[::-1], 'hex_codec').decode('ascii'), early_coin.n) assert ('amount' in utxo) utxo_amount = int(100000000 * utxo['amount']) block.vtx[-1].vin.append(CTxIn(early_coin, CScript([]), 0xffffffff)) block.vtx[-1].rehash() block.hashMerkleRoot = block.calc_merkle_root() block.rehash() block.solve() node.submitblock(ToHex(block)) assert_equal(node.getbestblockhash(), ser_uint256(self.tip)[::-1].hex()) self.log.info( "Test 18: But spend it via a user transaction instead, and it can be captured and sent to the coinbase as fee." ) # Insert spending transaction spend_tx = CTransaction() spend_tx.nVersion = 2 spend_tx.vin.append(CTxIn(early_coin, CScript([]), 0xffffffff)) spend_tx.vout.append(CTxOut(utxo_amount, CScript([OP_TRUE]))) spend_tx.nLockTime = 0 spend_tx.lock_height = block.vtx[0].lock_height spend_tx.rehash() block.vtx.insert(1, spend_tx) # Capture output of spend_tx in block-final tx (but don't update the # outputs--the value passes on to the coinbase as fee). block.vtx[-1].vin[-1].prevout = COutPoint(spend_tx.sha256, 0) block.vtx[-1].rehash() # Add the captured value to the block reward. 
block.vtx[0].vout[0].nValue += utxo_amount block.vtx[0].rehash() block.hashMerkleRoot = block.calc_merkle_root() block.rehash() block.solve() node.submitblock(ToHex(block)) assert_equal(node.getbestblockhash(), block.hash) prev_final_tx = block.vtx[-1] self.last_block_time += 1 self.tip = block.sha256 self.height += 1 self.log.info( "Test 19: Spending only one of the prior outputs is insufficient. ALL prior block-final outputs must be spent." ) block = create_block(self.tip, create_coinbase(self.height), self.last_block_time + 1) block.nVersion = 4 tx_final = CTransaction() tx_final.nVersion = 2 tx_final.vin.append( CTxIn(COutPoint(prev_final_tx.sha256, 0), CScript([]), 0xffffffff)) tx_final.vout.append(CTxOut(156234801, CScript([OP_TRUE]))) tx_final.nLockTime = block.vtx[0].nLockTime tx_final.lock_height = block.vtx[0].lock_height tx_final.rehash() block.vtx.append(tx_final) block.hashMerkleRoot = block.calc_merkle_root() block.rehash() block.solve() node.submitblock(ToHex(block)) assert_equal(node.getbestblockhash(), ser_uint256(self.tip)[::-1].hex()) self.log.info( "Test 20: But spend all the prior outputs and it goes through.") block.vtx[-1].vin.append( CTxIn(COutPoint(prev_final_tx.sha256, 1), CScript([]), 0xffffffff)) block.vtx[-1].vout[0].nValue *= 2 block.vtx[-1].rehash() block.hashMerkleRoot = block.calc_merkle_root() block.rehash() block.solve() node.submitblock(ToHex(block)) assert_equal(node.getbestblockhash(), block.hash) prev_final_tx = block.vtx[-1] self.last_block_time += 1 self.tip = block.sha256 self.height += 1 self.log.info( "Test 21: Now that the rules have activated, transactions spending the previous block-final transaction's outputs are rejected from the mempool." ) self.log.info( "First we do this with a non-block-final input to demonstrate the test would otherwise work." ) height = node.getblockcount() - 99 while True: blk = node.getblock(node.getblockhash(height)) txid = blk['tx'][0] utxo = node.gettxout(txid, 0) if utxo is not None and utxo['scriptPubKey']['hex'] == "51": break height -= 1 spend_tx = CTransaction() spend_tx.nVersion = 2 spend_tx.vin.append( CTxIn(COutPoint(uint256_from_str(unhexlify(txid)[::-1]), 0), CScript([]), 0xffffffff)) spend_tx.vout.append( CTxOut( int(utxo['amount'] * 100000000) - 10000, CScript([b'a' * 100])) ) # Make transaction large enough to avoid tx-size-small standardness check spend_tx.nLockTime = 0 spend_tx.lock_height = utxo['refheight'] spend_tx.rehash() node.sendrawtransaction(ToHex(spend_tx)) mempool = node.getrawmempool() assert (spend_tx.hash in mempool) self.log.info( "Now we do the same exact thing with the last block-final transaction's outputs. It should not enter the mempool." ) spend_tx = CTransaction() spend_tx.nVersion = 2 spend_tx.vin.append( CTxIn(COutPoint(prev_final_tx.sha256, 0), CScript([]), 0xffffffff)) spend_tx.vout.append( CTxOut(int(utxo['amount'] * 100000000), CScript([b'a' * 100])) ) # Make transaction large enough to avoid tx-size-small standardness check spend_tx.nLockTime = 0 spend_tx.lock_height = utxo['refheight'] spend_tx.rehash() try: node.sendrawtransaction(ToHex(spend_tx)) except JSONRPCException as e: assert ("spend-block-final-txn" in e.error['message']) else: assert (False) mempool = node.getrawmempool() assert (spend_tx.hash not in mempool) self.log.info( "Test 22: Invalidate the tip, then malleate and re-solve the same block. This is a fast way of testing that the block-final txid is restored on a reorg."
) height = node.getblockcount() node.invalidateblock(block.hash) assert_equal(node.getblockcount(), height - 1) block.nVersion ^= 2 block.rehash() block.solve() node.submitblock(ToHex(block)) assert_equal(node.getblockcount(), height) assert_equal(node.getbestblockhash(), block.hash) self.tip = block.sha256 self.finaltx_vin = [ CTxIn(COutPoint(block.vtx[-1].sha256, 0), CScript([]), 0xffffffff) ] self.log.info( "Test 23-25: Mine three blocks with trivially-spendable coinbase outputs, then test that the one that is exactly 100 blocks old is allowed to be spent in a block-final transaction, but older and not-yet-mature ones cannot." ) self.generate_blocks(1, 4, finaltx=True) assert_equal(test_blocks[-1][0].vtx[0].vout[0].scriptPubKey, CScript([OP_TRUE])) txin1 = CTxIn(COutPoint(test_blocks[-1][0].vtx[0].sha256, 0), CScript([]), 0xffffffff) self.generate_blocks(1, 4, finaltx=True) assert_equal(test_blocks[-1][0].vtx[0].vout[0].scriptPubKey, CScript([OP_TRUE])) txin2 = CTxIn(COutPoint(test_blocks[-1][0].vtx[0].sha256, 0), CScript([]), 0xffffffff) self.generate_blocks(1, 4, finaltx=True) assert_equal(test_blocks[-1][0].vtx[0].vout[0].scriptPubKey, CScript([OP_TRUE])) txin3 = CTxIn(COutPoint(test_blocks[-1][0].vtx[0].sha256, 0), CScript([]), 0xffffffff) self.generate_blocks(98, 4, finaltx=True) # txin1 is too old -- it should have been collected on the last block block = create_block(self.tip, create_coinbase(self.height), self.last_block_time + 1) block.nVersion = 4 tx_final = CTransaction() tx_final.nVersion = 2 tx_final.vin.extend(self.finaltx_vin) tx_final.vin.append(txin1) tx_final.vout.append(CTxOut(0, CScript([OP_TRUE]))) tx_final.nLockTime = block.vtx[0].nLockTime tx_final.lock_height = block.vtx[0].lock_height tx_final.rehash() block.vtx.append(tx_final) block.hashMerkleRoot = block.calc_merkle_root() block.rehash() block.solve() node.submitblock(ToHex(block)) assert_equal(node.getbestblockhash(), ser_uint256(self.tip)[::-1].hex()) # txin3 is too young -- it hasn't matured block.vtx[-1].vin.pop() block.vtx[-1].vin.append(txin3) block.vtx[-1].rehash() block.hashMerkleRoot = block.calc_merkle_root() block.rehash() block.solve() node.submitblock(ToHex(block)) assert_equal(node.getbestblockhash(), ser_uint256(self.tip)[::-1].hex()) # txin2 is just right block.vtx[-1].vin.pop() block.vtx[-1].vin.append(txin2) block.vtx[-1].rehash() block.hashMerkleRoot = block.calc_merkle_root() block.rehash() block.solve() node.submitblock(ToHex(block)) assert_equal(node.getbestblockhash(), block.hash) self.last_block_time += 1 self.tip = block.sha256 self.height += 1 self.finaltx_vin = [ CTxIn(COutPoint(block.vtx[-1].sha256, 0), CScript([]), 0xffffffff) ]
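The finaltx test above rebuilds essentially the same block-final transaction at every step. A hedged helper capturing that repeated pattern (the name make_block_final_tx and its parameters are illustrative, not part of the original test):

def make_block_final_tx(block, prevouts, value):
    # Spend the given prevouts (a list of COutPoint), pay a single trivially
    # spendable OP_TRUE output of 'value', and mirror the coinbase's
    # nLockTime / lock_height as the test does throughout.
    tx_final = CTransaction()
    tx_final.nVersion = 2
    for prevout in prevouts:
        tx_final.vin.append(CTxIn(prevout, CScript([]), 0xffffffff))
    tx_final.vout.append(CTxOut(value, CScript([OP_TRUE])))
    tx_final.nLockTime = block.vtx[0].nLockTime
    tx_final.lock_height = block.vtx[0].lock_height
    tx_final.rehash()
    return tx_final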
def run_test(self): # Node 0 supports COMPACT_FILTERS, node 1 does not. node0 = self.nodes[0].add_p2p_connection(CFiltersClient()) node1 = self.nodes[1].add_p2p_connection(CFiltersClient()) # Nodes 0 & 1 share the same first 999 blocks in the chain. self.nodes[0].generate(999) self.sync_blocks(timeout=600) # Stale blocks by disconnecting nodes 0 & 1, mining, then reconnecting disconnect_nodes(self.nodes[0], self.nodes[1]) self.nodes[0].generate(1) wait_until(lambda: self.nodes[0].getblockcount() == 1000) stale_block_hash = self.nodes[0].getblockhash(1000) self.nodes[1].generate(1001) wait_until(lambda: self.nodes[1].getblockcount() == 2000) # Check that nodes have signalled NODE_COMPACT_FILTERS correctly. assert node0.nServices & NODE_COMPACT_FILTERS != 0 assert node1.nServices & NODE_COMPACT_FILTERS == 0 # Check that the localservices is as expected. assert int(self.nodes[0].getnetworkinfo()['localservices'], 16) & NODE_COMPACT_FILTERS != 0 assert int(self.nodes[1].getnetworkinfo()['localservices'], 16) & NODE_COMPACT_FILTERS == 0 self.log.info("get cfcheckpt on chain to be re-orged out.") request = msg_getcfcheckpt(filter_type=FILTER_TYPE_BASIC, stop_hash=int(stale_block_hash, 16)) node0.send_and_ping(message=request) response = node0.last_message['cfcheckpt'] assert_equal(response.filter_type, request.filter_type) assert_equal(response.stop_hash, request.stop_hash) assert_equal(len(response.headers), 1) self.log.info("Reorg node 0 to a new chain.") connect_nodes(self.nodes[0], self.nodes[1]) self.sync_blocks(timeout=600) main_block_hash = self.nodes[0].getblockhash(1000) assert main_block_hash != stale_block_hash, "node 0 chain did not reorganize" self.log.info("Check that peers can fetch cfcheckpt on active chain.") tip_hash = self.nodes[0].getbestblockhash() request = msg_getcfcheckpt(filter_type=FILTER_TYPE_BASIC, stop_hash=int(tip_hash, 16)) node0.send_and_ping(request) response = node0.last_message['cfcheckpt'] assert_equal(response.filter_type, request.filter_type) assert_equal(response.stop_hash, request.stop_hash) main_cfcheckpt = self.nodes[0].getblockfilter(main_block_hash, 'basic')['header'] tip_cfcheckpt = self.nodes[0].getblockfilter(tip_hash, 'basic')['header'] assert_equal( response.headers, [int(header, 16) for header in (main_cfcheckpt, tip_cfcheckpt)]) self.log.info("Check that peers can fetch cfcheckpt on stale chain.") request = msg_getcfcheckpt(filter_type=FILTER_TYPE_BASIC, stop_hash=int(stale_block_hash, 16)) node0.send_and_ping(request) response = node0.last_message['cfcheckpt'] stale_cfcheckpt = self.nodes[0].getblockfilter(stale_block_hash, 'basic')['header'] assert_equal(response.headers, [int(header, 16) for header in (stale_cfcheckpt, )]) self.log.info("Check that peers can fetch cfheaders on active chain.") request = msg_getcfheaders(filter_type=FILTER_TYPE_BASIC, start_height=1, stop_hash=int(main_block_hash, 16)) node0.send_and_ping(request) response = node0.last_message['cfheaders'] main_cfhashes = response.hashes assert_equal(len(main_cfhashes), 1000) assert_equal( compute_last_header(response.prev_header, response.hashes), int(main_cfcheckpt, 16)) self.log.info("Check that peers can fetch cfheaders on stale chain.") request = msg_getcfheaders(filter_type=FILTER_TYPE_BASIC, start_height=1, stop_hash=int(stale_block_hash, 16)) node0.send_and_ping(request) response = node0.last_message['cfheaders'] stale_cfhashes = response.hashes assert_equal(len(stale_cfhashes), 1000) assert_equal( compute_last_header(response.prev_header, response.hashes), 
int(stale_cfcheckpt, 16)) self.log.info("Check that peers can fetch cfilters.") stop_hash = self.nodes[0].getblockhash(10) request = msg_getcfilters(filter_type=FILTER_TYPE_BASIC, start_height=1, stop_hash=int(stop_hash, 16)) node0.send_message(request) node0.sync_with_ping() response = node0.pop_cfilters() assert_equal(len(response), 10) self.log.info("Check that cfilter responses are correct.") for cfilter, cfhash, height in zip(response, main_cfhashes, range(1, 11)): block_hash = self.nodes[0].getblockhash(height) assert_equal(cfilter.filter_type, FILTER_TYPE_BASIC) assert_equal(cfilter.block_hash, int(block_hash, 16)) computed_cfhash = uint256_from_str(hash256(cfilter.filter_data)) assert_equal(computed_cfhash, cfhash) self.log.info("Check that peers can fetch cfilters for stale blocks.") request = msg_getcfilters(filter_type=FILTER_TYPE_BASIC, start_height=1000, stop_hash=int(stale_block_hash, 16)) node0.send_message(request) node0.sync_with_ping() response = node0.pop_cfilters() assert_equal(len(response), 1) cfilter = response[0] assert_equal(cfilter.filter_type, FILTER_TYPE_BASIC) assert_equal(cfilter.block_hash, int(stale_block_hash, 16)) computed_cfhash = uint256_from_str(hash256(cfilter.filter_data)) assert_equal(computed_cfhash, stale_cfhashes[999]) self.log.info( "Requests to node 1 without NODE_COMPACT_FILTERS results in disconnection." ) requests = [ msg_getcfcheckpt(filter_type=FILTER_TYPE_BASIC, stop_hash=int(main_block_hash, 16)), msg_getcfheaders(filter_type=FILTER_TYPE_BASIC, start_height=1000, stop_hash=int(main_block_hash, 16)), msg_getcfilters(filter_type=FILTER_TYPE_BASIC, start_height=1000, stop_hash=int(main_block_hash, 16)), ] for request in requests: node1 = self.nodes[1].add_p2p_connection(P2PInterface()) node1.send_message(request) node1.wait_for_disconnect() self.log.info("Check that invalid requests result in disconnection.") requests = [ # Requesting too many filters results in disconnection. msg_getcfilters(filter_type=FILTER_TYPE_BASIC, start_height=0, stop_hash=int(main_block_hash, 16)), # Requesting too many filter headers results in disconnection. msg_getcfheaders(filter_type=FILTER_TYPE_BASIC, start_height=0, stop_hash=int(tip_hash, 16)), # Requesting unknown filter type results in disconnection. msg_getcfcheckpt(filter_type=255, stop_hash=int(main_block_hash, 16)), # Requesting unknown hash results in disconnection. msg_getcfcheckpt( filter_type=FILTER_TYPE_BASIC, stop_hash=123456789, ), ] for request in requests: node0 = self.nodes[0].add_p2p_connection(P2PInterface()) node0.send_message(request) node0.wait_for_disconnect()
def run_test(self): # Node 0 supports COMPACT_FILTERS, node 1 does not. node0 = self.nodes[0].add_p2p_connection(CFiltersClient()) node1 = self.nodes[1].add_p2p_connection(CFiltersClient()) # Nodes 0 & 1 share the same first 999 blocks in the chain. self.nodes[0].generate(999) sync_blocks(self.nodes) # Stale blocks by disconnecting nodes 0 & 1, mining, then reconnecting disconnect_nodes(self.nodes[0], 1) self.nodes[0].generate(1) wait_until(lambda: self.nodes[0].getblockcount() == 1000) stale_block_hash = self.nodes[0].getblockhash(1000) self.nodes[1].generate(1001) wait_until(lambda: self.nodes[1].getblockcount() == 2000) # Fetch cfcheckpt on node 0. Since the implementation caches the checkpoints on the active # chain in memory, this checks that the cache is updated correctly upon subsequent queries # after the reorg. request = msg_getcfcheckpt(filter_type=FILTER_TYPE_BASIC, stop_hash=int(stale_block_hash, 16)) node0.send_message(request) node0.sync_with_ping(timeout=5) response = node0.last_message['cfcheckpt'] assert_equal(response.filter_type, request.filter_type) assert_equal(response.stop_hash, request.stop_hash) assert_equal(len(response.headers), 1) # Reorg node 0 to a new chain connect_nodes(self.nodes[0], 1) sync_blocks(self.nodes) main_block_hash = self.nodes[0].getblockhash(1000) assert main_block_hash != stale_block_hash, "node 0 chain did not reorganize" default_services = node1.nServices # Check that nodes have signalled expected services. assert 0 == node1.nServices & NODE_COMPACT_FILTERS assert_equal(node0.nServices, default_services | NODE_COMPACT_FILTERS) # Check that the localservices is as expected. assert_equal(int(self.nodes[0].getnetworkinfo()['localservices'], 16), default_services | NODE_COMPACT_FILTERS) assert_equal(int(self.nodes[1].getnetworkinfo()['localservices'], 16), default_services) # Check that peers can fetch cfcheckpt on active chain. tip_hash = self.nodes[0].getbestblockhash() request = msg_getcfcheckpt(filter_type=FILTER_TYPE_BASIC, stop_hash=int(tip_hash, 16)) node0.send_message(request) node0.sync_with_ping() response = node0.last_message['cfcheckpt'] assert_equal(response.filter_type, request.filter_type) assert_equal(response.stop_hash, request.stop_hash) main_cfcheckpt = self.nodes[0].getblockfilter(main_block_hash, 'basic')['header'] tip_cfcheckpt = self.nodes[0].getblockfilter(tip_hash, 'basic')['header'] assert_equal( response.headers, [int(header, 16) for header in (main_cfcheckpt, tip_cfcheckpt)]) # Check that peers can fetch cfcheckpt on stale chain. request = msg_getcfcheckpt(filter_type=FILTER_TYPE_BASIC, stop_hash=int(stale_block_hash, 16)) node0.send_message(request) node0.sync_with_ping() response = node0.last_message['cfcheckpt'] stale_cfcheckpt = self.nodes[0].getblockfilter(stale_block_hash, 'basic')['header'] assert_equal(response.headers, [int(header, 16) for header in (stale_cfcheckpt, )]) # Check that peers can fetch cfheaders on active chain. request = msg_getcfheaders(filter_type=FILTER_TYPE_BASIC, start_height=1, stop_hash=int(main_block_hash, 16)) node0.send_message(request) node0.sync_with_ping() response = node0.last_message['cfheaders'] main_cfhashes = response.hashes assert_equal( compute_last_header(response.prev_header, response.hashes), int(main_cfcheckpt, 16)) # Check that peers can fetch cfheaders on stale chain. 
request = msg_getcfheaders(filter_type=FILTER_TYPE_BASIC, start_height=1, stop_hash=int(stale_block_hash, 16)) node0.send_message(request) node0.sync_with_ping() response = node0.last_message['cfheaders'] stale_cfhashes = response.hashes assert_equal( compute_last_header(response.prev_header, response.hashes), int(stale_cfcheckpt, 16)) # Check that peers can fetch cfilters. stop_hash = self.nodes[0].getblockhash(10) request = msg_getcfilters(filter_type=FILTER_TYPE_BASIC, start_height=1, stop_hash=int(stop_hash, 16)) node0.send_message(request) node0.sync_with_ping() response = node0.pop_cfilters() assert_equal(len(response), 10) # Check that cfilter responses are correct. for cfilter, cfhash, height in zip(response, main_cfhashes, range(1, 11)): block_hash = self.nodes[0].getblockhash(height) assert_equal(cfilter.filter_type, FILTER_TYPE_BASIC) assert_equal(cfilter.block_hash, int(block_hash, 16)) computed_cfhash = uint256_from_str(hash256(cfilter.filter_data)) assert_equal(computed_cfhash, cfhash) # Check that peers can fetch cfilters for stale blocks. stop_hash = self.nodes[0].getblockhash(10) request = msg_getcfilters(filter_type=FILTER_TYPE_BASIC, start_height=1000, stop_hash=int(stale_block_hash, 16)) node0.send_message(request) node0.sync_with_ping() response = node0.pop_cfilters() assert_equal(len(response), 1) cfilter = response[0] assert_equal(cfilter.filter_type, FILTER_TYPE_BASIC) assert_equal(cfilter.block_hash, int(stale_block_hash, 16)) computed_cfhash = uint256_from_str(hash256(cfilter.filter_data)) assert_equal(computed_cfhash, stale_cfhashes[999]) # Requests to node 1 without NODE_COMPACT_FILTERS results in disconnection. requests = [ msg_getcfcheckpt(filter_type=FILTER_TYPE_BASIC, stop_hash=int(main_block_hash, 16)), msg_getcfheaders(filter_type=FILTER_TYPE_BASIC, start_height=1000, stop_hash=int(main_block_hash, 16)), msg_getcfilters(filter_type=FILTER_TYPE_BASIC, start_height=1000, stop_hash=int(main_block_hash, 16)), ] node1.sync_with_ping( ) # ensure 'ping' has at least one message before we copy node1_check_message_count = dict(node1.message_count) node1_check_message_count['pong'] += 1 for request in requests: node1.send_message(request) node1.sync_with_ping() assert_equal(node1_check_message_count, dict(node1.message_count)) # Check that invalid requests result in disconnection. requests = [ # Requesting too many filters results in disconnection. msg_getcfilters(filter_type=FILTER_TYPE_BASIC, start_height=0, stop_hash=int(main_block_hash, 16)), # Requesting too many filter headers results in disconnection. msg_getcfheaders(filter_type=FILTER_TYPE_BASIC, start_height=0, stop_hash=int(tip_hash, 16)), # Requesting unknown filter type results in disconnection. msg_getcfcheckpt(filter_type=255, stop_hash=int(main_block_hash, 16)), # Requesting unknown hash results in disconnection. msg_getcfcheckpt( filter_type=FILTER_TYPE_BASIC, stop_hash=123456789, ), ] for request in requests: node0 = self.nodes[0].add_p2p_connection(CFiltersClient()) node0.send_message(request) node0.wait_for_disconnect()
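Both variants of this compact-filters test construct a CFiltersClient whose definition is not included here; a sketch consistent with how it is used (it buffers incoming cfilter messages and hands them back through pop_cfilters) would be:

class CFiltersClient(P2PInterface):
    def __init__(self):
        super().__init__()
        # cfilter messages are collected here until the test drains them
        self.cfilters = []

    def on_cfilter(self, message):
        self.cfilters.append(message)

    def pop_cfilters(self):
        # Return everything received so far and reset the buffer
        cfilters = self.cfilters
        self.cfilters = []
        return cfilters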