Example #1
    def run_test(self):
        self.nodes[0].add_p2p_connection(P2PDataStore())

        self.log.info("Generate blocks in the past for coinbase outputs.")
        long_past_time = int(time.time()) - 600 * 1000  # enough to build up to 1000 blocks 10 minutes apart without worrying about getting into the future
        self.nodes[0].setmocktime(long_past_time - 100)  # enough so that the generated blocks will still all be before long_past_time
        self.coinbase_blocks = self.nodes[0].generate(COINBASE_BLOCK_COUNT)  # blocks generated for inputs
        self.nodes[0].setmocktime(0)  # set time back to present so yielded blocks aren't in the future as we advance last_block_time
        self.tipheight = COINBASE_BLOCK_COUNT  # height of the next block to build
        self.last_block_time = long_past_time
        self.tip = int(self.nodes[0].getbestblockhash(), 16)
        self.nodeaddress = self.nodes[0].getnewaddress()

        # Activation height is hardcoded
        # We advance to block height five below BIP112 activation for the following tests
        test_blocks = self.generate_blocks(CSV_ACTIVATION_HEIGHT - 5 - COINBASE_BLOCK_COUNT)
        self.send_blocks(test_blocks)
        assert not softfork_active(self.nodes[0], 'csv')

        # Inputs at height = 431
        #
        # Put inputs for all tests in the chain at height 431 (tip now = 430) (time increases by 600s per block)
        # Note we reuse inputs for v1 and v2 txs so must test these separately
        # 16 normal inputs
        bip68inputs = []
        for i in range(16):
            bip68inputs.append(send_generic_input_tx(self.nodes[0], self.coinbase_blocks, self.nodeaddress))

        # 2 sets of 16 inputs with 10 OP_CSV OP_DROP (actually will be prepended to spending scriptSig)
        bip112basicinputs = []
        for j in range(2):
            inputs = []
            for i in range(16):
                inputs.append(send_generic_input_tx(self.nodes[0], self.coinbase_blocks, self.nodeaddress))
            bip112basicinputs.append(inputs)

        # 2 sets of 16 varied inputs with (relative_lock_time) OP_CSV OP_DROP (actually will be prepended to spending scriptSig)
        bip112diverseinputs = []
        for j in range(2):
            inputs = []
            for i in range(16):
                inputs.append(send_generic_input_tx(self.nodes[0], self.coinbase_blocks, self.nodeaddress))
            bip112diverseinputs.append(inputs)

        # 1 special input with -1 OP_CSV OP_DROP (actually will be prepended to spending scriptSig)
        bip112specialinput = send_generic_input_tx(self.nodes[0], self.coinbase_blocks, self.nodeaddress)
        # 1 special input with (empty stack) OP_CSV (actually will be prepended to spending scriptSig)
        bip112emptystackinput = send_generic_input_tx(self.nodes[0], self.coinbase_blocks, self.nodeaddress)

        # 1 normal input
        bip113input = send_generic_input_tx(self.nodes[0], self.coinbase_blocks, self.nodeaddress)

        self.nodes[0].setmocktime(self.last_block_time + 600)
        inputblockhash = self.nodes[0].generate(1)[0]  # 1 block generated for inputs to be in chain at height 431
        self.nodes[0].setmocktime(0)
        self.tip = int(inputblockhash, 16)
        self.tipheight += 1
        self.last_block_time += 600
        assert_equal(len(self.nodes[0].getblock(inputblockhash, True)["tx"]), TESTING_TX_COUNT + 1)

        # 2 more version 4 blocks
        test_blocks = self.generate_blocks(2)
        self.send_blocks(test_blocks)

        assert_equal(self.tipheight, CSV_ACTIVATION_HEIGHT - 2)
        self.log.info("Height = {}, CSV not yet active (will activate for block {}, not {})".format(self.tipheight, CSV_ACTIVATION_HEIGHT, CSV_ACTIVATION_HEIGHT - 1))
        assert not softfork_active(self.nodes[0], 'csv')

        # Test both version 1 and version 2 transactions for all tests
        # BIP113 test transaction will be modified before each use to put in appropriate block time
        bip113tx_v1 = create_transaction(self.nodes[0], bip113input, self.nodeaddress, amount=Decimal("49.98"))
        bip113tx_v1.vin[0].nSequence = 0xFFFFFFFE
        bip113tx_v1.nVersion = 1
        bip113tx_v2 = create_transaction(self.nodes[0], bip113input, self.nodeaddress, amount=Decimal("49.98"))
        bip113tx_v2.vin[0].nSequence = 0xFFFFFFFE
        bip113tx_v2.nVersion = 2

        # For BIP68 test all 16 relative sequence locktimes
        bip68txs_v1 = create_bip68txs(self.nodes[0], bip68inputs, 1, self.nodeaddress)
        bip68txs_v2 = create_bip68txs(self.nodes[0], bip68inputs, 2, self.nodeaddress)

        # For BIP112 test:
        # 16 relative sequence locktimes of 10 against 10 OP_CSV OP_DROP inputs
        bip112txs_vary_nSequence_v1 = create_bip112txs(self.nodes[0], bip112basicinputs[0], False, 1, self.nodeaddress)
        bip112txs_vary_nSequence_v2 = create_bip112txs(self.nodes[0], bip112basicinputs[0], False, 2, self.nodeaddress)
        # 16 relative sequence locktimes of 9 against 10 OP_CSV OP_DROP inputs
        bip112txs_vary_nSequence_9_v1 = create_bip112txs(self.nodes[0], bip112basicinputs[1], False, 1, self.nodeaddress, -1)
        bip112txs_vary_nSequence_9_v2 = create_bip112txs(self.nodes[0], bip112basicinputs[1], False, 2, self.nodeaddress, -1)
        # sequence lock time of 10 against 16 (relative_lock_time) OP_CSV OP_DROP inputs
        bip112txs_vary_OP_CSV_v1 = create_bip112txs(self.nodes[0], bip112diverseinputs[0], True, 1, self.nodeaddress)
        bip112txs_vary_OP_CSV_v2 = create_bip112txs(self.nodes[0], bip112diverseinputs[0], True, 2, self.nodeaddress)
        # sequence lock time of 9 against 16 (relative_lock_time) OP_CSV OP_DROP inputs
        bip112txs_vary_OP_CSV_9_v1 = create_bip112txs(self.nodes[0], bip112diverseinputs[1], True, 1, self.nodeaddress, -1)
        bip112txs_vary_OP_CSV_9_v2 = create_bip112txs(self.nodes[0], bip112diverseinputs[1], True, 2, self.nodeaddress, -1)
        # -1 OP_CSV OP_DROP input
        bip112tx_special_v1 = create_bip112special(self.nodes[0], bip112specialinput, 1, self.nodeaddress)
        bip112tx_special_v2 = create_bip112special(self.nodes[0], bip112specialinput, 2, self.nodeaddress)
        # (empty stack) OP_CSV input
        bip112tx_emptystack_v1 = create_bip112emptystack(self.nodes[0], bip112emptystackinput, 1, self.nodeaddress)
        bip112tx_emptystack_v2 = create_bip112emptystack(self.nodes[0], bip112emptystackinput, 2, self.nodeaddress)

        self.log.info("TESTING")

        self.log.info("Pre-Soft Fork Tests. All txs should pass.")
        self.log.info("Test version 1 txs")

        success_txs = []
        # BIP113 tx, -1 CSV tx and empty stack CSV tx should succeed
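        # With blocks spaced 600s apart, the prior block's median-time-past
        # (the median of its last 11 block times) equals the timestamp of the
        # block 5 back from the tip, i.e. last_block_time - 5 * 600.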
        bip113tx_v1.nLockTime = self.last_block_time - 600 * 5  # = MTP of prior block (not <) but < time put on current block
        bip113signed1 = sign_transaction(self.nodes[0], bip113tx_v1)
        success_txs.append(bip113signed1)
        success_txs.append(bip112tx_special_v1)
        success_txs.append(bip112tx_emptystack_v1)
        # add BIP 68 txs
        success_txs.extend(all_rlt_txs(bip68txs_v1))
        # add BIP 112 with seq=10 txs
        success_txs.extend(all_rlt_txs(bip112txs_vary_nSequence_v1))
        success_txs.extend(all_rlt_txs(bip112txs_vary_OP_CSV_v1))
        # try BIP 112 with seq=9 txs
        success_txs.extend(all_rlt_txs(bip112txs_vary_nSequence_9_v1))
        success_txs.extend(all_rlt_txs(bip112txs_vary_OP_CSV_9_v1))
        self.send_blocks([self.create_test_block(success_txs)])
        self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash())

        self.log.info("Test version 2 txs")

        success_txs = []
        # BIP113 tx, -1 CSV tx and empty stack CSV tx should succeed
        bip113tx_v2.nLockTime = self.last_block_time - 600 * 5  # = MTP of prior block (not <) but < time put on current block
        bip113signed2 = sign_transaction(self.nodes[0], bip113tx_v2)
        success_txs.append(bip113signed2)
        success_txs.append(bip112tx_special_v2)
        success_txs.append(bip112tx_emptystack_v2)
        # add BIP 68 txs
        success_txs.extend(all_rlt_txs(bip68txs_v2))
        # add BIP 112 with seq=10 txs
        success_txs.extend(all_rlt_txs(bip112txs_vary_nSequence_v2))
        success_txs.extend(all_rlt_txs(bip112txs_vary_OP_CSV_v2))
        # try BIP 112 with seq=9 txs
        success_txs.extend(all_rlt_txs(bip112txs_vary_nSequence_9_v2))
        success_txs.extend(all_rlt_txs(bip112txs_vary_OP_CSV_9_v2))
        self.send_blocks([self.create_test_block(success_txs)])
        self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash())

        # 1 more version 4 block to get us to height 432 so the fork should now be active for the next block
        assert not softfork_active(self.nodes[0], 'csv')
        test_blocks = self.generate_blocks(1)
        self.send_blocks(test_blocks)
        assert softfork_active(self.nodes[0], 'csv')

        self.log.info("Post-Soft Fork Tests.")

        self.log.info("BIP 113 tests")
        # BIP 113 tests should now fail regardless of version number if nLockTime isn't satisfied by new rules
        bip113tx_v1.nLockTime = self.last_block_time - 600 * 5  # = MTP of prior block (not <) but < time put on current block
        bip113signed1 = sign_transaction(self.nodes[0], bip113tx_v1)
        bip113tx_v2.nLockTime = self.last_block_time - 600 * 5  # = MTP of prior block (not <) but < time put on current block
        bip113signed2 = sign_transaction(self.nodes[0], bip113tx_v2)
        for bip113tx in [bip113signed1, bip113signed2]:
            self.send_blocks([self.create_test_block([bip113tx])], success=False, reject_reason='bad-txns-nonfinal')
        # BIP 113 tests should now pass if the locktime is < MTP
        bip113tx_v1.nLockTime = self.last_block_time - 600 * 5 - 1  # < MTP of prior block
        bip113signed1 = sign_transaction(self.nodes[0], bip113tx_v1)
        bip113tx_v2.nLockTime = self.last_block_time - 600 * 5 - 1  # < MTP of prior block
        bip113signed2 = sign_transaction(self.nodes[0], bip113tx_v2)
        for bip113tx in [bip113signed1, bip113signed2]:
            self.send_blocks([self.create_test_block([bip113tx])])
            self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash())

        # Next block height = 437 after 4 blocks of random version
        test_blocks = self.generate_blocks(4)
        self.send_blocks(test_blocks)

        self.log.info("BIP 68 tests")
        self.log.info("Test version 1 txs - all should still pass")

        success_txs = []
        success_txs.extend(all_rlt_txs(bip68txs_v1))
        self.send_blocks([self.create_test_block(success_txs)])
        self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash())

        self.log.info("Test version 2 txs")

        # All txs with SEQUENCE_LOCKTIME_DISABLE_FLAG set pass
        bip68success_txs = [tx['tx'] for tx in bip68txs_v2 if tx['sdf']]
        self.send_blocks([self.create_test_block(bip68success_txs)])
        self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash())

        # All txs without flag fail as we are at delta height = 8 < 10 and delta time = 8 * 600 < 10 * 512
        bip68timetxs = [tx['tx'] for tx in bip68txs_v2 if not tx['sdf'] and tx['stf']]
        for tx in bip68timetxs:
            self.send_blocks([self.create_test_block([tx])], success=False, reject_reason='bad-txns-nonfinal')

        bip68heighttxs = [tx['tx'] for tx in bip68txs_v2 if not tx['sdf'] and not tx['stf']]
        for tx in bip68heighttxs:
            self.send_blocks([self.create_test_block([tx])], success=False, reject_reason='bad-txns-nonfinal')

        # Advance one block to 438
        test_blocks = self.generate_blocks(1)
        self.send_blocks(test_blocks)

        # Height txs should fail and time txs should now pass 9 * 600 > 10 * 512
        bip68success_txs.extend(bip68timetxs)
        self.send_blocks([self.create_test_block(bip68success_txs)])
        self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash())
        for tx in bip68heighttxs:
            self.send_blocks([self.create_test_block([tx])], success=False, reject_reason='bad-txns-nonfinal')

        # Advance one block to 439
        test_blocks = self.generate_blocks(1)
        self.send_blocks(test_blocks)

        # All BIP 68 txs should pass
        bip68success_txs.extend(bip68heighttxs)
        self.send_blocks([self.create_test_block(bip68success_txs)])
        self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash())

        self.log.info("BIP 112 tests")
        self.log.info("Test version 1 txs")

        # -1 OP_CSV tx and (empty stack) OP_CSV tx should fail
        self.send_blocks([self.create_test_block([bip112tx_special_v1])], success=False,
                         reject_reason='non-mandatory-script-verify-flag (Negative locktime)')
        self.send_blocks([self.create_test_block([bip112tx_emptystack_v1])], success=False,
                         reject_reason='non-mandatory-script-verify-flag (Operation not valid with the current stack size)')
        # If SEQUENCE_LOCKTIME_DISABLE_FLAG is set in argument to OP_CSV, version 1 txs should still pass

        success_txs = [tx['tx'] for tx in bip112txs_vary_OP_CSV_v1 if tx['sdf']]
        success_txs += [tx['tx'] for tx in bip112txs_vary_OP_CSV_9_v1 if tx['sdf']]
        self.send_blocks([self.create_test_block(success_txs)])
        self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash())

        # If SEQUENCE_LOCKTIME_DISABLE_FLAG is unset in argument to OP_CSV, version 1 txs should now fail
        fail_txs = all_rlt_txs(bip112txs_vary_nSequence_v1)
        fail_txs += all_rlt_txs(bip112txs_vary_nSequence_9_v1)
        fail_txs += [tx['tx'] for tx in bip112txs_vary_OP_CSV_v1 if not tx['sdf']]
        fail_txs += [tx['tx'] for tx in bip112txs_vary_OP_CSV_9_v1 if not tx['sdf']]
        for tx in fail_txs:
            self.send_blocks([self.create_test_block([tx])], success=False,
                             reject_reason='non-mandatory-script-verify-flag (Locktime requirement not satisfied)')

        self.log.info("Test version 2 txs")

        # -1 OP_CSV tx and (empty stack) OP_CSV tx should fail
        self.send_blocks([self.create_test_block([bip112tx_special_v2])], success=False,
                         reject_reason='non-mandatory-script-verify-flag (Negative locktime)')
        self.send_blocks([self.create_test_block([bip112tx_emptystack_v2])], success=False,
                         reject_reason='non-mandatory-script-verify-flag (Operation not valid with the current stack size)')

        # If SEQUENCE_LOCKTIME_DISABLE_FLAG is set in argument to OP_CSV, version 2 txs should pass (all sequence locks are met)
        success_txs = [tx['tx'] for tx in bip112txs_vary_OP_CSV_v2 if tx['sdf']]
        success_txs += [tx['tx'] for tx in bip112txs_vary_OP_CSV_9_v2 if tx['sdf']]

        self.send_blocks([self.create_test_block(success_txs)])
        self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash())

        # SEQUENCE_LOCKTIME_DISABLE_FLAG is unset in argument to OP_CSV for all remaining txs ##

        # All txs with nSequence 9 should fail either due to earlier mismatch or failing the CSV check
        fail_txs = all_rlt_txs(bip112txs_vary_nSequence_9_v2)
        fail_txs += [tx['tx'] for tx in bip112txs_vary_OP_CSV_9_v2 if not tx['sdf']]
        for tx in fail_txs:
            self.send_blocks([self.create_test_block([tx])], success=False,
                             reject_reason='non-mandatory-script-verify-flag (Locktime requirement not satisfied)')

        # If SEQUENCE_LOCKTIME_DISABLE_FLAG is set in nSequence, tx should fail
        fail_txs = [tx['tx'] for tx in bip112txs_vary_nSequence_v2 if tx['sdf']]
        for tx in fail_txs:
            self.send_blocks([self.create_test_block([tx])], success=False,
                             reject_reason='non-mandatory-script-verify-flag (Locktime requirement not satisfied)')

        # If sequencelock types mismatch, tx should fail
        fail_txs = [tx['tx'] for tx in bip112txs_vary_nSequence_v2 if not tx['sdf'] and tx['stf']]
        fail_txs += [tx['tx'] for tx in bip112txs_vary_OP_CSV_v2 if not tx['sdf'] and tx['stf']]
        for tx in fail_txs:
            self.send_blocks([self.create_test_block([tx])], success=False,
                             reject_reason='non-mandatory-script-verify-flag (Locktime requirement not satisfied)')

        # Remaining txs should pass, just test masking works properly
        success_txs = [tx['tx'] for tx in bip112txs_vary_nSequence_v2 if not tx['sdf'] and not tx['stf']]
        success_txs += [tx['tx'] for tx in bip112txs_vary_OP_CSV_v2 if not tx['sdf'] and not tx['stf']]
        self.send_blocks([self.create_test_block(success_txs)])
        self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash())

        # Additional test: check that comparison of two time types works properly
        time_txs = []
        for tx in [tx['tx'] for tx in bip112txs_vary_OP_CSV_v2 if not tx['sdf'] and tx['stf']]:
            tx.vin[0].nSequence = BASE_RELATIVE_LOCKTIME | SEQ_TYPE_FLAG
            signtx = sign_transaction(self.nodes[0], tx)
            time_txs.append(signtx)

        self.send_blocks([self.create_test_block(time_txs)])
        self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash())
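
Both CSV examples on this page (Example #1 above and Example #5 below) call module-level helpers that the snippets do not include. A rough sketch of those helpers, modeled on Bitcoin Core's feature_csv_activation.py (the flag constants and the 49.99 amount are assumptions drawn from that test, so check them against your tree before reusing):

    from decimal import Decimal

    # BIP68 nSequence flag bits: 'sdf' = disable flag, 'stf' = type flag
    # (time-based rather than height-based); the two "random" bits give
    # the 16 relative-locktime variants used above.
    BASE_RELATIVE_LOCKTIME = 10
    SEQ_DISABLE_FLAG = 1 << 31
    SEQ_RANDOM_HIGH_BIT = 1 << 25
    SEQ_TYPE_FLAG = 1 << 22
    SEQ_RANDOM_LOW_BIT = 1 << 18

    def relative_locktime(sdf, srhb, stf, srlb):
        """Return a relative locktime with the given flag bits set."""
        locktime = BASE_RELATIVE_LOCKTIME
        if sdf:
            locktime |= SEQ_DISABLE_FLAG
        if srhb:
            locktime |= SEQ_RANDOM_HIGH_BIT
        if stf:
            locktime |= SEQ_TYPE_FLAG
        if srlb:
            locktime |= SEQ_RANDOM_LOW_BIT
        return locktime

    def all_rlt_txs(txs):
        """Flatten a list of {'tx', 'sdf', 'stf'} dicts to the bare transactions."""
        return [tx['tx'] for tx in txs]

    def send_generic_input_tx(node, coinbases, address):
        """Spend one mature coinbase to `address` and return the txid."""
        amount = Decimal("49.99")
        return node.sendrawtransaction(ToHex(sign_transaction(node, create_transaction(
            node, node.getblock(coinbases.pop())['tx'][0], address, amount=amount))))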
Example #2
    def run_test(self):
        # Add p2p connection to node0
        node = self.nodes[0]  # convenience reference to the node
        node.add_p2p_connection(P2PDataStore())

        best_block = node.getblock(node.getbestblockhash())
        tip = int(node.getbestblockhash(), 16)
        height = best_block["height"] + 1
        block_time = best_block["time"] + 1

        self.log.info("Create a new block with an anyone-can-spend coinbase")

        height = 1
        block = create_block(tip, create_coinbase(height), block_time)
        block.nVersion = 0x20000000
        block.solve()
        # Save the coinbase for later
        block1 = block
        tip = block.sha256
        node.p2p.send_blocks_and_test([block1], node, success=True)

        self.log.info("Mature the block.")
        node.generate(100)

        best_block = node.getblock(node.getbestblockhash())
        tip = int(node.getbestblockhash(), 16)
        height = best_block["height"] + 1
        block_time = best_block["time"] + 1

        # Use merkle-root malleability to generate an invalid block with
        # same blockheader.
        # Manufacture a block with 3 transactions (coinbase, spend of prior
        # coinbase, spend of that spend).  Duplicate the 3rd transaction to
        # leave merkle root and blockheader unchanged but invalidate the block.
        self.log.info("Test merkle root malleability.")

        block2 = create_block(tip, create_coinbase(height), block_time)
        block2.nVersion = 0x20000000
        block_time += 1

        # b'\x51' is OP_TRUE
        tx1 = create_tx_with_script(block1.vtx[0],
                                    0,
                                    script_sig=b'\x51',
                                    amount=50 * COIN)
        tx2 = create_tx_with_script(tx1,
                                    0,
                                    script_sig=b'\x51',
                                    amount=50 * COIN)

        block2.vtx.extend([tx1, tx2])
        block2.hashMerkleRoot = block2.calc_merkle_root()
        block2.rehash()
        block2.solve()
        orig_hash = block2.sha256
        block2_orig = copy.deepcopy(block2)

        # Mutate block 2
        block2.vtx.append(tx2)
        assert_equal(block2.hashMerkleRoot, block2.calc_merkle_root())
        assert_equal(orig_hash, block2.rehash())
        assert block2_orig.vtx != block2.vtx

        node.p2p.send_blocks_and_test([block2],
                                      node,
                                      success=False,
                                      reject_code=16,
                                      reject_reason=b'bad-txns-duplicate')

        # Check transactions for duplicate inputs
        self.log.info("Test duplicate input block.")

        block2_orig.vtx[2].vin.append(block2_orig.vtx[2].vin[0])
        block2_orig.vtx[2].rehash()
        block2_orig.hashMerkleRoot = block2_orig.calc_merkle_root()
        block2_orig.rehash()
        block2_orig.solve()
        node.p2p.send_blocks_and_test(
            [block2_orig],
            node,
            success=False,
            reject_reason=b'bad-txns-inputs-duplicate')

        self.log.info("Test very broken block.")

        block3 = create_block(tip, create_coinbase(height), block_time)
        block3.nVersion = 0x20000000
        block_time += 1
        block3.vtx[0].vout[0].nValue = 100 * COIN  # Too high!
        block3.vtx[0].sha256 = None
        block3.vtx[0].calc_sha256()
        block3.hashMerkleRoot = block3.calc_merkle_root()
        block3.rehash()
        block3.solve()

        node.p2p.send_blocks_and_test([block3],
                                      node,
                                      success=False,
                                      reject_code=16,
                                      reject_reason=b'bad-cb-amount')
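
The merkle-root malleability just exercised is easy to demonstrate in isolation: with an odd number of leaves, Bitcoin's tree pairs the last hash with itself, so appending an explicit duplicate of the final transaction leaves the root unchanged. A minimal, self-contained sketch of the tree construction (txid byte-order details ignored):

    import hashlib

    def dsha256(b):
        return hashlib.sha256(hashlib.sha256(b).digest()).digest()

    def merkle_root(hashes):
        hashes = list(hashes)
        while len(hashes) > 1:
            if len(hashes) % 2:
                hashes.append(hashes[-1])  # odd count: last hash is paired with itself
            hashes = [dsha256(hashes[i] + hashes[i + 1])
                      for i in range(0, len(hashes), 2)]
        return hashes[0]

    txids = [dsha256(bytes([i])) for i in range(3)]                # a 3-tx block
    assert merkle_root(txids) == merkle_root(txids + [txids[-1]])  # duplicated 3rd tx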
Example #3
    def bootstrap_p2p(self, *, num_connections=1):
        """Add a P2P connection to the node.

        Helper to connect and wait for version handshake."""
        for _ in range(num_connections):
            self.nodes[0].add_p2p_connection(P2PDataStore())
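
Note that add_p2p_connection() performs the version/verack handshake itself, which is why the loop body needs no explicit wait. Tests that expect the node to drop the connection typically pair this helper with a reconnect variant along these lines (a sketch, not verbatim from any one test):

    def reconnect_p2p(self, **kwargs):
        """Tear down and rebuild the P2P connection after an expected disconnect."""
        self.nodes[0].disconnect_p2ps()
        self.bootstrap_p2p(**kwargs)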
Example #4
    def run_test(self):
        # Add p2p connection to node0
        node = self.nodes[0]  # convenience reference to the node
        node.add_p2p_connection(P2PDataStore())

        best_block = node.getblock(node.getbestblockhash())
        tip = int(node.getbestblockhash(), 16)
        height = best_block["height"] + 1
        block_time = best_block["time"] + 1

        self.log.info("Create a new block with an anyone-can-spend coinbase")

        height = 1
        block = create_block(tip, create_coinbase(height), block_time)
        block.solve()
        # Save the coinbase for later
        block1 = block
        tip = block.sha256
        node.p2p.send_blocks_and_test([block1], node, success=True)

        self.log.info("Mature the block.")
        node.generatetoaddress(100, node.get_deterministic_priv_key().address)

        best_block = node.getblock(node.getbestblockhash())
        tip = int(node.getbestblockhash(), 16)
        height = best_block["height"] + 1
        block_time = best_block["time"] + 1

        # Use merkle-root malleability to generate an invalid block with
        # same blockheader (CVE-2012-2459).
        # Manufacture a block with 3 transactions (coinbase, spend of prior
        # coinbase, spend of that spend).  Duplicate the 3rd transaction to
        # leave merkle root and blockheader unchanged but invalidate the block.
        # For more information on merkle-root malleability see src/consensus/merkle.cpp.
        self.log.info("Test merkle root malleability.")

        block2 = create_block(tip, create_coinbase(height), block_time)
        block_time += 1

        # b'\x51' is OP_TRUE
        tx1 = create_tx_with_script(block1.vtx[0], 0, script_sig=b'\x51', amount=50 * COIN)
        tx2 = create_tx_with_script(tx1, 0, script_sig=b'\x51', amount=50 * COIN)

        block2.vtx.extend([tx1, tx2])
        block2.hashMerkleRoot = block2.calc_merkle_root()
        block2.rehash()
        block2.solve()
        orig_hash = block2.sha256
        block2_orig = copy.deepcopy(block2)

        # Mutate block 2
        block2.vtx.append(tx2)
        assert_equal(block2.hashMerkleRoot, block2.calc_merkle_root())
        assert_equal(orig_hash, block2.rehash())
        assert block2_orig.vtx != block2.vtx

        node.p2p.send_blocks_and_test([block2], node, success=False, reject_reason='bad-txns-duplicate')

        # Check transactions for duplicate inputs (CVE-2018-17144)
        self.log.info("Test duplicate input block.")

        block2_dup = copy.deepcopy(block2_orig)
        block2_dup.vtx[2].vin.append(block2_dup.vtx[2].vin[0])
        block2_dup.vtx[2].rehash()
        block2_dup.hashMerkleRoot = block2_dup.calc_merkle_root()
        block2_dup.rehash()
        block2_dup.solve()
        node.p2p.send_blocks_and_test([block2_dup], node, success=False, reject_reason='bad-txns-inputs-duplicate')

        self.log.info("Test very broken block.")

        block3 = create_block(tip, create_coinbase(height), block_time)
        block_time += 1
        block3.vtx[0].vout[0].nValue = 100 * COIN  # Too high!
        block3.vtx[0].sha256 = None
        block3.vtx[0].calc_sha256()
        block3.hashMerkleRoot = block3.calc_merkle_root()
        block3.rehash()
        block3.solve()

        node.p2p.send_blocks_and_test([block3], node, success=False, reject_reason='bad-cb-amount')


        # Complete testing of CVE-2012-2459 by sending the original block.
        # It should be accepted even though it has the same hash as the mutated one.

        self.log.info("Test accepting original block after rejecting its mutated version.")
        node.p2p.send_blocks_and_test([block2_orig], node, success=True, timeout=5)

        # Update tip info
        height += 1
        block_time += 1
        tip = int(block2_orig.hash, 16)

        # Complete testing of CVE-2018-17144, by checking for the inflation bug.
        # Create a block that spends the output of a tx in a previous block.
        block4 = create_block(tip, create_coinbase(height), block_time)
        tx3 = create_tx_with_script(tx2, 0, script_sig=b'\x51', amount=50 * COIN)

        # Duplicates input
        tx3.vin.append(tx3.vin[0])
        tx3.rehash()
        block4.vtx.append(tx3)
        block4.hashMerkleRoot = block4.calc_merkle_root()
        block4.rehash()
        block4.solve()
        self.log.info("Test inflation by duplicating input")
        node.p2p.send_blocks_and_test([block4], node, success=False, reject_reason='bad-txns-inputs-duplicate')
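
For reference, create_tx_with_script, used by both invalid-block examples, builds a minimal one-input, one-output transaction. Roughly, per test_framework.blocktools (a sketch; confirm the signature against your framework version):

    def create_tx_with_script(prevtx, n, script_sig=b"", *, amount, script_pub_key=CScript()):
        """Spend prevtx's n-th output; anyone-can-spend output by default."""
        tx = CTransaction()
        assert n < len(prevtx.vout)
        tx.vin.append(CTxIn(COutPoint(prevtx.sha256, n), script_sig, 0xffffffff))
        tx.vout.append(CTxOut(amount, script_pub_key))
        tx.calc_sha256()
        return tx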
Example #5
    def run_test(self):
        self.nodes[0].add_p2p_connection(P2PDataStore())

        self.log.info("Generate blocks in the past for coinbase outputs.")
        long_past_time = int(time.time()) - 600 * 1000  # enough to build up to 1000 blocks 10 minutes apart without worrying about getting into the future
        self.nodes[0].setmocktime(long_past_time - 100)  # enough so that the generated blocks will still all be before long_past_time
        self.coinbase_blocks = self.nodes[0].generate(1 + 16 + 2 * 32 + 1)  # 82 blocks generated for inputs
        self.nodes[0].setmocktime(0)  # set time back to present so yielded blocks aren't in the future as we advance last_block_time
        self.tipheight = 82  # height of the next block to build
        self.last_block_time = long_past_time
        self.tip = int(self.nodes[0].getbestblockhash(), 16)
        self.nodeaddress = self.nodes[0].getnewaddress()

        self.log.info("Test that the csv softfork is DEFINED")
        assert_equal(get_bip9_status(self.nodes[0], 'csv')['status'], 'defined')
        test_blocks = self.generate_blocks(61, 4)
        self.sync_blocks(test_blocks)

        self.log.info("Advance from DEFINED to STARTED, height = 143")
        assert_equal(get_bip9_status(self.nodes[0], 'csv')['status'], 'started')

        self.log.info("Fail to achieve LOCKED_IN")
        # 100 out of 144 signal bit 0. Use a variety of bits to simulate multiple parallel softforks

        test_blocks = self.generate_blocks(50, 536870913)  # 0x20000001 (signalling ready)
        test_blocks = self.generate_blocks(20, 4, test_blocks)  # 0x00000004 (signalling not)
        test_blocks = self.generate_blocks(50, 536871169, test_blocks)  # 0x20000101 (signalling ready)
        test_blocks = self.generate_blocks(24, 536936448, test_blocks)  # 0x20010000 (signalling not)
        self.sync_blocks(test_blocks)

        self.log.info("Failed to advance past STARTED, height = 287")
        assert_equal(get_bip9_status(self.nodes[0], 'csv')['status'], 'started')

        self.log.info("Generate blocks to achieve LOCK-IN")
        # 108 out of 144 signal bit 0 to achieve lock-in
        # using a variety of bits to simulate multiple parallel softforks
        test_blocks = self.generate_blocks(58, 536870913)  # 0x20000001 (signalling ready)
        test_blocks = self.generate_blocks(26, 4, test_blocks)  # 0x00000004 (signalling not)
        test_blocks = self.generate_blocks(50, 536871169, test_blocks)  # 0x20000101 (signalling ready)
        test_blocks = self.generate_blocks(10, 536936448, test_blocks)  # 0x20010000 (signalling not)
        self.sync_blocks(test_blocks)

        self.log.info("Advanced from STARTED to LOCKED_IN, height = 431")
        assert_equal(get_bip9_status(self.nodes[0], 'csv')['status'], 'locked_in')

        # Generate 140 more version 4 blocks
        test_blocks = self.generate_blocks(140, 4)
        self.sync_blocks(test_blocks)

        # Inputs at height = 572
        #
        # Put inputs for all tests in the chain at height 572 (tip now = 571) (time increases by 600s per block)
        # Note we reuse inputs for v1 and v2 txs so must test these separately
        # 16 normal inputs
        bip68inputs = []
        for i in range(16):
            bip68inputs.append(send_generic_input_tx(self.nodes[0], self.coinbase_blocks, self.nodeaddress))

        # 2 sets of 16 inputs with 10 OP_CSV OP_DROP (actually will be prepended to spending scriptSig)
        bip112basicinputs = []
        for j in range(2):
            inputs = []
            for i in range(16):
                inputs.append(send_generic_input_tx(self.nodes[0], self.coinbase_blocks, self.nodeaddress))
            bip112basicinputs.append(inputs)

        # 2 sets of 16 varied inputs with (relative_lock_time) OP_CSV OP_DROP (actually will be prepended to spending scriptSig)
        bip112diverseinputs = []
        for j in range(2):
            inputs = []
            for i in range(16):
                inputs.append(send_generic_input_tx(self.nodes[0], self.coinbase_blocks, self.nodeaddress))
            bip112diverseinputs.append(inputs)

        # 1 special input with -1 OP_CSV OP_DROP (actually will be prepended to spending scriptSig)
        bip112specialinput = send_generic_input_tx(self.nodes[0], self.coinbase_blocks, self.nodeaddress)

        # 1 normal input
        bip113input = send_generic_input_tx(self.nodes[0], self.coinbase_blocks, self.nodeaddress)

        self.nodes[0].setmocktime(self.last_block_time + 600)
        inputblockhash = self.nodes[0].generate(1)[0]  # 1 block generated for inputs to be in chain at height 572
        self.nodes[0].setmocktime(0)
        self.tip = int(inputblockhash, 16)
        self.tipheight += 1
        self.last_block_time += 600
        assert_equal(len(self.nodes[0].getblock(inputblockhash, True)["tx"]), 82 + 1)

        # 2 more version 4 blocks
        test_blocks = self.generate_blocks(2, 4)
        self.sync_blocks(test_blocks)

        self.log.info("Not yet advanced to ACTIVE, height = 574 (will activate for block 576, not 575)")
        assert_equal(get_bip9_status(self.nodes[0], 'csv')['status'], 'locked_in')

        # Test both version 1 and version 2 transactions for all tests
        # BIP113 test transaction will be modified before each use to put in appropriate block time
        bip113tx_v1 = create_transaction(self.nodes[0], bip113input, self.nodeaddress, amount=Decimal("49.98"))
        bip113tx_v1.vin[0].nSequence = 0xFFFFFFFE
        bip113tx_v1.nVersion = 1
        bip113tx_v2 = create_transaction(self.nodes[0], bip113input, self.nodeaddress, amount=Decimal("49.98"))
        bip113tx_v2.vin[0].nSequence = 0xFFFFFFFE
        bip113tx_v2.nVersion = 2

        # For BIP68 test all 16 relative sequence locktimes
        bip68txs_v1 = create_bip68txs(self.nodes[0], bip68inputs, 1, self.nodeaddress)
        bip68txs_v2 = create_bip68txs(self.nodes[0], bip68inputs, 2, self.nodeaddress)

        # For BIP112 test:
        # 16 relative sequence locktimes of 10 against 10 OP_CSV OP_DROP inputs
        bip112txs_vary_nSequence_v1 = create_bip112txs(self.nodes[0], bip112basicinputs[0], False, 1, self.nodeaddress)
        bip112txs_vary_nSequence_v2 = create_bip112txs(self.nodes[0], bip112basicinputs[0], False, 2, self.nodeaddress)
        # 16 relative sequence locktimes of 9 against 10 OP_CSV OP_DROP inputs
        bip112txs_vary_nSequence_9_v1 = create_bip112txs(self.nodes[0], bip112basicinputs[1], False, 1, self.nodeaddress, -1)
        bip112txs_vary_nSequence_9_v2 = create_bip112txs(self.nodes[0], bip112basicinputs[1], False, 2, self.nodeaddress, -1)
        # sequence lock time of 10 against 16 (relative_lock_time) OP_CSV OP_DROP inputs
        bip112txs_vary_OP_CSV_v1 = create_bip112txs(self.nodes[0], bip112diverseinputs[0], True, 1, self.nodeaddress)
        bip112txs_vary_OP_CSV_v2 = create_bip112txs(self.nodes[0], bip112diverseinputs[0], True, 2, self.nodeaddress)
        # sequence lock time of 9 against 16 (relative_lock_time) OP_CSV OP_DROP inputs
        bip112txs_vary_OP_CSV_9_v1 = create_bip112txs(self.nodes[0], bip112diverseinputs[1], True, 1, self.nodeaddress, -1)
        bip112txs_vary_OP_CSV_9_v2 = create_bip112txs(self.nodes[0], bip112diverseinputs[1], True, 2, self.nodeaddress, -1)
        # -1 OP_CSV OP_DROP input
        bip112tx_special_v1 = create_bip112special(self.nodes[0], bip112specialinput, 1, self.nodeaddress)
        bip112tx_special_v2 = create_bip112special(self.nodes[0], bip112specialinput, 2, self.nodeaddress)

        self.log.info("TESTING")

        self.log.info("Pre-Soft Fork Tests. All txs should pass.")
        self.log.info("Test version 1 txs")

        success_txs = []
        # add BIP113 tx and -1 CSV tx
        bip113tx_v1.nLockTime = self.last_block_time - 600 * 5  # = MTP of prior block (not <) but < time put on current block
        bip113signed1 = sign_transaction(self.nodes[0], bip113tx_v1)
        success_txs.append(bip113signed1)
        success_txs.append(bip112tx_special_v1)
        # add BIP 68 txs
        success_txs.extend(all_rlt_txs(bip68txs_v1))
        # add BIP 112 with seq=10 txs
        success_txs.extend(all_rlt_txs(bip112txs_vary_nSequence_v1))
        success_txs.extend(all_rlt_txs(bip112txs_vary_OP_CSV_v1))
        # try BIP 112 with seq=9 txs
        success_txs.extend(all_rlt_txs(bip112txs_vary_nSequence_9_v1))
        success_txs.extend(all_rlt_txs(bip112txs_vary_OP_CSV_9_v1))
        self.sync_blocks([self.create_test_block(success_txs)])
        self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash())

        self.log.info("Test version 2 txs")

        success_txs = []
        # add BIP113 tx and -1 CSV tx
        bip113tx_v2.nLockTime = self.last_block_time - 600 * 5  # = MTP of prior block (not <) but < time put on current block
        bip113signed2 = sign_transaction(self.nodes[0], bip113tx_v2)
        success_txs.append(bip113signed2)
        success_txs.append(bip112tx_special_v2)
        # add BIP 68 txs
        success_txs.extend(all_rlt_txs(bip68txs_v2))
        # add BIP 112 with seq=10 txs
        success_txs.extend(all_rlt_txs(bip112txs_vary_nSequence_v2))
        success_txs.extend(all_rlt_txs(bip112txs_vary_OP_CSV_v2))
        # try BIP 112 with seq=9 txs
        success_txs.extend(all_rlt_txs(bip112txs_vary_nSequence_9_v2))
        success_txs.extend(all_rlt_txs(bip112txs_vary_OP_CSV_9_v2))
        self.sync_blocks([self.create_test_block(success_txs)])
        self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash())

        # 1 more version 4 block to get us to height 575 so the fork should now be active for the next block
        test_blocks = self.generate_blocks(1, 4)
        self.sync_blocks(test_blocks)
        assert_equal(get_bip9_status(self.nodes[0], 'csv')['status'], 'active')

        self.log.info("Post-Soft Fork Tests.")

        self.log.info("BIP 113 tests")
        # BIP 113 tests should now fail regardless of version number if nLockTime isn't satisfied by new rules
        bip113tx_v1.nLockTime = self.last_block_time - 600 * 5  # = MTP of prior block (not <) but < time put on current block
        bip113signed1 = sign_transaction(self.nodes[0], bip113tx_v1)
        bip113tx_v2.nLockTime = self.last_block_time - 600 * 5  # = MTP of prior block (not <) but < time put on current block
        bip113signed2 = sign_transaction(self.nodes[0], bip113tx_v2)
        for bip113tx in [bip113signed1, bip113signed2]:
            self.sync_blocks([self.create_test_block([bip113tx])], success=False)
        # BIP 113 tests should now pass if the locktime is < MTP
        bip113tx_v1.nLockTime = self.last_block_time - 600 * 5 - 1  # < MTP of prior block
        bip113signed1 = sign_transaction(self.nodes[0], bip113tx_v1)
        bip113tx_v2.nLockTime = self.last_block_time - 600 * 5 - 1  # < MTP of prior block
        bip113signed2 = sign_transaction(self.nodes[0], bip113tx_v2)
        for bip113tx in [bip113signed1, bip113signed2]:
            self.sync_blocks([self.create_test_block([bip113tx])])
            self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash())

        # Next block height = 580 after 4 blocks of random version
        test_blocks = self.generate_blocks(4, 1234)
        self.sync_blocks(test_blocks)

        self.log.info("BIP 68 tests")
        self.log.info("Test version 1 txs - all should still pass")

        success_txs = []
        success_txs.extend(all_rlt_txs(bip68txs_v1))
        self.sync_blocks([self.create_test_block(success_txs)])
        self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash())

        self.log.info("Test version 2 txs")

        # All txs with SEQUENCE_LOCKTIME_DISABLE_FLAG set pass
        bip68success_txs = [tx['tx'] for tx in bip68txs_v2 if tx['sdf']]
        self.sync_blocks([self.create_test_block(bip68success_txs)])
        self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash())

        # All txs without flag fail as we are at delta height = 8 < 10 and delta time = 8 * 600 < 10 * 512
        bip68timetxs = [tx['tx'] for tx in bip68txs_v2 if not tx['sdf'] and tx['stf']]
        for tx in bip68timetxs:
            self.sync_blocks([self.create_test_block([tx])], success=False)

        bip68heighttxs = [tx['tx'] for tx in bip68txs_v2 if not tx['sdf'] and not tx['stf']]
        for tx in bip68heighttxs:
            self.sync_blocks([self.create_test_block([tx])], success=False)

        # Advance one block to 581
        test_blocks = self.generate_blocks(1, 1234)
        self.sync_blocks(test_blocks)

        # Height txs should fail and time txs should now pass 9 * 600 > 10 * 512
        bip68success_txs.extend(bip68timetxs)
        self.sync_blocks([self.create_test_block(bip68success_txs)])
        self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash())
        for tx in bip68heighttxs:
            self.sync_blocks([self.create_test_block([tx])], success=False)

        # Advance one block to 582
        test_blocks = self.generate_blocks(1, 1234)
        self.sync_blocks(test_blocks)

        # All BIP 68 txs should pass
        bip68success_txs.extend(bip68heighttxs)
        self.sync_blocks([self.create_test_block(bip68success_txs)])
        self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash())

        self.log.info("BIP 112 tests")
        self.log.info("Test version 1 txs")

        # -1 OP_CSV tx should fail
        self.sync_blocks([self.create_test_block([bip112tx_special_v1])], success=False)
        # If SEQUENCE_LOCKTIME_DISABLE_FLAG is set in argument to OP_CSV, version 1 txs should still pass

        success_txs = [tx['tx'] for tx in bip112txs_vary_OP_CSV_v1 if tx['sdf']]
        success_txs += [tx['tx'] for tx in bip112txs_vary_OP_CSV_9_v1 if tx['sdf']]
        self.sync_blocks([self.create_test_block(success_txs)])
        self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash())

        # If SEQUENCE_LOCKTIME_DISABLE_FLAG is unset in argument to OP_CSV, version 1 txs should now fail
        fail_txs = all_rlt_txs(bip112txs_vary_nSequence_v1)
        fail_txs += all_rlt_txs(bip112txs_vary_nSequence_9_v1)
        fail_txs += [tx['tx'] for tx in bip112txs_vary_OP_CSV_v1 if not tx['sdf']]
        fail_txs += [tx['tx'] for tx in bip112txs_vary_OP_CSV_9_v1 if not tx['sdf']]
        for tx in fail_txs:
            self.sync_blocks([self.create_test_block([tx])], success=False)

        self.log.info("Test version 2 txs")

        # -1 OP_CSV tx should fail
        self.sync_blocks([self.create_test_block([bip112tx_special_v2])], success=False)

        # If SEQUENCE_LOCKTIME_DISABLE_FLAG is set in argument to OP_CSV, version 2 txs should pass (all sequence locks are met)
        success_txs = [tx['tx'] for tx in bip112txs_vary_OP_CSV_v2 if tx['sdf']]
        success_txs += [tx['tx'] for tx in bip112txs_vary_OP_CSV_9_v2 if tx['sdf']]

        self.sync_blocks([self.create_test_block(success_txs)])
        self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash())

        # SEQUENCE_LOCKTIME_DISABLE_FLAG is unset in argument to OP_CSV for all remaining txs ##

        # All txs with nSequence 9 should fail either due to earlier mismatch or failing the CSV check
        fail_txs = all_rlt_txs(bip112txs_vary_nSequence_9_v2)
        fail_txs += [tx['tx'] for tx in bip112txs_vary_OP_CSV_9_v2 if not tx['sdf']]
        for tx in fail_txs:
            self.sync_blocks([self.create_test_block([tx])], success=False)

        # If SEQUENCE_LOCKTIME_DISABLE_FLAG is set in nSequence, tx should fail
        fail_txs = [tx['tx'] for tx in bip112txs_vary_nSequence_v2 if tx['sdf']]
        for tx in fail_txs:
            self.sync_blocks([self.create_test_block([tx])], success=False)

        # If sequencelock types mismatch, tx should fail
        fail_txs = [tx['tx'] for tx in bip112txs_vary_nSequence_v2 if not tx['sdf'] and tx['stf']]
        fail_txs += [tx['tx'] for tx in bip112txs_vary_OP_CSV_v2 if not tx['sdf'] and tx['stf']]
        for tx in fail_txs:
            self.sync_blocks([self.create_test_block([tx])], success=False)

        # Remaining txs should pass, just test masking works properly
        success_txs = [tx['tx'] for tx in bip112txs_vary_nSequence_v2 if not tx['sdf'] and not tx['stf']]
        success_txs += [tx['tx'] for tx in bip112txs_vary_OP_CSV_v2 if not tx['sdf'] and not tx['stf']]
        self.sync_blocks([self.create_test_block(success_txs)])
        self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash())

        # Additional test: check that comparison of two time types works properly
        time_txs = []
        for tx in [tx['tx'] for tx in bip112txs_vary_OP_CSV_v2 if not tx['sdf'] and tx['stf']]:
            tx.vin[0].nSequence = BASE_RELATIVE_LOCKTIME | SEQ_TYPE_FLAG
            signtx = sign_transaction(self.nodes[0], tx)
            time_txs.append(signtx)

        self.sync_blocks([self.create_test_block(time_txs)])
        self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash())
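
The generate_blocks/create_test_block methods this example calls (with an explicit block version for BIP9 signalling) live elsewhere in the test class. A sketch of the BIP9-era variants, assuming the create_block/create_coinbase helpers from test_framework.blocktools:

    def create_test_block(self, txs, version=536870912):
        """Build a solved block on self.tip containing txs."""
        block = create_block(self.tip, create_coinbase(self.tipheight + 1),
                             self.last_block_time + 600)
        block.nVersion = version
        block.vtx.extend(txs)
        block.hashMerkleRoot = block.calc_merkle_root()
        block.rehash()
        block.solve()
        return block

    def generate_blocks(self, number, version, test_blocks=None):
        """Build `number` empty blocks, advancing tip/height/time bookkeeping."""
        if test_blocks is None:
            test_blocks = []
        for _ in range(number):
            block = self.create_test_block([], version)
            test_blocks.append(block)
            self.last_block_time += 600
            self.tip = block.sha256
            self.tipheight += 1
        return test_blocks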
Example #6
    def run_test(self):
        (node, ) = self.nodes
        node.add_p2p_connection(P2PDataStore())
        # Get out of IBD
        node.generatetoaddress(1, node.get_deterministic_priv_key().address)

        tip = self.getbestblock(node)

        self.log.info("Create some blocks with OP_1 coinbase for spending.")
        blocks = []
        for _ in range(20):
            tip = self.build_block(tip)
            blocks.append(tip)
        node.p2p.send_blocks_and_test(blocks, node)
        self.spendable_outputs = deque(block.vtx[0] for block in blocks)

        self.log.info("Mature the blocks.")
        node.generatetoaddress(100, node.get_deterministic_priv_key().address)

        tip = self.getbestblock(node)

        # To make compact and fast-to-verify transactions, we'll use
        # CHECKDATASIG over and over with the same data.
        # (Using the same stuff over and over again means we get to hit the
        # node's signature cache and don't need to make new signatures every
        # time.)
        cds_message = b''
        # r=1 and s=1 ecdsa, the minimum values.
        cds_signature = bytes.fromhex('3006020101020101')
        # Recovered pubkey
        cds_pubkey = bytes.fromhex(
            '03089b476b570d66fad5a20ae6188ebbaf793a4c2a228c65f3d79ee8111d56c932'
        )

        fundings = []

        def make_spend(scriptpubkey, scriptsig):
            # Add a funding tx to fundings, and return a tx spending that using
            # scriptsig.
            self.log.debug(
                "Gen tx with locking script {} unlocking script {} .".format(
                    scriptpubkey.hex(), scriptsig.hex()))

            # get funds locked with OP_1
            sourcetx = self.spendable_outputs.popleft()
            # make funding that forwards to scriptpubkey
            fundtx = create_transaction(sourcetx, scriptpubkey)
            fundings.append(fundtx)

            # make the spending
            tx = CTransaction()
            tx.vin.append(CTxIn(COutPoint(fundtx.sha256, 1), scriptsig))
            tx.vout.append(CTxOut(0, CScript([OP_RETURN])))
            pad_tx(tx)
            tx.rehash()
            return tx

        self.log.info("Generating txes used in this test")

        # "Good" txns that pass our rule:

        goodtxes = [
            # most dense allowed input -- 2 sigchecks with a 26-byte scriptsig.
            make_spend(
                CScript([
                    cds_message, cds_pubkey, OP_3DUP, OP_CHECKDATASIGVERIFY,
                    OP_CHECKDATASIGVERIFY
                ]), CScript([b'x' * 16, cds_signature])),

            # 4 sigchecks with a 112-byte scriptsig, just at the limit for this
            # sigchecks count.
            make_spend(
                CScript([
                    cds_message, cds_pubkey, OP_3DUP, OP_CHECKDATASIGVERIFY,
                    OP_3DUP, OP_CHECKDATASIGVERIFY, OP_3DUP,
                    OP_CHECKDATASIGVERIFY, OP_CHECKDATASIGVERIFY
                ]), CScript([b'x' * 101, cds_signature])),

            # "nice" transaction - 1 sigcheck with 9-byte scriptsig.
            make_spend(CScript([cds_message, cds_pubkey, OP_CHECKDATASIG]),
                       CScript([cds_signature])),

            # 1 sigcheck with 0-byte scriptsig.
            make_spend(
                CScript(
                    [cds_signature, cds_message, cds_pubkey, OP_CHECKDATASIG]),
                CScript([])),
        ]
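        # The byte counts above and below match a per-input density rule of
        # the form sigchecks <= (len(scriptsig) + 60) // 43 (assumed from the
        # BCH sigchecks spec): 2 sigchecks need >= 2 * 43 - 60 = 26 bytes,
        # 4 sigchecks need >= 4 * 43 - 60 = 112 bytes.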

        badtxes = [
            # "Bad" txns:
            # 2 sigchecks with a 25-byte scriptsig, just 1 byte too short.
            make_spend(
                CScript([
                    cds_message, cds_pubkey, OP_3DUP, OP_CHECKDATASIGVERIFY,
                    OP_CHECKDATASIGVERIFY
                ]), CScript([b'x' * 15, cds_signature])),

            # 4 sigchecks with a 111-byte scriptsig, just 1 byte too short.
            make_spend(
                CScript([
                    cds_message, cds_pubkey, OP_3DUP, OP_CHECKDATASIGVERIFY,
                    OP_3DUP, OP_CHECKDATASIGVERIFY, OP_3DUP,
                    OP_CHECKDATASIGVERIFY, OP_CHECKDATASIGVERIFY
                ]), CScript([b'x' * 100, cds_signature])),
        ]

        goodtxids = set(t.hash for t in goodtxes)
        badtxids = set(t.hash for t in badtxes)

        self.log.info("Funding the txes")
        tip = self.build_block(tip, fundings)
        node.p2p.send_blocks_and_test([tip], node)

        # Activation tests

        self.log.info("Approach to just before upgrade activation")
        # Move our clock to the upgrade time so we will accept such
        # future-timestamped blocks.
        node.setmocktime(SIGCHECKS_ACTIVATION_TIME + 10)
        # Mine six blocks with timestamp starting at
        # SIGCHECKS_ACTIVATION_TIME-1
        blocks = []
        for i in range(-1, 5):
            tip = self.build_block(tip, nTime=SIGCHECKS_ACTIVATION_TIME + i)
            blocks.append(tip)
        node.p2p.send_blocks_and_test(blocks, node)
        assert_equal(node.getblockchaininfo()['mediantime'],
                     SIGCHECKS_ACTIVATION_TIME - 1)

        self.log.info(
            "The next block will activate, but the activation block itself must follow old rules"
        )

        self.log.info("Send all the transactions just before upgrade")

        node.p2p.send_txs_and_test(goodtxes, node)
        node.p2p.send_txs_and_test(badtxes, node)

        assert_equal(set(node.getrawmempool()), goodtxids | badtxids)

        # Ask the node to mine a block; it should include both the good and bad txes.
        [blockhash
         ] = node.generatetoaddress(1,
                                    node.get_deterministic_priv_key().address)
        assert_equal(set(node.getblock(blockhash, 1)['tx'][1:]),
                     goodtxids | badtxids)
        assert_equal(node.getrawmempool(), [])

        # discard that block
        node.invalidateblock(blockhash)
        assert_equal(set(node.getrawmempool()), goodtxids | badtxids)

        self.log.info("Mine the activation block itself")
        tip = self.build_block(tip)
        node.p2p.send_blocks_and_test([tip], node)

        self.log.info("We have activated!")
        assert_equal(node.getblockchaininfo()['mediantime'],
                     SIGCHECKS_ACTIVATION_TIME)

        self.log.info(
            "The high-sigchecks transactions got evicted but the good ones are still around"
        )
        assert_equal(set(node.getrawmempool()), goodtxids)

        self.log.info(
            "Now the high-sigchecks transactions are rejected from mempool.")
        # try sending some of the bad txes again after the upgrade
        for tx in badtxes:
            check_for_no_ban_on_rejected_tx(node, tx, TX_INPUT_SIGCHECKS_ERROR)
            assert_raises_rpc_error(-26, TX_INPUT_SIGCHECKS_ERROR,
                                    node.sendrawtransaction, ToHex(tx))

        self.log.info("But they can still be mined!")

        # now make a block with all the txes, they still are accepted in
        # blocks!
        tip = self.build_block(tip, goodtxes + badtxes)
        node.p2p.send_blocks_and_test([tip], node)

        assert_equal(node.getbestblockhash(), tip.hash)
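
A side note on the 8-byte signature used above: hex 3006020101020101 is DER for SEQUENCE{INTEGER 1, INTEGER 1}, i.e. the minimal r = 1, s = 1 ECDSA signature the comment mentions. Decoding it by hand:

    sig = bytes.fromhex('3006020101020101')
    assert sig[0] == 0x30 and sig[1] == len(sig) - 2  # SEQUENCE, 6 content bytes
    assert sig[2] == 0x02                             # INTEGER tag for r
    r = int.from_bytes(sig[4:4 + sig[3]], 'big')
    s_off = 4 + sig[3]
    assert sig[s_off] == 0x02                         # INTEGER tag for s
    s = int.from_bytes(sig[s_off + 2:s_off + 2 + sig[s_off + 1]], 'big')
    assert (r, s) == (1, 1)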
Example #7
    def run_test(self):
        (node, std_node) = self.nodes
        node.add_p2p_connection(P2PDataStore())
        std_node.add_p2p_connection(P2PDataStore())
        # Get out of IBD
        node.generatetoaddress(1, node.get_deterministic_priv_key().address)

        tip = self.getbestblock(node)

        self.log.info("Create some blocks with OP_1 coinbase for spending.")
        blocks = []
        for _ in range(20):
            tip = self.build_block(tip)
            blocks.append(tip)
        node.p2p.send_blocks_and_test(blocks, node, success=True)
        self.spendable_outputs = deque(block.vtx[0] for block in blocks)

        self.log.info("Mature the blocks.")
        node.generatetoaddress(100, node.get_deterministic_priv_key().address)

        tip = self.getbestblock(node)

        self.log.info("Generating some high-sigop transactions.")

        # Tx with 4001 sigops (valid but non-standard)
        tx_4001 = create_transaction(
            self.spendable_outputs.popleft(),
            [OP_CHECKMULTISIG] * 200 + [OP_CHECKDATASIG])

        # Tx with 20001 sigops (consensus-invalid)
        tx_20001 = create_transaction(
            self.spendable_outputs.popleft(),
            [OP_CHECKMULTISIG] * 1000 + [OP_CHECKDATASIG])

        # P2SH tx with too many sigops (valid but nonstandard for std_node)
        redeem_script = bytes(
            [OP_IF, OP_CHECKMULTISIG, OP_ENDIF, OP_TRUE])
        p2sh_script = CScript([OP_HASH160, hash160(redeem_script), OP_EQUAL])
        tx_fundp2sh = create_transaction(
            self.spendable_outputs.popleft(), p2sh_script)
        tx_spendp2sh = CTransaction()
        tx_spendp2sh.vin.append(
            CTxIn(COutPoint(tx_fundp2sh.sha256, 1), CScript([OP_FALSE, redeem_script])))
        tx_spendp2sh.vout.append(
            CTxOut(0, CScript([OP_RETURN, b'pad' * 20])))
        tx_spendp2sh.rehash()

        # Chain of 10 txes with 2000 sigops each.
        txes_10x2000_sigops = []
        tx = self.spendable_outputs.popleft()
        for _ in range(10):
            tx = create_transaction(tx, [OP_CHECKMULTISIG] * 100)
            txes_10x2000_sigops.append(tx)

        def make_hightotalsigop_block():
            # 20001 total sigops
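            # (10 txes x 2000 sigops each, plus one OP_CHECKDATASIG in the
            # coinbase's extra script = 20001, one over the 20000 block
            # limit; OP_CHECKMULTISIG counts as 20 sigops, so 100 of them
            # make the 2000 per tx.)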
            return self.build_block(
                tip, txes_10x2000_sigops, cbextrascript=bytes([OP_CHECKDATASIG]))

        def make_highsigop_coinbase_block():
            # 60000 sigops in the coinbase
            return self.build_block(
                tip, cbextrascript=bytes([OP_CHECKMULTISIG] * 3000))

        self.log.info(
            "Try various high-sigop transactions in blocks / mempool before upgrade")

        # mempool refuses over 4001.
        check_for_no_ban_on_rejected_tx(node, tx_4001, MEMPOOL_TXSIGOPS_ERROR)
        # it used to be that exceeding 20000 would cause a ban, but it's
        # important that this causes no ban: we want that upgraded nodes
        # can't get themselves banned by relaying huge-sigops transactions.
        check_for_no_ban_on_rejected_tx(node, tx_20001, MEMPOOL_TXSIGOPS_ERROR)

        # the 20001 tx can't be mined
        check_for_ban_on_rejected_block(node, self.build_block(
            tip, [tx_20001]), BLOCK_TXSIGOPS_ERROR)

        self.log.info(
            "The P2SH script has too many sigops (20 > 15) for a standard node.")
        # Mine the P2SH funding first because it's nonstandard.
        tip = self.build_block(tip, [tx_fundp2sh])
        std_node.p2p.send_blocks_and_test([tip], std_node)
        assert_raises_rpc_error(-26, MEMPOOL_P2SH_SIGOPS_ERROR,
                                std_node.sendrawtransaction, ToHex(tx_spendp2sh))

        self.log.info(
            "A bunch of 2000-sigops txes can be put in mempool but not mined all at once.")
        # Send the 2000-sigop transactions, which are acceptable.
        for tx in txes_10x2000_sigops:
            node.sendrawtransaction(ToHex(tx))

        # They can't be mined all at once if the coinbase has a single sigop
        # (total 20001)
        check_for_ban_on_rejected_block(
            node, make_hightotalsigop_block(), BLOCK_TOTALSIGOPS_ERROR)

        # Activation tests

        self.log.info("Approach to just before upgrade activation")
        # Move our clock to the upgrade time so we will accept such
        # future-timestamped blocks.
        node.setmocktime(SIGOPS_DEACTIVATION_TIME)
        std_node.setmocktime(SIGOPS_DEACTIVATION_TIME)
        # Mine six blocks with timestamp starting at SIGOPS_DEACTIVATION_TIME-1
        blocks = []
        for i in range(-1, 5):
            tip = self.build_block(tip, nTime=SIGOPS_DEACTIVATION_TIME + i)
            blocks.append(tip)
        node.p2p.send_blocks_and_test(blocks, node)
        assert_equal(node.getblockchaininfo()[
                     'mediantime'], SIGOPS_DEACTIVATION_TIME - 1)

        self.log.info(
            "The next block will activate, but the activation block itself must follow old rules")

        check_for_ban_on_rejected_block(node, self.build_block(
            tip, [tx_20001]), BLOCK_TXSIGOPS_ERROR)
        check_for_ban_on_rejected_block(
            node, make_hightotalsigop_block(), BLOCK_TOTALSIGOPS_ERROR)
        check_for_ban_on_rejected_block(
            node, make_highsigop_coinbase_block(), BLOCK_TXSIGOPS_ERROR)

        self.log.info("Mine the activation block itself")
        tip = self.build_block(tip)
        node.p2p.send_blocks_and_test([tip], node)
        sync_blocks(self.nodes)

        self.log.info("We have activated!")
        assert_equal(node.getblockchaininfo()[
                     'mediantime'], SIGOPS_DEACTIVATION_TIME)
        assert_equal(std_node.getblockchaininfo()[
                     'mediantime'], SIGOPS_DEACTIVATION_TIME)

        # save this tip for later
        upgrade_block = tip

        self.log.info(
            "The mempool is now a free-for-all, and we can get all the high-sigops transactions in")
        std_node.sendrawtransaction(ToHex(tx_spendp2sh))
        node.sendrawtransaction(ToHex(tx_spendp2sh))
        node.sendrawtransaction(ToHex(tx_4001))
        node.sendrawtransaction(ToHex(tx_20001))
        # resend the 2000-sigop transactions, which will have expired due to
        # setmocktime.
        for tx in txes_10x2000_sigops:
            node.sendrawtransaction(ToHex(tx))

        alltxes = set(tx.hash for tx in [
                      tx_spendp2sh, tx_4001, tx_20001] + txes_10x2000_sigops)
        assert_equal(set(node.getrawmempool()), alltxes)

        self.log.info(
            "The miner will include all the high-sigops transactions at once, without issue.")
        node.generatetoaddress(1, node.get_deterministic_priv_key().address)
        tip = self.getbestblock(node)
        assert_equal(set(tx.rehash() for tx in tip.vtx[1:]), alltxes)
        # even though it is far smaller than one megabyte, we got in something
        # like 44000 sigops
        assert len(tip.serialize()) < ONE_MEGABYTE

        # save this tip for later
        postupgrade_block = tip

        # Deactivation tests

        self.log.info(
            "Invalidating the post-upgrade block returns the transactions to mempool")
        node.invalidateblock(postupgrade_block.hash)
        assert_equal(set(node.getrawmempool()), alltxes)

        self.log.info("Test some weird alternative blocks")
        tip = upgrade_block
        self.log.info("A 40000-sigop coinbase is acceptable now")
        tip = make_highsigop_coinbase_block()
        node.p2p.send_blocks_and_test([tip], node)
        self.log.info("We can get in our 20001 sigop total block")
        tip = make_hightotalsigop_block()
        node.p2p.send_blocks_and_test([tip], node)

        self.log.info(
            "Invalidating the upgrade block evicts the bad txes")
        goodtxes = alltxes - {tx_4001.hash, tx_20001.hash}
        # loose-rules node just evicts the too-many-sigops transactions
        node.invalidateblock(upgrade_block.hash)
        assert_equal(set(node.getrawmempool()), goodtxes)
        # std_node evicts everything as either nonstandard scriptpubkey or p2sh
        # too-many-sigops.
        std_node.invalidateblock(upgrade_block.hash)
        assert_equal(std_node.getrawmempool(), [])
Example #8
    def run_test(self):
        """
         . Test msg header
        0. Send a bunch of large (4MB) messages of an unrecognized type. Check to see
           that it isn't an effective DoS against the node.

        1. Send an oversized (4MB+) message and check that we're disconnected.

        2. Send a few messages with an incorrect data size in the header, ensure the
           messages are ignored.
        """
        self.test_magic_bytes()
        self.test_checksum()
        self.test_size()
        self.test_command()

        node = self.nodes[0]
        self.node = node
        node.add_p2p_connection(P2PDataStore())
        conn2 = node.add_p2p_connection(P2PDataStore())

        msg_limit = 4 * 1000 * 1000  # 4MB, per MAX_PROTOCOL_MESSAGE_LENGTH
        valid_data_limit = msg_limit - 5  # Account for the 5-byte compact-size length prefix

        #
        # 0.
        #
        # Send as large a message as is valid, ensure we aren't disconnected but
        # also can't exhaust resources.
        #
        msg_at_size = msg_unrecognized(str_data="b" * valid_data_limit)
        assert len(msg_at_size.serialize()) == msg_limit
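        # Why "- 5": the payload is length-prefixed with Bitcoin's
        # compact-size encoding, which takes 5 bytes (0xfe marker plus a
        # 4-byte length) for anything in the multi-megabyte range. A sketch
        # of the size rule (helper name is illustrative):
        def compact_size_len(n):
            # Serialized size of a compact-size integer holding value n.
            if n < 0xfd:
                return 1
            if n <= 0xffff:
                return 3
            if n <= 0xffffffff:
                return 5
            return 9
        assert compact_size_len(valid_data_limit) == 5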

        increase_allowed = 0.5
        if [
                s for s in os.environ.get("BITCOIN_CONFIG", "").split(" ")
                if "--with-sanitizers" in s and "address" in s
        ]:
            increase_allowed = 3.5
        with node.assert_memory_usage_stable(
                increase_allowed=increase_allowed):
            self.log.info("Sending a bunch of large, junk messages to test "
                          "memory exhaustion. May take a bit...")

            # Run a bunch of times to test for memory exhaustion.
            for _ in range(80):
                node.p2p.send_message(msg_at_size)

            # Check that, even though the node is being hammered by nonsense from one
            # connection, it can still service other peers in a timely way.
            for _ in range(20):
                conn2.sync_with_ping(timeout=2)

            # Peer 1, despite serving up a bunch of nonsense, should still be connected.
            self.log.info("Waiting for node to drop junk messages.")
            node.p2p.sync_with_ping(timeout=120)
            assert node.p2p.is_connected

        #
        # 1.
        #
        # Send an oversized message, ensure we're disconnected.
        #
        msg_over_size = msg_unrecognized(str_data="b" * (valid_data_limit + 1))
        assert len(msg_over_size.serialize()) == (msg_limit + 1)

        with node.assert_debug_log(
            ["Oversized message from peer=4, disconnecting"]):
            # An unknown message type (or *any* message type) over
            # MAX_PROTOCOL_MESSAGE_LENGTH should result in a disconnect.
            node.p2p.send_message(msg_over_size)
            node.p2p.wait_for_disconnect(timeout=4)

        node.disconnect_p2ps()
        conn = node.add_p2p_connection(P2PDataStore())
        conn.wait_for_verack()

        #
        # 2.
        #
        # Send messages with an incorrect data size in the header.
        #
        actual_size = 100
        msg = msg_unrecognized(str_data="b" * actual_size)

        # TODO: handle larger-than cases. I haven't been able to pin down what behavior to expect.
        for wrong_size in (2, 77, 78, 79):
            self.log.info("Sending a message with incorrect size of {}".format(
                wrong_size))

            # Unmodified message should submit okay.
            node.p2p.send_and_ping(msg)

            # A message lying about its data size results in a disconnect when the incorrect
            # data size is less than the actual size.
            #
            # TODO: why does behavior change at 78 bytes?
            #
            node.p2p.send_raw_message(
                self._tweak_msg_data_size(msg, wrong_size))

            # For some reason unknown to me, we sometimes have to push additional data to the
            # peer in order for it to realize a disconnect.
            try:
                node.p2p.send_message(messages.msg_ping(nonce=123123))
            except IOError:
                pass

            node.p2p.wait_for_disconnect(timeout=10)
            node.disconnect_p2ps()
            node.add_p2p_connection(P2PDataStore())
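        # For context, _tweak_msg_data_size (defined elsewhere on this test
        # class) rewrites the 4-byte length field of the 24-byte header. A
        # standalone sketch of the idea, assuming 4 magic + 12 command bytes
        # precede the little-endian length field:
        def tweak_raw_message_size(raw_msg, wrong_size):
            import struct
            # Overwrite header bytes 16..20, the payload-length field.
            return raw_msg[:16] + struct.pack('<I', wrong_size) + raw_msg[20:]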

        # Node is still up.
        conn = node.add_p2p_connection(P2PDataStore())
        conn.sync_with_ping()
Example #9
    def run_test(self):
        node = self.nodes[0]
        node.add_p2p_connection(P2PDataStore())
        node.setmocktime(ACTIVATION_TIME)

        self.genesis_hash = int(node.getbestblockhash(), 16)
        self.block_heights[self.genesis_hash] = 0
        spendable_outputs = []

        # save the current tip so it can be spent by a later block
        def save_spendable_output():
            spendable_outputs.append(self.tip)

        # get an output that we previously marked as spendable
        def get_spendable_output():
            return PreviousSpendableOutput(spendable_outputs.pop(0).vtx[0], 0)

        # move the tip back to a previous block
        def tip(number):
            self.tip = self.blocks[number]

        # adds transactions to the block and updates state
        def update_block(block_number, new_transactions):
            block = self.blocks[block_number]
            block.vtx.extend(new_transactions)
            old_sha256 = block.sha256
            make_conform_to_ctor(block)
            block.hashMerkleRoot = block.calc_merkle_root()
            block.solve()
            # Update the internal state just like in next_block
            self.tip = block
            if block.sha256 != old_sha256:
                self.block_heights[
                    block.sha256] = self.block_heights[old_sha256]
                del self.block_heights[old_sha256]
            self.blocks[block_number] = block
            return block

        # send a txn to the mempool and check it was accepted
        def send_transaction_to_mempool(tx):
            tx_id = node.sendrawtransaction(ToHex(tx))
            assert tx_id in node.getrawmempool()

        # checks the mempool has exactly the same txns as in the provided list
        def check_mempool_equal(txns):
            assert set(node.getrawmempool()) == set(tx.hash for tx in txns)

        # Create an always-valid chained transaction. It spends a
        # scriptPub=OP_TRUE coin into another. Returns the transaction and its
        # spendable output for further chaining.
        def create_always_valid_chained_tx(spend):
            tx = create_tx_with_script(spend.tx,
                                       spend.n,
                                       b'',
                                       amount=spend.tx.vout[0].nValue - 1000,
                                       script_pub_key=CScript([OP_TRUE]))
            tx.rehash()
            return tx, PreviousSpendableOutput(tx, 0)

        # shorthand
        block = self.next_block

        # Create a new block
        block(0)
        save_spendable_output()
        node.p2p.send_blocks_and_test([self.tip], node)

        # Now we need that block to mature so we can spend the coinbase.
        maturity_blocks = []
        for i in range(110):
            block(5000 + i)
            maturity_blocks.append(self.tip)
            save_spendable_output()
        node.p2p.send_blocks_and_test(maturity_blocks, node)

        # collect spendable outputs now to avoid cluttering the code later on
        out = []
        for i in range(100):
            out.append(get_spendable_output())

        # Create 2 pre-fork-only txns (tx_pre0, tx_pre1). Fund txns are valid
        # pre-fork, so we can mine them right away.
        txfund0, tx_pre0 = create_fund_and_pre_fork_only_tx(out[0])
        txfund1, tx_pre1 = create_fund_and_pre_fork_only_tx(out[1])

        # Create 2 post-fork-only txns (tx_post0, tx_post1). Fund txns are
        # valid pre-fork, so we can mine them right away.
        txfund2, tx_post0 = create_fund_and_post_fork_only_tx(out[2])
        txfund3, tx_post1 = create_fund_and_post_fork_only_tx(out[3])

        # Create blocks to activate the fork. Mine all funding transactions.
        bfork = block(5555)
        bfork.nTime = ACTIVATION_TIME - 1
        update_block(5555, [txfund0, txfund1, txfund2, txfund3])
        node.p2p.send_blocks_and_test([self.tip], node)

        for i in range(5):
            node.p2p.send_blocks_and_test([block(5200 + i)], node)

        # Check we are just before the activation time
        assert_equal(node.getblockchaininfo()['mediantime'],
                     ACTIVATION_TIME - 1)

        # We are just before the fork. Pre-fork-only and always-valid chained
        # txns (tx_chain0, tx_chain1) are valid, post-fork-only txns are
        # rejected.
        send_transaction_to_mempool(tx_pre0)
        send_transaction_to_mempool(tx_pre1)
        tx_chain0, last_chained_output = create_always_valid_chained_tx(out[4])
        tx_chain1, last_chained_output = create_always_valid_chained_tx(
            last_chained_output)
        send_transaction_to_mempool(tx_chain0)
        send_transaction_to_mempool(tx_chain1)
        assert_raises_rpc_error(-26, RPC_EXPECTED_ERROR,
                                node.sendrawtransaction, ToHex(tx_post0))
        assert_raises_rpc_error(-26, RPC_EXPECTED_ERROR,
                                node.sendrawtransaction, ToHex(tx_post1))
        check_mempool_equal([tx_chain0, tx_chain1, tx_pre0, tx_pre1])

        # Activate the fork. Mine the 1st always-valid chained txn and a
        # pre-fork-only txn.
        block(5556)
        update_block(5556, [tx_chain0, tx_pre0])
        node.p2p.send_blocks_and_test([self.tip], node)
        forkblockid = node.getbestblockhash()

        # Check we just activated the fork
        assert_equal(
            node.getblockheader(forkblockid)['mediantime'], ACTIVATION_TIME)

        # Check mempool coherence when activating the fork. Pre-fork-only txns
        # were evicted from the mempool, while always-valid txns remain.
        # Evicted: tx_pre1
        check_mempool_equal([tx_chain1])

        # Post-fork-only and always-valid txns are accepted, pre-fork-only
        # txns are rejected.
        send_transaction_to_mempool(tx_post0)
        send_transaction_to_mempool(tx_post1)
        tx_chain2, _ = create_always_valid_chained_tx(last_chained_output)
        send_transaction_to_mempool(tx_chain2)
        assert_raises_rpc_error(-26, RPC_EXPECTED_ERROR,
                                node.sendrawtransaction, ToHex(tx_pre1))
        check_mempool_equal([tx_chain1, tx_chain2, tx_post0, tx_post1])

        # Mine the 2nd always-valid chained txn and a post-fork-only txn.
        block(5557)
        update_block(5557, [tx_chain1, tx_post0])
        node.p2p.send_blocks_and_test([self.tip], node)
        postforkblockid = node.getbestblockhash()
        # The mempool contains the 3rd chained txn and a post-fork-only txn.
        check_mempool_equal([tx_chain2, tx_post1])

        # In the following we will be testing block disconnections and reorgs.
        # - tx_chain2 will always be retained in the mempool since it is always
        #   valid. Its continued presence shows that we are never simply
        #   clearing the entire mempool.
        # - tx_post1 may be evicted from the mempool if we land before the fork.
        # - tx_post0 is in a block and, if 'de-mined', will either be evicted or
        #   end up in the mempool depending on whether we land before or after
        #   the fork.
        # - tx_pre0 is in a block and, if 'de-mined', will either be evicted or
        #   end up in the mempool depending on whether we land after or before
        #   the fork.

        # First we do a disconnection of the post-fork block, which is a
        # normal disconnection that merely returns the block contents into
        # the mempool -- nothing is lost.
        node.invalidateblock(postforkblockid)
        # In old mempool: tx_chain2, tx_post1
        # Recovered from blocks: tx_chain1 and tx_post0.
        # Lost from blocks: NONE
        # Retained from old mempool: tx_chain2, tx_post1
        # Evicted from old mempool: NONE
        check_mempool_equal([tx_chain1, tx_chain2, tx_post0, tx_post1])

        # Now, disconnect the fork block. This is a special disconnection
        # that requires reprocessing the mempool due to change in rules.
        node.invalidateblock(forkblockid)
        # In old mempool: tx_chain1, tx_chain2, tx_post0, tx_post1
        # Recovered from blocks: tx_chain0, tx_pre0
        # Lost from blocks: NONE
        # Retained from old mempool: tx_chain1, tx_chain2
        # Evicted from old mempool: tx_post0, tx_post1
        check_mempool_equal([tx_chain0, tx_chain1, tx_chain2, tx_pre0])

        # Restore state
        node.reconsiderblock(postforkblockid)
        node.reconsiderblock(forkblockid)
        send_transaction_to_mempool(tx_post1)
        check_mempool_equal([tx_chain2, tx_post1])

        # Test a reorg that crosses the fork.

        # If such a reorg happens, most likely it will both start *and end*
        # after the fork. We will test such a case here and make sure that
        # post-fork-only transactions are not unnecessarily discarded from
        # the mempool in such a reorg. Pre-fork-only transactions however can
        # get lost.

        # Set up a longer competing chain that doesn't confirm any of our txns.
        # This starts after 5204, so it contains neither the forkblockid nor
        # the postforkblockid from above.
        tip(5204)
        reorg_blocks = []
        for i in range(3):
            reorg_blocks.append(block(5900 + i))

        # Perform the reorg
        node.p2p.send_blocks_and_test(reorg_blocks, node)
        # reorg finishes after the fork
        assert_equal(node.getblockchaininfo()['mediantime'],
                     ACTIVATION_TIME + 2)
        # In old mempool: tx_chain2, tx_post1
        # Recovered from blocks: tx_chain0, tx_chain1, tx_post0
        # Lost from blocks: tx_pre0
        # Retained from old mempool: tx_chain2, tx_post1
        # Evicted from old mempool: NONE
        check_mempool_equal(
            [tx_chain0, tx_chain1, tx_chain2, tx_post0, tx_post1])
Example #10
    def run_test(self):
        node = self.nodes[0]
        default_p2p = node.add_p2p_connection(P2PDataStore())
        test_p2p = node.add_p2p_connection(TestP2PConn())

        self.genesis_hash = int(node.getbestblockhash(), 16)
        self.block_heights[self.genesis_hash] = 0
        spendable_outputs = []

        # save the current tip so it can be spent by a later block
        def save_spendable_output():
            spendable_outputs.append(self.tip)

        # get an output that we previously marked as spendable
        def get_spendable_output():
            return PreviousSpendableOutput(spendable_outputs.pop(0).vtx[0], 0)

        # move the tip back to a previous block
        def tip(number):
            self.tip = self.blocks[number]

        # shorthand for functions
        block = self.next_block

        # Create a new block
        block(0)
        save_spendable_output()
        default_p2p.send_blocks_and_test([self.tip], node)

        # Now we need that block to mature so we can spend the coinbase.
        maturity_blocks = []
        for i in range(99):
            block(5000 + i)
            maturity_blocks.append(self.tip)
            save_spendable_output()

        # Get to within one block of the May 15, 2018 HF activation
        for i in range(6):
            block(5100 + i)
            maturity_blocks.append(self.tip)

        # Send it all to the node at once.
        default_p2p.send_blocks_and_test(maturity_blocks, node)

        # collect spendable outputs now to avoid cluttering the code later on
        out = []
        for i in range(100):
            out.append(get_spendable_output())

        # Check that compact blocks also work for big blocks
        # Wait for SENDCMPCT
        def received_sendcmpct():
            return (test_p2p.last_sendcmpct is not None)

        wait_until(received_sendcmpct, timeout=30)

        sendcmpct = msg_sendcmpct()
        sendcmpct.version = 1
        sendcmpct.announce = True
        test_p2p.send_and_ping(sendcmpct)
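        # Per BIP 152, sendcmpct with announce=True asks the peer to
        # announce new blocks as 'cmpctblock' messages instead of inv or
        # headers; 'version' selects the compact-block encoding.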

        # Exchange headers
        def received_getheaders():
            return (test_p2p.last_getheaders is not None)

        wait_until(received_getheaders, timeout=30)

        # Return the favor
        test_p2p.send_message(test_p2p.last_getheaders)

        # Wait for the header list
        def received_headers():
            return (test_p2p.last_headers is not None)

        wait_until(received_headers, timeout=30)

        # It's as if we know about the same headers!
        test_p2p.send_message(test_p2p.last_headers)

        # Send a block
        b1 = block(1, spend=out[0], block_size=ONE_MEGABYTE + 1)
        default_p2p.send_blocks_and_test([self.tip], node)

        # Check that the node forwards it via compact block
        def received_block():
            return (test_p2p.last_cmpctblock is not None)

        wait_until(received_block, timeout=30)

        # Was it our block?
        cmpctblk_header = test_p2p.last_cmpctblock.header_and_shortids.header
        cmpctblk_header.calc_sha256()
        assert cmpctblk_header.sha256 == b1.sha256

        # Send a large block with numerous transactions.
        test_p2p.clear_block_data()
        b2 = block(2,
                   spend=out[1],
                   extra_txns=70000,
                   block_size=self.excessive_block_size - 1000)
        default_p2p.send_blocks_and_test([self.tip], node)

        # Check that the node forwards it via compact block
        wait_until(received_block, timeout=30)

        # Was it our block?
        cmpctblk_header = test_p2p.last_cmpctblock.header_and_shortids.header
        cmpctblk_header.calc_sha256()
        assert cmpctblk_header.sha256 == b2.sha256

        # In order to avoid having to resend a ton of transactions, we
        # invalidate b2, which returns all of its transactions to the
        # mempool. Note that this assumes reorgs will insert low-fee
        # transactions back into the mempool.
        node.invalidateblock(node.getbestblockhash())

        # Let's send a compact block and see if the node accepts it.
        # Let's modify b2 and use it so that we can reuse the mempool.
        tx = b2.vtx[0]
        tx.vout.append(CTxOut(0, CScript([random.randint(0, 256), OP_RETURN])))
        tx.rehash()
        b2.vtx[0] = tx
        b2.hashMerkleRoot = b2.calc_merkle_root()
        b2.solve()

        # Now we create the compact block and send it
        comp_block = HeaderAndShortIDs()
        comp_block.initialize_from_block(b2)
        test_p2p.send_and_ping(msg_cmpctblock(comp_block.to_p2p()))
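        # A compact block (BIP 152) carries the header, 6-byte SipHash-based
        # short transaction IDs, and prefilled transactions (at least the
        # coinbase); the receiver reconstructs the rest from its mempool.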

        # Check that compact block is received properly
        assert int(node.getbestblockhash(), 16) == b2.sha256
Example #11
    def run_test(self):
        node = self.nodes[0]
        node.add_p2p_connection(P2PDataStore())

        # Set the excessive block size as the initial condition
        node.setexcessiveblock(self.excessive_block_size)

        self.genesis_hash = int(node.getbestblockhash(), 16)
        self.block_heights[self.genesis_hash] = 0
        spendable_outputs = []

        # save the current tip so it can be spent by a later block
        def save_spendable_output():
            spendable_outputs.append(self.tip)

        # get an output that we previously marked as spendable
        def get_spendable_output():
            return PreviousSpendableOutput(spendable_outputs.pop(0).vtx[0], 0)

        # move the tip back to a previous block
        def tip(number):
            self.tip = self.blocks[number]

        # adds transactions to the block and updates state
        def update_block(block_number, new_transactions):
            block = self.blocks[block_number]
            self.add_transactions_to_block(block, new_transactions)
            old_sha256 = block.sha256
            make_conform_to_ctor(block)
            block.hashMerkleRoot = block.calc_merkle_root()
            block.solve()
            # Update the internal state just like in next_block
            self.tip = block
            if block.sha256 != old_sha256:
                self.block_heights[
                    block.sha256] = self.block_heights[old_sha256]
                del self.block_heights[old_sha256]
            self.blocks[block_number] = block
            return block

        # shorthand for functions
        block = self.next_block

        # Create a new block
        block(0)
        save_spendable_output()
        node.p2p.send_blocks_and_test([self.tip], node)

        # Now we need that block to mature so we can spend the coinbase.
        maturity_blocks = []
        for i in range(99):
            block(5000 + i)
            maturity_blocks.append(self.tip)
            save_spendable_output()
        node.p2p.send_blocks_and_test(maturity_blocks, node)

        # collect spendable outputs now to avoid cluttering the code later on
        out = []
        for i in range(100):
            out.append(get_spendable_output())

        # Let's build some blocks and test them.
        for i in range(16):
            n = i + 1
            block(n, spend=out[i], block_size=n * ONE_MEGABYTE)
            node.p2p.send_blocks_and_test([self.tip], node)

        # block of maximal size
        block(17, spend=out[16], block_size=self.excessive_block_size)
        node.p2p.send_blocks_and_test([self.tip], node)

        # Reject oversized blocks with bad-blk-length error
        block(18, spend=out[17], block_size=self.excessive_block_size + 1)
        node.p2p.send_blocks_and_test([self.tip],
                                      node,
                                      success=False,
                                      reject_reason='bad-blk-length')

        # Rewind bad block.
        tip(17)

        # Submit a very large block via RPC
        large_block = block(33,
                            spend=out[17],
                            block_size=self.excessive_block_size)
        assert_equal(node.submitblock(ToHex(large_block)), None)
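        # submitblock returns None on acceptance and a rejection reason
        # string otherwise.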
Example #12
    def run_test(self):
        """
         . Test msg header
        0. Send a bunch of large (4MB) messages of an unrecognized type. Check to see
           that it isn't an effective DoS against the node.

        1. Send an oversized (4MB+) message and check that we're disconnected.

        2. Send a few messages with an incorrect data size in the header, ensure the
           messages are ignored.
        """
        self.test_magic_bytes()
        self.test_checksum()
        self.test_size()
        self.test_msgtype()
        self.test_large_inv()

        node = self.nodes[0]
        self.node = node
        node.add_p2p_connection(P2PDataStore())
        conn2 = node.add_p2p_connection(P2PDataStore())

        msg_limit = 32 * 1024 * 1024  # 32MiB, per MAX_PROTOCOL_MESSAGE_LENGTH
        valid_data_limit = msg_limit - 5  # Account for the 5-byte compact-size length prefix

        #
        # 0.
        #
        # Send as large a message as is valid, ensure we aren't disconnected but
        # also can't exhaust resources.
        #
        msg_at_size = msg_unrecognized(str_data="b" * valid_data_limit)
        assert len(msg_at_size.serialize()) == msg_limit

        self.log.info(
            "Sending a bunch of large, junk messages to test memory exhaustion. May take a bit..."
        )

        # Run a bunch of times to test for memory exhaustion.
        # Upstream uses 80 iterations here, but its messages are 8x smaller.
        # So with 10 iterations, we get the same amount of junk data sent
        # to the node.  If we use 80 here, Python uses an insane amount of
        # memory by itself.
        for _ in range(10):
            node.p2p.send_message(msg_at_size)

        # Check that, even though the node is being hammered by nonsense from one
        # connection, it can still service other peers in a timely way.
        for _ in range(20):
            conn2.sync_with_ping(timeout=2)

        # Peer 1, despite serving up a bunch of nonsense, should still be connected.
        self.log.info("Waiting for node to drop junk messages.")
        node.p2p.sync_with_ping(timeout=400)
        assert node.p2p.is_connected

        #
        # 1.
        #
        # Send an oversized message, ensure we're disconnected.
        #
        # Under macOS this test is skipped due to an unexpected error code
        # returned from the closing socket which python/asyncio does not
        # yet know how to handle.
        #
        if sys.platform != 'darwin':
            msg_over_size = msg_unrecognized(str_data="b" *
                                             (valid_data_limit + 1))
            assert len(msg_over_size.serialize()) == (msg_limit + 1)

            # An unknown message type (or *any* message type) over
            # MAX_PROTOCOL_MESSAGE_LENGTH should result in a disconnect.
            node.p2p.send_message(msg_over_size)
            node.p2p.wait_for_disconnect(timeout=4)

            node.disconnect_p2ps()
            conn = node.add_p2p_connection(P2PDataStore())
            conn.wait_for_verack()
        else:
            self.log.info(
                "Skipping test p2p_invalid_messages/1 (oversized message) under macOS"
            )

        #
        # 2.
        #
        # Send messages with an incorrect data size in the header.
        #
        actual_size = 100
        msg = msg_unrecognized(str_data="b" * actual_size)

        # TODO: handle larger-than cases. I haven't been able to pin down what behavior to expect.
        for wrong_size in (2, 77, 78, 79):
            self.log.info("Sending a message with incorrect size of {}".format(
                wrong_size))

            # Unmodified message should submit okay.
            node.p2p.send_and_ping(msg)

            # A message lying about its data size results in a disconnect when the incorrect
            # data size is less than the actual size.
            #
            # TODO: why does behavior change at 78 bytes?
            #
            node.p2p.send_raw_message(
                self._tweak_msg_data_size(msg, wrong_size))

            # For some reason unknown to me, we sometimes have to push additional data to the
            # peer in order for it to realize a disconnect.
            try:
                node.p2p.send_message(messages.msg_ping(nonce=123123))
            except IOError:
                pass

            node.p2p.wait_for_disconnect(timeout=10)
            node.disconnect_p2ps()
            node.add_p2p_connection(P2PDataStore())

        # Node is still up.
        conn = node.add_p2p_connection(P2PDataStore())
Example #13
    def run_test(self):
        self.mine_chain()
        node = self.nodes[0]

        def assert_submitblock(block, result_str_1, result_str_2=None):
            block.solve()
            result_str_2 = result_str_2 or 'duplicate-invalid'
            assert_equal(result_str_1,
                         node.submitblock(hexdata=b2x(block.serialize())))
            assert_equal(result_str_2,
                         node.submitblock(hexdata=b2x(block.serialize())))

        self.log.info('getmininginfo')
        mining_info = node.getmininginfo()
        assert_equal(mining_info['blocks'], 200)
        assert_equal(mining_info['chain'], 'regtest')
        assert 'currentblocktx' not in mining_info
        assert 'currentblockweight' not in mining_info
        assert_equal(mining_info['difficulty'],
                     Decimal('4.656542373906925E-10'))
        assert_equal(mining_info['networkhashps'],
                     Decimal('0.003333333333333334'))
        assert_equal(mining_info['pooledtx'], 0)

        # Mine a block to leave initial block download
        node.generatetoaddress(1, node.get_deterministic_priv_key().address)
        tmpl = node.getblocktemplate({'rules': ['segwit']})
        self.log.info("getblocktemplate: Test capability advertised")
        assert 'proposal' in tmpl['capabilities']
        assert 'coinbasetxn' not in tmpl

        next_height = int(tmpl["height"])
        coinbase_tx = create_coinbase(height=next_height)
        # sequence numbers must not be max for nLockTime to have effect
        coinbase_tx.vin[0].nSequence = 2**32 - 2
        coinbase_tx.rehash()

        # round-trip the encoded bip34 block height commitment
        assert_equal(CScriptNum.decode(coinbase_tx.vin[0].scriptSig),
                     next_height)
        # round-trip negative and multi-byte CScriptNums to catch python regression
        assert_equal(CScriptNum.decode(CScriptNum.encode(CScriptNum(1500))),
                     1500)
        assert_equal(CScriptNum.decode(CScriptNum.encode(CScriptNum(-1500))),
                     -1500)
        assert_equal(CScriptNum.decode(CScriptNum.encode(CScriptNum(-1))), -1)
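        # CScriptNum uses minimal little-endian encoding with the sign in
        # the high bit of the last byte, e.g. 1500 -> dc 05 and
        # -1500 -> dc 85.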

        block = CBlock()
        block.nVersion = tmpl["version"]
        block.hashPrevBlock = int(tmpl["previousblockhash"], 16)
        block.nTime = tmpl["curtime"]
        block.nBits = int(tmpl["bits"], 16)
        block.nNonce = 0
        block.vtx = [coinbase_tx]

        self.log.info("getblocktemplate: segwit rule must be set")
        assert_raises_rpc_error(
            -8, "getblocktemplate must be called with the segwit rule set",
            node.getblocktemplate)

        self.log.info("getblocktemplate: Test valid block")
        assert_template(node, block, None)

        self.log.info("submitblock: Test block decode failure")
        assert_raises_rpc_error(-22, "Block decode failed", node.submitblock,
                                b2x(block.serialize()[:-15]))

        self.log.info(
            "getblocktemplate: Test bad input hash for coinbase transaction")
        bad_block = copy.deepcopy(block)
        bad_block.vtx[0].vin[0].prevout.hash += 1
        bad_block.vtx[0].rehash()
        assert_template(node, bad_block, 'bad-cb-missing')

        self.log.info("submitblock: Test invalid coinbase transaction")
        assert_raises_rpc_error(-22, "Block does not start with a coinbase",
                                node.submitblock, b2x(bad_block.serialize()))

        self.log.info("getblocktemplate: Test truncated final transaction")
        assert_raises_rpc_error(
            -22, "Block decode failed", node.getblocktemplate, {
                'data': b2x(block.serialize()[:-1]),
                'mode': 'proposal',
                'rules': ['segwit']
            })

        self.log.info("getblocktemplate: Test duplicate transaction")
        bad_block = copy.deepcopy(block)
        bad_block.vtx.append(bad_block.vtx[0])
        assert_template(node, bad_block, 'bad-txns-duplicate')
        assert_submitblock(bad_block, 'bad-txns-duplicate',
                           'bad-txns-duplicate')

        self.log.info("getblocktemplate: Test invalid transaction")
        bad_block = copy.deepcopy(block)
        bad_tx = copy.deepcopy(bad_block.vtx[0])
        bad_tx.vin[0].prevout.hash = 255
        bad_tx.rehash()
        bad_block.vtx.append(bad_tx)
        assert_template(node, bad_block, 'bad-txns-inputs-missingorspent')
        assert_submitblock(bad_block, 'bad-txns-inputs-missingorspent')

        self.log.info("getblocktemplate: Test nonfinal transaction")
        bad_block = copy.deepcopy(block)
        bad_block.vtx[0].nLockTime = 2**32 - 1
        bad_block.vtx[0].rehash()
        assert_template(node, bad_block, 'bad-txns-nonfinal')
        assert_submitblock(bad_block, 'bad-txns-nonfinal')

        self.log.info("getblocktemplate: Test bad tx count")
        # The tx count is immediately after the block header
        bad_block_sn = bytearray(block.serialize())
        assert_equal(bad_block_sn[BLOCK_HEADER_SIZE], 1)
        bad_block_sn[BLOCK_HEADER_SIZE] += 1
        assert_raises_rpc_error(-22, "Block decode failed",
                                node.getblocktemplate, {
                                    'data': b2x(bad_block_sn),
                                    'mode': 'proposal',
                                    'rules': ['segwit']
                                })

        self.log.info("getblocktemplate: Test bad bits")
        bad_block = copy.deepcopy(block)
        bad_block.nBits = 469762303  # impossible in the real world
        assert_template(node, bad_block, 'bad-diffbits')

        self.log.info("getblocktemplate: Test bad merkle root")
        bad_block = copy.deepcopy(block)
        bad_block.hashMerkleRoot += 1
        assert_template(node, bad_block, 'bad-txnmrklroot', False)
        assert_submitblock(bad_block, 'bad-txnmrklroot', 'bad-txnmrklroot')

        self.log.info("getblocktemplate: Test bad timestamps")
        bad_block = copy.deepcopy(block)
        bad_block.nTime = 2**31 - 1
        assert_template(node, bad_block, 'time-too-new')
        assert_submitblock(bad_block, 'time-too-new', 'time-too-new')
        bad_block.nTime = 0
        assert_template(node, bad_block, 'time-too-old')
        assert_submitblock(bad_block, 'time-too-old', 'time-too-old')

        self.log.info("getblocktemplate: Test not best block")
        bad_block = copy.deepcopy(block)
        bad_block.hashPrevBlock = 123
        assert_template(node, bad_block, 'inconclusive-not-best-prevblk')
        assert_submitblock(bad_block, 'prev-blk-not-found',
                           'prev-blk-not-found')

        self.log.info('submitheader tests')
        assert_raises_rpc_error(
            -22, 'Block header decode failed',
            lambda: node.submitheader(hexdata='xx' * BLOCK_HEADER_SIZE))
        assert_raises_rpc_error(
            -22, 'Block header decode failed',
            lambda: node.submitheader(hexdata='ff' * (BLOCK_HEADER_SIZE - 2)))
        assert_raises_rpc_error(
            -25, 'Must submit previous header',
            lambda: node.submitheader(hexdata=b2x(
                super(CBlock, bad_block).serialize())))

        block.nTime += 1
        block.solve()

        def chain_tip(b_hash, *, status='headers-only', branchlen=1):
            return {
                'hash': b_hash,
                'height': 202,
                'branchlen': branchlen,
                'status': status
            }

        assert chain_tip(block.hash) not in node.getchaintips()
        node.submitheader(hexdata=b2x(block.serialize()))
        assert chain_tip(block.hash) in node.getchaintips()
        node.submitheader(hexdata=b2x(CBlockHeader(block).serialize()))  # Noop
        assert chain_tip(block.hash) in node.getchaintips()

        bad_block_root = copy.deepcopy(block)
        bad_block_root.hashMerkleRoot += 2
        bad_block_root.solve()
        assert chain_tip(bad_block_root.hash) not in node.getchaintips()
        node.submitheader(
            hexdata=b2x(CBlockHeader(bad_block_root).serialize()))
        assert chain_tip(bad_block_root.hash) in node.getchaintips()
        # Should still reject invalid blocks, even if we have the header:
        assert_equal(node.submitblock(hexdata=b2x(bad_block_root.serialize())),
                     'bad-txnmrklroot')
        assert_equal(node.submitblock(hexdata=b2x(bad_block_root.serialize())),
                     'bad-txnmrklroot')
        assert chain_tip(bad_block_root.hash) in node.getchaintips()
        # We know the header for this invalid block, so should just return early without error:
        node.submitheader(
            hexdata=b2x(CBlockHeader(bad_block_root).serialize()))
        assert chain_tip(bad_block_root.hash) in node.getchaintips()

        bad_block_lock = copy.deepcopy(block)
        bad_block_lock.vtx[0].nLockTime = 2**32 - 1
        bad_block_lock.vtx[0].rehash()
        bad_block_lock.hashMerkleRoot = bad_block_lock.calc_merkle_root()
        bad_block_lock.solve()
        assert_equal(node.submitblock(hexdata=b2x(bad_block_lock.serialize())),
                     'bad-txns-nonfinal')
        assert_equal(node.submitblock(hexdata=b2x(bad_block_lock.serialize())),
                     'duplicate-invalid')
        # Build a "good" block on top of the submitted bad block
        bad_block2 = copy.deepcopy(block)
        bad_block2.hashPrevBlock = bad_block_lock.sha256
        bad_block2.solve()
        assert_raises_rpc_error(
            -25, 'bad-prevblk', lambda: node.submitheader(hexdata=b2x(
                CBlockHeader(bad_block2).serialize())))

        # Should reject invalid header right away
        bad_block_time = copy.deepcopy(block)
        bad_block_time.nTime = 1
        bad_block_time.solve()
        assert_raises_rpc_error(-25, 'time-too-old', lambda: node.submitheader(
            hexdata=b2x(CBlockHeader(bad_block_time).serialize()))
                                )  # TODO Fix high-hash error

        # Should ask for the block from a p2p node, if they announce the header as well:
        node.add_p2p_connection(P2PDataStore())
        node.p2p.wait_for_getheaders(timeout=5)  # Drop the first getheaders
        node.p2p.send_blocks_and_test(blocks=[block], node=node)
        # Must be active now:
        assert chain_tip(block.hash, status='active',
                         branchlen=0) in node.getchaintips()

        # Building a few blocks should give the same results
        node.generatetoaddress(10, node.get_deterministic_priv_key().address)
        assert_raises_rpc_error(-25, 'time-too-old', lambda: node.submitheader(
            hexdata=b2x(CBlockHeader(bad_block_time).serialize()))
                                )  # TODO Fix high-hash error
        assert_raises_rpc_error(
            -25, 'bad-prevblk', lambda: node.submitheader(hexdata=b2x(
                CBlockHeader(bad_block2).serialize())))
        node.submitheader(hexdata=b2x(CBlockHeader(block).serialize()))
        node.submitheader(
            hexdata=b2x(CBlockHeader(bad_block_root).serialize()))
        assert_equal(node.submitblock(hexdata=b2x(block.serialize())),
                     'duplicate')  # valid
Example #14
    def run_test(self):
        node = self.nodes[0]
        node.add_p2p_connection(P2PDataStore())
        # Allocate as many UTXOs as are needed
        num_utxos = sum(
            len(test_case['sig_hash_types']) for test_case in TESTCASES
            if isinstance(test_case, dict))

        value = int(SUBSIDY * 1_000_000)
        fee = 10_000

        max_utxo_value = (value - fee) // num_utxos
        private_keys = []
        public_keys = []
        spendable_outputs = []
        executed_scripts = []
        utxo_idx = 0
        # Prepare UTXOs for the tests below
        for test_case in TESTCASES:
            if test_case == 'ENABLE_REPLAY_PROTECTION':
                continue
            for _ in test_case['sig_hash_types']:
                private_key = ECKey()
                private_key.generate()
                private_keys.append(private_key)
                public_key = private_key.get_pubkey()
                public_keys.append(public_key)
                utxo_value = max_utxo_value - utxo_idx * 100  # deduct 100*i coins for unique amounts
                if test_case.get('opcodes', False):
                    opcodes = test_case['opcodes']
                    redeem_script = CScript(
                        opcodes + [public_key.get_bytes(), OP_CHECKSIG])
                    executed_scripts.append(redeem_script)
                    utxo_script = CScript(
                        [OP_HASH160,
                         hash160(redeem_script), OP_EQUAL])
                elif test_case.get('is_p2pk', False):
                    utxo_script = CScript(
                        [public_key.get_bytes(), OP_CHECKSIG])
                    executed_scripts.append(utxo_script)
                else:
                    utxo_script = CScript([
                        OP_DUP, OP_HASH160,
                        hash160(public_key.get_bytes()), OP_EQUALVERIFY,
                        OP_CHECKSIG
                    ])
                    executed_scripts.append(utxo_script)
                spendable_outputs.append(CTxOut(utxo_value, utxo_script))
                utxo_idx += 1

        anyonecanspend_address = node.decodescript('51')['p2sh']
        burn_address = node.decodescript('00')['p2sh']
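        # '51' decodes to the one-opcode script OP_TRUE, so its P2SH wrap is
        # spendable by anyone; '00' decodes to OP_0, which never evaluates
        # true, making its P2SH wrap an effective burn address.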
        p2sh_script = CScript([OP_HASH160, bytes(20), OP_EQUAL])
        node.generatetoaddress(1, anyonecanspend_address)
        node.generatetoaddress(100, burn_address)

        # Build and send fan-out transaction creating all the UTXOs
        block_hash = node.getblockhash(1)
        coin = int(node.getblock(block_hash)['tx'][0], 16)
        tx_fan_out = CTransaction()
        tx_fan_out.vin.append(CTxIn(COutPoint(coin, 1), CScript([b'\x51'])))
        tx_fan_out.vout = spendable_outputs
        tx_fan_out.rehash()

        node.p2p.send_txs_and_test([tx_fan_out], node)

        utxo_idx = 0
        key_idx = 0
        for test_case in TESTCASES:
            if test_case == 'ENABLE_REPLAY_PROTECTION':
                node.setmocktime(ACTIVATION_TIME)
                node.generatetoaddress(11, burn_address)
                continue
            # Build tx for this test, will broadcast later
            tx = CTransaction()
            num_inputs = len(test_case['sig_hash_types'])
            spent_outputs = spendable_outputs[:num_inputs]
            del spendable_outputs[:num_inputs]
            assert len(spent_outputs) == num_inputs
            total_input_amount = sum(output.nValue for output in spent_outputs)
            max_output_amount = (total_input_amount -
                                 fee) // test_case['outputs']
            for i in range(test_case['outputs']):
                output_amount = max_output_amount - i * 77
                output_script = CScript(
                    [OP_HASH160, i.to_bytes(20, 'big'), OP_EQUAL])
                tx.vout.append(CTxOut(output_amount, output_script))
            for _ in test_case['sig_hash_types']:
                tx.vin.append(
                    CTxIn(COutPoint(tx_fan_out.txid, utxo_idx), CScript()))
                utxo_idx += 1
            # Keep unsigned tx for signrawtransactionwithkey below
            unsigned_tx = tx.serialize().hex()
            private_keys_wif = []
            sign_inputs = []
            # Make list of inputs for signrawtransactionwithkey
            for i, spent_output in enumerate(spent_outputs):
                sign_inputs.append({
                    'txid': tx_fan_out.txid_hex,
                    'vout': key_idx + i,
                    'amount': Decimal(spent_output.nValue) / COIN,
                    'scriptPubKey': spent_output.scriptPubKey.hex(),
                })
            for i, sig_hash_type in enumerate(test_case['sig_hash_types']):
                # Compute sighash for this input; we sign it manually using sign_ecdsa/sign_schnorr
                # and then broadcast the complete transaction
                sighash = SignatureHashLotus(
                    tx_to=tx,
                    spent_utxos=spent_outputs,
                    sig_hash_type=sig_hash_type,
                    input_index=i,
                    executed_script_hash=hash256(executed_scripts[key_idx]),
                    codeseparator_pos=test_case.get('codesep', 0xffff_ffff),
                )
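                # A codeseparator_pos of 0xffff_ffff conventionally means no
                # OP_CODESEPARATOR has been executed (as in BIP143-style
                # sighash schemes).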
                if test_case.get('schnorr', False):
                    signature = private_keys[key_idx].sign_schnorr(sighash)
                else:
                    signature = private_keys[key_idx].sign_ecdsa(sighash)
                signature += bytes(
                    [test_case.get('suffix', sig_hash_type & 0xff)])
                # Build correct scriptSig
                if test_case.get('opcodes'):
                    tx.vin[i].scriptSig = CScript(
                        [signature, executed_scripts[key_idx]])
                elif test_case.get('is_p2pk'):
                    tx.vin[i].scriptSig = CScript([signature])
                else:
                    tx.vin[i].scriptSig = CScript(
                        [signature, public_keys[key_idx].get_bytes()])

                sig_hash_type_str = self.get_sig_hash_type_str(sig_hash_type)
                if sig_hash_type_str is not None and 'opcodes' not in test_case and 'error' not in test_case:
                    # If we're a simple output type (P2PKH or P2KH) and aren't supposed to fail,
                    # we sign using signrawtransactionwithkey and verify the transaction signed
                    # the expected sighash. We won't broadcast it though.
                    # Note: signrawtransactionwithkey will not sign using replay-protection.
                    private_key_wif = bytes_to_wif(
                        private_keys[key_idx].get_bytes())
                    raw_tx_signed = self.nodes[0].signrawtransactionwithkey(
                        unsigned_tx, [private_key_wif], sign_inputs,
                        sig_hash_type_str)['hex']
                    # Extract the signature from the signed transaction
                    signed_tx = CTransaction()
                    signed_tx.deserialize(
                        io.BytesIO(bytes.fromhex(raw_tx_signed)))
                    sig = list(CScript(signed_tx.vin[i].scriptSig))[0]
                    pubkey = private_keys[key_idx].get_pubkey()
                    sighash = SignatureHashLotus(
                        tx_to=tx,
                        spent_utxos=spent_outputs,
                        sig_hash_type=sig_hash_type & 0xff,
                        input_index=i,
                        executed_script_hash=hash256(
                            executed_scripts[key_idx]),
                    )
                    # Verify sig signs the above sighash and has the expected sighash type
                    assert pubkey.verify_ecdsa(sig[:-1], sighash)
                    assert sig[-1] == sig_hash_type & 0xff
                key_idx += 1
            # Broadcast transaction and check success/failure
            tx.rehash()
            if 'error' not in test_case:
                node.p2p.send_txs_and_test([tx], node)
            else:
                node.p2p.send_txs_and_test([tx],
                                           node,
                                           success=False,
                                           reject_reason=test_case['error'])
Example #15
    def run_test(self):
        node = self.nodes[0]
        node.add_p2p_connection(P2PDataStore())
        node.setmocktime(REPLAY_PROTECTION_START_TIME)

        self.genesis_hash = int(node.getbestblockhash(), 16)
        self.block_heights[self.genesis_hash] = 0
        spendable_outputs = []

        # save the current tip so it can be spent by a later block
        def save_spendable_output():
            spendable_outputs.append(self.tip)

        # get an output that we previously marked as spendable
        def get_spendable_output():
            return PreviousSpendableOutput(spendable_outputs.pop(0).vtx[0], 0)

        # move the tip back to a previous block
        def tip(number):
            self.tip = self.blocks[number]

        # adds transactions to the block and updates state
        def update_block(block_number, new_transactions):
            block = self.blocks[block_number]
            block.vtx.extend(new_transactions)
            old_sha256 = block.sha256
            make_conform_to_ctor(block)
            block.hashMerkleRoot = block.calc_merkle_root()
            block.solve()
            # Update the internal state just like in next_block
            self.tip = block
            if block.sha256 != old_sha256:
                self.block_heights[
                    block.sha256] = self.block_heights[old_sha256]
                del self.block_heights[old_sha256]
            self.blocks[block_number] = block
            return block

        # shorthand
        block = self.next_block

        # Create a new block
        block(0)
        save_spendable_output()
        node.p2p.send_blocks_and_test([self.tip], node)

        # Now we need that block to mature so we can spend the coinbase.
        maturity_blocks = []
        for i in range(99):
            block(5000 + i)
            maturity_blocks.append(self.tip)
            save_spendable_output()
        node.p2p.send_blocks_and_test(maturity_blocks, node)

        # collect spendable outputs now to avoid cluttering the code later on
        out = []
        for i in range(100):
            out.append(get_spendable_output())

        # Generate a key pair for signing the test transactions
        private_key = CECKey()
        private_key.set_secretbytes(b"replayprotection")
        public_key = private_key.get_pubkey()

        # This is a little handier to use than the version in blocktools.py
        def create_fund_and_spend_tx(spend, forkvalue=0):
            # Fund transaction
            script = CScript([public_key, OP_CHECKSIG])
            txfund = create_transaction(spend.tx, spend.n, b'',
                                        50 * COIN - 1000, script)
            txfund.rehash()

            # Spend transaction
            txspend = CTransaction()
            txspend.vout.append(CTxOut(50 * COIN - 2000, CScript([OP_TRUE])))
            txspend.vin.append(CTxIn(COutPoint(txfund.sha256, 0), b''))

            # Sign the transaction
            sighashtype = (forkvalue << 8) | SIGHASH_ALL | SIGHASH_FORKID
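            # The fork value occupies the bits above the low sighash-type
            # byte; a non-zero value (0xffdead in the tests below) produces
            # signatures that pre-fork rules reject, which is what provides
            # replay protection.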
            sighash = SignatureHashForkId(script, txspend, 0, sighashtype,
                                          50 * COIN - 1000)
            sig = private_key.sign(sighash) + \
                bytes(bytearray([SIGHASH_ALL | SIGHASH_FORKID]))
            txspend.vin[0].scriptSig = CScript([sig])
            txspend.rehash()

            return [txfund, txspend]

        def send_transaction_to_mempool(tx):
            tx_id = node.sendrawtransaction(ToHex(tx))
            assert tx_id in set(node.getrawmempool())
            return tx_id

        # Before the fork, no replay protection is required to get into the mempool.
        txns = create_fund_and_spend_tx(out[0])
        send_transaction_to_mempool(txns[0])
        send_transaction_to_mempool(txns[1])

        # And txns get mined in a block properly.
        block(1)
        update_block(1, txns)
        node.p2p.send_blocks_and_test([self.tip], node)

        # Replay protected transactions are rejected.
        replay_txns = create_fund_and_spend_tx(out[1], 0xffdead)
        send_transaction_to_mempool(replay_txns[0])
        assert_raises_rpc_error(-26, RPC_INVALID_SIGNATURE_ERROR,
                                node.sendrawtransaction, ToHex(replay_txns[1]))

        # And blocks containing them are rejected as well.
        block(2)
        update_block(2, replay_txns)
        node.p2p.send_blocks_and_test([self.tip],
                                      node,
                                      success=False,
                                      reject_reason='blk-bad-inputs')

        # Rewind bad block
        tip(1)

        # Create a block that would activate the replay protection.
        bfork = block(5555)
        bfork.nTime = REPLAY_PROTECTION_START_TIME - 1
        update_block(5555, [])
        node.p2p.send_blocks_and_test([self.tip], node)

        activation_blocks = []
        for i in range(5):
            block(5100 + i)
            activation_blocks.append(self.tip)
        node.p2p.send_blocks_and_test(activation_blocks, node)

        # Check we are just before the activation time
        assert_equal(
            node.getblockheader(node.getbestblockhash())['mediantime'],
            REPLAY_PROTECTION_START_TIME - 1)

        # We are just before the fork; replay protected txns are still rejected
        assert_raises_rpc_error(-26, RPC_INVALID_SIGNATURE_ERROR,
                                node.sendrawtransaction, ToHex(replay_txns[1]))

        block(3)
        update_block(3, replay_txns)
        node.p2p.send_blocks_and_test([self.tip],
                                      node,
                                      success=False,
                                      reject_reason='blk-bad-inputs')

        # Rewind bad block
        tip(5104)

        # Send some non replay protected txns into the mempool to check
        # they get cleaned out at activation.
        txns = create_fund_and_spend_tx(out[2])
        send_transaction_to_mempool(txns[0])
        tx_id = send_transaction_to_mempool(txns[1])

        # Activate the replay protection
        block(5556)
        node.p2p.send_blocks_and_test([self.tip], node)

        # Check we just activated the replay protection
        assert_equal(
            node.getblockheader(node.getbestblockhash())['mediantime'],
            REPLAY_PROTECTION_START_TIME)

        # Non replay protected transactions are not valid anymore,
        # so they should be removed from the mempool.
        assert tx_id not in set(node.getrawmempool())

        # Good old transactions are now invalid.
        send_transaction_to_mempool(txns[0])
        assert_raises_rpc_error(-26, RPC_INVALID_SIGNATURE_ERROR,
                                node.sendrawtransaction, ToHex(txns[1]))

        # They also cannot be mined
        block(4)
        update_block(4, txns)
        node.p2p.send_blocks_and_test([self.tip],
                                      node,
                                      success=False,
                                      reject_reason='blk-bad-inputs')

        # Rewind bad block
        tip(5556)

        # The replay protected transaction is now valid
        replay_tx0_id = send_transaction_to_mempool(replay_txns[0])
        replay_tx1_id = send_transaction_to_mempool(replay_txns[1])

        # Make sure the transactions are ready to be mined.
        tmpl = node.getblocktemplate()

        found_id0 = False
        found_id1 = False

        for txn in tmpl['transactions']:
            txid = txn['txid']
            if txid == replay_tx0_id:
                found_id0 = True
            elif txid == replay_tx1_id:
                found_id1 = True

        assert found_id0 and found_id1

        # And the mempool is still in good shape.
        assert replay_tx0_id in set(node.getrawmempool())
        assert replay_tx1_id in set(node.getrawmempool())

        # They can also be mined
        block(5)
        update_block(5, replay_txns)
        node.p2p.send_blocks_and_test([self.tip], node)

        # Ok, now we check that a reorg works properly across the activation.
        postforkblockid = node.getbestblockhash()
        node.invalidateblock(postforkblockid)
        assert replay_tx0_id in set(node.getrawmempool())
        assert replay_tx1_id in set(node.getrawmempool())

        # Deactivating replay protection.
        forkblockid = node.getbestblockhash()
        node.invalidateblock(forkblockid)
        # The funding tx is not evicted from the mempool, since it's valid
        # on both sides of the fork
        assert replay_tx0_id in set(node.getrawmempool())
        assert replay_tx1_id not in set(node.getrawmempool())
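        # replay_txns[1] commits to the post-fork (replay protected) sighash,
        # so it cannot be valid on the pre-fork side and is evicted, while the
        # funding tx stays valid under both rule sets.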

        # Check that we also do it properly on a deeper reorg.
        node.reconsiderblock(forkblockid)
        node.reconsiderblock(postforkblockid)
        node.invalidateblock(forkblockid)
        assert replay_tx0_id in set(node.getrawmempool())
        assert replay_tx1_id not in set(node.getrawmempool())

    def run_test(self):
        node = self.nodes[0]
        self.pynode = P2PDataStore()
        self.connection = NodeConn('127.0.0.1', p2p_port(0), node, self.pynode)
        self.pynode.add_connection(self.connection)
        NetworkThread().start()
        self.pynode.wait_for_verack()

        # Get out of IBD
        node.generate(1)

        tip = self.getbestblock(node)

        logging.info("Create some blocks with OP_1 coinbase for spending.")
        blocks = []
        for _ in range(20):
            tip = self.build_block(tip)
            blocks.append(tip)
        self.pynode.send_blocks_and_test(blocks, node, success=True)
        self.spendable_outputs = deque(block.vtx[0] for block in blocks)
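        # Each deque entry is one block's coinbase tx; since those coinbases
        # pay to OP_1, they can be spent without signatures once matured, and
        # popleft() hands them out oldest-first.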

        logging.info("Mature the blocks.")
        node.generate(100)

        tip = self.getbestblock(node)

        # To make compact and fast-to-verify transactions, we'll use
        # CHECKDATASIG over and over with the same data.
        # (Reusing the same message/signature/pubkey triple lets us hit the
        # node's signature cache, so we don't need to make new signatures
        # every time.)
        cds_message = b''
        # r=1 and s=1 ecdsa, the minimum values.
        cds_signature = bytes.fromhex('3006020101020101')
        # Recovered pubkey
        cds_pubkey = bytes.fromhex(
            '03089b476b570d66fad5a20ae6188ebbaf793a4c2a228c65f3d79ee8111d56c932'
        )
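        # DER layout of that signature, for reference:
        #   30 06        SEQUENCE, 6 content bytes
        #     02 01 01   INTEGER r = 1
        #     02 01 01   INTEGER s = 1
        # OP_CHECKDATASIG checks it against sha256(cds_message) and
        # cds_pubkey; the pubkey above was recovered so this triple verifies.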

        def minefunding2(n):
            """ Mine a block with a bunch of outputs that are very dense
            sigchecks when spent (2 sigchecks each); return the inputs that can
            be used to spend. """
            cds_scriptpubkey = CScript([
                cds_message, cds_pubkey, OP_3DUP, OP_CHECKDATASIGVERIFY,
                OP_CHECKDATASIGVERIFY
            ])
            # The scriptsig is carefully padded to have size 26, which is the
            # shortest allowed for 2 sigchecks for mempool admission.
            # The resulting inputs have size 67 bytes, 33.5 bytes/sigcheck.
            cds_scriptsig = CScript([b'x' * 16, cds_signature])
            assert_equal(len(cds_scriptsig), 26)
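            # Size arithmetic for each resulting input: 36 (outpoint)
            # + 1 (scriptsig length varint) + 26 (scriptsig) + 4 (nSequence)
            # = 67 bytes, i.e. 33.5 bytes per sigcheck at 2 sigchecks each.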

            logging.debug(
                "Gen {} with locking script {} unlocking script {} .".format(
                    n, cds_scriptpubkey.hex(), cds_scriptsig.hex()))

            tx = self.spendable_outputs.popleft()
            usable_inputs = []
            txes = []
            for i in range(n):
                tx = create_transaction(tx, cds_scriptpubkey,
                                        bytes([OP_TRUE]) if i == 0 else b"")
                txes.append(tx)
                usable_inputs.append(
                    CTxIn(COutPoint(tx.sha256, 1), cds_scriptsig))
            newtip = self.build_block(tip, txes)
            self.pynode.send_blocks_and_test([newtip], node, timeout=10)
            return usable_inputs, newtip

        logging.info("Funding special coins that have high sigchecks")

        # mine 5000 funded outputs (10000 sigchecks)
        # will be used pre-activation and post-activation
        usable_inputs, tip = minefunding2(5000)
        # assemble them into 50 txes with 100 inputs each (200 sigchecks)
        submittxes_1 = []
        while len(usable_inputs) >= 100:
            tx = CTransaction()
            tx.vin = [usable_inputs.pop() for _ in range(100)]
            tx.vout = [CTxOut(0, CScript([OP_RETURN]))]
            tx.rehash()
            submittxes_1.append(tx)
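        # Sanity check (illustrative): 5000 inputs / 100 per tx = 50 txes,
        # each carrying 100 * 2 = 200 sigchecks, i.e. 10000 sigchecks staged.
        assert_equal(len(submittxes_1), 50)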

        # mine 5000 funded outputs (10000 sigchecks)
        # will be used post-activation
        usable_inputs, tip = minefunding2(5000)
        # assemble them into 50 txes with 100 inputs each (200 sigchecks)
        submittxes_2 = []
        while len(usable_inputs) >= 100:
            tx = CTransaction()
            tx.vin = [usable_inputs.pop() for _ in range(100)]
            tx.vout = [CTxOut(0, CScript([OP_RETURN]))]
            tx.rehash()
            submittxes_2.append(tx)

        # Check high sigcheck transactions
        logging.info("Create transaction that have high sigchecks")

        fundings = []

        def make_spend(sigcheckcount):
            # Add a funding tx to fundings, and return a tx that spends it
            # using scriptsig.
            logging.debug("Gen tx with {} sigchecks.".format(sigcheckcount))

            def get_script_with_sigcheck(count):
                return CScript([cds_message, cds_pubkey] +
                               (count - 1) * [OP_3DUP, OP_CHECKDATASIGVERIFY] +
                               [OP_CHECKDATASIG])

            # get funds locked with OP_1
            sourcetx = self.spendable_outputs.popleft()
            # make a funding tx that forwards to the scriptpubkey
            last_sigcheck_count = ((sigcheckcount - 1) % 30) + 1
            fundtx = create_transaction(
                sourcetx, get_script_with_sigcheck(last_sigcheck_count))

            fill_sigcheck_script = get_script_with_sigcheck(30)

            remaining_sigcheck = sigcheckcount
            while remaining_sigcheck > 30:
                fundtx.vout[0].nValue -= 1000
                fundtx.vout.append(CTxOut(100, bytes(fill_sigcheck_script)))
                remaining_sigcheck -= 30

            fundtx.rehash()
            fundings.append(fundtx)

            # make the spending tx
            scriptsig = CScript([cds_signature])

            tx = CTransaction()
            tx.vin.append(CTxIn(COutPoint(fundtx.sha256, 1), scriptsig))

            input_index = 2
            remaining_sigcheck = sigcheckcount
            while remaining_sigcheck > 30:
                tx.vin.append(
                    CTxIn(COutPoint(fundtx.sha256, input_index), scriptsig))
                remaining_sigcheck -= 30
                input_index += 1

            tx.vout.append(CTxOut(0, CScript([OP_RETURN])))
            pad_tx(tx)
            tx.rehash()
            return tx
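
        # Worked example of the arithmetic above: for sigcheckcount == 65 the
        # funding tx gets one ((65 - 1) % 30) + 1 == 5 sigcheck output plus
        # two 30-sigcheck fill outputs, and the spend consumes all three
        # inputs: 5 + 30 + 30 == 65 sigchecks.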

        # Create transactions with many sigchecks.
        good_tx = make_spend(MAX_TX_SIGCHECK)
        bad_tx = make_spend(MAX_TX_SIGCHECK + 1)
        tip = self.build_block(tip, fundings)
        self.pynode.send_blocks_and_test([tip], node)

        # Both tx are accepted before the activation.
        pre_activation_sigcheck_block = self.build_block(
            tip, [good_tx, bad_tx])
        self.pynode.send_blocks_and_test([pre_activation_sigcheck_block], node)
        node.invalidateblock(pre_activation_sigcheck_block.hash)

        # After the block is invalidated, these txs are put back into the
        # mempool. The test uses them later, so evict them now.
        waitFor(10, lambda: node.getmempoolinfo()["size"] == 2)
        node.evicttransaction(good_tx.hash)
        node.evicttransaction(bad_tx.hash)

        # Activation tests

        logging.info("Approach to just before upgrade activation")
        # Move our clock to the upgrade time so we will accept such
        # future-timestamped blocks.
        node.setmocktime(SIGCHECKS_ACTIVATION_TIME + 10)
        # Mine six blocks with timestamp starting at
        # SIGCHECKS_ACTIVATION_TIME-1
        blocks = []
        for i in range(-1, 5):
            tip = self.build_block(tip, nTime=SIGCHECKS_ACTIVATION_TIME + i)
            blocks.append(tip)
        self.pynode.send_blocks_and_test(blocks, node, timeout=TIMEOUT)
        assert_equal(node.getblockchaininfo()['mediantime'],
                     SIGCHECKS_ACTIVATION_TIME - 1)
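        # Why T-1: the median-time-past takes the 6th-smallest of the last 11
        # timestamps; with five much older blocks plus these six at T-1..T+4
        # (T = SIGCHECKS_ACTIVATION_TIME), that 6th value is T-1, one second
        # short of activation.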

        logging.info(
            "The next block will activate, but the activation block itself must follow old rules"
        )
        # Send the 50 txes and get the node to mine as many as possible (it
        # should mine them all)
        # The node is happy mining and validating a 10000 sigcheck block before
        # activation.
        self.pynode.send_txs_and_test(submittxes_1, node, timeout=TIMEOUT)
        [blockhash] = node.generate(1)
        assert_equal(set(node.getblock(blockhash, 1)["tx"][1:]),
                     {t.hash for t in submittxes_1})

        # We have activated, but let's invalidate that.
        assert_equal(node.getblockchaininfo()['mediantime'],
                     SIGCHECKS_ACTIVATION_TIME)
        node.invalidateblock(blockhash)

        # Try again manually and invalidate that too
        goodblock = self.build_block(tip, submittxes_1)
        self.pynode.send_blocks_and_test([goodblock], node, timeout=TIMEOUT)
        node.invalidateblock(goodblock.hash)
        # All transactions should be back in the mempool; validation is very
        # slow in a debug build, so allow a generous timeout.
        waitFor(60, lambda: set(node.getrawmempool()) ==
                {t.hash for t in submittxes_1})

        logging.info("Mine the activation block itself")
        tip = self.build_block(tip)
        self.pynode.send_blocks_and_test([tip], node, timeout=TIMEOUT)

        logging.info("We have activated!")
        assert_equal(node.getblockchaininfo()['mediantime'],
                     SIGCHECKS_ACTIVATION_TIME)

        # All transactions get re-evaluated to count sigchecks, so wait for them
        waitFor(60, lambda: set(node.getrawmempool()) ==
                {t.hash for t in submittxes_1})

        logging.info(
            "Try a block with a transaction going over the limit (limit: {})".
            format(MAX_TX_SIGCHECK))
        bad_tx_block = self.build_block(tip, [bad_tx])
        check_for_ban_on_rejected_block(
            self.pynode,
            node,
            bad_tx_block,
            reject_reason=BLOCK_SIGCHECKS_BAD_TX_SIGCHECKS)

        logging.info(
            "Try a block with a transaction exactly at the limit (limit: {})".
            format(MAX_TX_SIGCHECK))
        good_tx_block = self.build_block(tip, [good_tx])
        self.pynode.send_blocks_and_test([good_tx_block],
                                         node,
                                         timeout=TIMEOUT)
        node.invalidateblock(good_tx_block.hash)

        # save this tip for later
        # ~ upgrade_block = tip

        # Transactions still in pool:
        waitFor(60, lambda: set(node.getrawmempool()) ==
                {t.hash for t in submittxes_1})

        logging.info(
            "Try sending 10000-sigcheck blocks after activation (limit: {})".
            format(MAXBLOCKSIZE // BLOCK_MAXBYTES_MAXSIGCHECKS_RATIO))
        # Send block with same txes we just tried before activation
        badblock = self.build_block(tip, submittxes_1)
        check_for_ban_on_rejected_block(
            self.pynode,
            node,
            badblock,
            reject_reason="Invalid block due to bad-blk-sigchecks",
            expect_ban=True)

        logging.info(
            "There are too many sigchecks in the mempool to mine in a single "
            "block. Make sure the node won't mine invalid blocks. Mempool: %s"
            % str(node.getmempoolinfo()))
        node.generate(1)
        tip = self.getbestblock(node)
        # only 39 txes got mined.
        assert_equal(len(node.getrawmempool()), 11)
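        # Arithmetic: each remaining tx carries 200 sigchecks, so 39 mined
        # txes account for 7800; a 40th would reach 8000 and bust the block
        # sigchecks budget (just under 8000 here), leaving 50 - 39 = 11 txes.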

        logging.info(
            "Try sending 10000-sigcheck block with fresh transactions after activation (limit: {})"
            .format(MAXBLOCKSIZE // BLOCK_MAXBYTES_MAXSIGCHECKS_RATIO))
        # Note: in the following tests we'll be bumping the timestamp in
        # order to bypass any kind of 'bad block' cache on the node, and get
        # a fresh evaluation each time.

        # Try another block with 10000 sigchecks but all fresh transactions
        badblock = self.build_block(tip,
                                    submittxes_2,
                                    nTime=SIGCHECKS_ACTIVATION_TIME + 5)
        check_for_ban_on_rejected_block(
            self.pynode,
            node,
            badblock,
            reject_reason=BLOCK_SIGCHECKS_BAD_BLOCK_SIGCHECKS)

        # Send the same txes again in a block with a different hash. Currently
        # we don't cache valid transactions in invalid blocks, so nothing
        # changes.
        badblock = self.build_block(tip,
                                    submittxes_2,
                                    nTime=SIGCHECKS_ACTIVATION_TIME + 6)
        check_for_ban_on_rejected_block(
            self.pynode,
            node,
            badblock,
            reject_reason=BLOCK_SIGCHECKS_BAD_BLOCK_SIGCHECKS)

        # Put all the txes in mempool, in order to get them cached:
        self.pynode.send_txs_and_test(submittxes_2, node, timeout=TIMEOUT)
        # Send them again; the node still rejects the block, but the logged
        # error message has changed because the txes now fail via the cache.
        badblock = self.build_block(tip,
                                    submittxes_2,
                                    nTime=SIGCHECKS_ACTIVATION_TIME + 7)
        check_for_ban_on_rejected_block(
            self.pynode,
            node,
            badblock,
            reject_reason=BLOCK_SIGCHECKS_BAD_BLOCK_SIGCHECKS)

        logging.info(
            "Try sending 8000-sigcheck block after activation (limit: {})".
            format(MAXBLOCKSIZE // BLOCK_MAXBYTES_MAXSIGCHECKS_RATIO))
        badblock = self.build_block(tip,
                                    submittxes_2[:40],
                                    nTime=SIGCHECKS_ACTIVATION_TIME + 5)
        check_for_ban_on_rejected_block(
            self.pynode,
            node,
            badblock,
            reject_reason=BLOCK_SIGCHECKS_BAD_BLOCK_SIGCHECKS)
        # redundant, but just to mirror the following test...
        node.set("consensus.maxBlockSigChecks=%d" % MAX_BLOCK_SIGCHECKS)

        logging.info(
            "Bump the maxBlockSigChecks limit by 1, and send another block with the same txes (new sigchecks limit: {})"
            .format(MAX_BLOCK_SIGCHECKS + 1))
        node.set("consensus.maxBlockSigChecks=%d" % (MAX_BLOCK_SIGCHECKS + 1))
        tip = self.build_block(tip,
                               submittxes_2[:40],
                               nTime=SIGCHECKS_ACTIVATION_TIME + 6)
        # It should succeed now since limit should be 8000.
        self.pynode.send_blocks_and_test([tip], node, timeout=TIMEOUT)
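
        # Arithmetic: 40 txes * 200 sigchecks = 8000. The same block failed
        # under the old limit and passes once the limit is raised by one,
        # which implies MAX_BLOCK_SIGCHECKS is 7999 in this configuration.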