async def test_connection_limit(self, loop):
    """Verify the server enforces MAX_RPC_CONNECTIONS.

    Fill the connection table, check that an extra connection is not
    serviced (its RPC call times out) and eventually dropped, then check
    that freeing one slot lets a new connection through.
    """
    connections = []
    for i in range(MAX_RPC_CONNECTIONS):
        c = ElectrumConnection()
        await c.connect()
        connections.append(c)

    # Exceed limit, we should get disconnected.
    extra_connection = ElectrumConnection()
    await extra_connection.connect()
    try:
        # The over-limit connection must not get a ping response.
        await asyncio.wait_for(extra_connection.call("server.ping"), timeout=5)
        assert (False)
    except asyncio.TimeoutError:
        # We expect this to timeout
        pass
    # The server should actively drop the over-limit connection.
    waitFor(5, lambda: not extra_connection.is_connected())

    # Drop one connection
    connections[0].disconnect()

    # New connection should be accepted now.
    extra_connection2 = ElectrumConnection()
    await extra_connection2.connect()
    await asyncio.wait_for(extra_connection2.call("server.ping"), timeout=5)

    # Clean up: connections[0] and extra_connection are already closed.
    for c in connections[1:] + [extra_connection2]:
        c.disconnect()
def wait_for_electrum_mempool(node, *, count, timeout=10):
    """Wait until the electrum server reports `count` mempool transactions.

    On timeout, print the server's current mempool count (taken from the
    node's electrum debug info) to aid debugging, then re-raise the
    timeout error so the test still fails.
    """
    try:
        waitFor(timeout, lambda: compare(node, "mempool_count", count, True))
    except Exception:
        # Removed unused exception binding; we only need the side channel
        # diagnostics before re-raising.
        print("Waited for {} txs, had {}".format(
            count,
            node.getelectruminfo()['debuginfo']['electrscash_mempool_count']))
        raise
def send_txs_and_test(self, txs, node, *, success=True, expect_ban=False,
                      reject_reason=None, timeout=60):
    """Send txs to test node and test whether they're accepted to the mempool.

    - add all txs to our tx_store
    - send tx messages for all txs
    - if success is True/False: assert that the txs are/are not accepted
      to the mempool
    - if expect_ban is True: assert that the ban-threshold message is
      logged (otherwise assert that it is not)
    - if reject_reason is set: assert that the correct reject message is
      logged.
    """
    assert len(txs)
    with mininode_lock:
        for tx in txs:
            self.tx_store[tx.sha256] = tx

    BAN_MSG = "BAN THRESHOLD EXCEEDED"
    expected_msgs = []
    unexpected_msgs = []
    if reject_reason:
        expected_msgs.append(reject_reason)
    if expect_ban:
        expected_msgs.append(BAN_MSG)
    else:
        # We explicitly require that no ban was triggered.
        unexpected_msgs.append(BAN_MSG)

    with node.assert_debug_log(expected_msgs=expected_msgs,
                               unexpected_msgs=unexpected_msgs):
        for tx in txs:
            self.send_message(msg_tx(tx))
        self.sync_with_ping()

        if success:
            # Check that all txs are now in the mempool
            for tx in txs:
                waitFor(timeout, lambda: tx.hash in node.getrawmempool(),
                        onError="{} tx not found in mempool".format(tx.hash))
        else:
            # Check that none of the txs are now in the mempool.
            # Fixed error message: here a failure means the tx unexpectedly
            # WAS found in the mempool.
            for tx in txs:
                waitFor(timeout,
                        lambda: tx.hash not in node.getrawmempool(),
                        onError="{} tx found in mempool".format(tx.hash))
def test_address_balance(self, n, electrum_client):
    """Check that scripthash get_balance tracks unconfirmed and confirmed coins.

    Sends 1 coin to a fresh address, expects it to show up as unconfirmed,
    then mines a block and expects it to become confirmed.
    """
    addr = n.getnewaddress()
    n.sendtoaddress(addr, 1)
    scripthash = address_to_scripthash(addr)

    # Fixed: the helper previously accepted an `address` parameter but
    # ignored it and recomputed the scripthash from the enclosing `addr`.
    # It now uses the scripthash argument it is given.
    def check_balance(scripthash, unconfirmed=0, confirmed=0):
        res = electrum_client.call("blockchain.scripthash.get_balance",
                                   scripthash)
        # Balances are reported in satoshis.
        return res["unconfirmed"] == unconfirmed * COIN \
            and res["confirmed"] == confirmed * COIN

    waitFor(10, lambda: check_balance(scripthash, unconfirmed=1))
    n.generate(1)
    waitFor(10, lambda: check_balance(scripthash, confirmed=1))
def run_test(self):
    """Check that only the bitcoind tied to the dummy electrs shuts down.

    bitcoind #1 runs with a dummy "electrs" that exits; that node should
    shut down with it, while bitcoind #0 stays up.
    """
    if self.skip:
        return
    # Removed unused local `n = self.nodes[0]`.
    # bitcoind #1 should shutdown when "electrs" does
    waitFor(30, lambda: not is_bitcoind_running(1))
    # bitcoind #0 should not have exited, even though "electrs" has
    assert is_bitcoind_running(0)
    # del so the test framework doesn't try to stop the stopped node
    del self.nodes[1]
    # Clean up the dummy electrum binary created for this test, if any.
    if self.dummy_electrum_path:
        os.unlink(self.dummy_electrum_path)
def run_test(self):
    """End-to-end test of NULLDUMMY/Schnorr rules for 1-of-1 CHECKMULTISIG.

    Builds funding outputs, then spends them with every combination of
    dummy element (OP_0 / OP_1) and signature type (ECDSA / Schnorr), and
    checks mempool acceptance, mining, and ban behavior for each case.
    """
    logging.info("Initializing test directory " + self.options.tmpdir)
    node = self.nodes[0]
    self.bootstrap_p2p()
    tip = self.getbestblock(node)

    logging.info("Create some blocks with OP_1 coinbase for spending.")
    blocks = []
    for _ in range(10):
        tip = self.build_block(tip)
        blocks.append(tip)
    self.p2p.send_blocks_and_test(blocks, node, success=True)
    spendable_outputs = [block.vtx[0] for block in blocks]

    logging.info("Mature the blocks and get out of IBD.")
    node.generate(100)
    tip = self.getbestblock(node)

    logging.info("Setting up spends to test and mining the fundings.")
    fundings = []

    # Generate a key pair
    privkeybytes = b"Schnorr!" * 4
    private_key = CECKey()
    private_key.set_secretbytes(privkeybytes)
    # get uncompressed public key serialization
    public_key = private_key.get_pubkey()

    def create_fund_and_spend_tx(dummy=OP_0, sigtype='ecdsa'):
        # Build a 1-of-1 CHECKMULTISIG funding tx (recorded in `fundings`)
        # and return a tx spending it with the given dummy stack element
        # and signature type.
        spendfrom = spendable_outputs.pop()
        script = CScript([OP_1, public_key, OP_1, OP_CHECKMULTISIG])
        value = spendfrom.vout[0].nValue

        # Fund transaction
        txfund = create_transaction(spendfrom, 0, b'', value, script)
        txfund.rehash()
        fundings.append(txfund)

        # Spend transaction
        txspend = CTransaction()
        txspend.vout.append(CTxOut(value - 1000, CScript([OP_TRUE])))
        txspend.vin.append(CTxIn(COutPoint(txfund.sha256, 0), b''))

        # Sign the transaction
        sighashtype = SIGHASH_ALL | SIGHASH_FORKID
        hashbyte = bytes([sighashtype & 0xff])
        sighash = SignatureHashForkId(script, txspend, 0, sighashtype, value)
        if sigtype == 'schnorr':
            txsig = schnorr.sign(privkeybytes, sighash) + hashbyte
        elif sigtype == 'ecdsa':
            txsig = private_key.sign(sighash) + hashbyte
        txspend.vin[0].scriptSig = CScript([dummy, txsig])
        txspend.rehash()
        return txspend

    # This is valid.
    ecdsa0tx = create_fund_and_spend_tx(OP_0, 'ecdsa')
    # This is invalid.
    ecdsa1tx = create_fund_and_spend_tx(OP_1, 'ecdsa')
    # This is invalid.
    schnorr0tx = create_fund_and_spend_tx(OP_0, 'schnorr')
    # This is valid.
    schnorr1tx = create_fund_and_spend_tx(OP_1, 'schnorr')

    # Mine all the funding transactions so the spends become usable.
    tip = self.build_block(tip, fundings)
    self.p2p.send_blocks_and_test([tip], node)

    logging.info("Send a legacy ECDSA multisig into mempool.")
    self.p2p.send_txs_and_test([ecdsa0tx], node)
    waitFor(10, lambda: node.getrawmempool() == [ecdsa0tx.hash])

    logging.info("Trying to mine a non-null-dummy ECDSA.")
    self.check_for_ban_on_rejected_block(
        self.build_block(tip, [ecdsa1tx]), BADINPUTS_ERROR)
    logging.info(
        "If we try to submit it by mempool or RPC, it is rejected and we are banned")
    assert_raises_rpc_error(-26, ECDSA_NULLDUMMY_ERROR,
                            node.sendrawtransaction, ToHex(ecdsa1tx))
    self.check_for_ban_on_rejected_tx(ecdsa1tx, ECDSA_NULLDUMMY_ERROR)

    logging.info(
        "Submitting a Schnorr-multisig via net, and mining it in a block")
    self.p2p.send_txs_and_test([schnorr1tx], node)
    waitFor(10, lambda: set(node.getrawmempool()) ==
            {ecdsa0tx.hash, schnorr1tx.hash})
    tip = self.build_block(tip, [schnorr1tx])
    self.p2p.send_blocks_and_test([tip], node)

    logging.info(
        "That legacy ECDSA multisig is still in mempool, let's mine it")
    waitFor(10, lambda: node.getrawmempool() == [ecdsa0tx.hash])
    tip = self.build_block(tip, [ecdsa0tx])
    self.p2p.send_blocks_and_test([tip], node)
    waitFor(10, lambda: node.getrawmempool() == [])

    logging.info("Trying Schnorr in legacy multisig is invalid and banworthy.")
    self.check_for_ban_on_rejected_tx(schnorr0tx,
                                      SCHNORR_LEGACY_MULTISIG_ERROR)
    self.check_for_ban_on_rejected_block(
        self.build_block(tip, [schnorr0tx]), BADINPUTS_ERROR)
def sync_electrs(self, n):
    """Block until the electrum index height matches the node's tip height."""
    synced = lambda: compare(n, "index_height", n.getblockcount())
    waitFor(10, synced)
def get_tests(self):
    """Comptool test generator for OP_CHECKDATASIG activation.

    Yields test instances checking that checkdatasig spends are rejected
    before the NOV152018 activation time, accepted after it, and handled
    correctly (evicted / restored) across a deactivating reorg.
    """
    node = self.nodes[0]

    # First, we generate some coins to spend.
    node.generate(125)

    # Create various outputs using the OP_CHECKDATASIG
    # to check for activation.
    tx_hex = self.create_checkdatasig_tx(25)
    txid = node.sendrawtransaction(tx_hex)
    assert txid in set(node.getrawmempool())

    node.generate(1)
    assert txid not in set(node.getrawmempool())

    # register the spendable outputs.
    tx = FromHex(CTransaction(), tx_hex)
    tx.rehash()
    spendable_checkdatasigs = [PreviousSpendableOutput(tx, i)
                               for i in range(len(tx.vout))]

    def spend_checkdatasig():
        # Pop one checkdatasig-guarded output and build a tx spending it.
        outpoint = spendable_checkdatasigs.pop()
        out = outpoint.tx.vout[outpoint.n]
        tx = CTransaction()
        tx.vin = [CTxIn(COutPoint(outpoint.tx.sha256, outpoint.n))]
        tx.vout = [CTxOut(out.nValue, CScript([])),
                   CTxOut(0, CScript([random.getrandbits(800), OP_RETURN]))]
        # Deduct an approximate fee; use integer division so nValue stays
        # an int (true division would produce a float for small values).
        tx.vout[0].nValue -= min(tx.vout[0].nValue // 100,
                                 1000)  # node.calculate_fee(tx)
        tx.rehash()
        return tx

    # Check that transactions using checkdatasig are not accepted yet.
    logging.info("Try to use the checkdatasig opcodes before activation")
    tx0 = spend_checkdatasig()
    tx0_hex = ToHex(tx0)
    assert_raises_rpc_error(-26, RPC_BAD_OPCODE_ERROR,
                            node.sendrawtransaction, tx0_hex)

    # Push MTP forward just before activation.
    logging.info("Pushing MTP just before the activation and check again")
    node.setmocktime(NOV152018_START_TIME)

    # returns a test case that asserts that the current tip was accepted
    def accepted(tip):
        return TestInstance([[tip, True]])

    # returns a test case that asserts that the current tip was rejected
    def rejected(tip, reject=None):
        if reject is None:
            return TestInstance([[tip, False]])
        else:
            return TestInstance([[tip, reject]])

    def next_block(block_time):
        # get block height
        blockchaininfo = node.getblockchaininfo()
        height = int(blockchaininfo['blocks'])
        # create the block
        coinbase = create_coinbase(height)
        coinbase.rehash()
        block = create_block(int(node.getbestblockhash(), 16), coinbase,
                             block_time)
        # Do PoW, which is cheap on regnet
        block.solve()
        return block

    # Advance the median-time-past to just before the activation time.
    for i in range(6):
        b = next_block(NOV152018_START_TIME + i - 1)
        yield accepted(b)

    # Check again just before the activation time
    assert_equal(
        node.getblockheader(node.getbestblockhash())['mediantime'],
        NOV152018_START_TIME - 1)
    assert_raises_rpc_error(-26, RPC_BAD_OPCODE_ERROR,
                            node.sendrawtransaction, tx0_hex)

    def add_tx(block, tx):
        # Append tx to the block and re-solve it.
        block.vtx.append(tx)
        block.hashMerkleRoot = block.calc_merkle_root()
        block.solve()

    # A block containing the checkdatasig spend is still invalid at MTP-1.
    b = next_block(NOV152018_START_TIME + 6)
    add_tx(b, tx0)
    yield rejected(b, RejectResult(16, b'bad-blk-signatures'))

    logging.info("Activates checkdatasig")
    fork_block = next_block(NOV152018_START_TIME + 6)
    yield accepted(fork_block)

    assert_equal(
        node.getblockheader(node.getbestblockhash())['mediantime'],
        NOV152018_START_TIME)

    # Now the spend is accepted into the mempool.
    tx0id = node.sendrawtransaction(tx0_hex)
    assert tx0id in set(node.getrawmempool())

    # Transactions can also be included in blocks.
    nov152018forkblock = next_block(NOV152018_START_TIME + 7)
    add_tx(nov152018forkblock, tx0)
    yield accepted(nov152018forkblock)

    logging.info("Cause a reorg that deactivate the checkdatasig opcodes")
    # Invalidate the checkdatasig block, ensure tx0 gets back to the mempool.
    assert tx0id not in set(node.getrawmempool())
    node.invalidateblock(format(nov152018forkblock.sha256, 'x'))
    # Fixed typos in the onError messages below ("shoud", "memopool").
    waitFor(3, lambda: tx0id in set(node.getrawmempool()),
            "Transaction should be included in the mempool")
    node.invalidateblock(format(fork_block.sha256, 'x'))
    waitFor(3, lambda: tx0id not in set(node.getrawmempool()),
            "Transaction should not be included in the mempool")
def test_mempoolsync(self, n):
    """The electrum server must mirror node mempool additions and removals."""

    # waitFor raises on timeout, which fails the test.
    def expect_synced_height():
        waitFor(10, lambda: compare(n, "index_height", n.getblockcount()))

    def expect_mempool_count(count):
        waitFor(10, lambda: compare(n, "mempool_count", count, True))

    def expect_indexed_txns(offset):
        # Expected tx count is evaluated per poll, relative to the tip.
        waitFor(10, lambda: compare(n, "index_txns",
                                    n.getblockcount() + offset, True))

    expect_synced_height()
    expect_indexed_txns(1)  # +1 is genesis tx
    expect_mempool_count(0)

    logging.info("Check that mempool is communicated")
    n.sendtoaddress(n.getnewaddress(), 1)
    assert_equal(1, len(n.getrawmempool()))
    expect_mempool_count(1)

    n.generate(1)
    assert_equal(0, len(n.getrawmempool()))
    expect_synced_height()
    expect_mempool_count(0)
    expect_indexed_txns(2)
def sync_electrum_height(node, timeout=10):
    """Wait until the electrum index height equals the node's block count."""
    in_sync = lambda: compare(node, "index_height", node.getblockcount())
    waitFor(timeout, in_sync)
def run_test(self):
    """Test the per-input sigchecks-density rule around the May 2020 upgrade.

    Builds transactions whose inputs sit exactly at / just below the
    allowed scriptsig-size-per-sigcheck limit, checks mempool policy
    before and after activation, and verifies they remain mineable in
    blocks.
    """
    (node, ) = self.nodes
    self.pynode = P2PDataStore()
    self.connection = NodeConn('127.0.0.1', p2p_port(0), node, self.pynode)
    self.pynode.add_connection(self.connection)
    NetworkThread().start()
    self.pynode.wait_for_verack()

    # Get out of IBD
    node.generate(1)
    tip = self.getbestblock(node)

    logging.info("Create some blocks with OP_1 coinbase for spending.")
    blocks = []
    for _ in range(20):
        tip = self.build_block(tip)
        blocks.append(tip)
    self.pynode.send_blocks_and_test(blocks, node, timeout=10)
    self.spendable_outputs = deque(block.vtx[0] for block in blocks)

    logging.info("Mature the blocks.")
    node.generate(100)
    tip = self.getbestblock(node)

    # To make compact and fast-to-verify transactions, we'll use
    # CHECKDATASIG over and over with the same data.
    # (Using the same stuff over and over again means we get to hit the
    # node's signature cache and don't need to make new signatures every
    # time.)
    cds_message = b''
    # r=1 and s=1 ecdsa, the minimum values.
    cds_signature = bytes.fromhex('3006020101020101')
    # Recovered pubkey
    cds_pubkey = bytes.fromhex(
        '03089b476b570d66fad5a20ae6188ebbaf793a4c2a228c65f3d79ee8111d56c932')

    fundings = []

    def make_spend(scriptpubkey, scriptsig):
        # Add a funding tx to fundings, and return a tx spending that using
        # scriptsig.
        logging.debug(
            "Gen tx with locking script {} unlocking script {} .".format(
                scriptpubkey.hex(), scriptsig.hex()))

        # get funds locked with OP_1
        sourcetx = self.spendable_outputs.popleft()
        # make funding that forwards to scriptpubkey
        fundtx = create_transaction(sourcetx, scriptpubkey)
        fundings.append(fundtx)

        # make the spending
        tx = CTransaction()
        tx.vin.append(CTxIn(COutPoint(fundtx.sha256, 1), scriptsig))
        tx.vout.append(CTxOut(0, CScript([OP_RETURN])))
        pad_tx(tx)
        tx.rehash()
        return tx

    logging.info("Generating txes used in this test")

    # "Good" txns that pass our rule:
    goodtxes = [
        # most dense allowed input -- 2 sigchecks with a 26-byte scriptsig.
        make_spend(
            CScript([
                cds_message, cds_pubkey, OP_3DUP, OP_CHECKDATASIGVERIFY,
                OP_CHECKDATASIGVERIFY
            ]), CScript([b'x' * 16, cds_signature])),
        # 4 sigchecks with a 112-byte scriptsig, just at the limit for this
        # sigchecks count.
        make_spend(
            CScript([
                cds_message, cds_pubkey, OP_3DUP, OP_CHECKDATASIGVERIFY,
                OP_3DUP, OP_CHECKDATASIGVERIFY, OP_3DUP,
                OP_CHECKDATASIGVERIFY, OP_CHECKDATASIGVERIFY
            ]), CScript([b'x' * 101, cds_signature])),
        # "nice" transaction - 1 sigcheck with 9-byte scriptsig.
        make_spend(CScript([cds_message, cds_pubkey, OP_CHECKDATASIG]),
                   CScript([cds_signature])),
        # 1 sigcheck with 0-byte scriptsig.
        make_spend(
            CScript(
                [cds_signature, cds_message, cds_pubkey, OP_CHECKDATASIG]),
            CScript([])),
    ]

    badtxes = [
        # "Bad" txns:
        # 2 sigchecks with a 25-byte scriptsig, just 1 byte too short.
        make_spend(
            CScript([
                cds_message, cds_pubkey, OP_3DUP, OP_CHECKDATASIGVERIFY,
                OP_CHECKDATASIGVERIFY
            ]), CScript([b'x' * 15, cds_signature])),
        # 4 sigchecks with a 111-byte scriptsig, just 1 byte too short.
        make_spend(
            CScript([
                cds_message, cds_pubkey, OP_3DUP, OP_CHECKDATASIGVERIFY,
                OP_3DUP, OP_CHECKDATASIGVERIFY, OP_3DUP,
                OP_CHECKDATASIGVERIFY, OP_CHECKDATASIGVERIFY
            ]), CScript([b'x' * 100, cds_signature])),
    ]

    goodtxids = set(t.hash for t in goodtxes)
    badtxids = set(t.hash for t in badtxes)

    logging.info("Funding the txes")
    tip = self.build_block(tip, fundings)
    self.pynode.send_blocks_and_test([tip], node, timeout=10)

    # Activation tests
    logging.info("Approach to just before upgrade activation")
    # Move our clock to the upgrade time so we will accept such
    # future-timestamped blocks.
    node.setmocktime(MAY2020_START_TIME + 10)
    # Mine six blocks with timestamp starting at
    # SIGCHECKS_ACTIVATION_TIME-1
    blocks = []
    for i in range(-1, 5):
        tip = self.build_block(tip, nTime=MAY2020_START_TIME + i)
        blocks.append(tip)
    self.pynode.send_blocks_and_test(blocks, node, timeout=10)
    assert_equal(node.getblockchaininfo()['mediantime'],
                 MAY2020_START_TIME - 1)

    logging.info(
        "The next block will activate, but the activation block itself must follow old rules")

    logging.info("Send all the transactions just before upgrade")
    self.pynode.send_txs_and_test(goodtxes, node)
    self.pynode.send_txs_and_test(badtxes, node)
    assert_equal(set(node.getrawmempool()), goodtxids | badtxids)

    # ask the node to mine a block, it should include the bad txes.
    [blockhash] = node.generate(1)
    assert_equal(set(node.getblock(blockhash, 1)['tx'][1:]),
                 goodtxids | badtxids)
    assert_equal(node.getrawmempool(), [])

    # discard that block
    node.invalidateblock(blockhash)
    waitFor(30, lambda: set(node.getrawmempool()) == goodtxids | badtxids)

    logging.info("Mine the activation block itself")
    tip = self.build_block(tip)
    self.pynode.send_blocks_and_test([tip], node, timeout=10)
    logging.info("We have activated!")
    assert_equal(node.getblockchaininfo()['mediantime'], MAY2020_START_TIME)

    logging.info(
        "The high-sigchecks transactions got evicted but the good ones are still around")
    # On mismatch the lambda logs the current mempool (and returns None,
    # i.e. keeps waiting), so a timeout comes with diagnostics.
    waitFor(
        20, lambda: True if set(node.getrawmempool()) == goodtxids else
        logging.info(node.getrawmempool()))

    logging.info(
        "Now the high-sigchecks transactions are rejected from mempool.")
    # try sending some of the bad txes again after the upgrade
    for tx in badtxes:
        self.check_for_no_ban_on_rejected_tx(
            node, tx, None)  # No reject reason because we don't log on rejection
        assert_raises_rpc_error(-26, TX_INPUT_SIGCHECKS_ERROR,
                                node.sendrawtransaction, ToHex(tx))

    logging.info("But they can still be mined!")
    # Now make a block with all the txes, they still are accepted in blocks!
    tip = self.build_block(tip, goodtxes + badtxes)
    self.pynode.send_blocks_and_test([tip], node, timeout=10)
    assert_equal(node.getbestblockhash(), tip.hash)
def run_test(self):
    """Full activation cycle test for OP_REVERSEBYTES (May 2020 upgrade).

    Checks pre-upgrade rejection/ban behavior, activation at the MTP
    boundary, post-upgrade acceptance, and mempool behavior across
    invalidation/reconsideration and a deactivating reorg.
    """
    logging.info("Initializing test directory " + self.options.tmpdir)
    node = self.nodes[0]
    self.bootstrap_p2p()
    tip = self.get_best_block(node)

    logging.info("Create some blocks with OP_1 coinbase for spending.")
    blocks = []
    for _ in range(10):
        tip = self.build_block(tip)
        blocks.append(tip)
    self.p2p.send_blocks_and_test(blocks, node, success=True)
    spendable_outputs = [block.vtx[0] for block in blocks]

    logging.info("Mature the blocks and get out of IBD.")
    node.generate(100)
    tip = self.get_best_block(node)

    logging.info(
        "Set up spending transactions to test and mine the funding transactions.")

    # Generate a key pair
    privkeybytes = b"xyzxyzhh" * 4
    private_key = CECKey()
    private_key.set_secretbytes(privkeybytes)
    # get uncompressed public key serialization
    public_key = private_key.get_pubkey()

    def create_fund_and_spend_tx():
        # Build a funding tx locking coins with OP_REVERSEBYTES and a tx
        # spending it; returns (spend, fund).
        spend_from = spendable_outputs.pop()
        value = spend_from.vout[0].nValue

        # Reversed data
        data = bytes.fromhex('0123456789abcdef')
        rev_data = bytes(reversed(data))

        # Lockscript: provide a bytestring that reverses to X
        script = CScript([OP_REVERSEBYTES, rev_data, OP_EQUAL])

        # Fund transaction: REVERSEBYTES <reversed(x)> EQUAL
        tx_fund = create_tx_with_script(spend_from, 0, b'', value, script)
        tx_fund.rehash()

        # Spend transaction: <x>
        tx_spend = CTransaction()
        tx_spend.vout.append(
            CTxOut(value - 1000, CScript([b'x' * 100, OP_RETURN])))
        tx_spend.vin.append(CTxIn(COutPoint(tx_fund.sha256, 0), b''))
        tx_spend.vin[0].scriptSig = CScript([data])
        tx_spend.rehash()
        return tx_spend, tx_fund

    # Create funding/spending transaction pair
    tx_reversebytes_spend, tx_reversebytes_fund = create_fund_and_spend_tx()

    # Mine funding transaction into block. Pre-upgrade output scripts can have
    # OP_REVERSEBYTES and still be fully valid, but they cannot spend it.
    tip = self.build_block(tip, [tx_reversebytes_fund])
    self.p2p.send_blocks_and_test([tip], node)

    logging.info("Start pre-upgrade tests")
    assert node.getblockheader(
        node.getbestblockhash())['mediantime'] < MAY2020_START_TIME

    logging.info(
        "Sending rejected transaction (bad opcode) via RPC (doesn't ban)")
    assert_raises_rpc_error(-26, PRE_UPGRADE_BAD_OPCODE_ERROR,
                            node.sendrawtransaction,
                            ToHex(tx_reversebytes_spend))

    logging.info(
        "Sending rejected transaction (bad opcode) via net (no banning)")
    self.check_for_no_ban_on_rejected_tx(tx_reversebytes_spend,
                                         PRE_UPGRADE_BAD_OPCODE_ERROR)

    logging.info(
        "Sending invalid transactions in blocks (bad inputs, and get banned)")
    self.check_for_ban_on_rejected_block(
        self.build_block(tip, [tx_reversebytes_spend]), BAD_INPUTS_ERROR)

    logging.info("Start activation tests")
    logging.info("Approach to just before upgrade activation")

    # Move our clock to the upgrade time so we will accept such
    # future-timestamped blocks.
    node.setmocktime(MAY2020_START_TIME)

    # Mine six blocks with timestamp starting at MAY2020_START_TIME-1
    blocks = []
    for i in range(-1, 5):
        tip = self.build_block(tip, n_time=MAY2020_START_TIME + i)
        blocks.append(tip)
    self.p2p.send_blocks_and_test(blocks, node)

    # Ensure our MTP is MAY2020_START_TIME-1, just before activation.
    # BUG FIX: the original passed the expected value as a separate
    # positional argument and the lambda merely returned 'mediantime',
    # which is always truthy -- the wait succeeded without checking
    # anything. The lambda must perform the comparison itself (as the
    # analogous check after activation below does).
    waitFor(10, lambda: node.getblockchaininfo()['mediantime'] ==
            MAY2020_START_TIME - 1)

    logging.info(
        "The next block will activate, but the activation block itself must follow old rules")
    self.check_for_ban_on_rejected_block(
        self.build_block(tip, [tx_reversebytes_spend]), BAD_INPUTS_ERROR)

    # Save pre-upgrade block, we will reorg based on this block later
    pre_upgrade_block = tip

    logging.info("Mine the activation block itself")
    tip = self.build_block(tip, [])
    self.p2p.send_blocks_and_test([tip], node)
    logging.info("We have activated!")

    # Ensure our MTP is MAY2020_START_TIME, exactly at activation
    waitFor(10, lambda: node.getblockchaininfo()['mediantime'] ==
            MAY2020_START_TIME)
    # Ensure empty mempool
    waitFor(10, lambda: node.getrawmempool() == [])

    # Save upgrade block, will invalidate and reconsider this later
    upgrade_block = tip

    logging.info(
        "Submitting a new OP_REVERSEBYTES tx via net, and mining it in a block")
    # Send OP_REVERSEBYTES tx
    self.p2p.send_txs_and_test([tx_reversebytes_spend], node)
    # Verify OP_REVERSEBYTES tx is in mempool
    waitFor(10, lambda: set(node.getrawmempool()) ==
            {tx_reversebytes_spend.hash})
    # Mine OP_REVERSEBYTES tx into block
    tip = self.build_block(tip, [tx_reversebytes_spend])
    self.p2p.send_blocks_and_test([tip], node)

    # Save post-upgrade block, will invalidate and reconsider this later
    post_upgrade_block = tip

    logging.info("Start deactivation tests")
    logging.info(
        "Invalidating the post-upgrade blocks returns OP_REVERSEBYTES transaction to mempool")
    node.invalidateblock(post_upgrade_block.hash)
    waitFor(5, lambda: node.getbestblockhash() == upgrade_block.hash)
    waitFor(5, lambda: len(node.getrawmempool()) > 0)
    assert_equal(set(node.getrawmempool()), {tx_reversebytes_spend.hash})

    logging.info(
        "Invalidating the upgrade block evicts the OP_REVERSEBYTES transaction")
    node.invalidateblock(upgrade_block.hash)
    assert_equal(set(node.getrawmempool()), set())

    logging.info("Return to our tip")
    try:
        node.reconsiderblock(upgrade_block.hash)
        node.reconsiderblock(post_upgrade_block.hash)
    except Exception:
        # Workaround for reconsiderblock bug;
        # Even though the block reconsidered was valid, if another block
        # is also reconsidered and fails, the call will return failure.
        pass
    waitFor(10, lambda: node.getbestblockhash() == tip.hash)
    waitFor(10, lambda: node.getrawmempool() == [])

    logging.info("Create an empty-block reorg that forks from pre-upgrade")
    tip = pre_upgrade_block
    blocks = []
    for _ in range(10):
        tip = self.build_block(tip)
        blocks.append(tip)
    self.p2p.send_blocks_and_test(blocks, node)

    logging.info(
        "Transactions from orphaned blocks are sent into mempool ready to be mined again, "
        "including upgrade-dependent ones even though the fork deactivated and reactivated "
        "the upgrade.")
    waitFor(10, lambda: set(node.getrawmempool()) ==
            {tx_reversebytes_spend.hash})
    node.generate(1)
    tip = self.get_best_block(node)
    assert (set(tx.rehash() for tx in tip.vtx) >=
            {tx_reversebytes_spend.hash})
def run_test(self):
    """Happy-path OP_REVERSEBYTES test.

    Mines an output locked with REVERSEBYTES <reversed(x)> EQUAL, then
    spends it with <x> via the network and mines the spend into a block.
    """
    logging.info("Initializing test directory "+self.options.tmpdir)
    node = self.nodes[0]
    self.bootstrap_p2p()
    tip = self.get_best_block(node)

    logging.info("Create some blocks with OP_1 coinbase for spending.")
    blocks = []
    for _ in range(10):
        tip = self.build_block(tip)
        blocks.append(tip)
    self.p2p.send_blocks_and_test(blocks, node, success=True)
    spendable_outputs = [block.vtx[0] for block in blocks]

    logging.info("Mature the blocks and get out of IBD.")
    node.generate(100)
    tip = self.get_best_block(node)

    logging.info(
        "Set up spending transactions to test and mine the funding transactions.")

    # Generate a key pair
    privkeybytes = b"xyzxyzhh" * 4
    private_key = CECKey()
    private_key.set_secretbytes(privkeybytes)
    # get uncompressed public key serialization
    # (note: not referenced further in this test)
    public_key = private_key.get_pubkey()

    # Create funding/spending transaction pair
    spend_from = spendable_outputs.pop()
    value = spend_from.vout[0].nValue

    # Reversed data
    data = bytes.fromhex('0123456789abcdef')
    rev_data = bytes(reversed(data))

    # Lockscript: provide a bytestring that reverses to X
    script = CScript([OP_REVERSEBYTES, rev_data, OP_EQUAL])

    # Fund transaction: REVERSEBYTES <reversed(x)> EQUAL
    tx_reversebytes_fund = create_tx_with_script(spend_from, 0, b'', value,
                                                 script)
    tx_reversebytes_fund.rehash()

    # Spend transaction: <x>
    tx_reversebytes_spend = CTransaction()
    tx_reversebytes_spend.vout.append(
        CTxOut(value - 1000, CScript([b'x' * 100, OP_RETURN])))
    tx_reversebytes_spend.vin.append(
        CTxIn(COutPoint(tx_reversebytes_fund.sha256, 0), b''))
    tx_reversebytes_spend.vin[0].scriptSig = CScript([data])
    tx_reversebytes_spend.rehash()

    # Mine funding transaction into block. Pre-upgrade output scripts can have
    # OP_REVERSEBYTES and still be fully valid, but they cannot spend it.
    tip = self.build_block(tip, [tx_reversebytes_fund])
    self.p2p.send_blocks_and_test([tip], node)

    logging.info(
        "Submitting a new OP_REVERSEBYTES tx via net, and mining it in a block")
    # Send OP_REVERSEBYTES tx
    self.p2p.send_txs_and_test([tx_reversebytes_spend], node)

    # Verify OP_REVERSEBYTES tx is in mempool
    waitFor(10, lambda: set(node.getrawmempool()) ==
            {tx_reversebytes_spend.hash})

    # Mine OP_REVERSEBYTES tx into block
    tip = self.build_block(tip, [tx_reversebytes_spend])
    self.p2p.send_blocks_and_test([tip], node)
def run_test(self):
    """Electrum server must follow the node across a 50-block reorg."""
    n = self.nodes[0]

    # waitFor raises on timeout, which fails the test.
    def expect_synced_height():
        waitFor(10, lambda: compare(n, "index_height", n.getblockcount()))

    def expect_mempool_count(count):
        waitFor(10, lambda: compare(n, "mempool_count", count, True))

    n.generate(200)
    expect_synced_height()
    expect_mempool_count(0)

    # Put one wallet transaction into the mempool.
    n.sendtoaddress(n.getnewaddress(), 1)
    assert_equal(1, len(n.getrawmempool()))
    expect_mempool_count(1)

    # Confirm it and let the server catch up.
    blocks = n.generate(50)
    expect_synced_height()
    expect_mempool_count(0)

    logging.info("invalidating %d blocks", len(blocks))
    n.invalidateblock(blocks[0])
    # electrum server should trim its chain as well and see our
    # transaction go back into mempool
    expect_synced_height()
    expect_mempool_count(1)

    # Re-confirm on the new chain.
    n.generate(50)
    expect_synced_height()
    expect_mempool_count(0)
def run_test(self):
    """Nov 2019 Schnorr-multisig upgrade test.

    Exercises 1-of-1 CHECKMULTISIG spends with every dummy/signature
    combination: pre-upgrade rejection and ban behavior, the activation
    boundary, post-upgrade acceptance, and mempool contents across
    invalidation, reconsideration, and a deactivating reorg.
    """
    node = self.nodes[0]
    self.bootstrap_p2p()
    tip = self.getbestblock(node)

    logging.info("Create some blocks with OP_1 coinbase for spending.")
    blocks = []
    for _ in range(10):
        tip = self.build_block(tip)
        blocks.append(tip)
    self.p2p.send_blocks_and_test(blocks, node, success=True)
    spendable_outputs = [block.vtx[0] for block in blocks]

    logging.info("Mature the blocks and get out of IBD.")
    node.generate(100)
    tip = self.getbestblock(node)

    logging.info("Setting up spends to test and mining the fundings.")
    fundings = []

    # Generate a key pair
    privkeybytes = b"Schnorr!" * 4
    private_key = CECKey()
    private_key.set_secretbytes(privkeybytes)
    # get uncompressed public key serialization
    public_key = private_key.get_pubkey()

    def create_fund_and_spend_tx(dummy=OP_0, sigtype='ecdsa'):
        # Build a 1-of-1 CHECKMULTISIG funding tx (recorded in `fundings`)
        # and return a tx spending it with the given dummy element and
        # signature type.
        spendfrom = spendable_outputs.pop()
        script = CScript([OP_1, public_key, OP_1, OP_CHECKMULTISIG])
        value = spendfrom.vout[0].nValue

        # Fund transaction
        txfund = create_transaction(spendfrom, 0, b'', value, script)
        txfund.rehash()
        fundings.append(txfund)

        # Spend transaction
        txspend = CTransaction()
        txspend.vout.append(
            CTxOut(value-1000, CScript([OP_TRUE])))
        txspend.vin.append(
            CTxIn(COutPoint(txfund.sha256, 0), b''))

        # Sign the transaction
        sighashtype = SIGHASH_ALL | SIGHASH_FORKID
        hashbyte = bytes([sighashtype & 0xff])
        sighash = SignatureHashForkId(
            script, txspend, 0, sighashtype, value)
        if sigtype == 'schnorr':
            txsig = schnorr.sign(privkeybytes, sighash) + hashbyte
        elif sigtype == 'ecdsa':
            txsig = private_key.sign(sighash) + hashbyte
        txspend.vin[0].scriptSig = CScript([dummy, txsig])
        txspend.rehash()
        return txspend

    # two of these transactions, which are valid both before and after upgrade.
    ecdsa0tx = create_fund_and_spend_tx(OP_0, 'ecdsa')
    ecdsa0tx_2 = create_fund_and_spend_tx(OP_0, 'ecdsa')

    # two of these, which are nonstandard before upgrade and invalid after.
    ecdsa1tx = create_fund_and_spend_tx(OP_1, 'ecdsa')
    ecdsa1tx_2 = create_fund_and_spend_tx(OP_1, 'ecdsa')

    # this one is always invalid.
    schnorr0tx = create_fund_and_spend_tx(OP_0, 'schnorr')

    # this one is only going to be valid after the upgrade.
    schnorr1tx = create_fund_and_spend_tx(OP_1, 'schnorr')

    # Mine all funding transactions so the spends become usable.
    tip = self.build_block(tip, fundings)
    self.p2p.send_blocks_and_test([tip], node)

    logging.info("Start preupgrade tests")
    assert node.getblockheader(
        node.getbestblockhash())['mediantime'] < NOV2019_START_TIME

    logging.info("Sending rejected transactions via RPC")
    assert_raises_rpc_error(-26, PREUPGRADE_ECDSA_NULLDUMMY_ERROR,
                            node.sendrawtransaction, ToHex(ecdsa1tx))
    assert_raises_rpc_error(-26, SCHNORR_LEGACY_MULTISIG_ERROR,
                            node.sendrawtransaction, ToHex(schnorr0tx))
    assert_raises_rpc_error(-26, PREUPGRADE_SCHNORR_MULTISIG_ERROR,
                            node.sendrawtransaction, ToHex(schnorr1tx))

    logging.info(
        "Sending rejected transactions via net (banning depending on situation)")
    self.check_for_no_ban_on_rejected_tx(
        ecdsa1tx, PREUPGRADE_ECDSA_NULLDUMMY_ERROR)
    self.check_for_ban_on_rejected_tx(
        schnorr0tx, SCHNORR_LEGACY_MULTISIG_ERROR)
    self.check_for_no_ban_on_rejected_tx(
        schnorr1tx, PREUPGRADE_SCHNORR_MULTISIG_ERROR)

    logging.info(
        "Sending invalid transactions in blocks (and get banned!)")
    self.check_for_ban_on_rejected_block(
        self.build_block(tip, [schnorr0tx]), BADSIG_ERROR)
    self.check_for_ban_on_rejected_block(
        self.build_block(tip, [schnorr1tx]), BADSIG_ERROR)

    logging.info("Sending valid transaction via net, then mining it")
    self.p2p.send_txs_and_test([ecdsa0tx], node)
    waitFor(10, lambda: node.getrawmempool() == [ecdsa0tx.hash])
    tip = self.build_block(tip, [ecdsa0tx])
    self.p2p.send_blocks_and_test([tip], node)
    waitFor(10, lambda: node.getrawmempool() == [])

    # Activation tests
    logging.info("Approach to just before upgrade activation")
    # Move our clock to the upgrade time so we will accept such
    # future-timestamped blocks.
    node.setmocktime(NOV2019_START_TIME)
    # Mine six blocks with timestamp starting at NOV2019_START_TIME-1
    blocks = []
    for i in range(-1, 5):
        tip = self.build_block(tip, nTime=NOV2019_START_TIME + i)
        blocks.append(tip)
    self.p2p.send_blocks_and_test(blocks, node)
    waitFor(10, lambda: node.getblockchaininfo()[
        'mediantime'] == NOV2019_START_TIME - 1)

    logging.info(
        "The next block will activate, but the activation block itself must follow old rules")
    self.check_for_ban_on_rejected_block(
        self.build_block(tip, [schnorr0tx]), BADSIG_ERROR)

    logging.info(
        "Send a lecacy ECDSA multisig into mempool, we will check after upgrade to make sure it didn't get cleaned out unnecessarily.")
    self.p2p.send_txs_and_test([ecdsa0tx_2], node)
    waitFor(10, lambda: node.getrawmempool() == [ecdsa0tx_2.hash])

    # save this tip for later
    preupgrade_block = tip

    logging.info(
        "Mine the activation block itself, including a legacy nulldummy violation at the last possible moment")
    tip = self.build_block(tip, [ecdsa1tx])
    self.p2p.send_blocks_and_test([tip], node)

    logging.info("We have activated!")
    waitFor(10, lambda: node.getblockchaininfo()[
        'mediantime'] == NOV2019_START_TIME)
    waitFor(10, lambda: node.getrawmempool() == [ecdsa0tx_2.hash])

    # save this tip for later
    upgrade_block = tip

    logging.info(
        "Trying to mine a legacy nulldummy violation, but we are just barely too late")
    self.check_for_ban_on_rejected_block(
        self.build_block(tip, [ecdsa1tx_2]), BADSIG_ERROR)
    logging.info(
        "If we try to submit it by mempool or RPC, the error code has changed but we still aren't banned")
    assert_raises_rpc_error(-26, POSTUPGRADE_ECDSA_NULLDUMMY_ERROR,
                            node.sendrawtransaction, ToHex(ecdsa1tx_2))
    self.check_for_no_ban_on_rejected_tx(
        ecdsa1tx_2, POSTUPGRADE_ECDSA_NULLDUMMY_ERROR)

    logging.info(
        "Submitting a new Schnorr-multisig via net, and mining it in a block")
    self.p2p.send_txs_and_test([schnorr1tx], node)
    waitFor(10, lambda: set(node.getrawmempool()) ==
            {ecdsa0tx_2.hash, schnorr1tx.hash})
    tip = self.build_block(tip,
                           [schnorr1tx])
    self.p2p.send_blocks_and_test([tip], node)

    # save this tip for later
    postupgrade_block = tip

    logging.info(
        "That legacy ECDSA multisig is still in mempool, let's mine it")
    waitFor(10, lambda: node.getrawmempool() == [ecdsa0tx_2.hash])
    tip = self.build_block(tip, [ecdsa0tx_2])
    self.p2p.send_blocks_and_test([tip], node)
    waitFor(10, lambda: node.getrawmempool() == [])

    logging.info(
        "Trying Schnorr in legacy multisig remains invalid and banworthy as ever")
    self.check_for_ban_on_rejected_tx(
        schnorr0tx, SCHNORR_LEGACY_MULTISIG_ERROR)
    self.check_for_ban_on_rejected_block(
        self.build_block(tip, [schnorr0tx]), BADSIG_ERROR)

    # Deactivation tests
    logging.info(
        "Invalidating the post-upgrade blocks returns the transactions to mempool")
    node.invalidateblock(postupgrade_block.hash)
    waitFor(10, lambda: set(node.getrawmempool()) ==
            {ecdsa0tx_2.hash, schnorr1tx.hash})
    logging.info(
        "Invalidating the upgrade block evicts the transactions valid only after upgrade")
    node.invalidateblock(upgrade_block.hash)
    waitFor(10, lambda: set(node.getrawmempool()) == {ecdsa0tx_2.hash})

    logging.info("Return to our tip")
    try:
        node.reconsiderblock(upgrade_block.hash)
        node.reconsiderblock(postupgrade_block.hash)
    except Exception as e:
        # Workaround for reconsiderblock bug;
        # Even though the block reconsidered was valid, if another block
        # is also reconsidered and fails, the call will return failure.
        pass
    waitFor(10, lambda: node.getbestblockhash() == tip.hash)
    waitFor(10, lambda: node.getrawmempool() == [])

    logging.info(
        "Create an empty-block reorg that forks from pre-upgrade")
    tip = preupgrade_block
    blocks = []
    for _ in range(10):
        tip = self.build_block(tip)
        blocks.append(tip)
    self.p2p.send_blocks_and_test(blocks, node)

    logging.info("Transactions from orphaned blocks are sent into mempool ready to be mined again, including upgrade-dependent ones even though the fork deactivated and reactivated the upgrade.")
    waitFor(10, lambda: set(node.getrawmempool()) == {
        ecdsa0tx_2.hash, schnorr1tx.hash})
    node.generate(1)
    tip = self.getbestblock(node)
    assert set(tx.rehash() for tx in tip.vtx).issuperset(
        {ecdsa0tx_2.hash, schnorr1tx.hash})
def run_test(self):
    """End-to-end test of the per-tx / per-block sigchecks consensus limits.

    Phases:
      1. Set up a P2P connection and mine spendable OP_1 coinbases.
      2. Mine funding outputs that are sigcheck-dense when spent
         (CHECKDATASIG-based), and build 10000-sigcheck tx batches.
      3. Before activation: verify both a MAX_TX_SIGCHECK and a
         MAX_TX_SIGCHECK+1 transaction are accepted in a block.
      4. Activate the upgrade via mocktime/MTP and verify blocks/txs over
         the per-tx and per-block sigcheck limits are rejected (and that
         rejection is banworthy where expected), while blocks under the
         limits are accepted.
      5. Verify the node's own miner respects the block sigcheck budget,
         and that raising consensus.maxBlockSigChecks admits a previously
         rejected block.
    """
    node = self.nodes[0]
    self.pynode = P2PDataStore()
    self.connection = NodeConn('127.0.0.1', p2p_port(0), node, self.pynode)
    self.pynode.add_connection(self.connection)
    NetworkThread().start()
    self.pynode.wait_for_verack()

    # Get out of IBD
    node.generate(1)
    tip = self.getbestblock(node)

    logging.info("Create some blocks with OP_1 coinbase for spending.")
    blocks = []
    for _ in range(20):
        tip = self.build_block(tip)
        blocks.append(tip)
    self.pynode.send_blocks_and_test(blocks, node, success=True)
    self.spendable_outputs = deque(block.vtx[0] for block in blocks)

    logging.info("Mature the blocks.")
    node.generate(100)

    tip = self.getbestblock(node)

    # To make compact and fast-to-verify transactions, we'll use
    # CHECKDATASIG over and over with the same data.
    # (Using the same stuff over and over again means we get to hit the
    # node's signature cache and don't need to make new signatures every
    # time.)
    cds_message = b''
    # r=1 and s=1 ecdsa, the minimum values.
    cds_signature = bytes.fromhex('3006020101020101')
    # Recovered pubkey
    cds_pubkey = bytes.fromhex(
        '03089b476b570d66fad5a20ae6188ebbaf793a4c2a228c65f3d79ee8111d56c932')

    def minefunding2(n):
        """ Mine a block with a bunch of outputs that are very dense
        sigchecks when spent (2 sigchecks each); return the inputs that can
        be used to spend. """
        cds_scriptpubkey = CScript([
            cds_message, cds_pubkey, OP_3DUP, OP_CHECKDATASIGVERIFY,
            OP_CHECKDATASIGVERIFY])
        # The scriptsig is carefully padded to have size 26, which is the
        # shortest allowed for 2 sigchecks for mempool admission.
        # The resulting inputs have size 67 bytes, 33.5 bytes/sigcheck.
        cds_scriptsig = CScript([b'x' * 16, cds_signature])
        assert_equal(len(cds_scriptsig), 26)

        logging.debug(
            "Gen {} with locking script {} unlocking script {} .".format(
                n, cds_scriptpubkey.hex(), cds_scriptsig.hex()))

        tx = self.spendable_outputs.popleft()
        usable_inputs = []
        txes = []
        for i in range(n):
            tx = create_transaction(
                tx, cds_scriptpubkey, bytes([OP_TRUE]) if i == 0 else b"")
            txes.append(tx)
            usable_inputs.append(
                CTxIn(COutPoint(tx.sha256, 1), cds_scriptsig))
        newtip = self.build_block(tip, txes)
        self.pynode.send_blocks_and_test([newtip], node, timeout=10)
        return usable_inputs, newtip

    logging.info("Funding special coins that have high sigchecks")

    # mine 5000 funded outputs (10000 sigchecks)
    # will be used pre-activation and post-activation
    usable_inputs, tip = minefunding2(5000)
    # assemble them into 50 txes with 100 inputs each (200 sigchecks)
    submittxes_1 = []
    while len(usable_inputs) >= 100:
        tx = CTransaction()
        tx.vin = [usable_inputs.pop() for _ in range(100)]
        tx.vout = [CTxOut(0, CScript([OP_RETURN]))]
        tx.rehash()
        submittxes_1.append(tx)

    # mine 5000 funded outputs (10000 sigchecks)
    # will be used post-activation
    usable_inputs, tip = minefunding2(5000)
    # assemble them into 50 txes with 100 inputs each (200 sigchecks)
    submittxes_2 = []
    while len(usable_inputs) >= 100:
        tx = CTransaction()
        tx.vin = [usable_inputs.pop() for _ in range(100)]
        tx.vout = [CTxOut(0, CScript([OP_RETURN]))]
        tx.rehash()
        submittxes_2.append(tx)

    # Check high sigcheck transactions
    logging.info("Create transaction that have high sigchecks")

    fundings = []

    def make_spend(sigcheckcount):
        # Add a funding tx to fundings, and return a tx spending that using
        # scriptsig.
        logging.debug("Gen tx with {} sigchecks.".format(sigcheckcount))

        def get_script_with_sigcheck(count):
            # CHECKDATASIG script with exactly `count` sigchecks.
            return CScript([cds_message, cds_pubkey] +
                           (count - 1) * [OP_3DUP, OP_CHECKDATASIGVERIFY] +
                           [OP_CHECKDATASIG])

        # get funds locked with OP_1
        sourcetx = self.spendable_outputs.popleft()
        # make funding that forwards to scriptpubkey
        # Outputs carry at most 30 sigchecks each; the first output takes
        # the remainder so the total is exactly `sigcheckcount`.
        last_sigcheck_count = ((sigcheckcount - 1) % 30) + 1
        fundtx = create_transaction(
            sourcetx, get_script_with_sigcheck(last_sigcheck_count))

        fill_sigcheck_script = get_script_with_sigcheck(30)

        remaining_sigcheck = sigcheckcount
        while remaining_sigcheck > 30:
            fundtx.vout[0].nValue -= 1000
            fundtx.vout.append(CTxOut(100, bytes(fill_sigcheck_script)))
            remaining_sigcheck -= 30

        fundtx.rehash()
        fundings.append(fundtx)

        # make the spending
        scriptsig = CScript([cds_signature])

        tx = CTransaction()
        tx.vin.append(CTxIn(COutPoint(fundtx.sha256, 1), scriptsig))

        input_index = 2
        remaining_sigcheck = sigcheckcount
        while remaining_sigcheck > 30:
            tx.vin.append(
                CTxIn(COutPoint(fundtx.sha256, input_index), scriptsig))
            remaining_sigcheck -= 30
            input_index += 1

        tx.vout.append(CTxOut(0, CScript([OP_RETURN])))
        pad_tx(tx)
        tx.rehash()
        return tx

    # Create transactions with many sigchecks.
    good_tx = make_spend(MAX_TX_SIGCHECK)
    bad_tx = make_spend(MAX_TX_SIGCHECK + 1)
    tip = self.build_block(tip, fundings)
    self.pynode.send_blocks_and_test([tip], node)

    # Both tx are accepted before the activation.
    pre_activation_sigcheck_block = self.build_block(
        tip, [good_tx, bad_tx])
    self.pynode.send_blocks_and_test([pre_activation_sigcheck_block], node)
    node.invalidateblock(pre_activation_sigcheck_block.hash)
    # after block is invalidated these tx are put back into the mempool.
    # Test uses them later so evict.
    waitFor(10, lambda: node.getmempoolinfo()["size"] == 2)
    node.evicttransaction(good_tx.hash)
    node.evicttransaction(bad_tx.hash)

    # Activation tests

    logging.info("Approach to just before upgrade activation")

    # Move our clock to the upgrade time so we will accept such
    # future-timestamped blocks.
    node.setmocktime(SIGCHECKS_ACTIVATION_TIME + 10)

    # Mine six blocks with timestamp starting at
    # SIGCHECKS_ACTIVATION_TIME-1
    blocks = []
    for i in range(-1, 5):
        tip = self.build_block(tip, nTime=SIGCHECKS_ACTIVATION_TIME + i)
        blocks.append(tip)
    self.pynode.send_blocks_and_test(blocks, node, timeout=TIMEOUT)
    assert_equal(node.getblockchaininfo()['mediantime'],
                 SIGCHECKS_ACTIVATION_TIME - 1)

    logging.info(
        "The next block will activate, but the activation block itself must follow old rules")

    # Send the 50 txes and get the node to mine as many as possible (it should do all)
    # The node is happy mining and validating a 10000 sigcheck block before
    # activation.
    self.pynode.send_txs_and_test(submittxes_1, node, timeout=TIMEOUT)
    [blockhash] = node.generate(1)
    assert_equal(set(node.getblock(blockhash, 1)["tx"][1:]),
                 {t.hash for t in submittxes_1})

    # We have activated, but let's invalidate that.
    assert_equal(node.getblockchaininfo()['mediantime'],
                 SIGCHECKS_ACTIVATION_TIME)
    node.invalidateblock(blockhash)

    # Try again manually and invalidate that too
    goodblock = self.build_block(tip, submittxes_1)
    self.pynode.send_blocks_and_test([goodblock], node, timeout=TIMEOUT)
    node.invalidateblock(goodblock.hash)
    # All transactions should be back in mempool: validation is very slow
    # in debug build
    waitFor(
        60,
        lambda: set(node.getrawmempool()) == {t.hash for t in submittxes_1})

    logging.info("Mine the activation block itself")
    tip = self.build_block(tip)
    self.pynode.send_blocks_and_test([tip], node, timeout=TIMEOUT)

    logging.info("We have activated!")
    assert_equal(node.getblockchaininfo()['mediantime'],
                 SIGCHECKS_ACTIVATION_TIME)

    # All transactions get re-evaluated to count sigchecks, so wait for them
    waitFor(
        60,
        lambda: set(node.getrawmempool()) == {t.hash for t in submittxes_1})

    logging.info(
        "Try a block with a transaction going over the limit (limit: {})".
        format(MAX_TX_SIGCHECK))
    bad_tx_block = self.build_block(tip, [bad_tx])
    check_for_ban_on_rejected_block(
        self.pynode, node, bad_tx_block,
        reject_reason=BLOCK_SIGCHECKS_BAD_TX_SIGCHECKS)

    logging.info(
        "Try a block with a transaction just under the limit (limit: {})".
        format(MAX_TX_SIGCHECK))
    good_tx_block = self.build_block(tip, [good_tx])
    self.pynode.send_blocks_and_test([good_tx_block], node, timeout=TIMEOUT)
    node.invalidateblock(good_tx_block.hash)

    # save this tip for later
    # ~ upgrade_block = tip

    # Transactions still in pool:
    waitFor(
        60,
        lambda: set(node.getrawmempool()) == {t.hash for t in submittxes_1})

    logging.info(
        "Try sending 10000-sigcheck blocks after activation (limit: {})".
        format(MAXBLOCKSIZE // BLOCK_MAXBYTES_MAXSIGCHECKS_RATIO))
    # Send block with same txes we just tried before activation
    badblock = self.build_block(tip, submittxes_1)
    check_for_ban_on_rejected_block(
        self.pynode, node, badblock,
        reject_reason="Invalid block due to bad-blk-sigchecks",
        expect_ban=True)

    logging.info(
        "There are too many sigchecks in mempool to mine in a single block. Make sure the node won't mine invalid blocks. Num tx: %s" % str(
            node.getmempoolinfo()))
    # NOTE: the result of generate() is intentionally not kept; we re-fetch
    # the best block below instead. (Previously assigned to an unused `blk`.)
    node.generate(1)
    tip = self.getbestblock(node)
    # only 39 txes got mined.
    assert_equal(len(node.getrawmempool()), 11)

    logging.info(
        "Try sending 10000-sigcheck block with fresh transactions after activation (limit: {})"
        .format(MAXBLOCKSIZE // BLOCK_MAXBYTES_MAXSIGCHECKS_RATIO))
    # Note: in the following tests we'll be bumping timestamp in order
    # to bypass any kind of 'bad block' cache on the node, and get a
    # fresh evaluation each time.

    # Try another block with 10000 sigchecks but all fresh transactions
    badblock = self.build_block(tip, submittxes_2,
                                nTime=SIGCHECKS_ACTIVATION_TIME + 5)
    check_for_ban_on_rejected_block(
        self.pynode, node, badblock,
        reject_reason=BLOCK_SIGCHECKS_BAD_BLOCK_SIGCHECKS)

    # Send the same txes again with different block hash. Currently we don't
    # cache valid transactions in invalid blocks so nothing changes.
    badblock = self.build_block(tip, submittxes_2,
                                nTime=SIGCHECKS_ACTIVATION_TIME + 6)
    check_for_ban_on_rejected_block(
        self.pynode, node, badblock,
        reject_reason=BLOCK_SIGCHECKS_BAD_BLOCK_SIGCHECKS)

    # Put all the txes in mempool, in order to get them cached:
    self.pynode.send_txs_and_test(submittxes_2, node, timeout=TIMEOUT)

    # Send them again, the node still doesn't like it. But the log
    # error message has now changed because the txes failed from cache.
    badblock = self.build_block(tip, submittxes_2,
                                nTime=SIGCHECKS_ACTIVATION_TIME + 7)
    check_for_ban_on_rejected_block(
        self.pynode, node, badblock,
        reject_reason=BLOCK_SIGCHECKS_BAD_BLOCK_SIGCHECKS)

    logging.info(
        "Try sending 8000-sigcheck block after activation (limit: {})".
        format(MAXBLOCKSIZE // BLOCK_MAXBYTES_MAXSIGCHECKS_RATIO))
    badblock = self.build_block(tip, submittxes_2[:40],
                                nTime=SIGCHECKS_ACTIVATION_TIME + 5)
    check_for_ban_on_rejected_block(
        self.pynode, node, badblock,
        reject_reason=BLOCK_SIGCHECKS_BAD_BLOCK_SIGCHECKS)
    # redundant, but just to mirror the following test...
    node.set("consensus.maxBlockSigChecks=%d" % MAX_BLOCK_SIGCHECKS)

    logging.info(
        "Bump the excessiveblocksize limit by 1 byte, and send another block with same txes (new sigchecks limit: {})"
        .format((MAXBLOCKSIZE + 1) // BLOCK_MAXBYTES_MAXSIGCHECKS_RATIO))
    node.set("consensus.maxBlockSigChecks=%d" % (MAX_BLOCK_SIGCHECKS + 1))
    tip = self.build_block(tip, submittxes_2[:40],
                           nTime=SIGCHECKS_ACTIVATION_TIME + 6)
    # It should succeed now since limit should be 8000.
    self.pynode.send_blocks_and_test([tip], node, timeout=TIMEOUT)