def __init__(self):
    """Initialize test state: one node, a deterministic coinbase key, empty block maps."""
    # Bug fix: the base test-framework class was never initialized. The
    # sibling __init__ in this file calls super().__init__() — without it,
    # framework setup (options, logging, node bookkeeping) is skipped.
    super().__init__()
    self.num_nodes = 1
    self.block_heights = {}  # presumably block hash -> height; confirm against usage
    # Deterministic secret so generated coinbases are reproducibly spendable.
    self.coinbase_key = CECKey()
    self.coinbase_key.set_secretbytes(b"horsebattery")
    self.coinbase_pubkey = self.coinbase_key.get_pubkey()
    self.block_time = int(time.time()) + 1
    self.tip = None   # current chain tip (set as blocks are built)
    self.blocks = {}  # presumably block number -> block object; confirm against usage
def set_test_params(self):
    """Configure a single-node clean chain and the deterministic coinbase signing key."""
    self.setup_clean_chain = True
    self.num_nodes = 1
    self.block_heights = {}
    # Fixed secret so the coinbase key (and thus generated blocks) is reproducible.
    key = CECKey()
    key.set_secretbytes(b"horsebattery")
    self.coinbase_key = key
    self.coinbase_pubkey = key.get_pubkey()
    self.tip = None
    self.blocks = {}
def __init__(self):
    """Set up a single node with a large excessive-block-size limit and relaxed mempool limits."""
    super().__init__()
    self.num_nodes = 1
    self.block_heights = {}
    # Deterministic coinbase key for reproducible block generation.
    key = CECKey()
    key.set_secretbytes(b"fatstacks")
    self.coinbase_key = key
    self.coinbase_pubkey = key.get_pubkey()
    self.tip = None
    self.blocks = {}
    self.excessive_block_size = 16 * ONE_MEGABYTE
    # Loosen ancestry/mempool limits so large block tests are not throttled.
    self.extra_args = [[
        '-norelaypriority',
        '-whitelist=127.0.0.1',
        '-limitancestorcount=9999',
        '-limitancestorsize=9999',
        '-limitdescendantcount=9999',
        '-limitdescendantsize=9999',
        '-maxmempool=999',
        "-excessiveblocksize=%d" % self.excessive_block_size,
    ]]
def sign_stake_tx(self, block, stake_in_value, fZPoS=False):
    '''
    Signs a coinstake transaction.
    :param block: (CBlock) block with stake to sign
           stake_in_value: (int) staked amount
           fZPoS: (bool) zerocoin stake
    :return: stake_tx_signed: (CTransaction) signed tx
    '''
    self.block_sig_key = CECKey()
    if fZPoS:
        self.log.info("Signing zPoS stake...")
        # The node builds and signs the zerocoin stake; reuse its private key
        # as the block-signing key.
        raw_stake = self.node.createrawzerocoinstake(block.prevoutStake)
        signed_raw_hex = raw_stake["hex"]
        stake_pkey = raw_stake["private-key"]
        self.block_sig_key.set_compressed(True)
        self.block_sig_key.set_secretbytes(bytes.fromhex(stake_pkey))
    else:
        # Fresh (deterministic) key; the stake pays to this key's pubkey.
        self.block_sig_key.set_secretbytes(hash256(pack('<I', 0xffff)))
        pay_script = CScript([self.block_sig_key.get_pubkey(), OP_CHECKSIG])
        unsigned = CTransaction()
        unsigned.nTime = block.nTime
        unsigned.vin.append(CTxIn(block.prevoutStake))
        unsigned.vin[0].nSequence = 0xffffffff
        # Empty first output (coinstake marker by convention — confirm),
        # then the value output (stake + 2*COIN, presumably the reward).
        unsigned.vout.append(CTxOut())
        unsigned.vout.append(CTxOut(int(stake_in_value + 2 * COIN), pay_script))
        # Have the node sign the raw stake TX.
        signed_raw_hex = self.node.signrawtransaction(
            bytes_to_hex_str(unsigned.serialize()))['hex']
    # Deserialize the signed raw tx into a CTransaction object and return it.
    stake_tx_signed = CTransaction()
    stake_tx_signed.deserialize(BytesIO(hex_str_to_bytes(signed_raw_hex)))
    return stake_tx_signed
def get_tests(self):
    """Yield a sequence of PoS blocks that must each be rejected for a distinct
    reason, followed by one valid PoS block that must be accepted."""
    self.node = self.nodes[0]

    # Wrap the current tip in a TestInstance with the expected verdict.
    def accepted():
        return TestInstance([[self.tip, True]])

    def rejected(reject=None):
        if reject is None:
            return TestInstance([[self.tip, False]])
        return TestInstance([[self.tip, reject]])

    # Sign the current tip with `key` and refresh its cached hash.
    def seal(key):
        self.tip.sign_block(key)
        self.tip.rehash()

    # First generate some blocks so we have some spendable coins.
    block_hashes = self.node.generate(25)

    # Mature the coinbases with COINBASE_MATURITY solved PoW blocks.
    for _ in range(COINBASE_MATURITY):
        self.tip = create_block(
            int(self.node.getbestblockhash(), 16),
            create_coinbase(self.node.getblockcount() + 1),
            int(time.time()))
        self.tip.solve()
        yield accepted()

    # Spread some funds around, then mine one block to confirm the spends.
    for _ in range(10):
        self.node.sendtoaddress(self.node.getnewaddress(), 1000)
    block_hashes += self.node.generate(1)

    blocks = [self.node.getblock(h) for h in block_hashes]

    # Classify the wallet UTXOs into usable staking prevouts plus deliberately
    # broken variants (bad vout index, bad txid, immature).
    self.staking_prevouts = []
    self.bad_vout_staking_prevouts = []
    self.bad_txid_staking_prevouts = []
    self.unconfirmed_staking_prevouts = []

    for unspent in self.node.listunspent():
        for blk in blocks:
            if unspent['txid'] in blk['tx']:
                tx_block_time = blk['time']
                break
        else:
            # Every wallet UTXO must originate from one of our own blocks.
            assert False

        txid_int = int(unspent['txid'], 16)
        amount = int(unspent['amount']) * COIN
        if unspent['confirmations'] > COINBASE_MATURITY:
            self.staking_prevouts.append(
                (COutPoint(txid_int, unspent['vout']), amount, tx_block_time))
            self.bad_vout_staking_prevouts.append(
                (COutPoint(txid_int, 0xff), amount, tx_block_time))
            self.bad_txid_staking_prevouts.append(
                (COutPoint(txid_int + 1, unspent['vout']), amount, tx_block_time))
        if unspent['confirmations'] < COINBASE_MATURITY:
            self.unconfirmed_staking_prevouts.append(
                (COutPoint(txid_int, unspent['vout']), amount, tx_block_time))

    # time.sleep(100)
    block_count = self.node.getblockcount()

    # 1 A block that does not have the correct timestamp mask
    t = int(time.time()) | 1
    (self.tip, block_sig_key) = self.create_unsigned_pos_block(self.staking_prevouts, nTime=t)
    seal(block_sig_key)
    yield rejected()

    # 2 A block with a too-high reward
    (self.tip, block_sig_key) = self.create_unsigned_pos_block(self.staking_prevouts, outNValue=30006)
    seal(block_sig_key)
    yield rejected()

    # 3 A block with an incorrect block sig
    bad_key = CECKey()
    bad_key.set_secretbytes(hash256(b'horse staple battery'))
    (self.tip, block_sig_key) = self.create_unsigned_pos_block(self.staking_prevouts)
    seal(bad_key)
    yield rejected()

    # 4 A block that stakes with txs with too few confirmations
    (self.tip, block_sig_key) = self.create_unsigned_pos_block(self.unconfirmed_staking_prevouts)
    seal(block_sig_key)
    yield rejected()

    # 5 A block with a coinbase reward
    (self.tip, block_sig_key) = self.create_unsigned_pos_block(self.staking_prevouts)
    self.tip.vtx[0].vout[0].nValue = 1
    self.tip.hashMerkleRoot = self.tip.calc_merkle_root()
    seal(block_sig_key)
    yield rejected()

    # 6 A block with no vout in the coinbase
    (self.tip, block_sig_key) = self.create_unsigned_pos_block(self.staking_prevouts)
    self.tip.vtx[0].vout = []
    self.tip.hashMerkleRoot = self.tip.calc_merkle_root()
    seal(block_sig_key)
    yield rejected()

    # 7 A block way into the future
    t = (int(time.time()) + 100) & 0xfffffff0
    (self.tip, block_sig_key) = self.create_unsigned_pos_block(self.staking_prevouts, nTime=t)
    seal(block_sig_key)
    yield rejected()

    # 8 No vout in the staking tx
    (self.tip, block_sig_key) = self.create_unsigned_pos_block(self.staking_prevouts)
    self.tip.vtx[1].vout = []
    self.tip.hashMerkleRoot = self.tip.calc_merkle_root()
    seal(block_sig_key)
    yield rejected()

    # 9 Unsigned coinstake
    (self.tip, block_sig_key) = self.create_unsigned_pos_block(self.staking_prevouts, signStakeTx=False)
    seal(block_sig_key)
    yield rejected()

    # 10 A block without a coinstake tx
    (self.tip, block_sig_key) = self.create_unsigned_pos_block(self.staking_prevouts)
    self.tip.vtx.pop(-1)
    self.tip.hashMerkleRoot = self.tip.calc_merkle_root()
    seal(block_sig_key)
    yield rejected()

    # 11 A block without a coinbase
    (self.tip, block_sig_key) = self.create_unsigned_pos_block(self.staking_prevouts)
    self.tip.vtx.pop(0)
    self.tip.hashMerkleRoot = self.tip.calc_merkle_root()
    seal(block_sig_key)
    yield rejected()

    # 12 A block where the coinbase has no outputs
    (self.tip, block_sig_key) = self.create_unsigned_pos_block(self.staking_prevouts)
    self.tip.vtx[0].vout = []
    self.tip.hashMerkleRoot = self.tip.calc_merkle_root()
    seal(block_sig_key)
    yield rejected()

    # 13 A block where the coinstake has no outputs
    (self.tip, block_sig_key) = self.create_unsigned_pos_block(self.staking_prevouts)
    self.tip.vtx[1].vout.pop(-1)
    self.tip.vtx[1].vout.pop(-1)
    stake_tx_signed_raw_hex = self.node.signrawtransaction(
        bytes_to_hex_str(self.tip.vtx[1].serialize()))['hex']
    f = io.BytesIO(hex_str_to_bytes(stake_tx_signed_raw_hex))
    self.tip.vtx[1] = CTransaction()
    self.tip.vtx[1].deserialize(f)
    self.tip.hashMerkleRoot = self.tip.calc_merkle_root()
    seal(block_sig_key)
    yield rejected()

    # 14 A block with an incorrect hashStateRoot
    (self.tip, block_sig_key) = self.create_unsigned_pos_block(self.staking_prevouts)
    self.tip.hashStateRoot = 0xe
    seal(block_sig_key)
    yield rejected()

    # 15 A block with an incorrect hashUTXORoot
    (self.tip, block_sig_key) = self.create_unsigned_pos_block(self.staking_prevouts)
    self.tip.hashUTXORoot = 0xe
    seal(block_sig_key)
    yield rejected()

    # 16 A block with a signature on wrong header data
    #    (header mutated after signing, so the sig no longer matches)
    (self.tip, block_sig_key) = self.create_unsigned_pos_block(self.staking_prevouts)
    self.tip.sign_block(block_sig_key)
    self.tip.nNonce = 0xfffe
    self.tip.rehash()
    yield rejected()

    # 17 A block where the pubkey of the second output of the coinstake
    #    has been modified after block signing
    (self.tip, block_sig_key) = self.create_unsigned_pos_block(self.staking_prevouts)
    scriptPubKey = self.tip.vtx[1].vout[1].scriptPubKey
    # Bump one byte of the embedded pubkey, keeping the script length intact.
    self.tip.vtx[1].vout[1].scriptPubKey = (
        scriptPubKey[0:20]
        + bytes.fromhex(hex(ord(scriptPubKey[20:21]) + 1)[2:4])
        + scriptPubKey[21:])
    assert_equal(len(scriptPubKey), len(self.tip.vtx[1].vout[1].scriptPubKey))
    stake_tx_signed_raw_hex = self.node.signrawtransaction(
        bytes_to_hex_str(self.tip.vtx[1].serialize()))['hex']
    f = io.BytesIO(hex_str_to_bytes(stake_tx_signed_raw_hex))
    self.tip.vtx[1] = CTransaction()
    self.tip.vtx[1].deserialize(f)
    self.tip.hashMerkleRoot = self.tip.calc_merkle_root()
    seal(block_sig_key)
    yield rejected()

    # 18 A block in the past
    t = (int(time.time()) - 700) & 0xfffffff0
    (self.tip, block_sig_key) = self.create_unsigned_pos_block(self.staking_prevouts, nTime=t)
    seal(block_sig_key)
    yield rejected()

    # 19 A block with too many coinbase vouts
    (self.tip, block_sig_key) = self.create_unsigned_pos_block(self.staking_prevouts)
    self.tip.vtx[0].vout.append(CTxOut(0, CScript([OP_TRUE])))
    self.tip.vtx[0].rehash()
    self.tip.hashMerkleRoot = self.tip.calc_merkle_root()
    seal(block_sig_key)
    yield rejected()

    # 20 A block where the coinstake's vin is not the prevout specified in the block
    (self.tip, block_sig_key) = self.create_unsigned_pos_block(
        self.staking_prevouts, coinStakePrevout=self.staking_prevouts[-1][0])
    seal(block_sig_key)
    yield rejected()

    # 21 A block that stakes with valid txs but invalid vouts
    (self.tip, block_sig_key) = self.create_unsigned_pos_block(self.bad_vout_staking_prevouts)
    seal(block_sig_key)
    yield rejected()

    # 22 A block that stakes with txs that do not exist
    (self.tip, block_sig_key) = self.create_unsigned_pos_block(self.bad_txid_staking_prevouts)
    seal(block_sig_key)
    yield rejected()

    # Make sure for certain that no blocks were accepted
    # (this also makes sure that no segfaults occurred).
    assert_equal(self.node.getblockcount(), block_count)

    # And at last, make sure that a valid pos block is accepted.
    (self.tip, block_sig_key) = self.create_unsigned_pos_block(self.staking_prevouts)
    seal(block_sig_key)
    yield accepted()
    assert_equal(self.node.getblockcount(), block_count + 1)
class ReviewBase_Coin_FakeStakeTest(BitcoinTestFramework):
    """Base class for the "fake stake" attack tests.

    Builds deliberately invalid PoS blocks (zero/old/double-spent stakes) and
    spams them at a node, checking whether any are wrongly stored on disk.

    Bug fixes applied: every mutable default argument ({} / []) has been
    replaced with a None sentinel. In particular, spend_utxos appended the 10
    generated addresses to its default list, so every later default call
    silently reused the first call's addresses instead of generating new ones.
    """

    def set_test_params(self):
        '''
        Setup test environment: one staking node on a clean chain.
        :param:
        :return:
        '''
        self.setup_clean_chain = True
        self.num_nodes = 1
        self.extra_args = [['-staking=1', '-debug=net']] * self.num_nodes

    def setup_network(self):
        '''
        Can't rely on syncing all the nodes when staking=1.
        :param:
        :return:
        '''
        self.setup_nodes()
        for i in range(self.num_nodes - 1):
            for j in range(i + 1, self.num_nodes):
                connect_nodes_bi(self.nodes, i, j)

    def init_test(self):
        '''
        Initializes test parameters and the p2p connections.
        :param:
        :return:
        '''
        title = "*** Starting %s ***" % self.__class__.__name__
        underline = "-" * len(title)
        self.log.info("\n\n%s\n%s\n%s\n", title, underline, self.description)
        # Global Test parameters (override in run_test)
        self.DEFAULT_FEE = 0.1
        # Spam blocks to send in current test
        self.NUM_BLOCKS = 30

        # Setup the p2p connections and start up the network thread.
        self.test_nodes = []
        for i in range(self.num_nodes):
            self.test_nodes.append(TestNode())
            self.test_nodes[i].peer_connect('127.0.0.1', p2p_port(i))

        network_thread_start()  # Start up network handling in another thread
        self.node = self.nodes[0]

        # Let the test nodes get in sync
        for i in range(self.num_nodes):
            self.test_nodes[i].wait_for_verack()

    def run_test(self):
        '''
        Performs the attack of this test - run init_test first.
        Subclasses override this; the base version only initializes.
        :param:
        :return:
        '''
        self.description = ""
        self.init_test()
        return

    def create_spam_block(self, hashPrevBlock, stakingPrevOuts, height,
                          fStakeDoubleSpent=False, fZPoS=False, spendingPrevOuts=None):
        '''
        Creates a block to spam the network with.
        :param hashPrevBlock: (hex string) hash of previous block
               stakingPrevOuts: ({COutPoint --> (int, int, int, str)} dictionary)
                  map outpoints (to be used as staking inputs) to amount, block_time, nStakeModifier, hashStake
               height: (int) block height
               fStakeDoubleSpent: (bool) spend the coinstake input inside the block
               fZPoS: (bool) stake the block with zerocoin
               spendingPrevOuts: ({COutPoint --> (int, int, int, str)} dictionary)
                  map outpoints (to be used as tx inputs) to amount, block_time, nStakeModifier, hashStake
        :return block: (CBlock) generated block
        '''
        # Bug fix: mutable default argument replaced with a None sentinel.
        if spendingPrevOuts is None:
            spendingPrevOuts = {}
        # If not given inputs to create spam txes, use a copy of the staking inputs.
        if len(spendingPrevOuts) == 0:
            spendingPrevOuts = dict(stakingPrevOuts)

        # Get current time, masked to the PoS timestamp granularity.
        current_time = int(time.time())
        nTime = current_time & 0xfffffff0

        # Create coinbase TX.
        # Even if PoS blocks have empty coinbase vout, the height is required for the vin script.
        coinbase = create_coinbase(height)
        coinbase.vout[0].nValue = 0
        coinbase.vout[0].scriptPubKey = b""
        coinbase.nTime = nTime
        coinbase.rehash()

        # Create Block with coinbase.
        block = create_block(int(hashPrevBlock, 16), coinbase, nTime)

        # Find valid kernel hash - Create a new private key used for block signing.
        if not block.solve_stake(stakingPrevOuts):
            raise Exception("Not able to solve for any prev_outpoint")

        # Sign coinstake TX and add it to the block.
        signed_stake_tx = self.sign_stake_tx(
            block, stakingPrevOuts[block.prevoutStake][0], fZPoS)
        block.vtx.append(signed_stake_tx)

        # Remove coinstake input prevout unless we want to try double spending in the same block.
        # Skip for zPoS as the spendingPrevouts are just regular UTXOs.
        if not fZPoS and not fStakeDoubleSpent:
            del spendingPrevOuts[block.prevoutStake]

        # Remove a random prevout from the list
        # (to randomize block creation if the same height is picked two times).
        if len(spendingPrevOuts) > 0:
            del spendingPrevOuts[choice(list(spendingPrevOuts))]

        # Create spam for the block. Sign the spendingPrevouts.
        for outPoint in spendingPrevOuts:
            value_out = int(spendingPrevOuts[outPoint][0] - self.DEFAULT_FEE * COIN)
            tx = create_transaction(outPoint, b"", value_out, nTime,
                                    scriptPubKey=CScript([
                                        self.block_sig_key.get_pubkey(), OP_CHECKSIG]))
            # Sign txes via the node.
            signed_tx_hex = self.node.signrawtransaction(
                bytes_to_hex_str(tx.serialize()))['hex']
            signed_tx = CTransaction()
            signed_tx.deserialize(BytesIO(hex_str_to_bytes(signed_tx_hex)))
            block.vtx.append(signed_tx)

        # Get correct MerkleRoot and rehash block.
        block.hashMerkleRoot = block.calc_merkle_root()
        block.rehash()

        # Sign block with coinstake key and return it.
        block.sign_block(self.block_sig_key)
        return block

    def spend_utxo(self, utxo, address_list):
        '''
        Spend amount from a previously unspent output to the provided addresses.
        :param utxo: (JSON) returned from listunspent used as input
               address_list: (string list) destination addresses
        :return: txhash: (string) tx hash if successful, empty string otherwise
        '''
        try:
            inputs = [{"txid": utxo["txid"], "vout": utxo["vout"]}]
            out_amount = (float(utxo["amount"]) - self.DEFAULT_FEE) / len(address_list)
            outputs = {}
            for address in address_list:
                outputs[address] = out_amount
            spendingTx = self.node.createrawtransaction(inputs, outputs)
            spendingTx_signed = self.node.signrawtransaction(spendingTx)
            if spendingTx_signed["complete"]:
                txhash = self.node.sendrawtransaction(spendingTx_signed["hex"])
                return txhash
            else:
                self.log.warning("Error: %s" % str(spendingTx_signed["errors"]))
                return ""
        except JSONRPCException as e:
            self.log.error("JSONRPCException: %s" % str(e))
            return ""

    def spend_utxos(self, utxo_list, address_list=None):
        '''
        Spend utxos to the provided list of addresses, or to 10 newly generated ones.
        :param utxo_list: (JSON list) returned from listunspent used as input
               address_list: (string list) [optional] recipient ReviewBase_Coin addresses.
                  If not set, 10 new addresses will be generated from the wallet.
        :return: txHashes: (string list) tx hashes
        '''
        # Bug fix: the original used a mutable default ([]) and appended the
        # generated addresses to it, so subsequent default calls reused them.
        if address_list is None:
            address_list = []
        txHashes = []
        # If not given, get 10 new addresses from self.node wallet.
        if address_list == []:
            for i in range(10):
                address_list.append(self.node.getnewaddress())
        for utxo in utxo_list:
            try:
                # Spend current utxo to provided addresses.
                txHash = self.spend_utxo(utxo, address_list)
                if txHash != "":
                    txHashes.append(txHash)
            except JSONRPCException as e:
                self.log.error("JSONRPCException: %s" % str(e))
                continue
        return txHashes

    def stake_amplification_step(self, utxo_list, address_list=None):
        '''
        Spends a list of utxos providing the list of new outputs.
        :param utxo_list: (JSON list) returned from listunspent used as input
               address_list: (string list) [optional] recipient ReviewBase_Coin addresses.
        :return: new_utxos: (JSON list) list of new (valid) inputs after the spends
        '''
        # Bug fix: mutable default argument replaced with a None sentinel.
        if address_list is None:
            address_list = []
        self.log.info("--> Stake Amplification step started with %d UTXOs", len(utxo_list))
        txHashes = self.spend_utxos(utxo_list, address_list)
        num_of_txes = len(txHashes)
        new_utxos = []
        if num_of_txes > 0:
            self.log.info("Created %d transactions...Mining 2 blocks to include them..." % num_of_txes)
            self.node.generate(2)
            time.sleep(2)
            new_utxos = self.node.listunspent()
        self.log.info("Amplification step produced %d new \"Fake Stake\" inputs:" % len(new_utxos))
        return new_utxos

    def stake_amplification(self, utxo_list, iterations, address_list=None):
        '''
        Performs the "stake amplification" which gives higher chances at finding fake stakes.
        :param utxo_list: (JSON list) returned from listunspent used as input
               iterations: (int) amount of stake amplification steps to perform
               address_list: (string list) [optional] recipient ReviewBase_Coin addresses.
        :return: all_inputs: (JSON list) list of all spent inputs
        '''
        # Bug fix: mutable default argument replaced with a None sentinel.
        if address_list is None:
            address_list = []
        self.log.info("** Stake Amplification started with %d UTXOs", len(utxo_list))
        valid_inputs = utxo_list
        all_inputs = []
        for i in range(iterations):
            all_inputs = all_inputs + valid_inputs
            old_inputs = valid_inputs
            valid_inputs = self.stake_amplification_step(old_inputs, address_list)
        self.log.info("** Stake Amplification ended with %d \"fake\" UTXOs", len(all_inputs))
        return all_inputs

    def sign_stake_tx(self, block, stake_in_value, fZPoS=False):
        '''
        Signs a coinstake transaction.
        :param block: (CBlock) block with stake to sign
               stake_in_value: (int) staked amount
               fZPoS: (bool) zerocoin stake
        :return: stake_tx_signed: (CTransaction) signed tx
        '''
        self.block_sig_key = CECKey()
        if fZPoS:
            self.log.info("Signing zPoS stake...")
            # Create raw zerocoin stake TX (signed by the node); reuse its
            # private key as the block-signing key.
            raw_stake = self.node.createrawzerocoinstake(block.prevoutStake)
            stake_tx_signed_raw_hex = raw_stake["hex"]
            stake_pkey = raw_stake["private-key"]
            self.block_sig_key.set_compressed(True)
            self.block_sig_key.set_secretbytes(bytes.fromhex(stake_pkey))
        else:
            # Create a new (deterministic) private key and get the corresponding public key.
            self.block_sig_key.set_secretbytes(hash256(pack('<I', 0xffff)))
            pubkey = self.block_sig_key.get_pubkey()
            # Create the raw stake TX (unsigned).
            scriptPubKey = CScript([pubkey, OP_CHECKSIG])
            outNValue = int(stake_in_value + 2 * COIN)
            stake_tx_unsigned = CTransaction()
            stake_tx_unsigned.nTime = block.nTime
            stake_tx_unsigned.vin.append(CTxIn(block.prevoutStake))
            stake_tx_unsigned.vin[0].nSequence = 0xffffffff
            # Empty first output (coinstake marker by convention — confirm),
            # then the value output.
            stake_tx_unsigned.vout.append(CTxOut())
            stake_tx_unsigned.vout.append(CTxOut(outNValue, scriptPubKey))
            # Sign the stake TX.
            stake_tx_signed_raw_hex = self.node.signrawtransaction(
                bytes_to_hex_str(stake_tx_unsigned.serialize()))['hex']
        # Deserialize the signed raw tx into a CTransaction object and return it.
        stake_tx_signed = CTransaction()
        stake_tx_signed.deserialize(
            BytesIO(hex_str_to_bytes(stake_tx_signed_raw_hex)))
        return stake_tx_signed

    def get_prevouts(self, utxo_list, blockHeight, zpos=False):
        '''
        Get prevouts (map) for each utxo in a list.
        :param utxo_list: <if zpos=False> (JSON list) utxos returned from listunspent used as input
                          <if zpos=True> (JSON list) mints returned from listmintedzerocoins used as input
               blockHeight: (int) height of the previous block
               zpos: (bool) type of utxo_list
        :return: stakingPrevOuts: ({COutPoint --> (int, int, int, str)} dictionary)
                 map outpoints to amount, block_time, nStakeModifier, hashStake
        '''
        zerocoinDenomList = [1, 5, 10, 50, 100, 500, 1000, 5000]
        stakingPrevOuts = {}

        for utxo in utxo_list:
            if zpos:
                # Get mint checkpoint.
                checkpointHeight = blockHeight - 200
                checkpointBlock = self.node.getblock(
                    self.node.getblockhash(checkpointHeight), True)
                checkpoint = int(checkpointBlock['acc_checkpoint'], 16)
                # Parse checksum and get checksumblock.
                pos = zerocoinDenomList.index(utxo['denomination'])
                checksum = (checkpoint >> (32 * (len(zerocoinDenomList) - 1 - pos))) & 0xFFFFFFFF
                checksumBlock = self.node.getchecksumblock(
                    hex(checksum), utxo['denomination'], True)
                # Get block hash and block time.
                txBlockhash = checksumBlock['hash']
                txBlocktime = checksumBlock['time']
            else:
                # Get raw transaction for current input.
                utxo_tx = self.node.getrawtransaction(utxo['txid'], 1)
                # Get block hash and block time.
                txBlocktime = utxo_tx['blocktime']
                txBlockhash = utxo_tx['blockhash']

            # Get Stake Modifier.
            stakeModifier = int(
                self.node.getblock(txBlockhash)['modifier'], 16)
            # Assemble prevout object.
            utxo_to_stakingPrevOuts(utxo, stakingPrevOuts, txBlocktime, stakeModifier, zpos)

        return stakingPrevOuts

    def log_data_dir_size(self):
        '''
        Prints the size of the node's '/regtest/blocks' directory.
        :param:
        :return:
        '''
        init_size = dir_size(self.node.datadir + "/regtest/blocks")
        self.log.info("Size of data dir: %s kilobytes" % str(init_size))

    def test_spam(self, name, staking_utxo_list, fRandomHeight=False, randomRange=0, randomRange2=0,
                  fDoubleSpend=False, fMustPass=False, fZPoS=False, spending_utxo_list=None):
        '''
        General method to create, send and test the spam blocks.
        :param name: (string) chain branch (usually either "Main" or "Forked")
               staking_utxo_list: (string list) utxos to use for staking
               fRandomHeight: (bool) send blocks at random height
               randomRange: (int) if fRandomHeight=True, height is >= current-randomRange
               randomRange2: (int) if fRandomHeight=True, height is < current-randomRange2
               fDoubleSpend: (bool) if true, stake input is double spent in block.vtx
               fMustPass: (bool) if true, the blocks must be stored on disk
               fZPoS: (bool) stake the block with zerocoin
               spending_utxo_list: (string list) utxos to use for spending
        :return: err_msgs: (string list) reports error messages from the test
                 or an empty list if test is successful
        '''
        # Bug fix: mutable default argument replaced with a None sentinel.
        if spending_utxo_list is None:
            spending_utxo_list = []
        # Create empty error messages list.
        err_msgs = []
        # Log initial datadir size.
        self.log_data_dir_size()
        # Get latest block number and hash.
        block_count = self.node.getblockcount()
        pastBlockHash = self.node.getblockhash(block_count)
        randomCount = block_count
        self.log.info("Current height: %d" % block_count)

        for i in range(0, self.NUM_BLOCKS):
            if i != 0:
                self.log.info("Sent %d blocks out of %d" % (i, self.NUM_BLOCKS))

            # If fRandomHeight=True get a random block number (in range) and corresponding hash.
            if fRandomHeight:
                randomCount = randint(block_count - randomRange, block_count - randomRange2)
                pastBlockHash = self.node.getblockhash(randomCount)

            # Get spending prevouts and staking prevouts for the height of current block.
            current_block_n = randomCount + 1
            stakingPrevOuts = self.get_prevouts(staking_utxo_list, randomCount, zpos=fZPoS)
            spendingPrevOuts = self.get_prevouts(spending_utxo_list, randomCount)

            # Create the spam block.
            block = self.create_spam_block(pastBlockHash, stakingPrevOuts, current_block_n,
                                           fStakeDoubleSpent=fDoubleSpend, fZPoS=fZPoS,
                                           spendingPrevOuts=spendingPrevOuts)

            # Log time and size of the block.
            block_time = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(block.nTime))
            block_size = len(block.serialize()) / 1000
            self.log.info(
                "Sending block %d [%s...] - nTime: %s - Size (kb): %.2f",
                current_block_n, block.hash[:7], block_time, block_size)

            # Try submitblock.
            var = self.node.submitblock(bytes_to_hex_str(block.serialize()))
            time.sleep(1)
            if (not fMustPass and var not in [None, "bad-txns-invalid-zrview"]
                ) or (fMustPass and var != "inconclusive"):
                self.log.error("submitblock [fMustPass=%s] result: %s" % (str(fMustPass), str(var)))
                err_msgs.append("submitblock %d: %s" % (current_block_n, str(var)))

            # Try sending the message block.
            msg = msg_block(block)
            try:
                self.test_nodes[0].handle_connect()
                self.test_nodes[0].send_message(msg)
                time.sleep(2)
                block_ret = self.node.getblock(block.hash)
                if not fMustPass and block_ret is not None:
                    self.log.error("Error, block stored in %s chain" % name)
                    err_msgs.append("getblock %d: result not None" % current_block_n)
                if fMustPass:
                    if block_ret is None:
                        self.log.error("Error, block NOT stored in %s chain" % name)
                        err_msgs.append("getblock %d: result is None" % current_block_n)
                    else:
                        self.log.info("Good. Block IS stored on disk.")

            except JSONRPCException as e:
                exc_msg = str(e)
                if exc_msg == "Can't read block from disk (-32603)":
                    if fMustPass:
                        self.log.warning("Bad! Block was NOT stored to disk.")
                        err_msgs.append(exc_msg)
                    else:
                        self.log.info("Good. Block was not stored on disk.")
                else:
                    self.log.warning(exc_msg)
                    err_msgs.append(exc_msg)

            except Exception as e:
                exc_msg = str(e)
                self.log.error(exc_msg)
                err_msgs.append(exc_msg)

        self.log.info("Sent all %s blocks." % str(self.NUM_BLOCKS))
        # Log final datadir size.
        self.log_data_dir_size()
        # Return errors list.
        return err_msgs
def makePubKeys(numOfKeys):
    """Return a list of ``numOfKeys`` copies of one deterministic public key."""
    signing_key = CECKey()
    signing_key.set_secretbytes(b"randombytes2")
    pubkey = signing_key.get_pubkey()
    return [pubkey for _ in range(numOfKeys)]
def get_tests(self):
    """Exercise P2SH output acceptance before and after genesis activation:
    pre-genesis P2SH outputs are valid (and remain spendable), while new P2SH
    outputs are rejected from blocks and the mempool once genesis activates."""
    # Shorthand for functions.
    block = self.chain.next_block
    node = self.nodes[0]
    self.chain.set_genesis_hash(int(node.getbestblockhash(), 16))

    # Create and mature coinbase txs.
    test = TestInstance(sync_every_block=False)
    for i in range(200):
        block(i, coinbase_pubkey=self.coinbase_pubkey)
        test.blocks_and_transactions.append([self.chain.tip, True])
        self.chain.save_spendable_output()
    yield test

    # Collect spendable outputs now to avoid cluttering the code later on.
    coinbase_utxos = [self.chain.get_spendable_output() for _ in range(50)]

    # Builds a P2SH transaction spending the next matured coinbase.
    def new_P2SH_tx():
        utxo = coinbase_utxos.pop(0)
        return create_and_sign_transaction(spend_tx=utxo.tx, n=utxo.n,
                                           value=utxo.tx.vout[0].nValue - 100,
                                           private_key=self.coinbase_key,
                                           script=self.p2sh_script)

    # Add P2SH transactions to a pre-genesis block; they must be accepted.
    block(200)
    p2sh_txs = [new_P2SH_tx() for _ in range(4)]
    self.chain.update_block(200, p2sh_txs)
    yield self.accepted()

    coinbase_to_p2sh_tx = new_P2SH_tx()

    # RPC tests: signing works (result unused), and sending via RPC succeeds.
    node.signrawtransaction(ToHex(coinbase_to_p2sh_tx))
    coinbase_to_p2sh_tx_id = node.sendrawtransaction(ToHex(coinbase_to_p2sh_tx))

    # A key that does not match the redeem script must fail mempool checks.
    wrongPrivateKey = CECKey()
    wrongPrivateKey.set_secretbytes(b"wrongkeysecret")
    wrongkey_txn = self.spend_p2sh_tx(p2sh_txs[0], privateKey=wrongPrivateKey)
    assert_raises_rpc_error(-26, "mandatory-script-verify-flag-failed",
                            node.sendrawtransaction, ToHex(wrongkey_txn))

    # The correct key's spend is accepted into the mempool.
    correctkey_tx = self.spend_p2sh_tx(p2sh_txs[1])
    correctkey_tx_id = node.sendrawtransaction(ToHex(correctkey_tx))
    assert_equal(set(node.getrawmempool()),
                 {correctkey_tx_id, coinbase_to_p2sh_tx_id})

    block(201)
    self.chain.update_block(201, [correctkey_tx, coinbase_to_p2sh_tx])
    yield self.accepted()

    # This block would be at genesis height.
    assert node.getblockcount() == self.genesisactivationheight - 1

    # Transactions with a P2SH output are now rejected from blocks.
    block(202)
    p2sh_tx_after_genesis = new_P2SH_tx()
    self.chain.update_block(202, [p2sh_tx_after_genesis])
    yield self.rejected(RejectResult(16, b'bad-txns-vout-p2sh'))

    self.chain.set_tip(201)
    block(203, coinbase_pubkey=self.coinbase_pubkey)
    yield self.accepted()

    # We are at genesis height now.
    assert node.getblockcount() == self.genesisactivationheight

    # P2SH transactions are rejected and can't enter the mempool.
    assert_raises_rpc_error(-26, "bad-txns-vout-p2sh",
                            node.sendrawtransaction, ToHex(new_P2SH_tx()))

    # A wrong key still fails with the old redeem script,
    # the same behavior as before genesis.
    wrongPrivateKey = CECKey()
    wrongPrivateKey.set_secretbytes(b"wrongkeysecret")
    wrongkey_txn = self.spend_p2sh_tx(p2sh_txs[2], privateKey=wrongPrivateKey)
    assert_raises_rpc_error(-26, "mandatory-script-verify-flag-failed",
                            node.sendrawtransaction, ToHex(wrongkey_txn))

    # We can still spend old (pre-genesis) P2SH outputs.
    correctkey_tx = self.spend_p2sh_tx(p2sh_txs[3])
    sign_result = node.signrawtransaction(ToHex(correctkey_tx))
    assert sign_result['complete'], "Should be able to sign"
    correctkey_tx_id = node.sendrawtransaction(ToHex(correctkey_tx))
    assert_equal(set(node.getrawmempool()), {correctkey_tx_id})

    tx1_raw = node.getrawtransaction(p2sh_txs[0].hash, True)
    assert tx1_raw["vout"][0]["scriptPubKey"]["type"] == "scripthash"
class FullBlockTest(ComparisonTestFramework):
    """Full-block comparison test exercising P2SH sigop limits.

    Builds a private chain with next_block()/update_block() and yields
    TestInstances for the comparison framework.
    """
    # Can either run this test as 1 node with expected answers, or two and compare them.
    # Change the "outcome" variable from each TestInstance object to only do
    # the comparison.

    def set_test_params(self):
        # Single node on a clean chain; coinbases are paid to a key we control
        # so their outputs can be re-spent by the test.
        self.num_nodes = 1
        self.setup_clean_chain = True
        self.block_heights = {}  # block sha256 -> chain height
        self.coinbase_key = CECKey()
        self.coinbase_key.set_secretbytes(b"horsebattery")
        self.coinbase_pubkey = self.coinbase_key.get_pubkey()
        self.tip = None      # current best block built by this test
        self.blocks = {}     # test-local block number -> CBlock

    def setup_network(self):
        self.extra_args = [['-norelaypriority']]
        self.add_nodes(self.num_nodes, self.extra_args)
        self.start_nodes()

    def add_options(self, parser):
        super().add_options(parser)
        parser.add_argument("--runbarelyexpensive", dest="runbarelyexpensive",
                            default=True)

    def run_test(self):
        self.test = TestManager(self, self.options.tmpdir)
        self.test.add_all_connections(self.nodes)
        # Start up network handling in another thread
        NetworkThread().start()
        self.test.run()

    def add_transactions_to_block(self, block, tx_list):
        # Rehash txs so their cached ids match their (possibly modified) contents.
        [tx.rehash() for tx in tx_list]
        block.vtx.extend(tx_list)

    # this is a little handier to use than the version in blocktools.py
    def create_tx(self, spend_tx, n, value, script=CScript([OP_TRUE])):
        tx = create_transaction(spend_tx, n, b"", value, script)
        return tx

    # sign a transaction, using the key we know about
    # this signs input 0 in tx, which is assumed to be spending output n in
    # spend_tx
    def sign_tx(self, tx, spend_tx, n):
        scriptPubKey = bytearray(spend_tx.vout[n].scriptPubKey)
        if (scriptPubKey[0] == OP_TRUE):  # an anyone-can-spend
            tx.vin[0].scriptSig = CScript()
            return
        sighash = SignatureHashForkId(spend_tx.vout[n].scriptPubKey, tx, 0,
                                      SIGHASH_ALL | SIGHASH_FORKID,
                                      spend_tx.vout[n].nValue)
        tx.vin[0].scriptSig = CScript([
            self.coinbase_key.sign(sighash) +
            bytes(bytearray([SIGHASH_ALL | SIGHASH_FORKID]))
        ])

    def create_and_sign_transaction(self, spend_tx, n, value,
                                    script=CScript([OP_TRUE])):
        tx = self.create_tx(spend_tx, n, value, script)
        self.sign_tx(tx, spend_tx, n)
        tx.rehash()
        return tx

    def next_block(self, number, spend=None, additional_coinbase_value=0,
                   script=CScript([OP_TRUE])):
        """Build, solve and register block `number` on top of the current tip.

        If `spend` is given, its output is consumed: all but one satoshi goes
        to the coinbase (fees), one satoshi is spent to `script`.
        """
        if self.tip == None:
            base_block_hash = self.genesis_hash
            block_time = int(time.time()) + 1
        else:
            base_block_hash = self.tip.sha256
            block_time = self.tip.nTime + 1
        # First create the coinbase
        height = self.block_heights[base_block_hash] + 1
        coinbase = create_coinbase(height, self.coinbase_pubkey)
        coinbase.vout[0].nValue += additional_coinbase_value
        coinbase.rehash()
        if spend == None:
            block = create_block(base_block_hash, coinbase, block_time)
        else:
            # all but one satoshi to fees
            coinbase.vout[0].nValue += spend.tx.vout[spend.n].nValue - 1
            coinbase.rehash()
            block = create_block(base_block_hash, coinbase, block_time)
            # spend 1 satoshi
            tx = create_transaction(spend.tx, spend.n, b"", 1, script)
            self.sign_tx(tx, spend.tx, spend.n)
            self.add_transactions_to_block(block, [tx])
            block.hashMerkleRoot = block.calc_merkle_root()
        # Do PoW, which is very inexpensive on regnet
        block.solve()
        self.tip = block
        self.block_heights[block.sha256] = height
        assert number not in self.blocks
        self.blocks[number] = block
        return block

    def get_tests(self):
        """Yield TestInstances: mature coinbases, then P2SH sigop-limit checks."""
        self.genesis_hash = int(self.nodes[0].getbestblockhash(), 16)
        self.block_heights[self.genesis_hash] = 0
        spendable_outputs = []

        # save the current tip so it can be spent by a later block
        def save_spendable_output():
            spendable_outputs.append(self.tip)

        # get an output that we previously marked as spendable
        def get_spendable_output():
            return PreviousSpendableOutput(spendable_outputs.pop(0).vtx[0], 0)

        # returns a test case that asserts that the current tip was accepted
        def accepted():
            return TestInstance([[self.tip, True]])

        # returns a test case that asserts that the current tip was rejected
        def rejected(reject=None):
            if reject is None:
                return TestInstance([[self.tip, False]])
            else:
                return TestInstance([[self.tip, reject]])

        # move the tip back to a previous block
        def tip(number):
            self.tip = self.blocks[number]

        # adds transactions to the block and updates state
        def update_block(block_number, new_transactions):
            block = self.blocks[block_number]
            self.add_transactions_to_block(block, new_transactions)
            old_sha256 = block.sha256
            block.hashMerkleRoot = block.calc_merkle_root()
            block.solve()
            # Update the internal state just like in next_block
            self.tip = block
            if block.sha256 != old_sha256:
                self.block_heights[
                    block.sha256] = self.block_heights[old_sha256]
                del self.block_heights[old_sha256]
            self.blocks[block_number] = block
            return block

        # shorthand for functions
        block = self.next_block
        create_tx = self.create_tx
        # shorthand for variables
        node = self.nodes[0]

        # Create a new block
        block(0)
        save_spendable_output()
        yield accepted()

        # Now we need that block to mature so we can spend the coinbase.
        test = TestInstance(sync_every_block=False)
        for i in range(99):
            block(5000 + i)
            test.blocks_and_transactions.append([self.tip, True])
            save_spendable_output()
        yield test

        # Collect spendable outputs now to avoid cluttering the code later on
        out = []
        for i in range(33):
            out.append(get_spendable_output())

        # P2SH
        # Build the redeem script, hash it, use hash to create the p2sh script
        redeem_script = CScript([self.coinbase_pubkey] +
                                [OP_2DUP, OP_CHECKSIGVERIFY] * 5 +
                                [OP_CHECKSIG])
        redeem_script_hash = hash160(redeem_script)
        p2sh_script = CScript([OP_HASH160, redeem_script_hash, OP_EQUAL])

        # Creates a new transaction using a p2sh transaction as input
        def spend_p2sh_tx(p2sh_tx_to_spend, output_script=CScript([OP_TRUE])):
            # Create the transaction
            spent_p2sh_tx = CTransaction()
            spent_p2sh_tx.vin.append(
                CTxIn(COutPoint(p2sh_tx_to_spend.sha256, 0), b''))
            spent_p2sh_tx.vout.append(CTxOut(1, output_script))
            # Sign the transaction using the redeem script
            sighash = SignatureHashForkId(redeem_script, spent_p2sh_tx, 0,
                                          SIGHASH_ALL | SIGHASH_FORKID,
                                          p2sh_tx_to_spend.vout[0].nValue)
            sig = self.coinbase_key.sign(sighash) + bytes(
                bytearray([SIGHASH_ALL | SIGHASH_FORKID]))
            spent_p2sh_tx.vin[0].scriptSig = CScript([sig, redeem_script])
            spent_p2sh_tx.rehash()
            return spent_p2sh_tx

        # P2SH tests
        # Create a p2sh transaction
        p2sh_tx = self.create_and_sign_transaction(out[0].tx, out[0].n, 1,
                                                   p2sh_script)

        # Add the transaction to the block
        block(1)
        update_block(1, [p2sh_tx])
        yield accepted()

        # Sigops p2sh limit for the mempool test
        p2sh_sigops_limit_mempool = MAX_STANDARD_TX_SIGOPS - \
            redeem_script.GetSigOpCount(True)
        # Too many sigops in one p2sh script
        too_many_p2sh_sigops_mempool = CScript([OP_CHECKSIG] *
                                               (p2sh_sigops_limit_mempool + 1))

        # A transaction with this output script can't get into the mempool
        assert_raises_rpc_error(
            -26, RPC_TXNS_TOO_MANY_SIGOPS_ERROR, node.sendrawtransaction,
            ToHex(spend_p2sh_tx(p2sh_tx, too_many_p2sh_sigops_mempool)))
        # The transaction is rejected, so the mempool should still be empty
        assert_equal(set(node.getrawmempool()), set())

        # Max sigops in one p2sh txn
        max_p2sh_sigops_mempool = CScript([OP_CHECKSIG] *
                                          (p2sh_sigops_limit_mempool))

        # A transaction with this output script can get into the mempool
        max_p2sh_sigops_txn = spend_p2sh_tx(p2sh_tx, max_p2sh_sigops_mempool)
        max_p2sh_sigops_txn_id = node.sendrawtransaction(
            ToHex(max_p2sh_sigops_txn))
        assert_equal(set(node.getrawmempool()), {max_p2sh_sigops_txn_id})

        # Mine the transaction
        block(2, spend=out[1])
        update_block(2, [max_p2sh_sigops_txn])
        yield accepted()

        # The transaction has been mined, it's not in the mempool anymore
        assert_equal(set(node.getrawmempool()), set())
def run_test(self): node = self.nodes[0] # Generate 6 keys. rawkeys = [] pubkeys = [] for i in range(6): raw_key = CECKey() raw_key.set_secretbytes(('privkey%d' % i).encode('ascii')) rawkeys.append(raw_key) pubkeys = [CPubKey(key.get_pubkey()) for key in rawkeys] # Create a 4-of-6 multi-sig wallet with CLTV. height = 210 redeem_script = CScript( [CScriptNum(height), OP_CHECKLOCKTIMEVERIFY, OP_DROP ] + # CLTV (lock_time >= 210) [OP_4] + pubkeys + [OP_6, OP_CHECKMULTISIG]) # multi-sig hex_redeem_script = bytes_to_hex_str(redeem_script) p2sh_address = script_to_p2sh(redeem_script, main=False) # Send 1 coin to the mult-sig wallet. txid = node.sendtoaddress(p2sh_address, 1.0) raw_tx = node.getrawtransaction(txid, True) try: node.importaddress(hex_redeem_script, 'cltv', True, True) except Exception as err: pass assert_equal( sig(node.getreceivedbyaddress(p2sh_address, 0) - Decimal(1.0)), 0) # Mine one block to confirm the transaction. node.generate(1) # block 201 assert_equal( sig(node.getreceivedbyaddress(p2sh_address, 1) - Decimal(1.0)), 0) # Try to spend the coin. addr_to = node.getnewaddress('') # (1) Find the UTXO for vout in raw_tx['vout']: if vout['scriptPubKey']['addresses'] == [p2sh_address]: vout_n = vout['n'] hex_script_pubkey = raw_tx['vout'][vout_n]['scriptPubKey']['hex'] value = raw_tx['vout'][vout_n]['value'] # (2) Create a tx inputs = [{ "txid": txid, "vout": vout_n, "scriptPubKey": hex_script_pubkey, "redeemScript": hex_redeem_script, "amount": value, }] outputs = {addr_to: 0.999} lock_time = height hex_spend_raw_tx = node.createrawtransaction(inputs, outputs, lock_time) hex_funding_raw_tx = node.getrawtransaction(txid, False) # (3) Try to sign the spending tx. tx0 = CTransaction() tx0.deserialize(io.BytesIO(hex_str_to_bytes(hex_funding_raw_tx))) tx1 = CTransaction() tx1.deserialize(io.BytesIO(hex_str_to_bytes(hex_spend_raw_tx))) self.sign_tx(tx1, tx0, vout_n, redeem_script, 0, rawkeys[:4]) # Sign with key[0:4] # Mine some blocks to pass the lock time. 
node.generate(10) # Spend the CLTV multi-sig coins. raw_tx1 = tx1.serialize() hex_raw_tx1 = bytes_to_hex_str(raw_tx1) node.sendrawtransaction(hex_raw_tx1) # Check the tx is accepted by mempool but not confirmed. assert_equal( sig(node.getreceivedbyaddress(addr_to, 0) - Decimal(0.999)), 0) assert_equal(sig(node.getreceivedbyaddress(addr_to, 1)), 0) # Mine a block to confirm the tx. node.generate(1) assert_equal( sig(node.getreceivedbyaddress(addr_to, 1) - Decimal(0.999)), 0)
def run_test(self):
    """Verify -assumevalid behaviour for a chain containing a bad-signature block.

    Builds a chain where block 102 spends a coinbase with a null scriptSig,
    then buries it 2100 deep. node0 (validating) rejects block 102; node1
    (assumevalid, enough headers buried) accepts the whole chain; node2
    (assumevalid but too few headers sent) still rejects block 102.
    """
    # Connect to node0
    node0 = BaseNode()
    connections = []
    connections.append(
        NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], node0))
    node0.add_connection(connections[0])

    NetworkThread().start()  # Start up network handling in another thread
    node0.wait_for_verack()

    # Build the blockchain
    self.tip = int(self.nodes[0].getbestblockhash(), 16)
    self.block_time = self.nodes[0].getblock(
        self.nodes[0].getbestblockhash())['time'] + 1

    self.blocks = []

    # Get a pubkey for the coinbase TXO
    coinbase_key = CECKey()
    coinbase_key.set_secretbytes(b"horsebattery")
    coinbase_pubkey = coinbase_key.get_pubkey()

    # Create the first block with a coinbase output to our key
    height = 1
    block = create_block(self.tip, create_coinbase(height, coinbase_pubkey),
                         self.block_time)
    self.blocks.append(block)
    self.block_time += 1
    block.solve()
    # Save the coinbase for later
    self.block1 = block
    self.tip = block.sha256
    height += 1

    # Bury the block 100 deep so the coinbase output is spendable
    for i in range(100):
        block = create_block(self.tip, create_coinbase(height),
                             self.block_time)
        block.solve()
        self.blocks.append(block)
        self.tip = block.sha256
        self.block_time += 1
        height += 1

    # Create a transaction spending the coinbase output with an invalid (null) signature
    tx = CTransaction()
    tx.vin.append(
        CTxIn(COutPoint(self.block1.vtx[0].sha256, 0), scriptSig=b""))
    tx.vout.append(CTxOut(49 * 100000000, CScript([OP_TRUE])))
    tx.calc_sha256()

    block102 = create_block(self.tip, create_coinbase(height),
                            self.block_time)
    self.block_time += 1
    block102.vtx.extend([tx])
    block102.hashMerkleRoot = block102.calc_merkle_root()
    block102.rehash()
    block102.solve()
    self.blocks.append(block102)
    self.tip = block102.sha256
    self.block_time += 1
    height += 1

    # Bury the assumed valid block 2100 deep
    for i in range(2100):
        block = create_block(self.tip, create_coinbase(height),
                             self.block_time)
        block.nVersion = 4
        block.solve()
        self.blocks.append(block)
        self.tip = block.sha256
        self.block_time += 1
        height += 1

    # Start node1 and node2 with assumevalid so they accept a block with a bad signature.
    self.nodes.append(
        start_node(1, self.options.tmpdir,
                   ["-debug", "-assumevalid=" + hex(block102.sha256)]))
    node1 = BaseNode()  # connects to node1
    connections.append(
        NodeConn('127.0.0.1', p2p_port(1), self.nodes[1], node1))
    node1.add_connection(connections[1])
    node1.wait_for_verack()

    self.nodes.append(
        start_node(2, self.options.tmpdir,
                   ["-debug", "-assumevalid=" + hex(block102.sha256)]))
    node2 = BaseNode()  # connects to node2
    connections.append(
        NodeConn('127.0.0.1', p2p_port(2), self.nodes[2], node2))
    node2.add_connection(connections[2])
    node2.wait_for_verack()

    # send header lists to all three nodes
    node0.send_header_for_blocks(self.blocks[0:2000])
    node0.send_header_for_blocks(self.blocks[2000:])
    node1.send_header_for_blocks(self.blocks[0:2000])
    node1.send_header_for_blocks(self.blocks[2000:])
    # node2 only gets 200 headers — not enough to cover the assumevalid block,
    # so it must still validate (and reject) block 102.
    node2.send_header_for_blocks(self.blocks[0:200])

    # Send 102 blocks to node0. Block 102 will be rejected.
    for i in range(101):
        node0.send_message(msg_block(self.blocks[i]))
    node0.sync_with_ping()  # make sure the most recent block is synced
    node0.send_message(msg_block(self.blocks[101]))
    assert_equal(
        self.nodes[0].getblock(self.nodes[0].getbestblockhash())['height'],
        101)

    # Send 3102 blocks to node1. All blocks will be accepted.
    for i in range(2202):
        node1.send_message(msg_block(self.blocks[i]))
    node1.sync_with_ping()  # make sure the most recent block is synced
    assert_equal(
        self.nodes[1].getblock(self.nodes[1].getbestblockhash())['height'],
        2202)

    # Send 102 blocks to node2. Block 102 will be rejected.
    for i in range(101):
        node2.send_message(msg_block(self.blocks[i]))
    node2.sync_with_ping()  # make sure the most recent block is synced
    node2.send_message(msg_block(self.blocks[101]))
    assert_equal(
        self.nodes[2].getblock(self.nodes[2].getbestblockhash())['height'],
        101)
class PTVP2PTest(ComparisonTestFramework):
    """Parallel transaction validation (PTV) P2P stress test.

    Runs six test cases (TC1-TC6) that push large sets of standard,
    non-standard and deliberately double-spending transactions at a node
    over P2P, then checks mempool contents and reject-message behaviour.

    Fixes applied in review: mutable default argument in run_scenario2
    (``additional_txs=[]`` -> ``None``), dead local in sign_tx, and an
    index-based send loop replaced with direct iteration. Behaviour is
    unchanged.
    """

    def set_test_params(self):
        self.num_nodes = 1
        self.setup_clean_chain = True
        self.genesisactivationheight = 600
        # The coinbase key used.
        self.coinbase_key = CECKey()
        self.coinbase_key.set_secretbytes(b"horsebattery")
        self.coinbase_pubkey = self.coinbase_key.get_pubkey()
        # Locking scripts used in the test.
        self.locking_script_1 = CScript([self.coinbase_pubkey, OP_CHECKSIG])
        self.locking_script_2 = CScript([1, 1, OP_ADD, OP_DROP])
        self.default_args = [
            '-debug', '-maxgenesisgracefulperiod=0',
            '-genesisactivationheight=%d' % self.genesisactivationheight
        ]
        self.extra_args = [self.default_args] * self.num_nodes

    def run_test(self):
        self.test.run()

    def check_rejected(self, rejected_txs, should_be_rejected_tx_set):
        """Wait until the collected reject messages match the expected tx set."""
        wait_until(lambda: {tx.data for tx in rejected_txs} ==
                   {o.sha256 for o in should_be_rejected_tx_set},
                   timeout=20)

    def check_mempool(self, rpc, should_be_in_mempool, timeout=20):
        """Wait until the mempool contains exactly the given transactions."""
        wait_until(lambda: set(rpc.getrawmempool()) ==
                   {t.hash for t in should_be_in_mempool},
                   timeout=timeout)

    def check_mempool_with_subset(self, rpc, should_be_in_mempool, timeout=20):
        """Wait until the mempool contains at least the given transactions."""
        wait_until(lambda: {t.hash for t in should_be_in_mempool}.issubset(
            set(rpc.getrawmempool())),
                   timeout=timeout)

    def check_intersec_with_mempool(self, rpc, txs_set):
        """Return the set of tx hashes from txs_set currently in the mempool."""
        return set(rpc.getrawmempool()).intersection(t.hash for t in txs_set)

    def get_front_slice(self, spends, num):
        """Pop and return the first `num` elements of `spends` (in place)."""
        txs_slice = spends[0:num]
        del spends[0:num]
        return txs_slice

    # Sign a transaction, using the key we know about.
    # This signs input 0 in tx, which is assumed to be spending output n in spend_tx
    def sign_tx(self, tx, spend_tx, n):
        sighash = SignatureHashForkId(spend_tx.vout[n].scriptPubKey, tx, 0,
                                      SIGHASH_ALL | SIGHASH_FORKID,
                                      spend_tx.vout[n].nValue)
        tx.vin[0].scriptSig = CScript([
            self.coinbase_key.sign(sighash) +
            bytes(bytearray([SIGHASH_ALL | SIGHASH_FORKID]))
        ])

    # A helper function to generate new txs spending all outpoints from prev_txs set.
    # NOTE(review): name keeps the historical typo ("transactons") so existing
    # callers outside this file keep working.
    def generate_transactons(self, prev_txs, unlocking_script, locking_script,
                             num_of_ds_txs=0, fee=2000000, factor=10):
        """Create txs spending every outpoint of prev_txs.

        Returns (gen_txs, ds_txs): ds_txs additionally spend prev_txs[0]'s
        first outpoint, making every tx in ds_txs a double spend of the same
        input.
        """
        gen_txs = []
        ds_txs = []
        for prev_tx in prev_txs:
            for n, vout in enumerate(prev_tx.vout):
                tx = CTransaction()
                out_val = vout.nValue - fee
                tx.vout.extend((CTxOut(out_val, locking_script), ) * factor)
                tx.vin.append(
                    CTxIn(COutPoint(prev_tx.sha256, n), unlocking_script,
                          0xffffffff))
                # Use the first unspent txn as a common input for all double spend transactions.
                if num_of_ds_txs and len(ds_txs) < num_of_ds_txs - 1 and len(
                        gen_txs):
                    tx.vin.append(
                        CTxIn(COutPoint(prev_txs[0].sha256, 0),
                              unlocking_script, 0xffffffff))
                    tx.calc_sha256()
                    ds_txs.append(tx)
                    continue
                tx.calc_sha256()
                gen_txs.append(tx)
        # To simplify further checks, move the first unspent txn to the ds_txs set.
        if num_of_ds_txs:
            ds_txs.append(gen_txs[0])
            del gen_txs[0]
        if len(ds_txs) != num_of_ds_txs:
            raise Exception(
                'Cannot create required number of double spend txs.')
        return gen_txs, ds_txs

    # Generate transactions in order so the first transaction's output will be an input for the second transaction.
    def get_chained_txs(self, spend, num_of_txs, unlocking_script,
                        locking_script, money_to_spend, factor):
        """Build a chain of num_of_txs txs, each spending the previous one."""
        txns = []
        for _ in range(0, num_of_txs):
            if factor == 1:
                # Leave room for a fee on every link of a single-output chain.
                money_to_spend = money_to_spend - 1000
            # Create a new transaction.
            tx = create_transaction(spend.tx, spend.n, unlocking_script,
                                    money_to_spend, locking_script)
            # Extend the number of outputs to the required size.
            tx.vout.extend(tx.vout * (factor - 1))
            # Sign txn.
            self.sign_tx(tx, spend.tx, spend.n)
            tx.rehash()
            txns.append(tx)
            # Use the first outpoint to spend in the second iteration.
            spend = PreviousSpendableOutput(tx, 0)
        return txns

    # Create a required number of chains with equal length.
    # - each tx is configured to have factor outpoints with the same locking_script.
    def get_txchains_n(self, num_of_chains, chain_length, spend,
                       unlocking_script, locking_script, money_to_spend,
                       factor):
        if num_of_chains > len(spend):
            raise Exception('Insufficient number of spendable outputs.')
        txchains = []
        for x in range(0, num_of_chains):
            txchains += self.get_chained_txs(spend[x], chain_length,
                                             unlocking_script, locking_script,
                                             money_to_spend, factor)
        return txchains

    # A helper function to create and send a set of tx chains.
    def generate_and_send_txchains_n(self, conn, num_of_chains, chain_length,
                                     spend, locking_script,
                                     money_to_spend=5000000000, factor=10,
                                     timeout=60):
        """Create num_of_chains chains and send every tx over `conn`.

        `timeout` is accepted for signature compatibility but not used here.
        """
        # Create and send txs. In this case there will be num_txs_to_create txs of chain length equal 1.
        txchains = self.get_txchains_n(num_of_chains, chain_length, spend,
                                       CScript(), locking_script,
                                       money_to_spend, factor)
        for txn in txchains:
            conn.send_message(msg_tx(txn))
        return txchains

    #
    # Pre-defined testing scenarios.
    #

    # This scenario is being used to generate and send a set of standard txs in test cases.
    def run_scenario1(self, conn, spend, num_txs_to_create, chain_length,
                      locking_script, money_to_spend=2000000, factor=10,
                      timeout=60):
        return self.generate_and_send_txchains_n(conn, num_txs_to_create,
                                                 chain_length, spend,
                                                 locking_script,
                                                 money_to_spend, factor,
                                                 timeout)

    # This scenario is being used to generate and send a set of non-standard txs in test cases.
    # - there will be num_txs_to_create txs of chain length equal 1.
    # - from a single spend 2499 txs can be created (due to value of the funding tx and value assigned to outpoints: 5000000000/2000000 = 2500)
    # - The exact number of 2500 txs could be created by including '-limitfreerelay=1000' param in the node's config.
    # - The value 2000000 meets requirements of sufficient fee per txn size (used in the test).
    def run_scenario2(self, conn, spend, num_txs_to_create, locking_script,
                      num_ds_to_create=0, additional_txs=None,
                      shuffle_txs=False, send_txs=True, money_to_spend=2000000,
                      timeout=60):
        """Fund, then create (and optionally send) non-std txs plus double spends.

        Returns (nonstd_txs + additional_txs, rejected_txs) or, when double
        spends were requested, (nonstd_txs + additional_txs, ds_txs,
        rejected_txs).
        """
        # Fix for the mutable-default-argument pitfall: default was `[]`.
        if additional_txs is None:
            additional_txs = []
        # A handler to catch reject messages.
        rejected_txs = []

        def on_reject(conn, msg):
            rejected_txs.append(msg)
            # A double spend reject message is the expected one to occur.
            assert_equal(msg.reason, b'txn-double-spend-detected')

        conn.cb.on_reject = on_reject
        # Create and send tx chains with non-std outputs.
        # - one tx with vout_size=num_txs_to_create outpoints will be created
        txchains = self.generate_and_send_txchains_n(conn, 1, 1, spend,
                                                     locking_script,
                                                     money_to_spend,
                                                     num_txs_to_create,
                                                     timeout)
        # Check if required transactions are accepted by the mempool.
        self.check_mempool(conn.rpc, txchains, timeout)
        # Create a new block
        # - having an empty mempool (before submitting non-std txs) will simplify further checks.
        conn.rpc.generate(1)
        # Create and send transactions spending non-std outputs.
        nonstd_txs, ds_txs = self.generate_transactons(txchains,
                                                       CScript([OP_TRUE]),
                                                       locking_script,
                                                       num_ds_to_create)
        all_txs = nonstd_txs + ds_txs + additional_txs
        # Shuffle txs if it is required
        if shuffle_txs:
            random.shuffle(all_txs)
        # Send txs if it is required
        if send_txs:
            for tx in all_txs:
                conn.send_message(msg_tx(tx))
        # Return ds set if was requested.
        if len(ds_txs):
            return nonstd_txs + additional_txs, ds_txs, rejected_txs
        return nonstd_txs + additional_txs, rejected_txs

    # This scenario is being used to generate and send multiple subsets of non-standard txs in test cases.
    # - scenario2 is used to prepare the required size of the set
    # - each subset is created from a different funding txn
    # - as a result, there is no intersection between subsets
    def run_scenario3(self, conn, spend, num_txs_to_create, locking_script,
                      num_ds_to_create=0, shuffle_txs=False,
                      money_to_spend=2000000, timeout=60):
        all_nonstd_txs = []
        all_ds_txs = []
        # Create the set of required txs.
        # NOTE: rejected_txs below is the collector from the LAST scenario2
        # call only (and is unbound if `spend` is empty).
        for tx in spend:
            nonstd_txs, ds_txs, rejected_txs = self.run_scenario2(
                conn, [tx], num_txs_to_create, locking_script,
                num_ds_to_create, [], shuffle_txs, False, money_to_spend,
                timeout)
            all_nonstd_txs += nonstd_txs
            all_ds_txs += ds_txs
        all_txs = all_nonstd_txs + all_ds_txs
        # Shuffle txs if it is required
        if shuffle_txs:
            random.shuffle(all_txs)
        # Send txs
        for tx in all_txs:
            conn.send_message(msg_tx(tx))
        # Return ds set if was required to create.
        if len(all_ds_txs):
            return all_nonstd_txs, all_ds_txs, rejected_txs
        return all_nonstd_txs, rejected_txs

    def get_tests(self):
        """Yield the funding chain, then run test cases TC1-TC6."""
        # Shorthand for functions
        block = self.chain.next_block
        node = self.nodes[0]
        self.chain.set_genesis_hash(int(node.getbestblockhash(), 16))

        # Create a new block
        block(0, coinbase_pubkey=self.coinbase_pubkey)
        self.chain.save_spendable_output()
        yield self.accepted()

        # Now we need that block to mature so we can spend the coinbase.
        # Also, move block height on beyond Genesis activation.
        test = TestInstance(sync_every_block=False)
        for i in range(600):
            block(5000 + i, coinbase_pubkey=self.coinbase_pubkey)
            test.blocks_and_transactions.append([self.chain.tip, True])
            self.chain.save_spendable_output()
        yield test

        # Collect spendable outputs now to avoid cluttering the code later on.
        out = []
        for i in range(200):
            out.append(self.chain.get_spendable_output())

        self.stop_node(0)

        #
        # Test Case 1 (TC1).
        #
        # - 5000 standard txs used (100 txn chains, each of length 50)
        # - 1 peer connected to node0
        #
        # The number of txs used in the test case.
        tc1_txchains_num = 100
        tc1_tx_chain_length = 50
        # Select funding transactions to use:
        # - tc1_txchains_num funding transactions are needed in this test case.
        spend_txs = self.get_front_slice(out, tc1_txchains_num)
        args = [
            '-checkmempool=0', '-persistmempool=0', '-limitancestorcount=50',
            '-txnvalidationasynchrunfreq=100', '-numstdtxvalidationthreads=6',
            '-numnonstdtxvalidationthreads=2'
        ]
        with self.run_node_with_connections(
                'TC1: {} std txn chains used, each of length {}.'.format(
                    tc1_txchains_num, tc1_tx_chain_length), 0,
                args + self.default_args,
                number_of_connections=1) as (conn, ):
            # Run test case.
            std_txs = self.run_scenario1(conn, spend_txs, tc1_txchains_num,
                                         tc1_tx_chain_length,
                                         self.locking_script_1, 5000000000, 1)
            wait_for_ptv_completion(conn,
                                    tc1_txchains_num * tc1_tx_chain_length)
            # Check if required transactions are accepted by the mempool.
            self.check_mempool(conn.rpc, std_txs, timeout=30)
            assert_equal(conn.rpc.getmempoolinfo()['size'],
                         tc1_txchains_num * tc1_tx_chain_length)

        #
        # Test Case 2 (TC2).
        #
        # - 2400 non-standard txs (with a simple locking script) used
        # - 1 peer connected to node0
        #
        # The number of txs used in the test case.
        tc2_txs_num = 2400
        # Select funding transactions to use:
        # - one funding transaction is needed in this test case.
        spend_txs = self.get_front_slice(out, 1)
        args = ['-checkmempool=0', '-persistmempool=0']
        with self.run_node_with_connections(
                'TC2: {} non-std txs used.'.format(tc2_txs_num), 0,
                args + self.default_args,
                number_of_connections=1) as (conn, ):
            # Run test case.
            nonstd_txs, rejected_txs = self.run_scenario2(
                conn, spend_txs, tc2_txs_num, self.locking_script_2)
            wait_for_ptv_completion(conn, tc2_txs_num)
            # Check if required transactions are accepted by the mempool.
            self.check_mempool(conn.rpc, nonstd_txs, timeout=30)
            assert_equal(len(rejected_txs), 0)
            assert_equal(conn.rpc.getmempoolinfo()['size'], tc2_txs_num)

        #
        # Test Case 3 (TC3).
        #
        # - 2400 valid non-standard txs (with a simple locking script) used
        # - 100 double spend txs used
        # - 1 peer connected to node0
        # From the double spends set only 1 txn is accepted by the mempool.
        #
        # The number of txs used in the test case.
        tc3_txs_num = 2400
        ds_txs_num = 100
        # Select funding transactions to use:
        # - one funding transaction is needed in this test case.
        spend_txs = self.get_front_slice(out, 1)
        args = ['-checkmempool=0', '-persistmempool=0']
        with self.run_node_with_connections(
                'TC3: {} non-std txs ({} double spends) used.'.format(
                    tc3_txs_num, ds_txs_num), 0, args + self.default_args,
                number_of_connections=1) as (conn, ):
            # Run test case.
            nonstd_txs, ds_txs, _ = self.run_scenario2(conn, spend_txs,
                                                       tc3_txs_num,
                                                       self.locking_script_2,
                                                       ds_txs_num)
            wait_for_ptv_completion(conn, len(nonstd_txs) + 1)
            # All txs from the nonstd_txs result set should be accepted
            self.check_mempool_with_subset(conn.rpc, nonstd_txs, timeout=30)
            # There is one more transaction in the mempool, which is a random txn from the ds_txs set
            assert_equal(conn.rpc.getmempoolinfo()['size'],
                         len(nonstd_txs) + 1)
            # Only one txn is allowed to be in the mempool from the given ds set.
            assert_equal(
                len(self.check_intersec_with_mempool(conn.rpc, ds_txs)), 1)

        #
        # Test Case 4 (TC4).
        #
        # - 10 standard txs used (as additional input set)
        # - 2400 non-standard (with a simple locking script) txs used
        # - 100 double spend txs used
        # - 1 peer connected to node0
        # All input txs are randomly suffled before sending.
        #
        # The number of txs used in the test case.
        tc4_1_txs_num = 10
        tc4_2_txs_num = 2400
        ds_txs_num = 100
        # Select funding transactions to use:
        # - tc4_1_txs_num+1 funding transactions are needed in this test case.
        spend_txs = self.get_front_slice(out, tc4_1_txs_num)
        spend_txs2 = self.get_front_slice(out, 1)
        args = ['-checkmempool=0', '-persistmempool=0']
        with self.run_node_with_connections(
                'TC4: {} std, {} nonstd txs ({} double spends) used (shuffled set).'
                .format(tc4_1_txs_num, tc4_2_txs_num, ds_txs_num), 0,
                args + self.default_args,
                number_of_connections=1) as (conn, ):
            # Run test case.
            # Create some additional std txs to use.
            std_txs = self.get_txchains_n(tc4_1_txs_num, 1, spend_txs,
                                          CScript(), self.locking_script_1,
                                          2000000, 10)
            # Create and send generated txs.
            std_and_nonstd_txs, ds_txs, _ = self.run_scenario2(
                conn, spend_txs2, tc4_2_txs_num, self.locking_script_2,
                ds_txs_num, std_txs, shuffle_txs=True)
            wait_for_ptv_completion(conn, len(std_and_nonstd_txs) + 1)
            # All txs from the std_and_nonstd_txs result set should be accepted
            self.check_mempool_with_subset(conn.rpc, std_and_nonstd_txs,
                                           timeout=30)
            # There is one more transaction in the mempool. It is a random txn from the ds_txs set
            assert_equal(conn.rpc.getmempoolinfo()['size'],
                         len(std_and_nonstd_txs) + 1)
            # Only one txn is allowed to be accepted by the mempool, from the given double spends txn set.
            assert_equal(
                len(self.check_intersec_with_mempool(conn.rpc, ds_txs)), 1)

        #
        # Test Case 5 (TC5).
        #
        # - 24K=10x2400 non-standard txs (with a simple locking script) used
        # - 1K=10x100 double spend txs used
        # - 1 peer connected to node0
        # From each double spend set only 1 txn is accepted by the mempool.
        # - Valid non-standard txs are sent first, then double spend txs (this approach maximises a ratio of 'txn-double-spend-detected' reject msgs)
        #
        # The number of txs used in a single subset.
        tc5_txs_num = 2400
        ds_txs_num = 100
        # The number of subsets used in the test case.
        tc5_num_of_subsets = 10
        # Select funding transactions to use:
        # - tc5_num_of_subsets funding transaction are needed in this test case.
        spend_txs = self.get_front_slice(out, tc5_num_of_subsets)
        args = ['-checkmempool=0', '-persistmempool=0']
        with self.run_node_with_connections(
                'TC5: {} non-std txs ({} double spends) used.'.format(
                    tc5_txs_num * tc5_num_of_subsets,
                    ds_txs_num * tc5_num_of_subsets), 0,
                args + self.default_args,
                number_of_connections=1) as (conn, ):
            # Run test case.
            nonstd_txs, ds_txs, rejected_txs = self.run_scenario3(
                conn, spend_txs, tc5_txs_num, self.locking_script_2,
                ds_txs_num)
            wait_for_ptv_completion(conn,
                                    len(nonstd_txs) + tc5_num_of_subsets,
                                    check_interval=0.5)
            # All txs from the nonstd_txs result set should be accepted
            self.check_mempool_with_subset(conn.rpc, nonstd_txs, timeout=60)
            # There are tc5_num_of_subsets more transaction in the mempool (random txns from the ds_txs set)
            assert_equal(conn.rpc.getmempoolinfo()['size'],
                         len(nonstd_txs) + tc5_num_of_subsets)
            # Only tc5_num_of_subsets txns are allowed to be in the mempool from the given ds set.
            assert_equal(
                len(self.check_intersec_with_mempool(conn.rpc, ds_txs)),
                tc5_num_of_subsets)

        #
        # Test Case 6 (TC6).
        #
        # - 24K=10x2400 non-standard txs (with a simple locking script) used
        # - 1K=10x100 double spend txs used
        # - 1 peer connected to node0
        # From each double spends set only 1 txn is accepted by the mempool.
        # All input txs are randomly suffled before sending.
        # - the txs set is shuffeled first so it significantly decreases 'txn-double-spend-detected' reject msgs comparing to TC5
        # - in this case 'txn-mempool-conflict' reject reason will mostly occur
        #
        # The number of txs used in a single subset.
        tc6_txs_num = 2400
        ds_txs_num = 100
        # The number of subsets used in the test case.
        tc6_num_of_subsets = 10
        # Select funding transactions to use:
        # - tc6_num_of_subsets funding transaction are needed in this test case.
        spend_txs = self.get_front_slice(out, tc6_num_of_subsets)
        args = ['-checkmempool=0', '-persistmempool=0']
        with self.run_node_with_connections(
                'TC6: {} non-std txs ({} double spends) used (shuffled set).'.
                format(tc6_txs_num * tc6_num_of_subsets,
                       ds_txs_num * tc6_num_of_subsets), 0,
                args + self.default_args,
                number_of_connections=1) as (conn, ):
            # Run test case.
            nonstd_txs, ds_txs, rejected_txs = self.run_scenario3(
                conn, spend_txs, tc6_txs_num, self.locking_script_2,
                ds_txs_num, shuffle_txs=True)
            wait_for_ptv_completion(conn,
                                    len(nonstd_txs) + tc6_num_of_subsets,
                                    check_interval=0.5)
            # All txs from the nonstd_txs result set should be accepted
            self.check_mempool_with_subset(conn.rpc, nonstd_txs, timeout=60)
            # There are tc6_num_of_subsets more transaction in the mempool (random txns from the ds_txs set)
            assert_equal(conn.rpc.getmempoolinfo()['size'],
                         len(nonstd_txs) + tc6_num_of_subsets)
            # Only tc6_num_of_subsets txns are allowed to be in the mempool from the given ds set.
            assert_equal(
                len(self.check_intersec_with_mempool(conn.rpc, ds_txs)),
                tc6_num_of_subsets)
def get_tests(self):
    """Generator of TestInstances exercising block-size and sigop limits.

    Builds a chain past the MONOLITH (May 15, 2018 HF) activation, then
    yields accepted/rejected test cases for: oversized blocks
    (bad-blk-length), per-block sigop limits that scale with block size
    (bad-blk-sigops), per-transaction sigop limits (bad-txn-sigops), and
    P2SH sigop counting. Finally submits a maximal-size block via RPC.

    NOTE(review): relies on the legacy ComparisonTestFramework protocol —
    each yielded TestInstance is delivered to the node and its
    accept/reject outcome asserted by the framework.
    """
    node = self.nodes[0]
    self.genesis_hash = int(node.getbestblockhash(), 16)
    self.block_heights[self.genesis_hash] = 0
    spendable_outputs = []

    # save the current tip so it can be spent by a later block
    def save_spendable_output():
        spendable_outputs.append(self.tip)

    # get an output that we previously marked as spendable
    def get_spendable_output():
        return PreviousSpendableOutput(spendable_outputs.pop(0).vtx[0], 0)

    # returns a test case that asserts that the current tip was accepted
    def accepted():
        return TestInstance([[self.tip, True]])

    # returns a test case that asserts that the current tip was rejected
    # (optionally with a specific RejectResult to match against)
    def rejected(reject=None):
        if reject is None:
            return TestInstance([[self.tip, False]])
        else:
            return TestInstance([[self.tip, reject]])

    # move the tip back to a previous block
    def tip(number):
        self.tip = self.blocks[number]

    # adds transactions to the block and updates state
    def update_block(block_number, new_transactions):
        block = self.blocks[block_number]
        self.add_transactions_to_block(block, new_transactions)
        old_sha256 = block.sha256
        block.hashMerkleRoot = block.calc_merkle_root()
        block.solve()
        # Update the internal state just like in next_block
        self.tip = block
        if block.sha256 != old_sha256:
            # re-solving changed the hash; re-key the height map
            self.block_heights[
                block.sha256] = self.block_heights[old_sha256]
            del self.block_heights[old_sha256]
        self.blocks[block_number] = block
        return block

    # shorthand for functions
    block = self.next_block

    # Create a new block
    block(0)
    save_spendable_output()
    yield accepted()

    # Now we need that block to mature so we can spend the coinbase.
    test = TestInstance(sync_every_block=False)
    for i in range(99):
        block(5000 + i)
        test.blocks_and_transactions.append([self.tip, True])
        save_spendable_output()
    yield test

    # collect spendable outputs now to avoid cluttering the code later on
    out = []
    for i in range(100):
        out.append(get_spendable_output())

    # Let's build some blocks and test them.
    # Blocks 1..15 grow from 0.5MB to 7.5MB; all must be accepted pre-fork.
    for i in range(15):
        n = i + 1
        block(n, spend=out[i], block_size=n * ONE_MEGABYTE // 2)
        yield accepted()

    # Start moving MTP forward
    bfork = block(5555, out[15], block_size=8 * ONE_MEGABYTE)
    bfork.nTime = MONOLITH_START_TIME - 1
    update_block(5555, [])
    yield accepted()

    # Get to one block of the May 15, 2018 HF activation
    for i in range(5):
        block(5100 + i)
        test.blocks_and_transactions.append([self.tip, True])
    yield test

    # Check that the MTP is just before the configured fork point.
    assert_equal(node.getblockheader(node.getbestblockhash())['mediantime'],
                 MONOLITH_START_TIME - 1)

    # Before we activate the May 15, 2018 HF, 8MB is the limit.
    block(4444, spend=out[16], block_size=8 * ONE_MEGABYTE + 1)
    yield rejected(RejectResult(16, b'bad-blk-length'))

    # Rewind bad block.
    tip(5104)

    # Activate the May 15, 2018 HF
    block(5556)
    yield accepted()

    # Now MTP is exactly the fork time. Bigger blocks are now accepted.
    assert_equal(node.getblockheader(node.getbestblockhash())['mediantime'],
                 MONOLITH_START_TIME)

    # block of maximal size
    block(17, spend=out[16], block_size=self.excessive_block_size)
    yield accepted()

    # Reject oversized blocks with bad-blk-length error
    block(18, spend=out[17], block_size=self.excessive_block_size + 1)
    yield rejected(RejectResult(16, b'bad-blk-length'))

    # Rewind bad block.
    tip(17)

    # Accept many sigops
    lots_of_checksigs = CScript(
        [OP_CHECKSIG] * MAX_BLOCK_SIGOPS_PER_MB)
    block(19, spend=out[17], script=lots_of_checksigs,
          block_size=ONE_MEGABYTE)
    yield accepted()

    # One sigop over the per-MB limit in a 1MB block must be rejected.
    block(20, spend=out[18], script=lots_of_checksigs,
          block_size=ONE_MEGABYTE, extra_sigops=1)
    yield rejected(RejectResult(16, b'bad-blk-sigops'))

    # Rewind bad block
    tip(19)

    # Accept 40k sigops per block > 1MB and <= 2MB
    block(21, spend=out[18], script=lots_of_checksigs,
          extra_sigops=MAX_BLOCK_SIGOPS_PER_MB,
          block_size=ONE_MEGABYTE + 1)
    yield accepted()

    # Accept 40k sigops per block > 1MB and <= 2MB
    block(22, spend=out[19], script=lots_of_checksigs,
          extra_sigops=MAX_BLOCK_SIGOPS_PER_MB,
          block_size=2 * ONE_MEGABYTE)
    yield accepted()

    # Reject more than 40k sigops per block > 1MB and <= 2MB.
    block(23, spend=out[20], script=lots_of_checksigs,
          extra_sigops=MAX_BLOCK_SIGOPS_PER_MB + 1,
          block_size=ONE_MEGABYTE + 1)
    yield rejected(RejectResult(16, b'bad-blk-sigops'))

    # Rewind bad block
    tip(22)

    # Reject more than 40k sigops per block > 1MB and <= 2MB.
    block(24, spend=out[20], script=lots_of_checksigs,
          extra_sigops=MAX_BLOCK_SIGOPS_PER_MB + 1,
          block_size=2 * ONE_MEGABYTE)
    yield rejected(RejectResult(16, b'bad-blk-sigops'))

    # Rewind bad block
    tip(22)

    # Accept 60k sigops per block > 2MB and <= 3MB
    block(25, spend=out[20], script=lots_of_checksigs,
          extra_sigops=2 * MAX_BLOCK_SIGOPS_PER_MB,
          block_size=2 * ONE_MEGABYTE + 1)
    yield accepted()

    # Accept 60k sigops per block > 2MB and <= 3MB
    block(26, spend=out[21], script=lots_of_checksigs,
          extra_sigops=2 * MAX_BLOCK_SIGOPS_PER_MB,
          block_size=3 * ONE_MEGABYTE)
    yield accepted()

    # Reject more than 60k sigops per block > 2MB and <= 3MB.
    block(27, spend=out[22], script=lots_of_checksigs,
          extra_sigops=2 * MAX_BLOCK_SIGOPS_PER_MB + 1,
          block_size=2 * ONE_MEGABYTE + 1)
    yield rejected(RejectResult(16, b'bad-blk-sigops'))

    # Rewind bad block
    tip(26)

    # Reject more than 60k sigops per block > 2MB and <= 3MB.
    block(28, spend=out[22], script=lots_of_checksigs,
          extra_sigops=2 * MAX_BLOCK_SIGOPS_PER_MB + 1,
          block_size=3 * ONE_MEGABYTE)
    yield rejected(RejectResult(16, b'bad-blk-sigops'))

    # Rewind bad block
    tip(26)

    # Too many sigops in one txn
    too_many_tx_checksigs = CScript(
        [OP_CHECKSIG] * (MAX_BLOCK_SIGOPS_PER_MB + 1))
    block(
        29, spend=out[22], script=too_many_tx_checksigs,
        block_size=ONE_MEGABYTE + 1)
    yield rejected(RejectResult(16, b'bad-txn-sigops'))

    # Rewind bad block
    tip(26)

    # Generate a key pair to test P2SH sigops count
    private_key = CECKey()
    private_key.set_secretbytes(b"fatstacks")
    public_key = private_key.get_pubkey()

    # P2SH
    # Build the redeem script, hash it, use hash to create the p2sh script
    redeem_script = CScript(
        [public_key] + [OP_2DUP, OP_CHECKSIGVERIFY] * 5 + [OP_CHECKSIG])
    redeem_script_hash = hash160(redeem_script)
    p2sh_script = CScript([OP_HASH160, redeem_script_hash, OP_EQUAL])

    # Create a p2sh transaction
    p2sh_tx = self.create_tx(out[22], 1, p2sh_script)

    # Add the transaction to the block
    block(30)
    update_block(30, [p2sh_tx])
    yield accepted()

    # Creates a new transaction using the p2sh transaction included in the
    # last block
    def spend_p2sh_tx(output_script=CScript([OP_TRUE])):
        # Create the transaction
        spent_p2sh_tx = CTransaction()
        spent_p2sh_tx.vin.append(CTxIn(COutPoint(p2sh_tx.sha256, 0), b''))
        spent_p2sh_tx.vout.append(CTxOut(1, output_script))
        # Sign the transaction using the redeem script
        sighash = SignatureHashForkId(
            redeem_script, spent_p2sh_tx, 0,
            SIGHASH_ALL | SIGHASH_FORKID, p2sh_tx.vout[0].nValue)
        sig = private_key.sign(sighash) + \
            bytes(bytearray([SIGHASH_ALL | SIGHASH_FORKID]))
        spent_p2sh_tx.vin[0].scriptSig = CScript([sig, redeem_script])
        spent_p2sh_tx.rehash()
        return spent_p2sh_tx

    # Sigops p2sh limit: sigops left after counting the redeem script itself
    p2sh_sigops_limit = MAX_BLOCK_SIGOPS_PER_MB - \
        redeem_script.GetSigOpCount(True)
    # Too many sigops in one p2sh txn
    too_many_p2sh_sigops = CScript([OP_CHECKSIG] * (p2sh_sigops_limit + 1))
    block(31, spend=out[23],
          block_size=ONE_MEGABYTE + 1)
    update_block(31, [spend_p2sh_tx(too_many_p2sh_sigops)])
    yield rejected(RejectResult(16, b'bad-txn-sigops'))

    # Rewind bad block
    tip(30)

    # Max sigops in one p2sh txn
    max_p2sh_sigops = CScript([OP_CHECKSIG] * (p2sh_sigops_limit))
    block(32, spend=out[23], block_size=ONE_MEGABYTE + 1)
    update_block(32, [spend_p2sh_tx(max_p2sh_sigops)])
    yield accepted()

    # Submit a very large block via RPC
    large_block = block(
        33, spend=out[24], block_size=self.excessive_block_size)
    node.submitblock(ToHex(large_block))
class PTVRPCTests(ComparisonTestFramework):
    """Parallel Transaction Validation (PTV) RPC tests.

    Verifies that transactions already received via the p2p interface are
    rejected with 'txn-already-known' when resubmitted through
    sendrawtransaction, both while they are still queued for validation
    (scenario 1) and across block generation / re-org (scenario 2).
    """

    def set_test_params(self):
        """Configure one clean-chain node and the shared coinbase key/script."""
        self.num_nodes = 1
        self.setup_clean_chain = True
        # Genesis upgrade activates at this height; the chain built in
        # get_tests is taken past it.
        self.genesisactivationheight = 600
        self.coinbase_key = CECKey()
        self.coinbase_key.set_secretbytes(b"horsebattery")
        self.coinbase_pubkey = self.coinbase_key.get_pubkey()
        self.locking_script = CScript([self.coinbase_pubkey, OP_CHECKSIG])
        self.default_args = [
            '-debug', '-maxgenesisgracefulperiod=0',
            '-genesisactivationheight=%d' % self.genesisactivationheight
        ]
        self.extra_args = [self.default_args] * self.num_nodes

    def run_test(self):
        """Delegate to the comparison-test framework runner."""
        self.test.run()

    # Sign a transaction, using the key we know about.
    # This signs input 0 in tx, which is assumed to be spending output n in spend_tx
    def sign_tx(self, tx, spend_tx, n):
        scriptPubKey = bytearray(spend_tx.vout[n].scriptPubKey)
        sighash = SignatureHashForkId(spend_tx.vout[n].scriptPubKey, tx, 0,
                                      SIGHASH_ALL | SIGHASH_FORKID,
                                      spend_tx.vout[n].nValue)
        tx.vin[0].scriptSig = CScript([
            self.coinbase_key.sign(sighash) +
            bytes(bytearray([SIGHASH_ALL | SIGHASH_FORKID]))
        ])

    def check_mempool(self, rpc, should_be_in_mempool, timeout=20):
        """Wait until the mempool contains exactly the given txns' hashes."""
        wait_until(lambda: set(rpc.getrawmempool()) ==
                   {t.hash for t in should_be_in_mempool}, timeout=timeout)

    # Generating transactions in order so first transaction's output will be an input for second transaction
    def get_chained_transactions(self, spend, num_of_transactions,
                                 money_to_spend=5000000000):
        """Build a chain of signed txns, each spending the previous output."""
        txns = []
        for _ in range(0, num_of_transactions):
            money_to_spend = money_to_spend - 1000  # one satoshi to fee
            tx = create_transaction(spend.tx, spend.n, b"", money_to_spend,
                                    self.locking_script)
            self.sign_tx(tx, spend.tx, spend.n)
            tx.rehash()
            txns.append(tx)
            spend = PreviousSpendableOutput(tx, 0)
        return txns

    # Create a required number of chains with equal length.
    def get_txchains_n(self, num_of_chains, chain_length, spend):
        """Return num_of_chains independent tx chains of chain_length each.

        Raises if `spend` does not hold enough spendable outputs (one
        funding output per chain).
        """
        if num_of_chains > len(spend):
            raise Exception('Insufficient number of spendable outputs.')
        txchains = []
        for x in range(0, num_of_chains):
            txchains += self.get_chained_transactions(spend[x], chain_length)
        return txchains

    # Test an attempt to resubmit transactions (via rpc interface) which are already known
    # - received earlier via p2p interface and not processed yet
    # - use sendrawtransaction rpc interface (a single txn submit) to submit duplicates
    def run_scenario1(self, conn, num_of_chains, chain_length, spend,
                      allowhighfees=False, dontcheckfee=False, timeout=30):
        """Send tx chains via p2p, then assert rpc resubmits are rejected.

        Returns the list of created transactions so callers (scenario 2)
        can continue with them.
        """
        # Create tx chains.
        txchains = self.get_txchains_n(num_of_chains, chain_length, spend)
        # Send txns, one by one, through p2p interface.
        for tx in range(len(txchains)):
            conn.send_message(msg_tx(txchains[tx]))
        # Check if there is an expected number of transactions in the validation queues
        # - this scenario relies on ptv delayed processing
        # - ptv is required to be paused
        wait_until(lambda: conn.rpc.getblockchainactivity()["transactions"] ==
                   num_of_chains * chain_length, timeout=timeout)
        # No transactions should be in the mempool.
        assert_equal(conn.rpc.getmempoolinfo()['size'], 0)
        # Resubmit txns through rpc interface
        # - there should be num_of_chains*chain_length txns detected as known transactions
        # - due to the fact that all were already received via p2p interface
        for tx in range(len(txchains)):
            assert_raises_rpc_error(
                -26, "txn-already-known", conn.rpc.sendrawtransaction,
                ToHex(txchains[tx]), allowhighfees, dontcheckfee)
        # No transactions should be in the mempool.
        assert_equal(conn.rpc.getmempoolinfo()['size'], 0)
        return txchains

    # An extension to the scenario1.
    # - submit txns through p2p interface
    # - resubmit transactions (via rpc interface) which are already known
    # - create a new block
    # - use invalidateblock to re-org back
    # - create a new block
    # - check if txns are present in the new block
    def run_scenario2(self, conn, num_of_chains, chain_length, spend,
                      allowhighfees=False, dontcheckfee=False, timeout=60):
        """Scenario 1 plus mining, re-org, and re-mining of the same txns."""
        # Create tx chains.
        txchains = self.run_scenario1(conn, num_of_chains, chain_length,
                                      spend, allowhighfees, dontcheckfee,
                                      timeout)
        wait_for_ptv_completion(conn, len(txchains), timeout=timeout)
        # Check if txchains txns are in the mempool.
        self.check_mempool(conn.rpc, set(txchains), timeout=60)
        # Check if there is only num_of_chains * chain_length txns in the mempool.
        assert_equal(conn.rpc.getmempoolinfo()['size'], len(txchains))
        # Generate a single block.
        mined_block1 = conn.rpc.generate(1)
        # Mempool should be empty, all txns in the block.
        assert_equal(conn.rpc.getmempoolinfo()['size'], 0)
        # Use invalidateblock to re-org back; all transactions should
        # end up unconfirmed and back in the mempool.
        conn.rpc.invalidateblock(mined_block1[0])
        # There should be exactly num_of_chains * chain_length txns in the mempool.
        assert_equal(conn.rpc.getmempoolinfo()['size'], len(txchains))
        self.check_mempool(conn.rpc, set(txchains))
        # Generate another block, they should all get mined.
        mined_block2 = conn.rpc.generate(1)
        # Mempool should be empty, all txns confirmed.
        assert_equal(conn.rpc.getmempoolinfo()['size'], 0)
        # Check if txchains txns are included in the block.
        mined_block2_details = conn.rpc.getblock(mined_block2[0])
        # +1 for coinbase txn.
        assert_equal(mined_block2_details['num_tx'], len(txchains) + 1)
        assert_equal(
            len(
                set(mined_block2_details['tx']).intersection(
                    t.hash for t in txchains)), len(txchains))

    def get_tests(self):
        """Build a mature chain past Genesis activation, then run TS1/TS2."""
        # NOTE(review): rejected_txs/on_reject appear unused below —
        # presumably a leftover hook for reject-message collection; verify
        # before removing.
        rejected_txs = []

        def on_reject(conn, msg):
            rejected_txs.append(msg)

        # Shorthand for functions
        block = self.chain.next_block
        node = self.nodes[0]
        self.chain.set_genesis_hash(int(node.getbestblockhash(), 16))

        # Create a new block
        block(0, coinbase_pubkey=self.coinbase_pubkey)
        self.chain.save_spendable_output()
        yield self.accepted()

        # Now we need that block to mature so we can spend the coinbase.
        # Also, move block height on beyond Genesis activation.
        test = TestInstance(sync_every_block=False)
        for i in range(600):
            block(5000 + i, coinbase_pubkey=self.coinbase_pubkey)
            test.blocks_and_transactions.append([self.chain.tip, True])
            self.chain.save_spendable_output()
        yield test

        # Collect spendable outputs now to avoid cluttering the code later on.
        out = []
        for i in range(200):
            out.append(self.chain.get_spendable_output())

        self.stop_node(0)

        # Scenario 1 (TS1).
        # This test case checks if resubmited transactions (through sendrawtransaction interface) are rejected,
        # at the early stage of processing (before txn validation is executed).
        # - 1K txs used
        # - 1K txns are sent first through the p2p interface (and not processed as ptv is paused)
        # - allowhighfees=False (default)
        # - dontcheckfee=False (default)
        #
        # Test case config
        num_of_chains = 10
        chain_length = 100
        # Node's config
        args = [
            '-txnvalidationasynchrunfreq=10000', '-limitancestorcount=100',
            '-checkmempool=0', '-persistmempool=0'
        ]
        with self.run_node_with_connections(
                'TS1: {} chains of length {}. Test duplicates resubmitted via rpc.'
                .format(num_of_chains, chain_length), 0,
                args + self.default_args,
                number_of_connections=1) as (conn, ):
            # Run test case.
            self.run_scenario1(conn, num_of_chains, chain_length, out)

        # Scenario 2 (TS2).
        # It's an extension to TS1. Resubmit duplicates, then create a new block and check if it is a valid block.
        # - 100 txs used
        # - allowhighfees=False (default)
        # - dontcheckfee=False (default)
        #
        # Test case config
        num_of_chains = 10
        chain_length = 10
        # Node's config
        args = [
            '-txnvalidationasynchrunfreq=2000',
            '-blockcandidatevaliditytest=1',  # on regtest it's enabled by default but for clarity let's add it explicitly.
            '-checkmempool=0', '-persistmempool=0'
        ]
        with self.run_node_with_connections(
                'TS2: {} chains of length {}. Test duplicates and generate a new block.'
                .format(num_of_chains, chain_length), 0,
                args + self.default_args,
                number_of_connections=1) as (conn, ):
            # Run test case.
            self.run_scenario2(conn, num_of_chains, chain_length, out)
class FullBlockTest(ComparisonTestFramework): # Can either run this test as 1 node with expected answers, or two and compare them. # Change the "outcome" variable from each TestInstance object to only do the comparison. def set_test_params(self): self.num_nodes = 1 self.setup_clean_chain = True self.block_heights = {} self.coinbase_key = CECKey() self.coinbase_key.set_secretbytes(b"horsebattery") self.coinbase_pubkey = self.coinbase_key.get_pubkey() self.tip = None self.blocks = {} def add_options(self, parser): super().add_options(parser) parser.add_option("--runbarelyexpensive", dest="runbarelyexpensive", default=True) def run_test(self): self.test.run() def get_tests(self): # shorthand for functions block = lambda *a, **kw: self.chain.next_block( *a, coinbase_key=self.coinbase_key, simple_output=True, **kw) create_and_sign_tx = lambda *a, **kw: create_and_sign_transaction( *a, private_key=self.coinbase_key, **({k: v for k, v in kw.items() if not k == 'private_key'})) update_block = self.chain.update_block tip = self.chain.set_tip accepted = self.accepted rejected = self.rejected self.chain.set_genesis_hash(int(self.nodes[0].getbestblockhash(), 16)) save_spendable_output = self.chain.save_spendable_output get_spendable_output = self.chain.get_spendable_output # Create a new block block(0) yield accepted() test, out, _ = prepare_init_chain(self.chain, 99, 33) yield test # Start by building a couple of blocks on top (which output is spent is # in parentheses): # genesis -> b1 (0) -> b2 (1) block(1, spend=out[0]) save_spendable_output() yield accepted() block(2, spend=out[1]) yield accepted() save_spendable_output() # so fork like this: # # genesis -> b1 (0) -> b2 (1) # \-> b3 (1) # # Nothing should happen at this point. We saw b2 first so it takes # priority. tip(1) b3 = block(3, spend=out[1]) txout_b3 = PreviousSpendableOutput(b3.vtx[1], 0) yield rejected() # Now we add another block to make the alternative chain longer. 
# # genesis -> b1 (0) -> b2 (1) # \-> b3 (1) -> b4 (2) block(4, spend=out[2]) yield accepted() # ... and back to the first chain. # genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3) # \-> b3 (1) -> b4 (2) tip(2) block(5, spend=out[2]) save_spendable_output() yield rejected() block(6, spend=out[3]) yield accepted() # Try to create a fork that double-spends # genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3) # \-> b7 (2) -> b8 (4) # \-> b3 (1) -> b4 (2) tip(5) block(7, spend=out[2]) yield rejected() block(8, spend=out[4]) yield rejected() # Try to create a block that has too much fee # genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3) # \-> b9 (4) # \-> b3 (1) -> b4 (2) tip(6) block(9, spend=out[4], additional_coinbase_value=1) yield rejected(RejectResult(16, b'bad-cb-amount')) # Create a fork that ends in a block with too much fee (the one that causes the reorg) # genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3) # \-> b10 (3) -> b11 (4) # \-> b3 (1) -> b4 (2) tip(5) block(10, spend=out[3]) yield rejected() block(11, spend=out[4], additional_coinbase_value=1) yield rejected(RejectResult(16, b'bad-cb-amount')) # Try again, but with a valid fork first # genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3) # \-> b12 (3) -> b13 (4) -> b14 (5) # (b12 added last) # \-> b3 (1) -> b4 (2) tip(5) b12 = block(12, spend=out[3]) save_spendable_output() b13 = block(13, spend=out[4]) # Deliver the block header for b12, and the block b13. # b13 should be accepted but the tip won't advance until b12 is # delivered. yield TestInstance([[CBlockHeader(b12), None], [b13, False]]) save_spendable_output() # b14 is invalid, but the node won't know that until it tries to connect # Tip still can't advance because b12 is missing block(14, spend=out[5], additional_coinbase_value=1) yield rejected() yield TestInstance([[b12, True, b13.sha256]]) # New tip should be b13. 
# Add a block with MAX_BLOCK_SIGOPS_PER_MB and one with one more sigop # genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3) # \-> b12 (3) -> b13 (4) -> b15 (5) -> b16 (6) # \-> b3 (1) -> b4 (2) # Test that a block with a lot of checksigs is okay lots_of_checksigs = CScript([OP_CHECKSIG] * (MAX_BLOCK_SIGOPS_PER_MB - 1)) tip(13) block(15, spend=out[5], script=lots_of_checksigs) yield accepted() save_spendable_output() # Test that a block with too many checksigs is rejected too_many_checksigs = CScript([OP_CHECKSIG] * (MAX_BLOCK_SIGOPS_PER_MB)) block(16, spend=out[6], script=too_many_checksigs) yield rejected(RejectResult(16, b'bad-blk-sigops')) # Attempt to spend a transaction created on a different fork # genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3) # \-> b12 (3) -> b13 (4) -> b15 (5) -> b17 (b3.vtx[1]) # \-> b3 (1) -> b4 (2) tip(15) block(17, spend=txout_b3) yield rejected(RejectResult(16, b'bad-txns-inputs-missingorspent')) # Attempt to spend a transaction created on a different fork (on a fork this time) # genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3) # \-> b12 (3) -> b13 (4) -> b15 (5) # \-> b18 (b3.vtx[1]) -> b19 (6) # \-> b3 (1) -> b4 (2) tip(13) block(18, spend=txout_b3) yield rejected() block(19, spend=out[6]) yield rejected() # Attempt to spend a coinbase at depth too low # genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3) # \-> b12 (3) -> b13 (4) -> b15 (5) -> b20 (7) # \-> b3 (1) -> b4 (2) tip(15) block(20, spend=out[7]) yield rejected( RejectResult(16, b'bad-txns-premature-spend-of-coinbase')) # Attempt to spend a coinbase at depth too low (on a fork this time) # genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3) # \-> b12 (3) -> b13 (4) -> b15 (5) # \-> b21 (6) -> b22 (5) # \-> b3 (1) -> b4 (2) tip(13) block(21, spend=out[6]) yield rejected() block(22, spend=out[5]) yield rejected() # Create a block on either side of LEGACY_MAX_BLOCK_SIZE and make sure its accepted/rejected # genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3) # \-> b12 (3) -> b13 (4) -> b15 
(5) -> b23 (6) # \-> b24 (6) -> b25 (7) # \-> b3 (1) -> b4 (2) tip(15) b23 = block(23, spend=out[6]) tx = CTransaction() script_length = LEGACY_MAX_BLOCK_SIZE - len(b23.serialize()) - 69 script_output = CScript([b'\x00' * script_length]) tx.vout.append(CTxOut(0, script_output)) tx.vin.append(CTxIn(COutPoint(b23.vtx[1].sha256, 0))) b23 = update_block(23, [tx]) # Make sure the math above worked out to produce a max-sized block assert_equal(len(b23.serialize()), LEGACY_MAX_BLOCK_SIZE) yield accepted() save_spendable_output() # Create blocks with a coinbase input script size out of range # genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3) # \-> b12 (3) -> b13 (4) -> b15 (5) -> b23 (6) -> b30 (7) # \-> ... (6) -> ... (7) # \-> b3 (1) -> b4 (2) tip(15) b26 = block(26, spend=out[6]) b26.vtx[0].vin[0].scriptSig = b'\x00' b26.vtx[0].rehash() # update_block causes the merkle root to get updated, even with no new # transactions, and updates the required state. b26 = update_block(26, []) yield rejected(RejectResult(16, b'bad-cb-length')) # Extend the b26 chain to make sure bitcoind isn't accepting b26 b27 = block(27, spend=out[7]) yield rejected(False) # Now try a too-large-coinbase script tip(15) b28 = block(28, spend=out[6]) b28.vtx[0].vin[0].scriptSig = b'\x00' * 101 b28.vtx[0].rehash() b28 = update_block(28, []) yield rejected(RejectResult(16, b'bad-cb-length')) # Extend the b28 chain to make sure bitcoind isn't accepting b28 b29 = block(29, spend=out[7]) yield rejected(False) # b30 has a max-sized coinbase scriptSig. tip(23) b30 = block(30) b30.vtx[0].vin[0].scriptSig = b'\x00' * 100 b30.vtx[0].rehash() b30 = update_block(30, []) yield accepted() save_spendable_output() # b31 - b35 - check sigops of OP_CHECKMULTISIG / OP_CHECKMULTISIGVERIFY / OP_CHECKSIGVERIFY # # genesis -> ... -> b30 (7) -> b31 (8) -> b33 (9) -> b35 (10) # \-> b36 (11) # \-> b34 (10) # \-> b32 (9) # # MULTISIG: each op code counts as 20 sigops. To create the edge case, # pack another 19 sigops at the end. 
lots_of_multisigs = CScript([OP_CHECKMULTISIG] * ((MAX_BLOCK_SIGOPS_PER_MB - 1) // 20) + [OP_CHECKSIG] * 19) b31 = block(31, spend=out[8], script=lots_of_multisigs) assert_equal(get_legacy_sigopcount_block(b31), MAX_BLOCK_SIGOPS_PER_MB) yield accepted() save_spendable_output() # this goes over the limit because the coinbase has one sigop too_many_multisigs = CScript([OP_CHECKMULTISIG] * (MAX_BLOCK_SIGOPS_PER_MB // 20)) b32 = block(32, spend=out[9], script=too_many_multisigs) assert_equal(get_legacy_sigopcount_block(b32), MAX_BLOCK_SIGOPS_PER_MB + 1) yield rejected(RejectResult(16, b'bad-blk-sigops')) # CHECKMULTISIGVERIFY tip(31) lots_of_multisigs = CScript([OP_CHECKMULTISIGVERIFY] * ((MAX_BLOCK_SIGOPS_PER_MB - 1) // 20) + [OP_CHECKSIG] * 19) block(33, spend=out[9], script=lots_of_multisigs) yield accepted() save_spendable_output() too_many_multisigs = CScript([OP_CHECKMULTISIGVERIFY] * (MAX_BLOCK_SIGOPS_PER_MB // 20)) block(34, spend=out[10], script=too_many_multisigs) yield rejected(RejectResult(16, b'bad-blk-sigops')) # CHECKSIGVERIFY tip(33) lots_of_checksigs = CScript([OP_CHECKSIGVERIFY] * (MAX_BLOCK_SIGOPS_PER_MB - 1)) b35 = block(35, spend=out[10], script=lots_of_checksigs) yield accepted() save_spendable_output() too_many_checksigs = CScript([OP_CHECKSIGVERIFY] * (MAX_BLOCK_SIGOPS_PER_MB)) block(36, spend=out[11], script=too_many_checksigs) yield rejected(RejectResult(16, b'bad-blk-sigops')) # Check spending of a transaction in a block which failed to connect # # b6 (3) # b12 (3) -> b13 (4) -> b15 (5) -> b23 (6) -> b30 (7) -> b31 (8) -> b33 (9) -> b35 (10) # \-> b37 (11) # \-> b38 (11/37) # # save 37's spendable output, but then double-spend out11 to invalidate # the block tip(35) b37 = block(37, spend=out[11]) txout_b37 = PreviousSpendableOutput(b37.vtx[1], 0) tx = create_and_sign_tx(out[11].tx, out[11].n, 0) b37 = update_block(37, [tx]) yield rejected(RejectResult(16, b'bad-txns-inputs-missingorspent')) # attempt to spend b37's first non-coinbase tx, at 
which point b37 was # still considered valid tip(35) block(38, spend=txout_b37) yield rejected(RejectResult(16, b'bad-txns-inputs-missingorspent')) # Check P2SH SigOp counting # # # 13 (4) -> b15 (5) -> b23 (6) -> b30 (7) -> b31 (8) -> b33 (9) -> b35 (10) -> b39 (11) -> b41 (12) # \-> b40 (12) # # b39 - create some P2SH outputs that will require 6 sigops to spend: # # redeem_script = COINBASE_PUBKEY, (OP_2DUP+OP_CHECKSIGVERIFY) * 5, OP_CHECKSIG # p2sh_script = OP_HASH160, ripemd160(sha256(script)), OP_EQUAL # tip(35) b39 = block(39) b39_outputs = 0 b39_sigops_per_output = 6 # Build the redeem script, hash it, use hash to create the p2sh script redeem_script = CScript([self.coinbase_pubkey] + [OP_2DUP, OP_CHECKSIGVERIFY] * 5 + [OP_CHECKSIG]) redeem_script_hash = hash160(redeem_script) p2sh_script = CScript([OP_HASH160, redeem_script_hash, OP_EQUAL]) # Create a transaction that spends one satoshi to the p2sh_script, the rest to OP_TRUE # This must be signed because it is spending a coinbase spend = out[11] tx = create_tx(spend.tx, spend.n, 1, p2sh_script) tx.vout.append( CTxOut(spend.tx.vout[spend.n].nValue - 1, CScript([OP_TRUE]))) sign_tx(tx, spend.tx, spend.n, self.coinbase_key) tx.rehash() b39 = update_block(39, [tx]) b39_outputs += 1 # Until block is full, add tx's with 1 satoshi to p2sh_script, the rest # to OP_TRUE tx_new = None tx_last = tx total_size = len(b39.serialize()) while (total_size < LEGACY_MAX_BLOCK_SIZE): tx_new = create_tx(tx_last, 1, 1, p2sh_script) tx_new.vout.append( CTxOut(tx_last.vout[1].nValue - 1, CScript([OP_TRUE]))) tx_new.rehash() total_size += len(tx_new.serialize()) if total_size >= LEGACY_MAX_BLOCK_SIZE: break b39.vtx.append(tx_new) # add tx to block tx_last = tx_new b39_outputs += 1 b39 = update_block(39, []) yield accepted() save_spendable_output() # Test sigops in P2SH redeem scripts # # b40 creates 3333 tx's spending the 6-sigop P2SH outputs from b39 for a total of 19998 sigops. 
# The first tx has one sigop and then at the end we add 2 more to put us just over the max. # # b41 does the same, less one, so it has the maximum sigops permitted. # tip(39) b40 = block(40, spend=out[12]) sigops = get_legacy_sigopcount_block(b40) numTxes = (MAX_BLOCK_SIGOPS_PER_MB - sigops) // b39_sigops_per_output assert_equal(numTxes <= b39_outputs, True) lastOutpoint = COutPoint(b40.vtx[1].sha256, 0) lastAmount = b40.vtx[1].vout[0].nValue new_txs = [] for i in range(1, numTxes + 1): tx = CTransaction() tx.vout.append(CTxOut(1, CScript([OP_TRUE]))) tx.vin.append(CTxIn(lastOutpoint, b'')) # second input is corresponding P2SH output from b39 tx.vin.append(CTxIn(COutPoint(b39.vtx[i].sha256, 0), b'')) # Note: must pass the redeem_script (not p2sh_script) to the # signature hash function sighash = SignatureHashForkId(redeem_script, tx, 1, SIGHASH_ALL | SIGHASH_FORKID, lastAmount) sig = self.coinbase_key.sign(sighash) + bytes( bytearray([SIGHASH_ALL | SIGHASH_FORKID])) scriptSig = CScript([sig, redeem_script]) tx.vin[1].scriptSig = scriptSig tx.rehash() new_txs.append(tx) lastOutpoint = COutPoint(tx.sha256, 0) lastAmount = tx.vout[0].nValue b40_sigops_to_fill = MAX_BLOCK_SIGOPS_PER_MB - \ (numTxes * b39_sigops_per_output + sigops) + 1 tx = CTransaction() tx.vin.append(CTxIn(lastOutpoint, b'')) tx.vout.append(CTxOut(1, CScript([OP_CHECKSIG] * b40_sigops_to_fill))) tx.rehash() new_txs.append(tx) update_block(40, new_txs) yield rejected(RejectResult(16, b'bad-blk-sigops')) # same as b40, but one less sigop tip(39) b41 = block(41, spend=None) update_block(41, b40.vtx[1:-1]) b41_sigops_to_fill = b40_sigops_to_fill - 1 tx = CTransaction() tx.vin.append(CTxIn(lastOutpoint, b'')) tx.vout.append(CTxOut(1, CScript([OP_CHECKSIG] * b41_sigops_to_fill))) tx.rehash() update_block(41, [tx]) yield accepted() # Fork off of b39 to create a constant base again # # b23 (6) -> b30 (7) -> b31 (8) -> b33 (9) -> b35 (10) -> b39 (11) -> b42 (12) -> b43 (13) # \-> b41 (12) # tip(39) block(42, 
spend=out[12]) yield rejected() save_spendable_output() block(43, spend=out[13]) yield accepted() save_spendable_output() # Test a number of really invalid scenarios # # -> b31 (8) -> b33 (9) -> b35 (10) -> b39 (11) -> b42 (12) -> b43 (13) -> b44 (14) # \-> ??? (15) # The next few blocks are going to be created "by hand" since they'll do funky things, such as having # the first transaction be non-coinbase, etc. The purpose of b44 is to # make sure this works. height = self.chain.block_heights[self.chain.tip.sha256] + 1 coinbase = create_coinbase(height, self.coinbase_pubkey) b44 = CBlock() b44.nTime = self.chain.tip.nTime + 1 b44.hashPrevBlock = self.chain.tip.sha256 b44.nBits = 0x207fffff b44.vtx.append(coinbase) b44.hashMerkleRoot = b44.calc_merkle_root() b44.solve() self.chain.tip = b44 self.chain.block_heights[b44.sha256] = height self.chain.blocks[44] = b44 yield accepted() # A block with a non-coinbase as the first tx non_coinbase = create_tx(out[15].tx, out[15].n, 1) b45 = CBlock() b45.nTime = self.chain.tip.nTime + 1 b45.hashPrevBlock = self.chain.tip.sha256 b45.nBits = 0x207fffff b45.vtx.append(non_coinbase) b45.hashMerkleRoot = b45.calc_merkle_root() b45.calc_sha256() b45.solve() self.chain.block_heights[ b45.sha256] = self.chain.block_heights[self.chain.tip.sha256] + 1 self.chain.tip = b45 self.chain.blocks[45] = b45 yield rejected(RejectResult(16, b'bad-cb-missing')) # A block with no txns tip(44) b46 = CBlock() b46.nTime = b44.nTime + 1 b46.hashPrevBlock = b44.sha256 b46.nBits = 0x207fffff b46.vtx = [] b46.hashMerkleRoot = 0 b46.solve() self.chain.block_heights[ b46.sha256] = self.chain.block_heights[b44.sha256] + 1 self.chain.tip = b46 assert 46 not in self.chain.blocks self.chain.blocks[46] = b46 s = ser_uint256(b46.hashMerkleRoot) yield rejected(RejectResult(16, b'bad-cb-missing')) # A block with invalid work tip(44) b47 = block(47, do_solve_block=False) target = uint256_from_compact(b47.nBits) while b47.sha256 < target: # changed > to < b47.nNonce 
+= 1 b47.rehash() yield rejected(RejectResult(16, b'high-hash')) # A block with timestamp > 2 hrs in the future tip(44) b48 = block(48, do_solve_block=False) b48.nTime = int(time.time()) + 60 * 60 * 3 b48.solve() yield rejected(RejectResult(16, b'time-too-new')) # A block with an invalid merkle hash tip(44) b49 = block(49) b49.hashMerkleRoot += 1 b49.solve() yield rejected(RejectResult(16, b'bad-txnmrklroot')) # A block with an incorrect POW limit tip(44) b50 = block(50) b50.nBits = b50.nBits - 1 b50.solve() yield rejected(RejectResult(16, b'bad-diffbits')) # A block with two coinbase txns tip(44) b51 = block(51) cb2 = create_coinbase(51, self.coinbase_pubkey) b51 = update_block(51, [cb2]) yield rejected(RejectResult(16, b'bad-tx-coinbase')) # A block w/ duplicate txns # Note: txns have to be in the right position in the merkle tree to # trigger this error tip(44) b52 = block(52, spend=out[15]) tx = create_tx(b52.vtx[1], 0, 1) b52 = update_block(52, [tx, tx]) yield rejected(RejectResult(16, b'bad-txns-duplicate')) # Test block timestamps # -> b31 (8) -> b33 (9) -> b35 (10) -> b39 (11) -> b42 (12) -> b43 (13) -> b53 (14) -> b55 (15) # \-> b54 (15) # tip(43) block(53, spend=out[14]) yield rejected() # rejected since b44 is at same height save_spendable_output() # invalid timestamp (b35 is 5 blocks back, so its time is # MedianTimePast) b54 = block(54, spend=out[15]) b54.nTime = b35.nTime - 1 b54.solve() yield rejected(RejectResult(16, b'time-too-old')) # valid timestamp tip(53) b55 = block(55, spend=out[15]) b55.nTime = b35.nTime update_block(55, []) yield accepted() save_spendable_output() # Test CVE-2012-2459 # # -> b42 (12) -> b43 (13) -> b53 (14) -> b55 (15) -> b57p2 (16) # \-> b57 (16) # \-> b56p2 (16) # \-> b56 (16) # # Merkle tree malleability (CVE-2012-2459): repeating sequences of transactions in a block without # affecting the merkle root of a block, while still invalidating it. # See: src/consensus/merkle.h # # b57 has three txns: coinbase, tx, tx1. 
The merkle root computation will duplicate tx. # Result: OK # # b56 copies b57 but duplicates tx1 and does not recalculate the block hash. So it has a valid merkle # root but duplicate transactions. # Result: Fails # # b57p2 has six transactions in its merkle tree: # - coinbase, tx, tx1, tx2, tx3, tx4 # Merkle root calculation will duplicate as necessary. # Result: OK. # # b56p2 copies b57p2 but adds both tx3 and tx4. The purpose of the test is to make sure the code catches # duplicate txns that are not next to one another with the "bad-txns-duplicate" error (which indicates # that the error was caught early, avoiding a DOS vulnerability.) # b57 - a good block with 2 txs, don't submit until end tip(55) b57 = block(57) tx = create_and_sign_tx(out[16].tx, out[16].n, 1) tx1 = create_tx(tx, 0, 1) b57 = update_block(57, [tx, tx1]) # b56 - copy b57, add a duplicate tx tip(55) b56 = copy.deepcopy(b57) self.chain.blocks[56] = b56 assert_equal(len(b56.vtx), 3) b56 = update_block(56, [tx1]) assert_equal(b56.hash, b57.hash) yield rejected(RejectResult(16, b'bad-txns-duplicate')) # b57p2 - a good block with 6 tx'es, don't submit until end tip(55) b57p2 = block("57p2") tx = create_and_sign_tx(out[16].tx, out[16].n, 1) tx1 = create_tx(tx, 0, 1) tx2 = create_tx(tx1, 0, 1) tx3 = create_tx(tx2, 0, 1) tx4 = create_tx(tx3, 0, 1) b57p2 = update_block("57p2", [tx, tx1, tx2, tx3, tx4]) # b56p2 - copy b57p2, duplicate two non-consecutive tx's tip(55) b56p2 = copy.deepcopy(b57p2) self.chain.blocks["b56p2"] = b56p2 assert_equal(b56p2.hash, b57p2.hash) assert_equal(len(b56p2.vtx), 6) b56p2 = update_block("b56p2", [tx3, tx4]) yield rejected(RejectResult(16, b'bad-txns-duplicate')) tip("57p2") yield accepted() tip(57) yield rejected() # rejected because 57p2 seen first save_spendable_output() # Test a few invalid tx types # # -> b35 (10) -> b39 (11) -> b42 (12) -> b43 (13) -> b53 (14) -> b55 (15) -> b57 (16) -> b60 (17) # \-> ??? 
(17) # # tx with prevout.n out of range tip(57) b58 = block(58, spend=out[17]) tx = CTransaction() assert (len(out[17].tx.vout) < 42) tx.vin.append( CTxIn(COutPoint(out[17].tx.sha256, 42), CScript([OP_TRUE]), 0xffffffff)) tx.vout.append(CTxOut(0, b"")) tx.calc_sha256() b58 = update_block(58, [tx]) yield rejected(RejectResult(16, b'bad-txns-inputs-missingorspent')) # tx with output value > input value out of range tip(57) b59 = block(59) tx = create_and_sign_tx(out[17].tx, out[17].n, 51 * COIN) b59 = update_block(59, [tx]) yield rejected(RejectResult(16, b'bad-txns-in-belowout')) # reset to good chain tip(57) b60 = block(60, spend=out[17]) yield accepted() save_spendable_output() # Test BIP30 # # -> b39 (11) -> b42 (12) -> b43 (13) -> b53 (14) -> b55 (15) -> b57 (16) -> b60 (17) # \-> b61 (18) # # Blocks are not allowed to contain a transaction whose id matches that of an earlier, # not-fully-spent transaction in the same chain. To test, make identical coinbases; # the second one should be rejected. 
# tip(60) b61 = block(61, spend=out[18]) b61.vtx[0].vin[0].scriptSig = b60.vtx[0].vin[ 0].scriptSig # equalize the coinbases b61.vtx[0].rehash() b61 = update_block(61, []) assert_equal(b60.vtx[0].serialize(), b61.vtx[0].serialize()) yield rejected(RejectResult(16, b'bad-txns-BIP30')) # Test tx.isFinal is properly rejected (not an exhaustive tx.isFinal test, that should be in data-driven transaction tests) # # -> b39 (11) -> b42 (12) -> b43 (13) -> b53 (14) -> b55 (15) -> b57 (16) -> b60 (17) # \-> b62 (18) # tip(60) b62 = block(62) tx = CTransaction() tx.nLockTime = 0xffffffff # this locktime is non-final assert (out[18].n < len(out[18].tx.vout)) tx.vin.append(CTxIn(COutPoint(out[18].tx.sha256, out[18].n))) # don't set nSequence tx.vout.append(CTxOut(0, CScript([OP_TRUE]))) assert (tx.vin[0].nSequence < 0xffffffff) tx.calc_sha256() b62 = update_block(62, [tx]) yield rejected(RejectResult(16, b'bad-txns-nonfinal')) # Test a non-final coinbase is also rejected # # -> b39 (11) -> b42 (12) -> b43 (13) -> b53 (14) -> b55 (15) -> b57 (16) -> b60 (17) # \-> b63 (-) # tip(60) b63 = block(63) b63.vtx[0].nLockTime = 0xffffffff b63.vtx[0].vin[0].nSequence = 0xDEADBEEF b63.vtx[0].rehash() b63 = update_block(63, []) yield rejected(RejectResult(16, b'bad-txns-nonfinal')) # This checks that a block with a bloated VARINT between the block_header and the array of tx such that # the block is > LEGACY_MAX_BLOCK_SIZE with the bloated varint, but <= LEGACY_MAX_BLOCK_SIZE without the bloated varint, # does not cause a subsequent, identical block with canonical encoding to be rejected. The test does not # care whether the bloated block is accepted or rejected; it only cares that the second block is accepted. # # What matters is that the receiving node should not reject the bloated block, and then reject the canonical # block on the basis that it's the same as an already-rejected block (which would be a consensus failure.) 
# # -> b39 (11) -> b42 (12) -> b43 (13) -> b53 (14) -> b55 (15) -> b57 (16) -> b60 (17) -> b64 (18) # \ # b64a (18) # b64a is a bloated block (non-canonical varint) # b64 is a good block (same as b64 but w/ canonical varint) # tip(60) regular_block = block("64a", spend=out[18]) # make it a "broken_block," with non-canonical serialization b64a = CBrokenBlock(regular_block) b64a.initialize(regular_block) self.chain.blocks["64a"] = b64a self.chain.tip = b64a tx = CTransaction() # use canonical serialization to calculate size script_length = LEGACY_MAX_BLOCK_SIZE - \ len(b64a.normal_serialize()) - 69 script_output = CScript([b'\x00' * script_length]) tx.vout.append(CTxOut(0, script_output)) tx.vin.append(CTxIn(COutPoint(b64a.vtx[1].sha256, 0))) b64a = update_block("64a", [tx]) assert_equal(len(b64a.serialize()), LEGACY_MAX_BLOCK_SIZE + 8) yield TestInstance([[self.chain.tip, None]]) # comptool workaround: to make sure b64 is delivered, manually erase # b64a from blockstore self.test.block_store.erase(b64a.sha256) tip(60) b64 = CBlock(b64a) b64.vtx = copy.deepcopy(b64a.vtx) assert_equal(b64.hash, b64a.hash) assert_equal(len(b64.serialize()), LEGACY_MAX_BLOCK_SIZE) self.chain.blocks[64] = b64 update_block(64, []) yield accepted() save_spendable_output() # Spend an output created in the block itself # # -> b42 (12) -> b43 (13) -> b53 (14) -> b55 (15) -> b57 (16) -> b60 (17) -> b64 (18) -> b65 (19) # tip(64) b65 = block(65) tx1 = create_and_sign_tx(out[19].tx, out[19].n, out[19].tx.vout[0].nValue) tx2 = create_and_sign_tx(tx1, 0, 0) update_block(65, [tx1, tx2]) yield accepted() save_spendable_output() # Attempt to spend an output created later in the same block # # -> b43 (13) -> b53 (14) -> b55 (15) -> b57 (16) -> b60 (17) -> b64 (18) -> b65 (19) # \-> b66 (20) tip(65) b66 = block(66) tx1 = create_and_sign_tx(out[20].tx, out[20].n, out[20].tx.vout[0].nValue) tx2 = create_and_sign_tx(tx1, 0, 1) update_block(66, [tx2, tx1]) yield rejected(RejectResult(16, 
b'bad-txns-inputs-missingorspent')) # Attempt to double-spend a transaction created in a block # # -> b43 (13) -> b53 (14) -> b55 (15) -> b57 (16) -> b60 (17) -> b64 (18) -> b65 (19) # \-> b67 (20) # # tip(65) b67 = block(67) tx1 = create_and_sign_tx(out[20].tx, out[20].n, out[20].tx.vout[0].nValue) tx2 = create_and_sign_tx(tx1, 0, 1) tx3 = create_and_sign_tx(tx1, 0, 2) update_block(67, [tx1, tx2, tx3]) yield rejected(RejectResult(16, b'bad-txns-inputs-missingorspent')) # More tests of block subsidy # # -> b43 (13) -> b53 (14) -> b55 (15) -> b57 (16) -> b60 (17) -> b64 (18) -> b65 (19) -> b69 (20) # \-> b68 (20) # # b68 - coinbase with an extra 10 satoshis, # creates a tx that has 9 satoshis from out[20] go to fees # this fails because the coinbase is trying to claim 1 satoshi too much in fees # # b69 - coinbase with extra 10 satoshis, and a tx that gives a 10 satoshi fee # this succeeds # tip(65) b68 = block(68, additional_coinbase_value=10) tx = create_and_sign_tx(out[20].tx, out[20].n, out[20].tx.vout[0].nValue - 9) update_block(68, [tx]) yield rejected(RejectResult(16, b'bad-cb-amount')) tip(65) b69 = block(69, additional_coinbase_value=10) tx = create_and_sign_tx(out[20].tx, out[20].n, out[20].tx.vout[0].nValue - 10) update_block(69, [tx]) yield accepted() save_spendable_output() # Test spending the outpoint of a non-existent transaction # # -> b53 (14) -> b55 (15) -> b57 (16) -> b60 (17) -> b64 (18) -> b65 (19) -> b69 (20) # \-> b70 (21) # tip(69) block(70, spend=out[21]) bogus_tx = CTransaction() bogus_tx.sha256 = uint256_from_str( b"23c70ed7c0506e9178fc1a987f40a33946d4ad4c962b5ae3a52546da53af0c5c" ) tx = CTransaction() tx.vin.append(CTxIn(COutPoint(bogus_tx.sha256, 0), b"", 0xffffffff)) tx.vout.append(CTxOut(1, b"")) update_block(70, [tx]) yield rejected(RejectResult(16, b'bad-txns-inputs-missingorspent')) # Test accepting an invalid block which has the same hash as a valid one (via merkle tree tricks) # # -> b53 (14) -> b55 (15) -> b57 (16) -> b60 (17) -> 
b64 (18) -> b65 (19) -> b69 (20) -> b72 (21) # \-> b71 (21) # # b72 is a good block. # b71 is a copy of 72, but re-adds one of its transactions. However, it has the same hash as b71. # tip(69) b72 = block(72) tx1 = create_and_sign_tx(out[21].tx, out[21].n, 2) tx2 = create_and_sign_tx(tx1, 0, 1) b72 = update_block(72, [tx1, tx2]) # now tip is 72 b71 = copy.deepcopy(b72) b71.vtx.append(tx2) # add duplicate tx2 self.chain.block_heights[b71.sha256] = self.chain.block_heights[ b69.sha256] + 1 # b71 builds off b69 self.chain.blocks[71] = b71 assert_equal(len(b71.vtx), 4) assert_equal(len(b72.vtx), 3) assert_equal(b72.sha256, b71.sha256) tip(71) yield rejected(RejectResult(16, b'bad-txns-duplicate')) tip(72) yield accepted() save_spendable_output() # Test some invalid scripts and MAX_BLOCK_SIGOPS_PER_MB # # -> b55 (15) -> b57 (16) -> b60 (17) -> b64 (18) -> b65 (19) -> b69 (20) -> b72 (21) # \-> b** (22) # # b73 - tx with excessive sigops that are placed after an excessively large script element. # The purpose of the test is to make sure those sigops are counted. 
# # script is a bytearray of size 20,526 # # bytearray[0-19,998] : OP_CHECKSIG # bytearray[19,999] : OP_PUSHDATA4 # bytearray[20,000-20,003]: 521 (max_script_element_size_before_genesis+1, in little-endian format) # bytearray[20,004-20,525]: unread data (script_element) # bytearray[20,526] : OP_CHECKSIG (this puts us over the limit) # tip(72) b73 = block(73) size = MAX_BLOCK_SIGOPS_PER_MB - 1 + \ MAX_SCRIPT_ELEMENT_SIZE_BEFORE_GENESIS + 1 + 5 + 1 a = bytearray([OP_CHECKSIG] * size) a[MAX_BLOCK_SIGOPS_PER_MB - 1] = int("4e", 16) # OP_PUSHDATA4 element_size = MAX_SCRIPT_ELEMENT_SIZE_BEFORE_GENESIS + 1 a[MAX_BLOCK_SIGOPS_PER_MB] = element_size % 256 a[MAX_BLOCK_SIGOPS_PER_MB + 1] = element_size // 256 a[MAX_BLOCK_SIGOPS_PER_MB + 2] = 0 a[MAX_BLOCK_SIGOPS_PER_MB + 3] = 0 tx = create_and_sign_tx(out[22].tx, 0, 1, CScript(a)) b73 = update_block(73, [tx]) assert_equal(get_legacy_sigopcount_block(b73), MAX_BLOCK_SIGOPS_PER_MB + 1) yield rejected(RejectResult(16, b'bad-blk-sigops')) # b74/75 - if we push an invalid script element, all prevous sigops are counted, # but sigops after the element are not counted. # # The invalid script element is that the push_data indicates that # there will be a large amount of data (0xffffff bytes), but we only # provide a much smaller number. These bytes are CHECKSIGS so they would # cause b75 to fail for excessive sigops, if those bytes were counted. 
# # b74 fails because we put MAX_BLOCK_SIGOPS_PER_MB+1 before the element # b75 succeeds because we put MAX_BLOCK_SIGOPS_PER_MB before the element # # tip(72) b74 = block(74) size = MAX_BLOCK_SIGOPS_PER_MB - 1 + \ MAX_SCRIPT_ELEMENT_SIZE_BEFORE_GENESIS + 42 # total = 20,561 a = bytearray([OP_CHECKSIG] * size) a[MAX_BLOCK_SIGOPS_PER_MB] = 0x4e a[MAX_BLOCK_SIGOPS_PER_MB + 1] = 0xfe a[MAX_BLOCK_SIGOPS_PER_MB + 2] = 0xff a[MAX_BLOCK_SIGOPS_PER_MB + 3] = 0xff a[MAX_BLOCK_SIGOPS_PER_MB + 4] = 0xff tx = create_and_sign_tx(out[22].tx, 0, 1, CScript(a)) b74 = update_block(74, [tx]) yield rejected(RejectResult(16, b'bad-blk-sigops')) tip(72) b75 = block(75) size = MAX_BLOCK_SIGOPS_PER_MB - 1 + MAX_SCRIPT_ELEMENT_SIZE_BEFORE_GENESIS + 42 a = bytearray([OP_CHECKSIG] * size) a[MAX_BLOCK_SIGOPS_PER_MB - 1] = 0x4e a[MAX_BLOCK_SIGOPS_PER_MB] = 0xff a[MAX_BLOCK_SIGOPS_PER_MB + 1] = 0xff a[MAX_BLOCK_SIGOPS_PER_MB + 2] = 0xff a[MAX_BLOCK_SIGOPS_PER_MB + 3] = 0xff tx = create_and_sign_tx(out[22].tx, 0, 1, CScript(a)) b75 = update_block(75, [tx]) yield accepted() save_spendable_output() # Check that if we push an element filled with CHECKSIGs, they are not # counted tip(75) b76 = block(76) size = MAX_BLOCK_SIGOPS_PER_MB - 1 + MAX_SCRIPT_ELEMENT_SIZE_BEFORE_GENESIS + 1 + 5 a = bytearray([OP_CHECKSIG] * size) a[MAX_BLOCK_SIGOPS_PER_MB - 1] = 0x4e # PUSHDATA4, but leave the following bytes as just checksigs tx = create_and_sign_tx(out[23].tx, 0, 1, CScript(a)) b76 = update_block(76, [tx]) yield accepted() save_spendable_output() # Test transaction resurrection # # -> b77 (24) -> b78 (25) -> b79 (26) # \-> b80 (25) -> b81 (26) -> b82 (27) # # b78 creates a tx, which is spent in b79. After b82, both should be in mempool # # The tx'es must be unsigned and pass the node's mempool policy. 
It is unsigned for the # rather obscure reason that the Python signature code does not distinguish between # Low-S and High-S values (whereas the bitcoin code has custom code which does so); # as a result of which, the odds are 50% that the python code will use the right # value and the transaction will be accepted into the mempool. Until we modify the # test framework to support low-S signing, we are out of luck. # # To get around this issue, we construct transactions which are not signed and which # spend to OP_TRUE. If the standard-ness rules change, this test would need to be # updated. (Perhaps to spend to a P2SH OP_TRUE script) # tip(76) block(77) tx77 = create_and_sign_tx(out[24].tx, out[24].n, 10 * COIN) update_block(77, [tx77]) yield accepted() save_spendable_output() block(78) tx78 = create_tx(tx77, 0, 9 * COIN) update_block(78, [tx78]) yield accepted() block(79) tx79 = create_tx(tx78, 0, 8 * COIN) update_block(79, [tx79]) yield accepted() # mempool should be empty assert_equal(len(self.nodes[0].getrawmempool()), 0) tip(77) block(80, spend=out[25]) yield rejected() save_spendable_output() block(81, spend=out[26]) yield rejected() # other chain is same length save_spendable_output() block(82, spend=out[27]) yield accepted() # now this chain is longer, triggers re-org save_spendable_output() # now check that tx78 and tx79 have been put back into the peer's # mempool mempool = self.nodes[0].getrawmempool() assert_equal(len(mempool), 2) assert (tx78.hash in mempool) assert (tx79.hash in mempool) # Test invalid opcodes in dead execution paths. 
# # -> b81 (26) -> b82 (27) -> b83 (28) # b83 = block(83) op_codes = [OP_IF, OP_INVALIDOPCODE, OP_ELSE, OP_TRUE, OP_ENDIF] script = CScript(op_codes) tx1 = create_and_sign_tx(out[28].tx, out[28].n, out[28].tx.vout[0].nValue, script) tx2 = create_and_sign_tx(tx1, 0, 0, CScript([OP_TRUE])) tx2.vin[0].scriptSig = CScript([OP_FALSE]) tx2.rehash() update_block(83, [tx1, tx2]) yield accepted() save_spendable_output() # Reorg on/off blocks that have OP_RETURN in them (and try to spend them) # # -> b81 (26) -> b82 (27) -> b83 (28) -> b84 (29) -> b87 (30) -> b88 (31) # \-> b85 (29) -> b86 (30) \-> b89a (32) # # b84 = block(84) tx1 = create_tx(out[29].tx, out[29].n, 0, CScript([OP_RETURN])) tx1.vout.append(CTxOut(0, CScript([OP_TRUE]))) tx1.vout.append(CTxOut(0, CScript([OP_TRUE]))) tx1.vout.append(CTxOut(0, CScript([OP_TRUE]))) tx1.vout.append(CTxOut(0, CScript([OP_TRUE]))) tx1.calc_sha256() sign_tx(tx1, out[29].tx, out[29].n, self.coinbase_key) tx1.rehash() tx2 = create_tx(tx1, 1, 0, CScript([OP_RETURN])) tx2.vout.append(CTxOut(0, CScript([OP_RETURN]))) tx3 = create_tx(tx1, 2, 0, CScript([OP_RETURN])) tx3.vout.append(CTxOut(0, CScript([OP_TRUE]))) tx4 = create_tx(tx1, 3, 0, CScript([OP_TRUE])) tx4.vout.append(CTxOut(0, CScript([OP_RETURN]))) tx5 = create_tx(tx1, 4, 0, CScript([OP_RETURN])) update_block(84, [tx1, tx2, tx3, tx4, tx5]) yield accepted() save_spendable_output() tip(83) block(85, spend=out[29]) yield rejected() block(86, spend=out[30]) yield accepted() tip(84) block(87, spend=out[30]) yield rejected() save_spendable_output() block(88, spend=out[31]) yield accepted() save_spendable_output() # trying to spend the OP_RETURN output is rejected block("89a", spend=out[32]) tx = create_tx(tx1, 0, 0, CScript([OP_TRUE])) update_block("89a", [tx]) yield rejected() # Test re-org of a week's worth of blocks (1088 blocks) # This test takes a minute or two and can be accomplished in memory # if self.options.runbarelyexpensive: tip(88) LARGE_REORG_SIZE = 1088 test1 = 
TestInstance(sync_every_block=False) spend = out[32] for i in range(89, LARGE_REORG_SIZE + 89): b = block(i, spend) tx = CTransaction() script_length = LEGACY_MAX_BLOCK_SIZE - len(b.serialize()) - 69 script_output = CScript([b'\x00' * script_length]) tx.vout.append(CTxOut(0, script_output)) tx.vin.append(CTxIn(COutPoint(b.vtx[1].sha256, 0))) b = update_block(i, [tx]) assert_equal(len(b.serialize()), LEGACY_MAX_BLOCK_SIZE) test1.blocks_and_transactions.append([self.chain.tip, True]) save_spendable_output() spend = self.chain.get_spendable_output() yield test1 chain1_tip = i # now create alt chain of same length tip(88) test2 = TestInstance(sync_every_block=False) for i in range(89, LARGE_REORG_SIZE + 89): block("alt" + str(i)) test2.blocks_and_transactions.append([self.chain.tip, False]) yield test2 # extend alt chain to trigger re-org block("alt" + str(chain1_tip + 1)) yield accepted() # ... and re-org back to the first chain tip(chain1_tip) block(chain1_tip + 1) yield rejected() block(chain1_tip + 2) yield accepted() chain1_tip += 2
class P2SH(ComparisonTestFramework):
    """Comparison-tool test for P2SH output handling around genesis activation.

    Runs 3 nodes with ``-genesisactivationheight=150``. A block paying to a
    P2SH script mined BEFORE the activation height must be accepted; one
    mined AFTER it must be rejected with 'bad-txns-vout-p2sh'. Finally the
    wallets are checked: only the pre-genesis P2SH funds are registered and
    spendable.
    """

    def set_test_params(self):
        # 3 nodes on a fresh chain, all with identical args.
        self.num_nodes = 3
        self.setup_clean_chain = True
        # Key used to sign the coinbase outputs we spend later.
        self.coinbase_key = CECKey()
        self.coinbase_key.set_secretbytes(b"horsebattery")
        self.coinbase_pubkey = self.coinbase_key.get_pubkey()
        # Height at which the "genesis" rule change activates.
        self.genesisactivationheight = 150
        # Huge banscore so rejected blocks/txs don't get our peers banned.
        self.extra_args = [['-acceptnonstdtxn=0', '-banscore=1000000',
                            f'-genesisactivationheight={self.genesisactivationheight}']] * 3

    def run_test(self):
        # Delegate to the comparison harness, which drives get_tests().
        self.test.run()

    def get_tests(self):
        """Generator of test instances consumed by the comparison framework."""
        # shorthand for functions
        block = self.chain.next_block
        node0 = self.nodes[0]
        node1 = self.nodes[1]
        node2 = self.nodes[2]
        self.chain.set_genesis_hash(int(node1.getbestblockhash(), 16))

        # Now we need that block to mature so we can spend the coinbase:
        # mine 100 blocks, remembering each coinbase as a spendable output.
        test = TestInstance(sync_every_block=False)
        for i in range(0, 100):
            block(i, coinbase_pubkey=self.coinbase_pubkey)
            test.blocks_and_transactions.append([self.chain.tip, True])
            self.chain.save_spendable_output()
        yield test

        # create two addresses on the node0
        address1 = node0.getnewaddress()
        scriptPubKey1 = node0.validateaddress(address1)["scriptPubKey"]
        address2 = node0.getnewaddress()
        scriptPubKey2 = node0.validateaddress(address2)["scriptPubKey"]

        # import P2SH(P2PKH) on node1 and node2.
        # Has to be done this way because it seems we can't create a P2PKH
        # address and later add P2SH(P2PKH) to the same private key.
        node1.importaddress(scriptPubKey1, "x", True, True)  # importing script, not key
        node1.importprivkey(node0.dumpprivkey(address1))
        node2.importaddress(scriptPubKey2, "x", True, True)  # importing script, not key
        node2.importprivkey(node0.dumpprivkey(address2))

        out = [self.chain.get_spendable_output() for _ in range(50)]

        # Create a p2sh transaction spending the next available coinbase output.
        def new_P2SH_tx(scriptPubKey):
            output = out.pop(0)
            redeem_script = CScript(hex_str_to_bytes(scriptPubKey))
            redeem_script_hash = hash160(redeem_script)
            p2sh_script = CScript([OP_HASH160, redeem_script_hash, OP_EQUAL])
            # Output value = input value - 100 satoshi fee.
            return create_and_sign_transaction(spend_tx=output.tx, n=output.n,
                                               value=output.tx.vout[0].nValue - 100,
                                               private_key=self.coinbase_key,
                                               script=p2sh_script)

        # Add the transactions to the block
        assert node0.getblockcount() < self.genesisactivationheight, "We must be before genesis"
        block(100)
        new_tx1 = new_P2SH_tx(scriptPubKey1)
        self.chain.update_block(100, [new_tx1])
        # sending funds to P2SH address BEFORE genesis
        yield self.accepted()

        current_height = node1.getblockcount()

        # Mine up to the genesis activation height.
        # NOTE(review): this appends to, and re-yields, the SAME TestInstance
        # used for the maturation loop above instead of a fresh one —
        # presumably comptool tolerates re-delivery of already-accepted
        # blocks; confirm this is intentional.
        for i in range(self.genesisactivationheight - current_height):
            block(101 + i, coinbase_pubkey=self.coinbase_pubkey)
            test.blocks_and_transactions.append([self.chain.tip, True])
            self.chain.save_spendable_output()
        yield test

        assert node0.getblockcount() >= self.genesisactivationheight, "We must be after genesis"

        block(150)
        new_tx2 = new_P2SH_tx(scriptPubKey2)
        self.chain.update_block(150, [new_tx2])
        # sending funds to P2SH address AFTER genesis
        yield self.rejected(RejectResult(16, b'bad-txns-vout-p2sh'))

        # Rewind our local chain view past the rejected block.
        self.chain.set_tip(149)

        # node1 should see the pre-genesis P2SH funds; node2 nothing.
        balance1 = node1.getbalance("*", 1, False)
        assert balance1 * COIN == new_tx1.vout[0].nValue, "Wallet has registered pre genesis transaction."
        balance2 = node2.getbalance("*", 1, False)
        assert balance2 * COIN == 0, "No funds in wallet as transaction is not accepted."

        # Pre genesis P2SH transaction can be spent through wallet
        node1.sendtoaddress(node0.getnewaddress(), balance1 - 1)
        balance1_new = node1.getbalance("*", 1, False)
        assert balance1 > balance1_new, "Pre genesis P2SH is spent."
def run_test(self):
    """Check node handling of Schnorr vs ECDSA signatures.

    Covers: valid Schnorr and ECDSA CHECKSIG spends (relayed and mined);
    a Schnorr signature inside CHECKMULTISIG (rejected, banworthy, and
    unminable); and a bad 64-byte signature, which fails NULLFAIL in
    CHECKSIG and the length check in CHECKMULTISIG.
    """
    node, = self.nodes  # this test runs with a single node

    self.bootstrap_p2p()
    tip = self.getbestblock(node)

    self.log.info("Create some blocks with OP_1 coinbase for spending.")
    blocks = []
    for _ in range(10):
        tip = self.build_block(tip)
        blocks.append(tip)
    node.p2p.send_blocks_and_test(blocks, node, success=True)
    # Each of these coinbases becomes one spendable funding source below.
    spendable_outputs = [block.vtx[0] for block in blocks]

    self.log.info("Mature the blocks and get out of IBD.")
    node.generate(100)

    tip = self.getbestblock(node)

    self.log.info("Setting up spends to test and mining the fundings.")
    fundings = []

    # Generate a key pair
    privkeybytes = b"Schnorr!" * 4  # 32-byte secret
    private_key = CECKey()
    private_key.set_secretbytes(privkeybytes)
    # get uncompressed public key serialization
    public_key = private_key.get_pubkey()

    def create_fund_and_spend_tx(multi=False, sig='schnorr'):
        """Build a funding tx (collected in `fundings`) and return a tx
        that spends it.

        multi: lock with 1-of-1 CHECKMULTISIG instead of bare CHECKSIG.
        sig:   'schnorr' | 'ecdsa' | raw bytes used verbatim as the signature.
        """
        spendfrom = spendable_outputs.pop()

        if multi:
            script = CScript([OP_1, public_key, OP_1, OP_CHECKMULTISIG])
        else:
            script = CScript([public_key, OP_CHECKSIG])

        value = spendfrom.vout[0].nValue

        # Fund transaction
        txfund = create_transaction(spendfrom, 0, b'', value, script)
        txfund.rehash()
        fundings.append(txfund)

        # Spend transaction
        txspend = CTransaction()
        txspend.vout.append(CTxOut(value - 1000, CScript([OP_TRUE])))
        txspend.vin.append(CTxIn(COutPoint(txfund.sha256, 0), b''))

        # Sign the transaction
        sighashtype = SIGHASH_ALL | SIGHASH_FORKID
        hashbyte = bytes([sighashtype & 0xff])
        sighash = SignatureHashForkId(script, txspend, 0, sighashtype, value)
        if sig == 'schnorr':
            txsig = schnorr.sign(privkeybytes, sighash) + hashbyte
        elif sig == 'ecdsa':
            txsig = private_key.sign(sighash) + hashbyte
        elif isinstance(sig, bytes):
            # Caller-supplied raw signature (e.g. a deliberately bad one).
            txsig = sig + hashbyte
        if multi:
            # CHECKMULTISIG consumes an extra (dummy) stack element.
            txspend.vin[0].scriptSig = CScript([b'', txsig])
        else:
            txspend.vin[0].scriptSig = CScript([txsig])
        txspend.rehash()

        return txspend

    # `sig64` is defined outside this method — presumably a fixed 64-byte
    # invalid signature; confirm at its definition site.
    schnorrchecksigtx = create_fund_and_spend_tx()
    schnorrmultisigtx = create_fund_and_spend_tx(multi=True)
    ecdsachecksigtx = create_fund_and_spend_tx(sig='ecdsa')
    sig64checksigtx = create_fund_and_spend_tx(sig=sig64)
    sig64multisigtx = create_fund_and_spend_tx(multi=True, sig=sig64)

    # Mine all funding transactions in one block.
    tip = self.build_block(tip, fundings)
    node.p2p.send_blocks_and_test([tip], node)

    self.log.info("Typical ECDSA and Schnorr CHECKSIG are valid.")
    node.p2p.send_txs_and_test([schnorrchecksigtx, ecdsachecksigtx], node)
    # They get mined as usual.
    node.generate(1)
    tip = self.getbestblock(node)
    # Make sure they are in the block, and mempool is now empty.
    txhashes = set([schnorrchecksigtx.hash, ecdsachecksigtx.hash])
    assert txhashes.issubset(tx.rehash() for tx in tip.vtx)
    assert not node.getrawmempool()

    self.log.info("Schnorr in multisig is rejected with mandatory error.")
    assert_raises_rpc_error(-26, rpc_error(**SCHNORR_MULTISIG_ERROR),
                            node.sendrawtransaction, ToHex(schnorrmultisigtx))
    # And it is banworthy.
    self.check_for_ban_on_rejected_tx(
        schnorrmultisigtx, **SCHNORR_MULTISIG_ERROR)
    # And it can't be mined
    self.check_for_ban_on_rejected_block(
        self.build_block(tip, [schnorrmultisigtx]), **BADINPUTS_ERROR)

    self.log.info("Bad 64-byte sig is rejected with mandatory error.")
    # In CHECKSIG it's invalid Schnorr and hence NULLFAIL.
    assert_raises_rpc_error(-26, rpc_error(**NULLFAIL_ERROR),
                            node.sendrawtransaction, ToHex(sig64checksigtx))
    # In CHECKMULTISIG it's invalid length and hence BAD_LENGTH.
    assert_raises_rpc_error(-26, rpc_error(**SCHNORR_MULTISIG_ERROR),
                            node.sendrawtransaction, ToHex(sig64multisigtx))
    # Sending these transactions is banworthy.
    self.check_for_ban_on_rejected_tx(sig64checksigtx, **NULLFAIL_ERROR)
    self.check_for_ban_on_rejected_tx(sig64multisigtx, **SCHNORR_MULTISIG_ERROR)
    # And they can't be mined either...
    self.check_for_ban_on_rejected_block(
        self.build_block(tip, [sig64checksigtx]), **BADINPUTS_ERROR)
    self.check_for_ban_on_rejected_block(
        self.build_block(tip, [sig64multisigtx]), **BADINPUTS_ERROR)
def run_test(self):
    """Build a 2202-block chain whose block 102 contains an invalid
    coinbase spend, then verify -assumevalid behavior: node0 (no
    assumevalid) and node2 (too few headers announced) reject block 102
    and stop at height 101, while node1 accepts the whole chain.
    """
    # Bring up a P2P connection to node0 and finish the handshake.
    conn0 = self.nodes[0].add_p2p_connection(BaseNode())
    network_thread_start()
    self.nodes[0].p2p.wait_for_verack()

    # Seed our chain state from node0's current best block.
    best_hash = self.nodes[0].getbestblockhash()
    self.tip = int(best_hash, 16)
    self.block_time = self.nodes[0].getblock(best_hash)['time'] + 1
    self.blocks = []

    # Key pair that the first coinbase pays to.
    signing_key = CECKey()
    signing_key.set_secretbytes(b"horsebattery")
    signing_pubkey = signing_key.get_pubkey()

    # Block 1: coinbase output locked to our key.
    next_height = 1
    blk = create_block(self.tip,
                       create_coinbase(next_height, signing_pubkey),
                       self.block_time)
    self.blocks.append(blk)
    self.block_time += 1
    blk.solve()
    # Keep a handle on it: its coinbase is (badly) spent below.
    self.block1 = blk
    self.tip = blk.sha256
    next_height += 1

    # Blocks 2..101: bury block 1 so its coinbase matures.
    for _ in range(100):
        blk = create_block(self.tip, create_coinbase(next_height),
                           self.block_time)
        blk.solve()
        self.blocks.append(blk)
        self.tip = blk.sha256
        self.block_time += 1
        next_height += 1

    # A spend of block 1's coinbase with an empty — hence invalid — scriptSig.
    bad_tx = CTransaction()
    bad_tx.vin.append(
        CTxIn(COutPoint(self.block1.vtx[0].sha256, 0), scriptSig=b""))
    bad_tx.vout.append(CTxOut(49 * 100000000, CScript([OP_TRUE])))
    bad_tx.calc_sha256()

    # Block 102 carries the invalid spend.
    block102 = create_block(self.tip, create_coinbase(next_height),
                            self.block_time)
    self.block_time += 1
    block102.vtx.extend([bad_tx])
    block102.hashMerkleRoot = block102.calc_merkle_root()
    block102.rehash()
    block102.solve()
    self.blocks.append(block102)
    self.tip = block102.sha256
    self.block_time += 1
    next_height += 1

    # Bury the assumed-valid block under 2100 more blocks.
    for _ in range(2100):
        blk = create_block(self.tip, create_coinbase(next_height),
                           self.block_time)
        blk.nVersion = 4
        blk.solve()
        self.blocks.append(blk)
        self.tip = blk.sha256
        self.block_time += 1
        next_height += 1

    # New connections are coming: tear the network thread down first.
    self.nodes[0].disconnect_p2ps()
    network_thread_join()

    # Start node1 and node2 trusting block 102 despite its bad signature.
    assumevalid_arg = "-assumevalid=" + hex(block102.sha256)
    self.start_node(1, extra_args=[assumevalid_arg])
    self.start_node(2, extra_args=[assumevalid_arg])

    conn0 = self.nodes[0].add_p2p_connection(BaseNode())
    conn1 = self.nodes[1].add_p2p_connection(BaseNode())
    conn2 = self.nodes[2].add_p2p_connection(BaseNode())
    network_thread_start()
    for conn in (conn0, conn1, conn2):
        conn.wait_for_verack()

    # Announce headers: the full chain to node0 and node1, but only the
    # first 200 to node2.
    for conn in (conn0, conn1):
        conn.send_header_for_blocks(self.blocks[0:2000])
        conn.send_header_for_blocks(self.blocks[2000:])
    conn2.send_header_for_blocks(self.blocks[0:200])

    # Send blocks to node0. Block 102 will be rejected.
    self.send_blocks_until_disconnected(conn0)
    self.assert_blockchain_height(self.nodes[0], 101)

    # Send all blocks to node1. All blocks will be accepted.
    for blk in self.blocks[:2202]:
        conn1.send_message(msg_block(blk))
    # Syncing 2200 blocks can take a while on slow systems; be generous.
    conn1.sync_with_ping(120)
    assert_equal(
        self.nodes[1].getblock(self.nodes[1].getbestblockhash())['height'],
        2202)

    # Send blocks to node2. Block 102 will be rejected.
    self.send_blocks_until_disconnected(conn2)
    self.assert_blockchain_height(self.nodes[2], 101)
def run_test(self):
    """Exercise per-block and per-transaction sigop limits.

    Checks that the allowed sigop count scales with block size in 1MB
    steps (20k per started MB via MAX_BLOCK_SIGOPS_PER_MB), that a single
    transaction (plain or P2SH) may not exceed the per-tx sigop limit,
    and that an oversized-sigops coinbase is rejected on its own.
    """
    node = self.nodes[0]
    node.add_p2p_connection(P2PDataStore())

    # Apply the configured excessive block size (self.excessive_block_size)
    # as the initial condition.
    node.setexcessiveblock(self.excessive_block_size)
    self.genesis_hash = int(node.getbestblockhash(), 16)
    self.block_heights[self.genesis_hash] = 0
    spendable_outputs = []

    # save the current tip so it can be spent by a later block
    def save_spendable_output():
        spendable_outputs.append(self.tip)

    # get an output that we previously marked as spendable
    def get_spendable_output():
        return PreviousSpendableOutput(spendable_outputs.pop(0).vtx[0], 0)

    # move the tip back to a previous block
    def tip(number):
        self.tip = self.blocks[number]

    # adds transactions to the block and updates state
    def update_block(block_number, new_transactions):
        block = self.blocks[block_number]
        self.add_transactions_to_block(block, new_transactions)
        old_sha256 = block.sha256
        make_conform_to_ctor(block)
        block.hashMerkleRoot = block.calc_merkle_root()
        block.solve()
        # Update the internal state just like in next_block
        self.tip = block
        if block.sha256 != old_sha256:
            # Re-key the height map under the block's new hash.
            self.block_heights[
                block.sha256] = self.block_heights[old_sha256]
            del self.block_heights[old_sha256]
        self.blocks[block_number] = block
        return block

    # shorthand for functions
    block = self.next_block

    # Create a new block
    block(0)
    save_spendable_output()
    node.p2p.send_blocks_and_test([self.tip], node)

    # Now we need that block to mature so we can spend the coinbase.
    maturity_blocks = []
    for i in range(105):
        block(5000 + i)
        maturity_blocks.append(self.tip)
        save_spendable_output()
    node.p2p.send_blocks_and_test(maturity_blocks, node)

    # collect spendable outputs now to avoid cluttering the code later on
    out = []
    for i in range(100):
        out.append(get_spendable_output())

    # Accept many sigops: exactly MAX_BLOCK_SIGOPS_PER_MB at 1MB is OK.
    lots_of_checksigs = CScript(
        [OP_CHECKSIG] * MAX_BLOCK_SIGOPS_PER_MB)
    block(19, spend=out[0], script=lots_of_checksigs,
          block_size=ONE_MEGABYTE)
    node.p2p.send_blocks_and_test([self.tip], node)

    # ...but one sigop over the limit at 1MB is rejected.
    block(20, spend=out[1], script=lots_of_checksigs,
          block_size=ONE_MEGABYTE, extra_sigops=1)
    node.p2p.send_blocks_and_test(
        [self.tip], node, success=False, reject_reason='bad-blk-sigops')

    # Rewind bad block
    tip(19)

    # Accept 40k sigops per block > 1MB and <= 2MB
    block(21, spend=out[1], script=lots_of_checksigs,
          extra_sigops=MAX_BLOCK_SIGOPS_PER_MB,
          block_size=ONE_MEGABYTE + 1)
    node.p2p.send_blocks_and_test([self.tip], node)

    # Accept 40k sigops per block > 1MB and <= 2MB
    block(22, spend=out[2], script=lots_of_checksigs,
          extra_sigops=MAX_BLOCK_SIGOPS_PER_MB,
          block_size=2 * ONE_MEGABYTE)
    node.p2p.send_blocks_and_test([self.tip], node)

    # Reject more than 40k sigops per block > 1MB and <= 2MB.
    block(23, spend=out[3], script=lots_of_checksigs,
          extra_sigops=MAX_BLOCK_SIGOPS_PER_MB + 1,
          block_size=ONE_MEGABYTE + 1)
    node.p2p.send_blocks_and_test(
        [self.tip], node, success=False, reject_reason='bad-blk-sigops')

    # Rewind bad block
    tip(22)

    # Reject more than 40k sigops per block > 1MB and <= 2MB.
    block(24, spend=out[3], script=lots_of_checksigs,
          extra_sigops=MAX_BLOCK_SIGOPS_PER_MB + 1,
          block_size=2 * ONE_MEGABYTE)
    node.p2p.send_blocks_and_test(
        [self.tip], node, success=False, reject_reason='bad-blk-sigops')

    # Rewind bad block
    tip(22)

    # Accept 60k sigops per block > 2MB and <= 3MB
    block(25, spend=out[3], script=lots_of_checksigs,
          extra_sigops=2 * MAX_BLOCK_SIGOPS_PER_MB,
          block_size=2 * ONE_MEGABYTE + 1)
    node.p2p.send_blocks_and_test([self.tip], node)

    # Accept 60k sigops per block > 2MB and <= 3MB
    block(26, spend=out[4], script=lots_of_checksigs,
          extra_sigops=2 * MAX_BLOCK_SIGOPS_PER_MB,
          block_size=3 * ONE_MEGABYTE)
    node.p2p.send_blocks_and_test([self.tip], node)

    # Reject more than 60k sigops per block > 2MB and <= 3MB.
    block(27, spend=out[5], script=lots_of_checksigs,
          extra_sigops=2 * MAX_BLOCK_SIGOPS_PER_MB + 1,
          block_size=2 * ONE_MEGABYTE + 1)
    node.p2p.send_blocks_and_test(
        [self.tip], node, success=False, reject_reason='bad-blk-sigops')

    # Rewind bad block
    tip(26)

    # Reject more than 60k sigops per block > 2MB and <= 3MB.
    block(28, spend=out[5], script=lots_of_checksigs,
          extra_sigops=2 * MAX_BLOCK_SIGOPS_PER_MB + 1,
          block_size=3 * ONE_MEGABYTE)
    node.p2p.send_blocks_and_test(
        [self.tip], node, success=False, reject_reason='bad-blk-sigops')

    # Rewind bad block
    tip(26)

    # Too many sigops in one txn
    too_many_tx_checksigs = CScript(
        [OP_CHECKSIG] * (MAX_BLOCK_SIGOPS_PER_MB + 1))
    block(
        29, spend=out[6], script=too_many_tx_checksigs,
        block_size=ONE_MEGABYTE + 1)
    node.p2p.send_blocks_and_test(
        [self.tip], node, success=False, reject_reason='bad-txn-sigops')

    # Rewind bad block
    tip(26)

    # Generate a key pair to test P2SH sigops count
    private_key = CECKey()
    private_key.set_secretbytes(b"fatstacks")
    public_key = private_key.get_pubkey()

    # P2SH
    # Build the redeem script, hash it, use hash to create the p2sh script
    redeem_script = CScript(
        [public_key] + [OP_2DUP, OP_CHECKSIGVERIFY] * 5 + [OP_CHECKSIG])
    redeem_script_hash = hash160(redeem_script)
    p2sh_script = CScript([OP_HASH160, redeem_script_hash, OP_EQUAL])

    # Create a p2sh transaction
    p2sh_tx = self.create_tx(out[6], 1, p2sh_script)

    # Add the transaction to the block
    block(30)
    update_block(30, [p2sh_tx])
    node.p2p.send_blocks_and_test([self.tip], node)

    # Creates a new transaction using the p2sh transaction included in the
    # last block
    def spend_p2sh_tx(output_script=CScript([OP_TRUE])):
        # Create the transaction
        spent_p2sh_tx = CTransaction()
        spent_p2sh_tx.vin.append(CTxIn(COutPoint(p2sh_tx.sha256, 0), b''))
        spent_p2sh_tx.vout.append(CTxOut(1, output_script))
        # Sign the transaction using the redeem script
        sighash = SignatureHashForkId(
            redeem_script, spent_p2sh_tx, 0, SIGHASH_ALL | SIGHASH_FORKID,
            p2sh_tx.vout[0].nValue)
        sig = private_key.sign(sighash) + \
            bytes(bytearray([SIGHASH_ALL | SIGHASH_FORKID]))
        spent_p2sh_tx.vin[0].scriptSig = CScript([sig, redeem_script])
        spent_p2sh_tx.rehash()
        return spent_p2sh_tx

    # Sigops p2sh limit: block budget minus what the redeem script itself
    # consumes when executed.
    p2sh_sigops_limit = MAX_BLOCK_SIGOPS_PER_MB - \
        redeem_script.GetSigOpCount(True)
    # Too many sigops in one p2sh txn
    too_many_p2sh_sigops = CScript([OP_CHECKSIG] * (p2sh_sigops_limit + 1))
    block(31, spend=out[7], block_size=ONE_MEGABYTE + 1)
    update_block(31, [spend_p2sh_tx(too_many_p2sh_sigops)])
    node.p2p.send_blocks_and_test(
        [self.tip], node, success=False, reject_reason='bad-txn-sigops')

    # Rewind bad block
    tip(30)

    # Max sigops in one p2sh txn
    max_p2sh_sigops = CScript([OP_CHECKSIG] * (p2sh_sigops_limit))
    block(32, spend=out[8], block_size=ONE_MEGABYTE + 1)
    update_block(32, [spend_p2sh_tx(max_p2sh_sigops)])
    node.p2p.send_blocks_and_test([self.tip], node)

    # Ensure that a coinbase with too many sigops is forbidden, even if it
    # doesn't push the total block count over the limit.
    b33 = block(33, spend=out[9], block_size=2 * ONE_MEGABYTE)
    # 20001 sigops
    b33.vtx[0].vout.append(
        CTxOut(0, CScript([OP_CHECKMULTISIG] * 1000 + [OP_CHECKDATASIG])))
    update_block(33, [])
    node.p2p.send_blocks_and_test(
        [b33], node, success=False, reject_reason='bad-txn-sigops')

    # 20000 sigops
    b33.vtx[0].vout[-1].scriptPubKey = CScript([OP_CHECKMULTISIG] * 1000)
    update_block(33, [])
    node.p2p.send_blocks_and_test([b33], node)
class PIVX_FakeStakeTest(BitcoinTestFramework):
    """Base test driving the PIVX 'Fake Stake' attack scenarios.

    Subclasses set ``self.description`` and override :meth:`run_test` to call
    :meth:`test_spam` with the staking/spending inputs for their scenario.
    """

    def set_test_params(self):
        '''
        Setup test environment
        :param:
        :return:
        '''
        self.setup_clean_chain = True
        self.num_nodes = 1
        # Staking must be enabled for the node to accept PoS blocks.
        self.extra_args = [['-staking=1', '-debug=net']] * self.num_nodes

    def setup_network(self):
        '''
        Can't rely on syncing all the nodes when staking=1
        :param:
        :return:
        '''
        self.setup_nodes()
        # Connect every pair of nodes manually instead of the default sync.
        for i in range(self.num_nodes - 1):
            for j in range(i + 1, self.num_nodes):
                connect_nodes_bi(self.nodes, i, j)

    def init_test(self):
        '''
        Initializes test parameters
        :param:
        :return:
        '''
        self.log.info("\n\n*** Starting %s ***\n------------------------\n%s\n",
                      self.__class__.__name__, self.description)
        # Global Test parameters (override in run_test)
        self.DEFAULT_FEE = 0.1
        # Spam blocks to send in current test
        self.NUM_BLOCKS = 30

        # Setup the p2p connections and start up the network thread.
        self.test_nodes = []
        for i in range(self.num_nodes):
            self.test_nodes.append(TestNode())
            self.test_nodes[i].peer_connect('127.0.0.1', p2p_port(i))

        network_thread_start()  # Start up network handling in another thread
        self.node = self.nodes[0]

        # Let the test nodes get in sync
        for i in range(self.num_nodes):
            self.test_nodes[i].wait_for_verack()

    def run_test(self):
        '''
        Performs the attack of this test - run init_test first.
        :param:
        :return:
        '''
        self.description = ""
        self.init_test()
        return

    def create_spam_block(self, hashPrevBlock, stakingPrevOuts, height,
                          fStakeDoubleSpent=False, fZPoS=False, spendingPrevOuts=None):
        ''' creates a block to spam the network with
        :param   hashPrevBlock:      (hex string) hash of previous block
                 stakingPrevOuts:    ({COutPoint --> (int, int, int, str)} dictionary)
                                      map outpoints (to be used as staking inputs) to amount, block_time, nStakeModifier, hashStake
                 height:             (int) block height
                 fStakeDoubleSpent:  (bool) spend the coinstake input inside the block
                 fZPoS:              (bool) stake the block with zerocoin
                 spendingPrevOuts:   ({COutPoint --> (int, int, int, str)} dictionary)
                                      map outpoints (to be used as tx inputs) to amount, block_time, nStakeModifier, hashStake
        :return  block:              (CBlock) generated block
        '''
        # NOTE: default was a shared mutable dict ({}); use a None sentinel instead.
        if spendingPrevOuts is None:
            spendingPrevOuts = {}
        self.log.info("Creating Spam Block")

        # If not given inputs to create spam txes, use a copy of the staking inputs
        if len(spendingPrevOuts) == 0:
            spendingPrevOuts = dict(stakingPrevOuts)

        # Get current time
        current_time = int(time.time())
        nTime = current_time & 0xfffffff0

        # Create coinbase TX
        # Even if PoS blocks have empty coinbase vout, the height is required for the vin script
        coinbase = create_coinbase(height)
        coinbase.vout[0].nValue = 0
        coinbase.vout[0].scriptPubKey = b""
        coinbase.nTime = nTime
        coinbase.rehash()

        # Create Block with coinbase
        block = create_block(int(hashPrevBlock, 16), coinbase, nTime)

        # Find valid kernel hash - Create a new private key used for block signing.
        if not block.solve_stake(stakingPrevOuts):
            raise Exception("Not able to solve for any prev_outpoint")

        self.log.info("Stake found. Signing block...")

        # Sign coinstake TX and add it to the block
        signed_stake_tx = self.sign_stake_tx(
            block, stakingPrevOuts[block.prevoutStake][0], fZPoS)
        block.vtx.append(signed_stake_tx)

        # Remove coinstake input prevout unless we want to try double spending in the same block.
        # Skip for zPoS as the spendingPrevouts are just regular UTXOs
        if not fZPoS and not fStakeDoubleSpent:
            del spendingPrevOuts[block.prevoutStake]

        # remove a random prevout from the list
        # (to randomize block creation if the same height is picked two times)
        del spendingPrevOuts[choice(list(spendingPrevOuts))]

        # Create spam for the block. Sign the spendingPrevouts
        self.log.info("Creating spam TXes...")
        for outPoint in spendingPrevOuts:
            value_out = int(spendingPrevOuts[outPoint][0] - self.DEFAULT_FEE * COIN)
            tx = create_transaction(outPoint, b"", value_out, nTime,
                                    scriptPubKey=CScript([self.block_sig_key.get_pubkey(), OP_CHECKSIG]))
            # sign txes
            signed_tx_hex = self.node.signrawtransaction(
                bytes_to_hex_str(tx.serialize()))['hex']
            signed_tx = CTransaction()
            signed_tx.deserialize(BytesIO(hex_str_to_bytes(signed_tx_hex)))
            block.vtx.append(signed_tx)

        # Get correct MerkleRoot and rehash block
        block.hashMerkleRoot = block.calc_merkle_root()
        block.rehash()

        # Sign block with coinstake key and return it
        block.sign_block(self.block_sig_key)
        return block

    def spend_utxo(self, utxo, address_list):
        ''' spend amount from previously unspent output to a provided address
        :param      utxo:           (JSON) returned from listunspent used as input
                    addresslist:    (string) destination address
        :return:    txhash:         (string) tx hash if successful, empty string otherwise
        '''
        try:
            inputs = [{"txid": utxo["txid"], "vout": utxo["vout"]}]
            out_amount = (float(utxo["amount"]) - self.DEFAULT_FEE) / len(address_list)
            outputs = {}
            for address in address_list:
                outputs[address] = out_amount
            spendingTx = self.node.createrawtransaction(inputs, outputs)
            spendingTx_signed = self.node.signrawtransaction(spendingTx)
            if spendingTx_signed["complete"]:
                txhash = self.node.sendrawtransaction(spendingTx_signed["hex"])
                return txhash
            else:
                self.log.warning("Error: %s" % str(spendingTx_signed["errors"]))
                return ""
        except JSONRPCException as e:
            self.log.error("JSONRPCException: %s" % str(e))
            return ""

    def spend_utxos(self, utxo_list, address_list=None):
        ''' spend utxos to provided list of addresses or 10 new generate ones.
        :param      utxo_list:      (JSON list) returned from listunspent used as input
                    address_list:   (string list) [optional] recipient PIVX addresses. if not set,
                                    10 new addresses will be generated from the wallet for each tx.
        :return:    txHashes        (string list) tx hashes
        '''
        # BUGFIX: the default used to be a shared mutable list ([]) which this
        # method appends to, so generated addresses leaked across calls.
        if address_list is None:
            address_list = []
        txHashes = []

        # If not given, get 10 new addresses from self.node wallet
        if address_list == []:
            for i in range(10):
                address_list.append(self.node.getnewaddress())

        for utxo in utxo_list:
            try:
                # spend current utxo to provided addresses
                txHash = self.spend_utxo(utxo, address_list)
                if txHash != "":
                    txHashes.append(txHash)
            except JSONRPCException as e:
                self.log.error("JSONRPCException: %s" % str(e))
                continue
        return txHashes

    def stake_amplification_step(self, utxo_list, address_list=None):
        ''' spends a list of utxos providing the list of new outputs
        :param      utxo_list:     (JSON list) returned from listunspent used as input
                    address_list:  (string list) [optional] recipient PIVX addresses.
        :return:    new_utxos:     (JSON list) list of new (valid) inputs after the spends
        '''
        self.log.info("--> Stake Amplification step started with %d UTXOs", len(utxo_list))
        txHashes = self.spend_utxos(utxo_list, address_list)
        num_of_txes = len(txHashes)
        new_utxos = []
        if num_of_txes > 0:
            self.log.info("Created %d transactions...Mining 2 blocks to include them..." % num_of_txes)
            self.node.generate(2)
            time.sleep(2)
            new_utxos = self.node.listunspent()

        self.log.info("Amplification step produced %d new \"Fake Stake\" inputs:" % len(new_utxos))
        return new_utxos

    def stake_amplification(self, utxo_list, iterations, address_list=None):
        ''' performs the "stake amplification" which gives higher chances at finding fake stakes
        :param      utxo_list:    (JSON list) returned from listunspent used as input
                    iterations:   (int) amount of stake amplification steps to perform
                    address_list: (string list) [optional] recipient PIVX addresses.
        :return:    all_inputs:   (JSON list) list of all spent inputs
        '''
        self.log.info("** Stake Amplification started with %d UTXOs", len(utxo_list))
        valid_inputs = utxo_list
        all_inputs = []
        for i in range(iterations):
            all_inputs = all_inputs + valid_inputs
            old_inputs = valid_inputs
            valid_inputs = self.stake_amplification_step(old_inputs, address_list)
        self.log.info("** Stake Amplification ended with %d \"fake\" UTXOs", len(all_inputs))
        return all_inputs

    def sign_stake_tx(self, block, stake_in_value, fZPoS=False):
        ''' signs a coinstake transaction
        :param      block:           (CBlock) block with stake to sign
                    stake_in_value:  (int) staked amount
                    fZPoS:           (bool) zerocoin stake
        :return:    stake_tx_signed: (CTransaction) signed tx
        '''
        self.block_sig_key = CECKey()

        if fZPoS:
            self.log.info("Signing zPoS stake...")
            # Create raw zerocoin stake TX (signed)
            raw_stake = self.node.createrawzerocoinstake(block.prevoutStake)
            stake_tx_signed_raw_hex = raw_stake["hex"]
            # Get stake TX private key to sign the block with
            stake_pkey = raw_stake["private-key"]
            self.block_sig_key.set_compressed(True)
            self.block_sig_key.set_secretbytes(bytes.fromhex(stake_pkey))

        else:
            # Create a new private key and get the corresponding public key
            self.block_sig_key.set_secretbytes(hash256(pack('<I', 0xffff)))
            pubkey = self.block_sig_key.get_pubkey()
            # Create the raw stake TX (unsigned)
            scriptPubKey = CScript([pubkey, OP_CHECKSIG])
            outNValue = int(stake_in_value + 2 * COIN)
            stake_tx_unsigned = CTransaction()
            stake_tx_unsigned.nTime = block.nTime
            stake_tx_unsigned.vin.append(CTxIn(block.prevoutStake))
            stake_tx_unsigned.vin[0].nSequence = 0xffffffff
            stake_tx_unsigned.vout.append(CTxOut())
            stake_tx_unsigned.vout.append(CTxOut(outNValue, scriptPubKey))
            # Sign the stake TX
            stake_tx_signed_raw_hex = self.node.signrawtransaction(
                bytes_to_hex_str(stake_tx_unsigned.serialize()))['hex']

        # Deserialize the signed raw tx into a CTransaction object and return it
        stake_tx_signed = CTransaction()
        stake_tx_signed.deserialize(BytesIO(hex_str_to_bytes(stake_tx_signed_raw_hex)))
        return stake_tx_signed

    def get_prevouts(self, utxo_list, blockHeight, zpos=False):
        ''' get prevouts (map) for each utxo in a list
        :param      utxo_list: <if zpos=False> (JSON list) utxos returned from listunspent used as input
                               <if zpos=True>  (JSON list) mints returned from listmintedzerocoins used as input
                    blockHeight:               (int) height of the previous block
                    zpos:                      (bool) type of utxo_list
        :return:    stakingPrevOuts:           ({COutPoint --> (int, int, int, str)} dictionary)
                                               map outpoints to amount, block_time, nStakeModifier, hashStake
        '''
        zerocoinDenomList = [1, 5, 10, 50, 100, 500, 1000, 5000]
        stakingPrevOuts = {}

        for utxo in utxo_list:
            if zpos:
                # get mint checkpoint
                checkpointHeight = blockHeight - 200
                checkpointBlock = self.node.getblock(self.node.getblockhash(checkpointHeight), True)
                checkpoint = int(checkpointBlock['acc_checkpoint'], 16)
                # parse checksum and get checksumblock
                pos = zerocoinDenomList.index(utxo['denomination'])
                checksum = (checkpoint >> (32 * (len(zerocoinDenomList) - 1 - pos))) & 0xFFFFFFFF
                checksumBlock = self.node.getchecksumblock(hex(checksum), utxo['denomination'], True)
                # get block hash and block time
                txBlockhash = checksumBlock['hash']
                txBlocktime = checksumBlock['time']
            else:
                # get raw transaction for current input
                utxo_tx = self.node.getrawtransaction(utxo['txid'], 1)
                # get block hash and block time
                txBlocktime = utxo_tx['blocktime']
                txBlockhash = utxo_tx['blockhash']

            # get Stake Modifier
            stakeModifier = int(self.node.getblock(txBlockhash)['modifier'], 16)

            # assemble prevout object
            utxo_to_stakingPrevOuts(utxo, stakingPrevOuts, txBlocktime, stakeModifier, zpos)

        return stakingPrevOuts

    def log_data_dir_size(self):
        ''' Prints the size of the '/regtest/blocks' directory.
        :param:
        :return:
        '''
        init_size = dir_size(self.node.datadir + "/regtest/blocks")
        self.log.info("Size of data dir: %s kilobytes" % str(init_size))

    def test_spam(self, name, staking_utxo_list, fRandomHeight=False, randomRange=0, randomRange2=0,
                  fDoubleSpend=False, fMustPass=False, fZPoS=False, spending_utxo_list=None):
        ''' General method to create, send and test the spam blocks
        :param    name:               (string) chain branch (usually either "Main" or "Forked")
                  staking_utxo_list:  (string list) utxos to use for staking
                  fRandomHeight:      (bool) send blocks at random height
                  randomRange:        (int) if fRandomHeight=True, height is >= current-randomRange
                  randomRange2:       (int) if fRandomHeight=True, height is < current-randomRange2
                  fDoubleSpend:       (bool) if true, stake input is double spent in block.vtx
                  fMustPass:          (bool) if true, the blocks must be stored on disk
                  fZPoS:              (bool) stake the block with zerocoin
                  spending_utxo_list: (string list) utxos to use for spending
        :return:  err_msgs:           (string list) reports error messages from the test
                                      or an empty list if test is successful
        '''
        # NOTE: default was a shared mutable list ([]); use a None sentinel instead.
        if spending_utxo_list is None:
            spending_utxo_list = []
        # Create empty error messages list
        err_msgs = []
        # Log initial datadir size
        self.log_data_dir_size()
        # Get latest block number and hash
        block_count = self.node.getblockcount()
        pastBlockHash = self.node.getblockhash(block_count)
        randomCount = block_count
        self.log.info("Current height: %d" % block_count)
        for i in range(0, self.NUM_BLOCKS):
            if i != 0:
                self.log.info("Sent %d blocks out of %d" % (i, self.NUM_BLOCKS))

            # if fRandomHeight=True get a random block number (in range) and corresponding hash
            if fRandomHeight:
                randomCount = randint(block_count - randomRange, block_count - randomRange2)
                pastBlockHash = self.node.getblockhash(randomCount)

            # Get spending prevouts and staking prevouts for the height of current block
            current_block_n = randomCount + 1
            stakingPrevOuts = self.get_prevouts(staking_utxo_list, randomCount, zpos=fZPoS)
            spendingPrevOuts = self.get_prevouts(spending_utxo_list, randomCount)

            # Create the spam block
            block = self.create_spam_block(pastBlockHash, stakingPrevOuts, current_block_n,
                                           fStakeDoubleSpent=fDoubleSpend, fZPoS=fZPoS,
                                           spendingPrevOuts=spendingPrevOuts)

            # Log time and size of the block
            block_time = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(block.nTime))
            block_size = len(block.serialize()) / 1000
            self.log.info("Sending block %d [%s...] - nTime: %s - Size (kb): %.2f",
                          current_block_n, block.hash[:7], block_time, block_size)

            # Try submitblock
            var = self.node.submitblock(bytes_to_hex_str(block.serialize()))
            time.sleep(1)
            if (not fMustPass and var not in [None, "bad-txns-invalid-zpiv"]) or (fMustPass and var != "inconclusive"):
                self.log.error("submitblock [fMustPass=%s] result: %s" % (str(fMustPass), str(var)))
                err_msgs.append("submitblock %d: %s" % (current_block_n, str(var)))

            # Try sending the message block
            msg = msg_block(block)
            try:
                self.test_nodes[0].handle_connect()
                self.test_nodes[0].send_message(msg)
                time.sleep(2)
                block_ret = self.node.getblock(block.hash)
                if not fMustPass and block_ret is not None:
                    self.log.error("Error, block stored in %s chain" % name)
                    err_msgs.append("getblock %d: result not None" % current_block_n)
                if fMustPass:
                    if block_ret is None:
                        self.log.error("Error, block NOT stored in %s chain" % name)
                        err_msgs.append("getblock %d: result is None" % current_block_n)
                    else:
                        self.log.info("Good. Block IS stored on disk.")

            except JSONRPCException as e:
                exc_msg = str(e)
                if exc_msg == "Can't read block from disk (-32603)":
                    if fMustPass:
                        self.log.warning("Bad! Block was NOT stored to disk.")
                        err_msgs.append(exc_msg)
                    else:
                        self.log.info("Good. Block was not stored on disk.")
                else:
                    self.log.warning(exc_msg)
                    err_msgs.append(exc_msg)

            except Exception as e:
                exc_msg = str(e)
                self.log.error(exc_msg)
                err_msgs.append(exc_msg)

        self.log.info("Sent all %s blocks." % str(self.NUM_BLOCKS))
        # Log final datadir size
        self.log_data_dir_size()
        # Return errors list
        return err_msgs
class MemepoolAcceptingTransactionsDuringReorg(BitcoinTestFramework):
    """Check mempool behaviour while blocks are being disconnected/connected.

    Three scenarios flood the transaction validator (PTV) while a reorg is in
    progress and verify the mempool ends up in the expected state.
    """

    def __init__(self, *a, **kw):
        super(MemepoolAcceptingTransactionsDuringReorg, self).__init__(*a, **kw)
        # Single key pair reused for every P2PK output created by this test.
        self.private_key = CECKey()
        self.private_key.set_secretbytes(b"fatstacks")
        self.public_key = self.private_key.get_pubkey()

    def set_test_params(self):
        self.setup_clean_chain = True
        self.num_nodes = 1

    def setup_network(self):
        self.setup_nodes()

    def setup_nodes(self):
        self.add_nodes(self.num_nodes)

    # Slow-to-evaluate script prefix: pushes two huge byte arrays, multiplies,
    # drops the result. Used to keep block validation busy during the reorg.
    long_eval_script = [
        bytearray(b"x" * 300000),
        bytearray(b"y" * 290000),
        OP_MUL,
        OP_DROP
    ]

    def create_tx(self, outpoints, noutput, feerate, make_long_eval_script=False):
        """creates p2pk transaction always using the same key (created in constructor),

        :param outpoints: list of (parent_tx, output_index) pairs to spend
        :param noutput: number of equal-value outputs to create
        :param feerate: fee rate used to size the per-output fee
        :param make_long_eval_script: if set we are prepending long evaluating
            script to the locking script
        :return: the signed CTransaction
        """
        pre_script = MemepoolAcceptingTransactionsDuringReorg.long_eval_script if make_long_eval_script else []

        tx = CTransaction()
        total_input = 0
        for parent_tx, n in outpoints:
            # Placeholder scriptSig (72 bytes) so the serialized size used for
            # the fee calculation roughly matches the final signed size.
            tx.vin.append(
                CTxIn(COutPoint(parent_tx.sha256, n), CScript([b"0" * 72]), 0xffffffff))
            total_input += parent_tx.vout[n].nValue

        for _ in range(noutput):
            tx.vout.append(
                CTxOut(total_input // noutput,
                       CScript(pre_script + [self.public_key, OP_CHECKSIG])))

        tx.rehash()
        tx_size = len(tx.serialize())
        fee_per_output = int(tx_size * feerate // noutput)
        for output in tx.vout:
            output.nValue -= fee_per_output

        # BUGFIX: the sighash must be computed for each input's own index.
        # The original hard-coded index 0, which only works for single-input
        # transactions (the only kind this test happens to create).
        for input_index, (txin, (parent_tx, n)) in enumerate(zip(tx.vin, outpoints)):
            sighash = SignatureHashForkId(parent_tx.vout[n].scriptPubKey, tx,
                                          input_index,
                                          SIGHASH_ALL | SIGHASH_FORKID,
                                          parent_tx.vout[n].nValue)
            txin.scriptSig = CScript([
                self.private_key.sign(sighash) +
                bytes(bytearray([SIGHASH_ALL | SIGHASH_FORKID]))
            ])

        tx.rehash()
        return tx

    def make_block(self, txs, parent_hash, parent_height, parent_time):
        """ creates a block with given transactions"""
        block = create_block(int(parent_hash, 16),
                             coinbase=create_coinbase(pubkey=self.public_key,
                                                      height=parent_height + 1),
                             nTime=parent_time + 1)
        block.vtx.extend(txs)
        block.hashMerkleRoot = block.calc_merkle_root()
        block.calc_sha256()
        block.solve()
        return block

    def run_test(self):
        with self.run_node_with_connections(
                "Preparation", 0,
                [
                    "-blockmintxfee=0.00001",
                    "-relayfee=0.000005",
                    "-checkmempool=0",
                ],
                number_of_connections=1) as (conn, ):

            mining_fee = 1.1

            # create block with coinbase
            coinbase = create_coinbase(pubkey=self.public_key, height=1)
            first_block = create_block(int(conn.rpc.getbestblockhash(), 16),
                                       coinbase=coinbase)
            first_block.solve()
            conn.send_message(msg_block(first_block))
            wait_until(lambda: conn.rpc.getbestblockhash() == first_block.hash,
                       check_interval=0.3)

            # mature the coinbase
            conn.rpc.generate(150)

            funding_tx = self.create_tx([(coinbase, 0)], 2006, mining_fee)
            conn.send_message(msg_tx(funding_tx))
            check_mempool_equals(conn.rpc, [funding_tx])

            # generates a root block with our funding transaction
            conn.rpc.generate(1)

            # create 2000 standard p2pk transactions
            a1_txs = []
            for m in range(2000):
                a1_txs.append(self.create_tx([(funding_tx, m)], 1, mining_fee))

            a1_spends = []
            for a1_tx in a1_txs:
                a1_spends.append(self.create_tx([(a1_tx, 0)], 1, mining_fee))

            # create 2000 standard p2pk transactions which are spending the same outputs as a1_txs
            double_spend_txs = []
            for m in range(2000):
                double_spend_txs.append(
                    self.create_tx([(funding_tx, m)], 1, mining_fee))

            TX_COUNT = 8
            # create six chains of long-evaluating transactions, one chain of
            # TX_COUNT txs for each of blocks b1, b2, c1, c2, d1, and d2
            long_eval_txs = []
            for m in range(2000, 2006):
                long_eval_txs.append(
                    self.create_tx([(funding_tx, m)], 1, 0.0001,
                                   make_long_eval_script=True))
                for _ in range(TX_COUNT - 1):
                    long_eval_txs.append(
                        self.create_tx([(long_eval_txs[-1], 0)], 1, 0.0001,
                                       make_long_eval_script=True))

            root_block_info = conn.rpc.getblock(conn.rpc.getbestblockhash())
            root_hash = root_block_info["hash"]
            root_height = root_block_info["height"]
            root_time = root_block_info["time"]

            # create all blocks needed for this test
            block_a1 = self.make_block(a1_txs, root_hash, root_height, root_time)
            block_b1 = self.make_block(
                long_eval_txs[0 * TX_COUNT:1 * TX_COUNT], root_hash, root_height, root_time)
            block_b2 = self.make_block(
                long_eval_txs[1 * TX_COUNT:2 * TX_COUNT], block_b1.hash, root_height + 1, root_time + 100)
            block_c1 = self.make_block(
                long_eval_txs[2 * TX_COUNT:3 * TX_COUNT], root_hash, root_height, root_time)
            block_c2 = self.make_block(
                long_eval_txs[3 * TX_COUNT:4 * TX_COUNT], block_c1.hash, root_height + 1, root_time + 101)
            block_d1 = self.make_block(
                long_eval_txs[4 * TX_COUNT:5 * TX_COUNT], root_hash, root_height, root_time)
            block_d2 = self.make_block(
                long_eval_txs[5 * TX_COUNT:6 * TX_COUNT], block_d1.hash, root_height + 1, root_time + 102)

            conn.send_message(msg_block(block_a1))
            wait_until(lambda: conn.rpc.getbestblockhash() == block_a1.hash,
                       check_interval=0.3)

        with self.run_node_with_connections(
                "1. Try sending the same transaction that are in the disconnected block during the reorg",
                0,
                [
                    "-blockmintxfee=0.00001", "-relayfee=0.000005",
                    "-maxtxsizepolicy=0", "-maxstdtxnsperthreadratio=1",
                    "-maxnonstdtxnsperthreadratio=1",
                    '-maxnonstdtxvalidationduration=100000',
                    '-maxtxnvalidatorasynctasksrunduration=100001',
                    '-genesisactivationheight=1',
                    '-maxstackmemoryusageconsensus=2GB',
                    "-maxscriptsizepolicy=2GB", "-acceptnonstdoutputs=1",
                ],
                number_of_connections=1) as (conn, ):

            # send all transactions form block a1 at once and flood the PTV
            for tx in a1_txs:
                conn.send_message(msg_tx(tx))

            # announce blocks b1, and b2 and send them triggering the reorg
            headers = msg_headers()
            headers.headers.append(block_b1)
            headers.headers.append(block_b2)
            conn.send_message(headers)
            conn.send_message(msg_block(block_b1))
            conn.send_message(msg_block(block_b2))

            # here we are having the PTV and PBV working at the same time, filling the mempool while
            # the a1 is disconnected

            # check if everything is as expected
            wait_until(lambda: conn.rpc.getbestblockhash() == block_b2.hash,
                       timeout=60, check_interval=1)
            check_mempool_equals(conn.rpc, a1_txs)

            # now prepare for next scenario
            conn.rpc.invalidateblock(block_b1.hash)
            wait_until(lambda: conn.rpc.getbestblockhash() == block_a1.hash,
                       check_interval=1)
            # transactions from the disconnected blocks b1 and b2 will not be added to mempool because of
            # the insufficient priority (zero fee)
            check_mempool_equals(conn.rpc, [], timeout=60, check_interval=1)

        with self.run_node_with_connections(
                "2. Try sending transaction that are spending same inputs as transactions in the disconnected block during the reorg",
                0,
                [
                    "-blockmintxfee=0.00001", "-relayfee=0.000005",
                    "-maxtxsizepolicy=0", "-maxstdtxnsperthreadratio=1",
                    "-maxnonstdtxnsperthreadratio=1",
                    '-maxnonstdtxvalidationduration=100000',
                    '-maxtxnvalidatorasynctasksrunduration=100001',
                    '-genesisactivationheight=1',
                    '-maxstackmemoryusageconsensus=2GB',
                    "-maxscriptsizepolicy=2GB", "-acceptnonstdoutputs=1",
                ],
                number_of_connections=1) as (conn, ):

            # see if everything is still as expected
            wait_until(lambda: conn.rpc.getbestblockhash() == block_a1.hash,
                       check_interval=1)
            check_mempool_equals(conn.rpc, [], timeout=60, check_interval=1)

            # send all transactions that are the double-spends of txs form block a1
            for double_spend_tx in double_spend_txs:
                conn.send_message(msg_tx(double_spend_tx))

            # announce and send c1, and c2
            headers = msg_headers()
            headers.headers.append(block_c1)
            headers.headers.append(block_c2)
            conn.send_message(headers)
            conn.send_message(msg_block(block_c1))
            conn.send_message(msg_block(block_c2))

            # here we are having the PTV and PBV working at the same time, filling the mempool with double-spends
            # while the a1 is disconnected

            # see if everything is as expected
            wait_until(lambda: conn.rpc.getbestblockhash() == block_c2.hash,
                       timeout=60, check_interval=1)
            # in the mempool we want all transactions for blocks a1
            # while no double_spend_txs should be present
            check_mempool_equals(conn.rpc, a1_txs, timeout=60, check_interval=1)

            # now prepare for next scenario
            conn.rpc.invalidateblock(block_c1.hash)
            wait_until(lambda: conn.rpc.getbestblockhash() == block_a1.hash,
                       check_interval=1)
            # transactions from the disconnected blocks c1 and c2 will not be added to mempool because of
            # the insufficient priority (zero fee)
            check_mempool_equals(conn.rpc, [], timeout=60, check_interval=1)

        with self.run_node_with_connections(
                "3. Submit transactions that are spending ouputs from disconnecting block and try to mine a block during the reorg",
                0,
                [
                    "-blockmintxfee=0.00001", "-relayfee=0.000005",
                    "-maxtxsizepolicy=0",
                    '-maxnonstdtxvalidationduration=100000',
                    '-maxtxnvalidatorasynctasksrunduration=100001',
                    '-genesisactivationheight=1',
                    '-maxstackmemoryusageconsensus=2GB',
                    "-maxscriptsizepolicy=2GB", "-acceptnonstdoutputs=1",
                ],
                number_of_connections=1) as (conn, ):

            # see if everything is still as expected
            wait_until(lambda: conn.rpc.getbestblockhash() == block_a1.hash,
                       check_interval=1)
            check_mempool_equals(conn.rpc, [], timeout=60, check_interval=1)

            # send transactions that are spending outputs from the soon-to-be-disconnected block (a1)
            for tx in a1_spends:
                conn.send_message(msg_tx(tx))
            check_mempool_equals(conn.rpc, a1_spends, timeout=100)

            # announce blocks d1, and d2 and send them triggering the reorg
            headers = msg_headers()
            headers.headers.append(block_d1)
            headers.headers.append(block_d2)
            conn.send_message(headers)
            conn.send_message(msg_block(block_d1))
            conn.send_message(msg_block(block_d2))

            # lets give a chance for reorg to start
            sleep(0.5)

            # we are in the middle of the reorg, let try to mine a block
            # if we are in inconsistent state this call would fail
            conn.rpc.generate(1)
class FullBlockTest(ComparisonTestFramework):
    """P2SH sigops-limit tests driven through the comparison framework.

    Can either run this test as 1 node with expected answers, or two and
    compare them. Change the "outcome" variable from each TestInstance object
    to only do the comparison.
    """

    def set_test_params(self):
        self.num_nodes = 1
        self.setup_clean_chain = True
        # height bookkeeping for next_block / update_block
        self.block_heights = {}
        # key used to sign all coinbase outputs we later spend
        self.coinbase_key = CECKey()
        self.coinbase_key.set_secretbytes(b"horsebattery")
        self.coinbase_pubkey = self.coinbase_key.get_pubkey()
        self.tip = None
        self.blocks = {}

    def setup_network(self):
        self.extra_args = [['-norelaypriority']]
        self.add_nodes(self.num_nodes, self.extra_args)
        self.start_nodes()

    def add_options(self, parser):
        super().add_options(parser)
        parser.add_option(
            "--runbarelyexpensive", dest="runbarelyexpensive", default=True)

    def run_test(self):
        self.test = TestManager(self, self.options.tmpdir)
        self.test.add_all_connections(self.nodes)
        # Start up network handling in another thread
        NetworkThread().start()
        self.test.run()

    def add_transactions_to_block(self, block, tx_list):
        """Rehash the given transactions and append them to block.vtx."""
        [tx.rehash() for tx in tx_list]
        block.vtx.extend(tx_list)

    # this is a little handier to use than the version in blocktools.py
    def create_tx(self, spend_tx, n, value, script=CScript([OP_TRUE])):
        tx = create_transaction(spend_tx, n, b"", value, script)
        return tx

    # sign a transaction, using the key we know about
    # this signs input 0 in tx, which is assumed to be spending output n in
    # spend_tx
    def sign_tx(self, tx, spend_tx, n):
        scriptPubKey = bytearray(spend_tx.vout[n].scriptPubKey)
        if (scriptPubKey[0] == OP_TRUE):  # an anyone-can-spend
            tx.vin[0].scriptSig = CScript()
            return
        sighash = SignatureHashForkId(
            spend_tx.vout[n].scriptPubKey, tx, 0, SIGHASH_ALL | SIGHASH_FORKID,
            spend_tx.vout[n].nValue)
        tx.vin[0].scriptSig = CScript(
            [self.coinbase_key.sign(sighash) +
             bytes(bytearray([SIGHASH_ALL | SIGHASH_FORKID]))])

    def create_and_sign_transaction(self, spend_tx, n, value,
                                    script=CScript([OP_TRUE])):
        """Convenience wrapper: build a tx spending spend_tx:n and sign it."""
        tx = self.create_tx(spend_tx, n, value, script)
        self.sign_tx(tx, spend_tx, n)
        tx.rehash()
        return tx

    def next_block(self, number, spend=None, additional_coinbase_value=0,
                   script=CScript([OP_TRUE])):
        """Build, solve and register block `number` on top of the current tip.

        :param spend: optional PreviousSpendableOutput consumed by a 1-satoshi
            tx included in the block (remainder goes to the coinbase as fees)
        """
        if self.tip is None:  # was `== None`; identity check is correct for None
            base_block_hash = self.genesis_hash
            block_time = int(time.time()) + 1
        else:
            base_block_hash = self.tip.sha256
            block_time = self.tip.nTime + 1
        # First create the coinbase
        height = self.block_heights[base_block_hash] + 1
        coinbase = create_coinbase(height, self.coinbase_pubkey)
        coinbase.vout[0].nValue += additional_coinbase_value
        coinbase.rehash()
        if spend is None:  # was `== None`
            block = create_block(base_block_hash, coinbase, block_time)
        else:
            # all but one satoshi to fees
            coinbase.vout[0].nValue += spend.tx.vout[spend.n].nValue - 1
            coinbase.rehash()
            block = create_block(base_block_hash, coinbase, block_time)
            # spend 1 satoshi
            tx = create_transaction(spend.tx, spend.n, b"", 1, script)
            self.sign_tx(tx, spend.tx, spend.n)
            self.add_transactions_to_block(block, [tx])
            block.hashMerkleRoot = block.calc_merkle_root()
        # Do PoW, which is very inexpensive on regnet
        block.solve()
        self.tip = block
        self.block_heights[block.sha256] = height
        assert number not in self.blocks
        self.blocks[number] = block
        return block

    def get_tests(self):
        self.genesis_hash = int(self.nodes[0].getbestblockhash(), 16)
        self.block_heights[self.genesis_hash] = 0
        spendable_outputs = []

        # save the current tip so it can be spent by a later block
        def save_spendable_output():
            spendable_outputs.append(self.tip)

        # get an output that we previously marked as spendable
        def get_spendable_output():
            return PreviousSpendableOutput(spendable_outputs.pop(0).vtx[0], 0)

        # returns a test case that asserts that the current tip was accepted
        def accepted():
            return TestInstance([[self.tip, True]])

        # returns a test case that asserts that the current tip was rejected
        def rejected(reject=None):
            if reject is None:
                return TestInstance([[self.tip, False]])
            else:
                return TestInstance([[self.tip, reject]])

        # move the tip back to a previous block
        def tip(number):
            self.tip = self.blocks[number]

        # adds transactions to the block and updates state
        def update_block(block_number, new_transactions):
            block = self.blocks[block_number]
            self.add_transactions_to_block(block, new_transactions)
            old_sha256 = block.sha256
            block.hashMerkleRoot = block.calc_merkle_root()
            block.solve()
            # Update the internal state just like in next_block
            self.tip = block
            if block.sha256 != old_sha256:
                self.block_heights[block.sha256] = self.block_heights[old_sha256]
                del self.block_heights[old_sha256]
            self.blocks[block_number] = block
            return block

        # shorthand for functions
        block = self.next_block
        create_tx = self.create_tx

        # shorthand for variables
        node = self.nodes[0]

        # Create a new block
        block(0)
        save_spendable_output()
        yield accepted()

        # Now we need that block to mature so we can spend the coinbase.
        test = TestInstance(sync_every_block=False)
        for i in range(99):
            block(5000 + i)
            test.blocks_and_transactions.append([self.tip, True])
            save_spendable_output()
        yield test

        # Collect spendable outputs now to avoid cluttering the code later on
        out = []
        for i in range(33):
            out.append(get_spendable_output())

        # P2SH
        # Build the redeem script, hash it, use hash to create the p2sh script
        redeem_script = CScript([self.coinbase_pubkey] + [
            OP_2DUP, OP_CHECKSIGVERIFY] * 5 + [OP_CHECKSIG])
        redeem_script_hash = hash160(redeem_script)
        p2sh_script = CScript([OP_HASH160, redeem_script_hash, OP_EQUAL])

        # Creates a new transaction using a p2sh transaction as input
        def spend_p2sh_tx(p2sh_tx_to_spend, output_script=CScript([OP_TRUE])):
            # Create the transaction
            spent_p2sh_tx = CTransaction()
            spent_p2sh_tx.vin.append(
                CTxIn(COutPoint(p2sh_tx_to_spend.sha256, 0), b''))
            spent_p2sh_tx.vout.append(CTxOut(1, output_script))
            # Sign the transaction using the redeem script
            sighash = SignatureHashForkId(
                redeem_script, spent_p2sh_tx, 0, SIGHASH_ALL | SIGHASH_FORKID,
                p2sh_tx_to_spend.vout[0].nValue)
            sig = self.coinbase_key.sign(
                sighash) + bytes(bytearray([SIGHASH_ALL | SIGHASH_FORKID]))
            spent_p2sh_tx.vin[0].scriptSig = CScript([sig, redeem_script])
            spent_p2sh_tx.rehash()
            return spent_p2sh_tx

        # P2SH tests
        # Create a p2sh transaction
        p2sh_tx = self.create_and_sign_transaction(
            out[0].tx, out[0].n, 1, p2sh_script)

        # Add the transaction to the block
        block(1)
        update_block(1, [p2sh_tx])
        yield accepted()

        # Sigops p2sh limit for the mempool test
        p2sh_sigops_limit_mempool = MAX_STANDARD_TX_SIGOPS - \
            redeem_script.GetSigOpCount(True)
        # Too many sigops in one p2sh script
        too_many_p2sh_sigops_mempool = CScript(
            [OP_CHECKSIG] * (p2sh_sigops_limit_mempool + 1))

        # A transaction with this output script can't get into the mempool
        assert_raises_rpc_error(-26, RPC_TXNS_TOO_MANY_SIGOPS_ERROR,
                                node.sendrawtransaction,
                                ToHex(spend_p2sh_tx(p2sh_tx, too_many_p2sh_sigops_mempool)))

        # The transaction is rejected, so the mempool should still be empty
        assert_equal(set(node.getrawmempool()), set())

        # Max sigops in one p2sh txn
        max_p2sh_sigops_mempool = CScript(
            [OP_CHECKSIG] * (p2sh_sigops_limit_mempool))

        # A transaction with this output script can get into the mempool
        max_p2sh_sigops_txn = spend_p2sh_tx(p2sh_tx, max_p2sh_sigops_mempool)
        max_p2sh_sigops_txn_id = node.sendrawtransaction(
            ToHex(max_p2sh_sigops_txn))
        assert_equal(set(node.getrawmempool()), {max_p2sh_sigops_txn_id})

        # Mine the transaction
        block(2, spend=out[1])
        update_block(2, [max_p2sh_sigops_txn])
        yield accepted()

        # The transaction has been mined, it's not in the mempool anymore
        assert_equal(set(node.getrawmempool()), set())
def __init__(self, *a, **kw):
    # Forward any framework arguments to the base test class.
    # NOTE(review): explicit two-argument super() is kept — this fragment is
    # defined outside a class body, so zero-argument super() would not work.
    super(MemepoolAcceptingTransactionsDuringReorg, self).__init__(*a, **kw)
    # Single key pair reused for every P2PK output created by this test.
    self.private_key = CECKey()
    self.private_key.set_secretbytes(b"fatstacks")
    self.public_key = self.private_key.get_pubkey()
def run_test(self):
    """Exercise the validateblocktemplate RPC against a catalogue of invalid
    blocks (bad tip, bad time, bad version, bad coinbase, bad txns, ...) and a
    few valid ones, asserting the expected rejection reason for each case.
    """
    # Generate enough blocks to trigger certain block votes and activate BIP65 (version 4 blocks)
    if 1:
        # Generate in batches of 100 so peers can stay in sync along the way.
        amt = 1352 - self.nodes[0].getblockcount()
        for i in range(int(amt/100)):
            self.nodes[0].generate(100)
            self.sync_all()
        self.nodes[0].generate(1352 - self.nodes[0].getblockcount())
        self.sync_all()

    logging.info("not on chain tip")
    # A template built on the parent of the tip must be rejected.
    badtip = int(self.nodes[0].getblockhash(self.nodes[0].getblockcount() - 1), 16)
    height = self.nodes[0].getblockcount()
    tip = int(self.nodes[0].getblockhash(height), 16)
    coinbase = create_coinbase(height + 1)
    cur_time = int(time.time())
    # Pin both nodes' clocks so the time-too-old/new cases are deterministic.
    self.nodes[0].setmocktime(cur_time)
    self.nodes[1].setmocktime(cur_time)
    block = create_block(badtip, coinbase, cur_time + 600)
    block.nVersion = 0x20000000
    block.rehash()
    hexblk = ToHex(block)
    expectException(lambda: self.nodes[0].validateblocktemplate(hexblk),
                    JSONRPCException, "invalid block: does not build on chain tip")

    logging.info("time too far in the past")
    block = create_block(tip, coinbase, cur_time - 100)
    block.nVersion = 0x20000000
    block.rehash()
    hexblk = ToHex(block)
    expectException(lambda: self.nodes[0].validateblocktemplate(
        hexblk), JSONRPCException, "invalid block: time-too-old")

    logging.info("time too far in the future")
    block = create_block(tip, coinbase, cur_time + 10000000)
    block.nVersion = 0x20000000
    block.rehash()
    hexblk = ToHex(block)
    expectException(lambda: self.nodes[0].validateblocktemplate(
        hexblk), JSONRPCException, "invalid block: time-too-new")

    # Versions 1-3 are obsolete after BIP65 activation and must be rejected.
    logging.info("bad version 1")
    block = create_block(tip, coinbase, cur_time + 600)
    block.nVersion = 1
    block.rehash()
    hexblk = ToHex(block)
    expectException(lambda: self.nodes[0].validateblocktemplate(
        hexblk), JSONRPCException, "invalid block: bad-version")
    logging.info("bad version 2")
    block = create_block(tip, coinbase, cur_time + 600)
    block.nVersion = 2
    block.rehash()
    hexblk = ToHex(block)
    expectException(lambda: self.nodes[0].validateblocktemplate(
        hexblk), JSONRPCException, "invalid block: bad-version")
    logging.info("bad version 3")
    block = create_block(tip, coinbase, cur_time + 600)
    block.nVersion = 3
    block.rehash()
    hexblk = ToHex(block)
    expectException(lambda: self.nodes[0].validateblocktemplate(
        hexblk), JSONRPCException, "invalid block: bad-version")

    logging.info("bad coinbase height")
    # Coinbase encodes `height` but the block would be at `height + 1`.
    tip = int(self.nodes[0].getblockhash(height), 16)
    block = create_block(tip, create_coinbase(height), cur_time + 600)
    block.nVersion = 0x20000000
    block.rehash()
    hexblk = ToHex(block)
    expectException(lambda: self.nodes[0].validateblocktemplate(
        hexblk), JSONRPCException, "invalid block: bad-cb-height")

    logging.info("bad merkle root")
    block = create_block(tip, coinbase, cur_time + 600)
    block.nVersion = 0x20000000
    block.hashMerkleRoot = 0x12345678
    block.rehash()
    hexblk = ToHex(block)
    expectException(lambda: self.nodes[0].validateblocktemplate(hexblk),
                    JSONRPCException, "invalid block: bad-txnmrklroot")

    logging.info("no tx")
    block = create_block(tip, None, cur_time + 600)
    block.nVersion = 0x20000000
    block.rehash()
    hexblk = ToHex(block)
    expectException(lambda: self.nodes[0].validateblocktemplate(hexblk),
                    JSONRPCException, "invalid block: bad-blk-length")

    logging.info("good block")
    block = create_block(tip, coinbase, cur_time + 600)
    block.nVersion = 0x20000000
    block.rehash()
    hexblk = ToHex(block)

    # ------
    # Template is valid even without proof of work; solve and submit it.
    self.nodes[0].validateblocktemplate(hexblk)
    block.solve()
    hexblk = ToHex(block)
    self.nodes[0].submitblock(hexblk)
    self.sync_all()

    prev_block = block
    # out_value is less than 50BTC because regtest halvings happen every 150 blocks, and is in Satoshis
    out_value = block.vtx[0].vout[0].nValue
    tx1 = create_transaction(prev_block.vtx[0], 0, b'\x61'*50 + b'\x51',
                             [int(out_value / 2), int(out_value / 2)])
    height = self.nodes[0].getblockcount()
    tip = int(self.nodes[0].getblockhash(height), 16)
    coinbase = create_coinbase(height + 1)
    next_time = cur_time + 1200

    logging.info("no coinbase")
    block = create_block(tip, None, next_time, [tx1])
    block.nVersion = 0x20000000
    block.rehash()
    hexblk = ToHex(block)
    expectException(lambda: self.nodes[0].validateblocktemplate(hexblk),
                    JSONRPCException, "invalid block: bad-cb-missing")

    logging.info("double coinbase")
    coinbase_key = CECKey()
    coinbase_key.set_secretbytes(b"horsebattery")
    coinbase_pubkey = coinbase_key.get_pubkey()
    coinbase2 = create_coinbase(height + 1, coinbase_pubkey)
    block = create_block(tip, coinbase, next_time, [coinbase2, tx1])
    block.nVersion = 0x20000000
    block.rehash()
    hexblk = ToHex(block)
    expectException(lambda: self.nodes[0].validateblocktemplate(hexblk),
                    JSONRPCException, "invalid block: bad-cb-multiple")

    logging.info("premature coinbase spend")
    # tx1 spends the coinbase just mined; it is not 100 blocks deep yet.
    block = create_block(tip, coinbase, next_time, [tx1])
    block.nVersion = 0x20000000
    block.rehash()
    hexblk = ToHex(block)
    expectException(lambda: self.nodes[0].validateblocktemplate(hexblk),
                    JSONRPCException, "invalid block: bad-txns-premature-spend-of-coinbase")

    # Mature the coinbase so it becomes spendable for the remaining cases.
    self.nodes[0].generate(100)
    self.sync_all()
    height = self.nodes[0].getblockcount()
    tip = int(self.nodes[0].getblockhash(height), 16)
    coinbase = create_coinbase(height + 1)
    # NOTE(review): this re-assigns the same value as above (cur_time + 1200)
    # rather than advancing the clock — presumably intentional; confirm.
    next_time = cur_time + 1200

    op1 = OP_1.toBin()
    logging.info("inputs below outputs")
    tx6 = create_transaction(prev_block.vtx[0], 0, op1, [out_value + 1000])
    block = create_block(tip, coinbase, next_time, [tx6])
    block.nVersion = 0x20000000
    block.rehash()
    hexblk = ToHex(block)
    expectException(lambda: self.nodes[0].validateblocktemplate(hexblk),
                    JSONRPCException, "invalid block: bad-txns-in-belowout")

    tx5 = create_transaction(prev_block.vtx[0], 0, op1, [int(21000001 * COIN)])
    logging.info("money range")
    # Output exceeds the 21M coin supply cap.
    block = create_block(tip, coinbase, next_time, [tx5])
    block.nVersion = 0x20000000
    block.rehash()
    hexblk = ToHex(block)
    expectException(lambda: self.nodes[0].validateblocktemplate(hexblk),
                    JSONRPCException, "invalid block: bad-txns-vout-toolarge")

    logging.info("bad tx offset")
    # Spends a non-existent output index of the previous coinbase.
    tx_bad = create_broken_transaction(prev_block.vtx[0], 1, op1, [int(out_value / 4)])
    block = create_block(tip, coinbase, next_time, [tx_bad])
    block.nVersion = 0x20000000
    block.rehash()
    hexblk = ToHex(block)
    expectException(lambda: self.nodes[0].validateblocktemplate(hexblk),
                    JSONRPCException, "invalid block: bad-txns-inputs-missingorspent")

    logging.info("bad tx offset largest number")
    tx_bad = create_broken_transaction(prev_block.vtx[0], 0xffffffff, op1, [int(out_value / 4)])
    block = create_block(tip, coinbase, next_time, [tx_bad])
    block.nVersion = 0x20000000
    block.rehash()
    hexblk = ToHex(block)
    expectException(lambda: self.nodes[0].validateblocktemplate(hexblk),
                    JSONRPCException, "invalid block: bad-txns-inputs-missingorspent")

    logging.info("double tx")
    tx2 = create_transaction(prev_block.vtx[0], 0, op1, [int(out_value / 4)])
    block = create_block(tip, coinbase, next_time, [tx2, tx2])
    block.nVersion = 0x20000000
    block.rehash()
    hexblk = ToHex(block)
    expectException(lambda: self.nodes[0].validateblocktemplate(hexblk),
                    JSONRPCException, "repeated-txn")

    tx3 = create_transaction(prev_block.vtx[0], 0, op1,
                             [int(out_value / 9), int(out_value / 10)])
    tx4 = create_transaction(prev_block.vtx[0], 0, op1,
                             [int(out_value / 8), int(out_value / 7)])
    logging.info("double spend")
    # tx3 and tx4 spend the same outpoint.
    block = create_block(tip, coinbase, next_time, [tx3, tx4])
    block.nVersion = 0x20000000
    block.rehash()
    hexblk = ToHex(block)
    expectException(lambda: self.nodes[0].validateblocktemplate(hexblk),
                    JSONRPCException, "invalid block: bad-txns-inputs-missingorspent")

    txes = [tx3, tx4]
    # Force descending txid order to violate the canonical ordering rule.
    txes.sort(key=lambda x: x.hash, reverse=True)
    logging.info("bad tx ordering")
    block = create_block(tip, coinbase, next_time, txes, ctor=False)
    block.nVersion = 0x20000000
    block.rehash()
    hexblk = ToHex(block)
    expectException(lambda: self.nodes[0].validateblocktemplate(hexblk),
                    JSONRPCException, "invalid block: bad-txn-order")

    tx_good = create_transaction(prev_block.vtx[0], 0, b'\x51',
                                 [int(out_value / 50)] * 50, out=b"")
    logging.info("good tx")
    block = create_block(tip, coinbase, next_time, [tx_good])
    block.nVersion = 0x20000000
    block.rehash()
    block.solve()
    hexblk = ToHex(block)
    self.nodes[0].validateblocktemplate(hexblk)
    self.nodes[0].submitblock(hexblk)
    self.sync_all()

    height = self.nodes[0].getblockcount()
    tip = int(self.nodes[0].getblockhash(height), 16)
    coinbase = create_coinbase(height + 1)
    next_time = next_time + 600
    coinbase_key = CECKey()
    coinbase_key.set_secretbytes(b"horsebattery")
    coinbase_pubkey = coinbase_key.get_pubkey()
    coinbase3 = create_coinbase(height + 1, coinbase_pubkey)

    # Fan out the 50 outputs of tx_good into 50 transactions of 50 outputs each.
    txl = []
    for i in range(0, 50):
        ov = block.vtx[1].vout[i].nValue
        txl.append(create_transaction(block.vtx[1], i, op1, [int(ov / 50)] * 50))
    block = create_block(tip, coinbase, next_time, txl)
    block.nVersion = 0x20000000
    block.rehash()
    block.solve()
    hexblk = ToHex(block)
    for n in self.nodes:
        n.validateblocktemplate(hexblk)

    logging.info("excessive")
    # Shrink the node's excessive-block limit below this block's size.
    self.nodes[0].setminingmaxblock(1000)
    self.nodes[0].setexcessiveblock(1000, 12)
    expectException(lambda: self.nodes[0].validateblocktemplate(hexblk),
                    JSONRPCException, "invalid block: excessive")
    logging.info("EB min value")
    # The excessive size may not drop below the mining max block size.
    self.nodes[0].setminingmaxblock(1000)
    expectException(lambda: self.nodes[0].setexcessiveblock(999, 12), JSONRPCException,
                    "Sorry, your maximum mined block (1000) is larger than your proposed excessive size (999). This would cause you to orphan your own blocks.")
    # Restore sane limits before continuing.
    self.nodes[0].setexcessiveblock(16 * 1000 * 1000, 12)
    self.nodes[0].setminingmaxblock(1000 * 1000)

    # Fuzz the hex template: flip one nibble at a random position 100 times.
    for it in range(0, 100):
        # if (it&1023)==0: print(it)
        h2 = hexblk
        pos = random.randint(0, len(hexblk))
        val = random.randint(0, 15)
        h3 = h2[:pos] + ('%x' % val) + h2[pos + 1:]
        try:
            self.nodes[0].validateblocktemplate(h3)
        except JSONRPCException as e:
            if not (e.error["code"] == -1 or e.error["code"] == -22):
                print(str(e))
            # its ok we expect garbage

    self.nodes[1].submitblock(hexblk)
    self.sync_all()

    height = self.nodes[0].getblockcount()
    tip = int(self.nodes[0].getblockhash(height), 16)
    coinbase = create_coinbase(height + 1)
    next_time = next_time + 600
    prev_block = block
    # Spend every output with a 100-CHECKSIG script to blow the sigops limit.
    txl = []
    for tx in prev_block.vtx:
        for outp in range(0, len(tx.vout)):
            ov = tx.vout[outp].nValue
            txl.append(create_transaction(tx, outp, CScript([OP_CHECKSIG] * 100),
                                          [int(ov / 2)] * 2))
    block = create_block(tip, coinbase, next_time, txl)
    block.nVersion = 0x20000000
    block.rehash()
    block.solve()
    hexblk = ToHex(block)
    for n in self.nodes:
        expectException(lambda: n.validateblocktemplate(hexblk),
                        JSONRPCException, "invalid block: bad-blk-sigops")
class FullBlockTest(ComparisonTestFramework):
    ''' Can either run this test as 1 node with expected answers, or two and compare them.
        Change the "outcome" variable from each TestInstance object to only do the comparison. '''

    def __init__(self):
        # Call the framework initializer first, matching the other
        # ComparisonTestFramework subclasses in this repository.
        super().__init__()
        self.num_nodes = 1
        self.block_heights = {}
        self.coinbase_key = CECKey()
        # FIX: bytes("horsebattery") raises TypeError on Python 3 (bytes from
        # str needs an encoding); a bytes literal is what set_secretbytes wants.
        self.coinbase_key.set_secretbytes(b"horsebattery")
        self.coinbase_pubkey = self.coinbase_key.get_pubkey()
        self.block_time = int(time.time()) + 1
        self.tip = None
        self.blocks = {}

    def run_test(self):
        """Drive the comparison test manager over all generated test instances."""
        test = TestManager(self, self.options.tmpdir)
        test.add_all_connections(self.nodes)
        NetworkThread().start()  # Start up network handling in another thread
        test.run()

    def add_transactions_to_block(self, block, tx_list):
        """Append tx_list to block, recompute the merkle root and block hash."""
        for tx in tx_list:
            tx.rehash()
        block.vtx.extend(tx_list)
        block.hashMerkleRoot = block.calc_merkle_root()
        block.rehash()
        return block

    # Create a block on top of self.tip, and advance self.tip to point to the new block
    # if spend is specified, then 1 satoshi will be spent from that to an anyone-can-spend output,
    # and rest will go to fees.
    def next_block(self, number, spend=None, additional_coinbase_value=0, script=None):
        """Build, solve and register block `number`; returns the new block.

        spend: optional PreviousSpendableOutput consumed by an extra tx.
        additional_coinbase_value: satoshis added to the coinbase subsidy.
        script: scriptPubKey for the spend tx's second output (default OP_TRUE).
        """
        if self.tip is None:
            base_block_hash = self.genesis_hash
        else:
            base_block_hash = self.tip.sha256
        # First create the coinbase
        height = self.block_heights[base_block_hash] + 1
        coinbase = create_coinbase(height, self.coinbase_pubkey)
        coinbase.vout[0].nValue += additional_coinbase_value
        if spend is not None:
            # all but one satoshi to fees
            coinbase.vout[0].nValue += spend.tx.vout[spend.n].nValue - 1
        coinbase.rehash()
        block = create_block(base_block_hash, coinbase, self.block_time)
        if spend is not None:
            tx = CTransaction()
            # FIX: scriptSig must be bytes, not str, to serialize on Python 3.
            tx.vin.append(
                CTxIn(COutPoint(spend.tx.sha256, spend.n), b"", 0xffffffff))  # no signature yet
            # This copies the java comparison tool testing behavior: the first
            # txout has a garbage scriptPubKey, "to make sure we're not
            # pre-verifying too much" (?)
            tx.vout.append(CTxOut(0, CScript([random.randint(0, 255), height & 255])))
            if script is None:
                tx.vout.append(CTxOut(1, CScript([OP_TRUE])))
            else:
                tx.vout.append(CTxOut(1, script))
            # Now sign it if necessary
            scriptSig = b""
            scriptPubKey = bytearray(spend.tx.vout[spend.n].scriptPubKey)
            if scriptPubKey[0] == OP_TRUE:
                # looks like an anyone-can-spend
                scriptSig = CScript([OP_TRUE])
            else:
                # We have to actually sign it
                (sighash, err) = SignatureHash(
                    spend.tx.vout[spend.n].scriptPubKey, tx, 0, SIGHASH_ALL)
                scriptSig = CScript(
                    [self.coinbase_key.sign(sighash) + bytes(bytearray([SIGHASH_ALL]))])
            tx.vin[0].scriptSig = scriptSig
            # Now add the transaction to the block
            block = self.add_transactions_to_block(block, [tx])
        block.solve()
        self.tip = block
        self.block_heights[block.sha256] = height
        self.block_time += 1
        assert number not in self.blocks
        self.blocks[number] = block
        return block

    def get_tests(self):
        """Yield TestInstances covering forks, double-spends and sigops limits."""
        self.genesis_hash = int(self.nodes[0].getbestblockhash(), 16)
        self.block_heights[self.genesis_hash] = 0
        spendable_outputs = []

        # save the current tip so it can be spent by a later block
        def save_spendable_output():
            spendable_outputs.append(self.tip)

        # get an output that we previous marked as spendable
        def get_spendable_output():
            return PreviousSpendableOutput(spendable_outputs.pop(0).vtx[0], 0)

        # returns a test case that asserts that the current tip was accepted
        def accepted():
            return TestInstance([[self.tip, True]])

        # returns a test case that asserts that the current tip was rejected
        def rejected():
            return TestInstance([[self.tip, False]])

        # move the tip back to a previous block
        def tip(number):
            self.tip = self.blocks[number]

        # creates a new block and advances the tip to that block
        block = self.next_block

        # Create a new block
        block(0)
        save_spendable_output()
        yield accepted()

        # Now we need that block to mature so we can spend the coinbase.
        test = TestInstance(sync_every_block=False)
        for i in range(100):
            block(1000 + i)
            test.blocks_and_transactions.append([self.tip, True])
            save_spendable_output()
        yield test

        # Start by bulding a couple of blocks on top (which output is spent is in parentheses):
        # genesis -> b1 (0) -> b2 (1)
        out0 = get_spendable_output()
        block(1, spend=out0)
        save_spendable_output()
        yield accepted()

        out1 = get_spendable_output()
        block(2, spend=out1)
        # Inv again, then deliver twice (shouldn't break anything).
        yield accepted()

        # so fork like this:
        #
        # genesis -> b1 (0) -> b2 (1)
        #                  \-> b3 (1)
        #
        # Nothing should happen at this point. We saw b2 first so it takes priority.
        tip(1)
        block(3, spend=out1)
        # Deliver twice (should still not break anything)
        yield rejected()

        # Now we add another block to make the alternative chain longer.
        #
        # genesis -> b1 (0) -> b2 (1)
        #                  \-> b3 (1) -> b4 (2)
        out2 = get_spendable_output()
        block(4, spend=out2)
        yield accepted()

        # ... and back to the first chain.
        # genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3)
        #                  \-> b3 (1) -> b4 (2)
        tip(2)
        block(5, spend=out2)
        save_spendable_output()
        yield rejected()

        out3 = get_spendable_output()
        block(6, spend=out3)
        yield accepted()

        # Try to create a fork that double-spends
        # genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3)
        #                                      \-> b7 (2) -> b8 (4)
        #                  \-> b3 (1) -> b4 (2)
        tip(5)
        block(7, spend=out2)
        yield rejected()

        out4 = get_spendable_output()
        block(8, spend=out4)
        yield rejected()

        # Try to create a block that has too much fee
        # genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3)
        #                                                 \-> b9 (4)
        #                  \-> b3 (1) -> b4 (2)
        tip(6)
        block(9, spend=out4, additional_coinbase_value=1)
        yield rejected()

        # Create a fork that ends in a block with too much fee (the one that causes the reorg)
        # genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3)
        #                                      \-> b10 (3) -> b11 (4)
        #                  \-> b3 (1) -> b4 (2)
        tip(5)
        block(10, spend=out3)
        yield rejected()

        block(11, spend=out4, additional_coinbase_value=1)
        yield rejected()

        # Try again, but with a valid fork first
        # genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3)
        #                                      \-> b12 (3) -> b13 (4) -> b14 (5)
        #                                      (b12 added last)
        #                  \-> b3 (1) -> b4 (2)
        tip(5)
        b12 = block(12, spend=out3)
        save_spendable_output()
        #yield TestInstance([[b12, False]])
        b13 = block(13, spend=out4)
        # Deliver the block header for b12, and the block b13.
        # b13 should be accepted but the tip won't advance until b12 is delivered.
        yield TestInstance([[CBlockHeader(b12), None], [b13, False]])
        save_spendable_output()
        out5 = get_spendable_output()
        # b14 is invalid, but the node won't know that until it tries to connect
        # Tip still can't advance because b12 is missing
        block(14, spend=out5, additional_coinbase_value=1)
        yield rejected()

        yield TestInstance([[b12, True, b13.sha256]])  # New tip should be b13.

        # Test that a block with a lot of checksigs is okay
        # FIX: use floor division — `/` yields a float on Python 3 and
        # list * float raises TypeError.
        lots_of_checksigs = CScript([OP_CHECKSIG] * (1000000 // 50 - 1))
        tip(13)
        block(15, spend=out5, script=lots_of_checksigs)
        yield accepted()

        # Test that a block with too many checksigs is rejected
        out6 = get_spendable_output()
        too_many_checksigs = CScript([OP_CHECKSIG] * (1000000 // 50))
        block(16, spend=out6, script=too_many_checksigs)
        yield rejected()
def run_test(self):
    """Verify replay-protection activation: before the fork replay-protected
    txns are rejected, after it they become the only valid form, and the
    mempool is cleaned correctly across activation and reorgs.
    """
    node = self.nodes[0]
    node.add_p2p_connection(P2PDataStore())
    network_thread_start()
    node.p2p.wait_for_verack()
    # Pin the clock at the activation timestamp so MTP checks are deterministic.
    node.setmocktime(REPLAY_PROTECTION_START_TIME)

    self.genesis_hash = int(node.getbestblockhash(), 16)
    self.block_heights[self.genesis_hash] = 0
    spendable_outputs = []

    # save the current tip so it can be spent by a later block
    def save_spendable_output():
        spendable_outputs.append(self.tip)

    # get an output that we previously marked as spendable
    def get_spendable_output():
        return PreviousSpendableOutput(spendable_outputs.pop(0).vtx[0], 0)

    # move the tip back to a previous block
    def tip(number):
        self.tip = self.blocks[number]

    # adds transactions to the block and updates state
    def update_block(block_number, new_transactions):
        block = self.blocks[block_number]
        block.vtx.extend(new_transactions)
        old_sha256 = block.sha256
        make_conform_to_ctor(block)
        block.hashMerkleRoot = block.calc_merkle_root()
        block.solve()
        # Update the internal state just like in next_block
        self.tip = block
        if block.sha256 != old_sha256:
            self.block_heights[
                block.sha256] = self.block_heights[old_sha256]
            del self.block_heights[old_sha256]
        self.blocks[block_number] = block
        return block

    # shorthand
    block = self.next_block

    # Create a new block
    block(0)
    save_spendable_output()
    node.p2p.send_blocks_and_test([self.tip], node)

    # Now we need that block to mature so we can spend the coinbase.
    maturity_blocks = []
    for i in range(99):
        block(5000 + i)
        maturity_blocks.append(self.tip)
        save_spendable_output()
    node.p2p.send_blocks_and_test(maturity_blocks, node)

    # collect spendable outputs now to avoid cluttering the code later on
    out = []
    for i in range(100):
        out.append(get_spendable_output())

    # Generate a key pair to test P2SH sigops count
    private_key = CECKey()
    private_key.set_secretbytes(b"replayprotection")
    public_key = private_key.get_pubkey()

    # This is a little handier to use than the version in blocktools.py
    def create_fund_and_spend_tx(spend, forkvalue=0):
        # Fund transaction
        script = CScript([public_key, OP_CHECKSIG])
        txfund = create_transaction(
            spend.tx, spend.n, b'', 50 * COIN, script)
        txfund.rehash()

        # Spend transaction
        txspend = CTransaction()
        txspend.vout.append(CTxOut(50 * COIN - 1000, CScript([OP_TRUE])))
        txspend.vin.append(CTxIn(COutPoint(txfund.sha256, 0), b''))

        # Sign the transaction
        # A non-zero forkvalue produces a "replay protected" signature hash.
        sighashtype = (forkvalue << 8) | SIGHASH_ALL | SIGHASH_FORKID
        sighash = SignatureHashForkId(
            script, txspend, 0, sighashtype, 50 * COIN)
        sig = private_key.sign(sighash) + \
            bytes(bytearray([SIGHASH_ALL | SIGHASH_FORKID]))
        txspend.vin[0].scriptSig = CScript([sig])
        txspend.rehash()

        return [txfund, txspend]

    def send_transaction_to_mempool(tx):
        tx_id = node.sendrawtransaction(ToHex(tx))
        assert(tx_id in set(node.getrawmempool()))
        return tx_id

    # Before the fork, no replay protection required to get in the mempool.
    txns = create_fund_and_spend_tx(out[0])
    send_transaction_to_mempool(txns[0])
    send_transaction_to_mempool(txns[1])

    # And txns get mined in a block properly.
    block(1)
    update_block(1, txns)
    node.p2p.send_blocks_and_test([self.tip], node)

    # Replay protected transactions are rejected.
    replay_txns = create_fund_and_spend_tx(out[1], 0xffdead)
    send_transaction_to_mempool(replay_txns[0])
    assert_raises_rpc_error(-26, RPC_INVALID_SIGNATURE_ERROR,
                            node.sendrawtransaction, ToHex(replay_txns[1]))

    # And block containing them are rejected as well.
    block(2)
    update_block(2, replay_txns)
    node.p2p.send_blocks_and_test(
        [self.tip], node, success=False, reject_reason='blk-bad-inputs')

    # Rewind bad block
    tip(1)

    # Create a block that would activate the replay protection.
    bfork = block(5555)
    bfork.nTime = REPLAY_PROTECTION_START_TIME - 1
    update_block(5555, [])
    node.p2p.send_blocks_and_test([self.tip], node)

    activation_blocks = []
    for i in range(5):
        block(5100 + i)
        activation_blocks.append(self.tip)
    node.p2p.send_blocks_and_test(activation_blocks, node)

    # Check we are just before the activation time
    assert_equal(node.getblockheader(node.getbestblockhash())['mediantime'],
                 REPLAY_PROTECTION_START_TIME - 1)

    # We are just before the fork, replay protected txns still are rejected
    assert_raises_rpc_error(-26, RPC_INVALID_SIGNATURE_ERROR,
                            node.sendrawtransaction, ToHex(replay_txns[1]))

    block(3)
    update_block(3, replay_txns)
    node.p2p.send_blocks_and_test(
        [self.tip], node, success=False, reject_reason='blk-bad-inputs')

    # Rewind bad block
    tip(5104)

    # Send some non replay protected txns in the mempool to check
    # they get cleaned at activation.
    txns = create_fund_and_spend_tx(out[2])
    send_transaction_to_mempool(txns[0])
    tx_id = send_transaction_to_mempool(txns[1])

    # Activate the replay protection
    block(5556)
    node.p2p.send_blocks_and_test([self.tip], node)

    # Check we just activated the replay protection
    assert_equal(node.getblockheader(node.getbestblockhash())['mediantime'],
                 REPLAY_PROTECTION_START_TIME)

    # Non replay protected transactions are not valid anymore,
    # so they should be removed from the mempool.
    assert(tx_id not in set(node.getrawmempool()))

    # Good old transactions are now invalid.
    send_transaction_to_mempool(txns[0])
    assert_raises_rpc_error(-26, RPC_INVALID_SIGNATURE_ERROR,
                            node.sendrawtransaction, ToHex(txns[1]))

    # They also cannot be mined
    block(4)
    update_block(4, txns)
    node.p2p.send_blocks_and_test(
        [self.tip], node, success=False, reject_reason='blk-bad-inputs')

    # Rewind bad block
    tip(5556)

    # The replay protected transaction is now valid
    replay_tx0_id = send_transaction_to_mempool(replay_txns[0])
    replay_tx1_id = send_transaction_to_mempool(replay_txns[1])

    # Make sure the transaction are ready to be mined.
    tmpl = node.getblocktemplate()
    found_id0 = False
    found_id1 = False
    for txn in tmpl['transactions']:
        txid = txn['txid']
        if txid == replay_tx0_id:
            found_id0 = True
        elif txid == replay_tx1_id:
            found_id1 = True
    assert(found_id0 and found_id1)

    # And the mempool is still in good shape.
    assert(replay_tx0_id in set(node.getrawmempool()))
    assert(replay_tx1_id in set(node.getrawmempool()))

    # They also can also be mined
    block(5)
    update_block(5, replay_txns)
    node.p2p.send_blocks_and_test([self.tip], node)

    # Ok, now we check if a reorg work properly across the activation.
    postforkblockid = node.getbestblockhash()
    node.invalidateblock(postforkblockid)
    assert(replay_tx0_id in set(node.getrawmempool()))
    assert(replay_tx1_id in set(node.getrawmempool()))

    # Deactivating replay protection.
    forkblockid = node.getbestblockhash()
    node.invalidateblock(forkblockid)
    # The funding tx is not evicted from the mempool, since it's valid in
    # both sides of the fork
    assert(replay_tx0_id in set(node.getrawmempool()))
    assert(replay_tx1_id not in set(node.getrawmempool()))

    # Check that we also do it properly on deeper reorg.
    node.reconsiderblock(forkblockid)
    node.reconsiderblock(postforkblockid)
    node.invalidateblock(forkblockid)
    assert(replay_tx0_id in set(node.getrawmempool()))
    assert(replay_tx1_id not in set(node.getrawmempool()))
def print_wif_address(secret):
    """Print the WIF encoding and P2PKH address derived from a hex secret."""
    key = CECKey()
    key.set_secretbytes(bytes.fromhex(secret))
    pubkey = key.get_pubkey()
    # 0x80 is the mainnet WIF version byte.
    wif = key.get_wif(b'\x80')
    address = key_to_p2pkh(pubkey, True)
    print(wif, address)
class P2SH(ComparisonTestFramework):
    """Test P2SH behavior around genesis activation: P2SH outputs are accepted
    before the activation height, new ones are rejected after it, while
    pre-existing P2SH outputs remain spendable.
    """

    def set_test_params(self):
        self.num_nodes = 1
        self.setup_clean_chain = True
        self.coinbase_key = CECKey()
        self.coinbase_key.set_secretbytes(b"horsebattery")
        self.coinbase_pubkey = self.coinbase_key.get_pubkey()
        # Height at which genesis rules activate (P2SH outputs become invalid).
        self.genesisactivationheight = 203
        # Build the redeem script, hash it, use hash to create the p2sh script
        self.redeem_script = CScript(
            [self.coinbase_pubkey, OP_2DUP, OP_CHECKSIGVERIFY, OP_CHECKSIG])
        self.p2sh_script = CScript(
            [OP_HASH160, hash160(self.redeem_script), OP_EQUAL])
        self.extra_args = [[
            '-acceptnonstdtxn=0', '-acceptnonstdoutputs=0', '-banscore=1000000',
            f'-genesisactivationheight={self.genesisactivationheight}',
            '-maxgenesisgracefulperiod=1'
        ]]

    def run_test(self):
        self.test.run()

    # Creates a new transaction using a p2sh transaction as input
    def spend_p2sh_tx(self, p2sh_tx_to_spend, output_script=SPEND_OUTPUT,
                      privateKey=None):
        # Default to the coinbase key, which matches self.redeem_script.
        privateKey = privateKey or self.coinbase_key
        # Create the transaction
        spent_p2sh_tx = CTransaction()
        spent_p2sh_tx.vin.append(
            CTxIn(COutPoint(p2sh_tx_to_spend.sha256, 0), b''))
        # 100 satoshis are left to fees.
        spent_p2sh_tx.vout.append(
            CTxOut(p2sh_tx_to_spend.vout[0].nValue - 100, output_script))
        # Sign the transaction using the redeem script
        sighash = SignatureHashForkId(self.redeem_script, spent_p2sh_tx, 0,
                                      SIGHASH_ALL | SIGHASH_FORKID,
                                      p2sh_tx_to_spend.vout[0].nValue)
        sig = privateKey.sign(sighash) + bytes(
            bytearray([SIGHASH_ALL | SIGHASH_FORKID]))
        spent_p2sh_tx.vin[0].scriptSig = CScript([sig, self.redeem_script])
        spent_p2sh_tx.rehash()
        return spent_p2sh_tx

    def get_tests(self):
        """Yield the block/tx scenarios run by the comparison framework."""
        # shorthand for functions
        block = self.chain.next_block
        node = self.nodes[0]
        self.chain.set_genesis_hash(int(node.getbestblockhash(), 16))

        # Create and mature coinbase txs
        test = TestInstance(sync_every_block=False)
        for i in range(200):
            block(i, coinbase_pubkey=self.coinbase_pubkey)
            test.blocks_and_transactions.append([self.chain.tip, True])
            self.chain.save_spendable_output()
        yield test

        # collect spendable outputs now to avoid cluttering the code later on
        coinbase_utxos = [self.chain.get_spendable_output() for _ in range(50)]

        # Create a p2sh transactions that spends coinbase tx
        def new_P2SH_tx():
            output = coinbase_utxos.pop(0)
            return create_and_sign_transaction(
                spend_tx=output.tx, n=output.n,
                value=output.tx.vout[0].nValue - 100,
                private_key=self.coinbase_key,
                script=self.p2sh_script)

        # Add the transactions to the block
        block(200)
        p2sh_txs = [new_P2SH_tx() for _ in range(4)]
        self.chain.update_block(200, p2sh_txs)
        yield self.accepted()

        coinbase_to_p2sh_tx = new_P2SH_tx()

        # rpc tests
        node.signrawtransaction(
            ToHex(coinbase_to_p2sh_tx))  # check if we can sign this tx (sign is unused)
        coinbase_to_p2sh_tx_id = node.sendrawtransaction(
            ToHex(coinbase_to_p2sh_tx))  # sending using rpc

        # Create new private key that will fail with the redeem script
        wrongPrivateKey = CECKey()
        wrongPrivateKey.set_secretbytes(b"wrongkeysecret")
        wrongkey_txn = self.spend_p2sh_tx(p2sh_txs[0],
                                          privateKey=wrongPrivateKey)
        # A transaction with this output script can't get into the mempool
        assert_raises_rpc_error(-26, "mandatory-script-verify-flag-failed",
                                node.sendrawtransaction, ToHex(wrongkey_txn))

        # A transaction with this output script can get into the mempool
        correctkey_tx = self.spend_p2sh_tx(p2sh_txs[1])
        correctkey_tx_id = node.sendrawtransaction(ToHex(correctkey_tx))
        assert_equal(set(node.getrawmempool()),
                     {correctkey_tx_id, coinbase_to_p2sh_tx_id})

        block(201)
        self.chain.update_block(201, [correctkey_tx, coinbase_to_p2sh_tx])
        yield self.accepted()

        assert node.getblockcount() == self.genesisactivationheight - 1
        # This block would be at genesis height
        # transactions with P2SH output will be rejected
        block(202)
        p2sh_tx_after_genesis = new_P2SH_tx()
        self.chain.update_block(202, [p2sh_tx_after_genesis])
        yield self.rejected(RejectResult(16, b'bad-txns-vout-p2sh'))

        # Re-org away from the rejected block and mine a clean one instead.
        self.chain.set_tip(201)
        block(203, coinbase_pubkey=self.coinbase_pubkey)
        yield self.accepted()

        # we are at gensis height
        assert node.getblockcount() == self.genesisactivationheight

        # P2SH transactions are rejected and cant enter the mempool
        assert_raises_rpc_error(-26, "bad-txns-vout-p2sh",
                                node.sendrawtransaction, ToHex(new_P2SH_tx()))

        # Create new private key that would fail with the old redeem script, the same behavior as before genesis
        wrongPrivateKey = CECKey()
        wrongPrivateKey.set_secretbytes(b"wrongkeysecret")
        wrongkey_txn = self.spend_p2sh_tx(p2sh_txs[2],
                                          privateKey=wrongPrivateKey)
        # A transaction with this output script can't get into the mempool
        assert_raises_rpc_error(-26, "mandatory-script-verify-flag-failed",
                                node.sendrawtransaction, ToHex(wrongkey_txn))

        # We can spend old P2SH transactions
        correctkey_tx = self.spend_p2sh_tx(p2sh_txs[3])
        sign_result = node.signrawtransaction(ToHex(correctkey_tx))
        assert sign_result['complete'], "Should be able to sign"
        correctkey_tx_id = node.sendrawtransaction(ToHex(correctkey_tx))
        assert_equal(set(node.getrawmempool()), {correctkey_tx_id})

        # Pre-genesis P2SH outputs are still reported as "scripthash" type.
        tx1_raw = node.getrawtransaction(p2sh_txs[0].hash, True)
        assert tx1_raw["vout"][0]["scriptPubKey"]["type"] == "scripthash"
class RPCSendRawTransactions(ComparisonTestFramework):
    """Functional test for the bulk `sendrawtransactions` RPC interface.

    Exercises valid bulk submits (scenario 1), invalid/malformed inputs
    (scenario 2), resubmission of txs already queued via p2p (scenario 3)
    and duplicated input sets (scenario 4).
    """

    def set_test_params(self):
        """Configure a single clean-chain node and the coinbase key used to sign spends."""
        self.num_nodes = 1
        self.setup_clean_chain = True
        self.genesisactivationheight = 600
        self.coinbase_key = CECKey()
        self.coinbase_key.set_secretbytes(b"horsebattery")
        self.coinbase_pubkey = self.coinbase_key.get_pubkey()
        # All generated txs pay to this P2PK script so sign_tx can sign them.
        self.locking_script = CScript([self.coinbase_pubkey, OP_CHECKSIG])
        self.default_args = ['-debug', '-maxgenesisgracefulperiod=0', '-genesisactivationheight=%d' % self.genesisactivationheight]
        self.extra_args = [self.default_args] * self.num_nodes

    def run_test(self):
        self.test.run()

    def sign_tx(self, tx, spend_tx, n):
        """Sign input 0 of tx, which is assumed to spend output n of spend_tx.

        Uses the coinbase key created in set_test_params with
        SIGHASH_ALL | SIGHASH_FORKID semantics.
        """
        # NOTE: a previously computed, never-used bytearray copy of the
        # scriptPubKey was removed here; the sighash uses the script directly.
        sighash = SignatureHashForkId(spend_tx.vout[n].scriptPubKey, tx, 0, SIGHASH_ALL | SIGHASH_FORKID, spend_tx.vout[n].nValue)
        tx.vin[0].scriptSig = CScript([self.coinbase_key.sign(sighash) + bytes(bytearray([SIGHASH_ALL | SIGHASH_FORKID]))])

    def check_mempool(self, rpc, should_be_in_mempool, timeout=20):
        """Wait until the mempool contains exactly the given transactions (by hash)."""
        wait_until(lambda: set(rpc.getrawmempool()) == {t.hash for t in should_be_in_mempool}, timeout=timeout)

    def get_chained_transactions(self, spend, num_of_transactions, money_to_spend=5000000000):
        """Build a chain of dependent txs: each tx spends the previous one's output 0.

        Each link pays a 1000 satoshi fee (comment in original said "one
        satoshi" but the decrement is 1000).
        """
        txns = []
        for _ in range(0, num_of_transactions):
            money_to_spend = money_to_spend - 1000  # fee per link
            tx = create_transaction(spend.tx, spend.n, b"", money_to_spend, self.locking_script)
            self.sign_tx(tx, spend.tx, spend.n)
            tx.rehash()
            txns.append(tx)
            spend = PreviousSpendableOutput(tx, 0)
        return txns

    def get_txchains_n(self, num_of_chains, chain_length, spend):
        """Create num_of_chains independent chains of chain_length txs each.

        spend is a list of spendable outputs; one output is consumed per chain.
        """
        if num_of_chains > len(spend):
            raise Exception('Insufficient number of spendable outputs.')
        txchains = []
        for x in range(0, num_of_chains):
            txchains += self.get_chained_transactions(spend[x], chain_length)
        return txchains

    def run_scenario1(self, conn, num_of_chains, chain_length, spend, allowhighfees=False, dontcheckfee=False, useRpcWithDefaults=False, shuffle_txs=False, timeout=30):
        """Bulk-submit valid tx chains and expect them all in the mempool.

        shuffle_txs randomises submission order to exercise orphan handling.
        """
        txchains = self.get_txchains_n(num_of_chains, chain_length, spend)
        if shuffle_txs:
            random.shuffle(txchains)
        # Build the json input for a single sendrawtransactions call.
        rpc_txs_bulk_input = []
        for tx in range(len(txchains)):
            if useRpcWithDefaults:
                rpc_txs_bulk_input.append({'hex': ToHex(txchains[tx])})
            else:
                rpc_txs_bulk_input.append({'hex': ToHex(txchains[tx]), 'allowhighfees': allowhighfees, 'dontcheckfee': dontcheckfee})
        rejected_txns = conn.rpc.sendrawtransactions(rpc_txs_bulk_input)
        # All txs are valid: the rejects result must be empty.
        assert_equal(len(rejected_txns), 0)
        self.check_mempool(conn.rpc, txchains, timeout)

    def run_scenario2(self, conn, timeout=30):
        """Exercise error paths: missing inputs, malformed json, bad option values."""
        # A tx spending a non-existent outpoint is rejected as missing-inputs.
        inputs = [{'txid': "1d1d4e24ed99057e84c3f80fd8fbec79ed9e1acee37da269356ecea000000000", 'vout': 1}]  # won't exist
        outputs = {conn.rpc.getnewaddress(): 4.998}
        rawtx = conn.rpc.createrawtransaction(inputs, outputs)
        rawtx = conn.rpc.signrawtransaction(rawtx)
        rejected_txns = conn.rpc.sendrawtransactions([{'hex': rawtx['hex']}])
        assert_equal(len(rejected_txns['invalid']), 1)
        assert_equal(rejected_txns['invalid'][0]['reject_code'], 16)
        assert_equal(rejected_txns['invalid'][0]['reject_reason'], "missing-inputs")
        # Nothing should have entered the mempool.
        assert_equal(conn.rpc.getmempoolinfo()['size'], 0)
        # Empty array / empty object are parameter errors (-8).
        assert_raises_rpc_error(-8, "Invalid parameter: An empty json array of objects", conn.rpc.sendrawtransactions, [])
        assert_raises_rpc_error(-8, "Invalid parameter: An empty json object", conn.rpc.sendrawtransactions, [{}])
        # Missing or non-string 'hex' field.
        assert_raises_rpc_error(-8, "Invalid parameter: Missing the hex string of the raw transaction", conn.rpc.sendrawtransactions, [{'dummy_str': 'dummy_value'}])
        assert_raises_rpc_error(-8, "Invalid parameter: Missing the hex string of the raw transaction", conn.rpc.sendrawtransactions, [{'hex': -1}])
        # Undecodable hex payload.
        assert_raises_rpc_error(-22, "TX decode failed", conn.rpc.sendrawtransactions, [{'hex': '050000000100000000a0ce6e35'}])
        # Non-boolean option values are rejected.
        assert_raises_rpc_error(-8, "allowhighfees: Invalid value", conn.rpc.sendrawtransactions, [{'hex': rawtx['hex'], 'allowhighfees': -1}])
        assert_raises_rpc_error(-8, "allowhighfees: Invalid value", conn.rpc.sendrawtransactions, [{'hex': rawtx['hex'], 'allowhighfees': 'dummy_value'}])
        assert_raises_rpc_error(-8, "dontcheckfee: Invalid value", conn.rpc.sendrawtransactions, [{'hex': rawtx['hex'], 'dontcheckfee': -1}])
        assert_raises_rpc_error(-8, "dontcheckfee: Invalid value", conn.rpc.sendrawtransactions, [{'hex': rawtx['hex'], 'dontcheckfee': 'dummy_value'}])

    def run_scenario3(self, conn, num_of_chains, chain_length, spend, allowhighfees=False, dontcheckfee=False, timeout=30):
        """Submit txs over RPC that are already queued (unprocessed) via p2p.

        Relies on node config that stalls ptv processing, so every RPC
        submission is reported back as 'known'.
        """
        txchains = self.get_txchains_n(num_of_chains, chain_length, spend)
        rpc_txs_bulk_input = []
        for tx in range(len(txchains)):
            rpc_txs_bulk_input.append({'hex': ToHex(txchains[tx]), 'allowhighfees': allowhighfees, 'dontcheckfee': dontcheckfee})
            # Also push each tx through the p2p interface, one by one.
            conn.send_message(msg_tx(txchains[tx]))
        # Wait until all txs sit in the validation queues (ptv is stalled).
        wait_until(lambda: conn.rpc.getblockchainactivity()["transactions"] == num_of_chains * chain_length, timeout=timeout)
        rejected_txns = conn.rpc.sendrawtransactions(rpc_txs_bulk_input)
        # Every tx is already known from p2p, so all are rejected as 'known'.
        assert_equal(len(rejected_txns['known']), num_of_chains * chain_length)
        # None were processed, so the mempool stays empty.
        assert_equal(conn.rpc.getmempoolinfo()['size'], 0)

    def run_scenario4(self, conn, num_of_chains, chain_length, spend, allowhighfees=False, dontcheckfee=False, timeout=30):
        """Submit each tx twice (shuffled): one copy accepted, one reported 'known'."""
        txchains = self.get_txchains_n(num_of_chains, chain_length, spend)
        rpc_txs_bulk_input = []
        for tx in range(len(txchains)):
            # Append every entry twice to create exact duplicates.
            rpc_txs_bulk_input.append({'hex': ToHex(txchains[tx]), 'allowhighfees': allowhighfees, 'dontcheckfee': dontcheckfee})
            rpc_txs_bulk_input.append({'hex': ToHex(txchains[tx]), 'allowhighfees': allowhighfees, 'dontcheckfee': dontcheckfee})
        random.shuffle(rpc_txs_bulk_input)
        rejected_txns = conn.rpc.sendrawtransactions(rpc_txs_bulk_input)
        # Only the 'known' category is present, one entry per duplicate.
        assert_equal(len(rejected_txns), 1)
        assert_equal(len(rejected_txns['known']), num_of_chains * chain_length)
        assert (set(rejected_txns['known']) == {t.hash for t in txchains})
        # The non-duplicate copies are all accepted.
        self.check_mempool(conn.rpc, txchains, timeout)

    def get_tests(self):
        """Generator driving the comparison framework: builds the chain, then runs TS1-TS8."""
        rejected_txs = []

        # NOTE(review): on_reject appears unused in the visible code — kept
        # as-is in case the framework wires it up by reference; confirm.
        def on_reject(conn, msg):
            rejected_txs.append(msg)

        # Shorthand for functions
        block = self.chain.next_block
        node = self.nodes[0]
        self.chain.set_genesis_hash(int(node.getbestblockhash(), 16))

        # Create a new block
        block(0, coinbase_pubkey=self.coinbase_pubkey)
        self.chain.save_spendable_output()
        yield self.accepted()

        # Mature the coinbase and move block height beyond Genesis activation.
        test = TestInstance(sync_every_block=False)
        for i in range(600):
            block(5000 + i, coinbase_pubkey=self.coinbase_pubkey)
            test.blocks_and_transactions.append([self.chain.tip, True])
            self.chain.save_spendable_output()
        yield test

        # Collect spendable outputs now to avoid cluttering the code later on.
        out = []
        for i in range(200):
            out.append(self.chain.get_spendable_output())

        self.stop_node(0)

        # ====================================================================
        # Valid test cases: bulk submits accepted by the mempool.
        # ====================================================================

        # Scenario 1 (TS1): 1K txs, default rpc params, ordered chains.
        num_of_chains = 10
        chain_length = 100
        args = ['-txnvalidationasynchrunfreq=100', '-maxorphantxsize=0', '-limitancestorcount=100', '-checkmempool=0', '-persistmempool=0']
        with self.run_node_with_connections('TS1: {} chains of length {}. Default params for rpc call.'.format(num_of_chains, chain_length), 0, args + self.default_args, number_of_connections=1) as (conn, ):
            self.run_scenario1(conn, num_of_chains, chain_length, out, useRpcWithDefaults=True, timeout=20)

        # Scenario 2 (TS2): 1K txs, default params, shuffled (orphans expected).
        num_of_chains = 10
        chain_length = 100
        args = ['-txnvalidationasynchrunfreq=0', '-limitancestorcount=100', '-checkmempool=0', '-persistmempool=0']
        with self.run_node_with_connections('TS2: {} chains of length {}. Shuffled txs. Default params for rpc call.'.format(num_of_chains, chain_length), 0, args + self.default_args, number_of_connections=1) as (conn, ):
            self.run_scenario1(conn, num_of_chains, chain_length, out, useRpcWithDefaults=True, shuffle_txs=True, timeout=20)

        # Scenario 3 (TS3): 10K txs, default params, ordered chains.
        num_of_chains = 100
        chain_length = 100
        args = ['-txnvalidationasynchrunfreq=0', '-maxorphantxsize=0', '-limitancestorcount=100', '-checkmempool=0', '-persistmempool=0']
        with self.run_node_with_connections('TS3: {} chains of length {}. Default params for rpc call.'.format(num_of_chains, chain_length), 0, args + self.default_args, number_of_connections=1) as (conn, ):
            self.run_scenario1(conn, num_of_chains, chain_length, out, useRpcWithDefaults=True, timeout=30)

        # Scenario 4 (TS4): 1K txs with explicitly declared default params.
        num_of_chains = 10
        chain_length = 100
        allowhighfees = False
        dontcheckfee = False
        args = ['-txnvalidationasynchrunfreq=100', '-maxorphantxsize=0', '-limitancestorcount=100', '-checkmempool=0', '-persistmempool=0']
        with self.run_node_with_connections('TS4: {} chains of length {}. allowhighfees={}, dontcheckfee={}.'.format(num_of_chains, chain_length, str(allowhighfees), str(dontcheckfee)), 0, args + self.default_args, number_of_connections=1) as (conn, ):
            self.run_scenario1(conn, num_of_chains, chain_length, out, allowhighfees, dontcheckfee, timeout=20)

        # Scenario 5 (TS5): 1K txs with non-default params (both flags True).
        num_of_chains = 10
        chain_length = 100
        allowhighfees = True
        dontcheckfee = True
        args = ['-txnvalidationasynchrunfreq=100', '-maxorphantxsize=0', '-limitancestorcount=100', '-checkmempool=0', '-persistmempool=0']
        with self.run_node_with_connections('TS5: {} chains of length {}. allowhighfees={}, dontcheckfee={}.'.format(num_of_chains, chain_length, str(allowhighfees), str(dontcheckfee)), 0, args + self.default_args, number_of_connections=1) as (conn, ):
            self.run_scenario1(conn, num_of_chains, chain_length, out, allowhighfees, dontcheckfee, timeout=20)

        # ====================================================================
        # Invalid test cases and non-empty rejects.
        # ====================================================================

        # Scenario 6 (TS6): invalid input data conditions.
        args = ['-txnvalidationasynchrunfreq=100', '-maxorphantxsize=0', '-limitancestorcount=100', '-checkmempool=0', '-persistmempool=0']
        with self.run_node_with_connections('TS6: Invalid conditions', 0, args + self.default_args, number_of_connections=1) as (conn, ):
            self.run_scenario2(conn, timeout=20)

        # Scenario 7 (TS7): txs already known from p2p (ptv processing stalled).
        num_of_chains = 10
        chain_length = 10
        args = ['-txnvalidationasynchrunfreq=10000',
                '-maxstdtxnsperthreadratio=0',  # Do not take any std txs for processing (from the ptv queues).
                '-maxnonstdtxnsperthreadratio=0',  # Do not take any non-std txs for processing (from the ptv queues).
                '-checkmempool=0', '-persistmempool=0']
        with self.run_node_with_connections('TS7: {} chains of length {}. Reject known transactions'.format(num_of_chains, chain_length), 0, args + self.default_args, number_of_connections=1) as (conn, ):
            self.run_scenario3(conn, num_of_chains, chain_length, out, timeout=30)

        # Scenario 8 (TS8): 2K txs, 1K exact duplicates, shuffled input set.
        num_of_chains = 10
        chain_length = 100
        args = ['-txnvalidationasynchrunfreq=0', '-limitancestorcount=100', '-checkmempool=0', '-persistmempool=0']
        with self.run_node_with_connections('TS8: {} chains of length {}. Test duplicated inputs.'.format(num_of_chains, chain_length), 0, args + self.default_args, number_of_connections=1) as (conn, ):
            self.run_scenario4(conn, num_of_chains, chain_length, out, timeout=20)
def create_unsigned_pos_block(self, staking_prevouts, nTime=None, outNValue=10002, signStakeTx=True, bestBlockHash=None, coinStakePrevout=None):
    """Build a proof-of-stake block on top of bestBlockHash (default: current tip).

    staking_prevouts: candidate stake outpoints passed to solve_stake.
    nTime: block time; defaults to now+15 masked to a 16-second boundary.
    outNValue: value (in coins) of each of the two coinstake outputs.
    signStakeTx: if True, sign the coinstake via the node's signrawtransaction.
    coinStakePrevout: stake input to use; defaults to the solved prevoutStake.

    Returns (block, block_sig_key) with an unsigned header, or None if no
    kernel could be solved from staking_prevouts.
    """
    if not nTime:
        current_time = int(time.time()) + 15
        # Mask the low 4 bits — PoS timestamps are aligned to 16s slots here.
        nTime = current_time & 0xfffffff0
    if not bestBlockHash:
        bestBlockHash = self.node.getbestblockhash()
        block_height = self.node.getblockcount()
    else:
        block_height = self.node.getblock(bestBlockHash)['height']
    # Stake modifier of the parent block, needed to solve the kernel.
    parent_block_stake_modifier = int(self.node.getblock(bestBlockHash)['modifier'], 16)
    # NOTE: a dead fetch+deserialize of the full parent block (the parsed
    # result was never used) has been removed here.
    # Empty coinbase, as required for PoS blocks.
    coinbase = create_coinbase(block_height + 1)
    coinbase.vout[0].nValue = 0
    coinbase.vout[0].scriptPubKey = b""
    coinbase.rehash()
    block = create_block(int(bestBlockHash, 16), coinbase, nTime)
    block.hashPrevBlock = int(bestBlockHash, 16)
    if not block.solve_stake(parent_block_stake_modifier, staking_prevouts):
        return None
    # Create a new private key used for block signing.
    block_sig_key = CECKey()
    block_sig_key.set_secretbytes(hash256(struct.pack('<I', 0xffff)))
    pubkey = block_sig_key.get_pubkey()
    scriptPubKey = CScript([pubkey, OP_CHECKSIG]) 
    stake_tx_unsigned = CTransaction()
    if not coinStakePrevout:
        coinStakePrevout = block.prevoutStake
    stake_tx_unsigned.vin.append(CTxIn(coinStakePrevout))
    # First output of a coinstake is empty by convention; two paying outputs follow.
    stake_tx_unsigned.vout.append(CTxOut())
    stake_tx_unsigned.vout.append(CTxOut(int(outNValue * COIN), scriptPubKey))
    stake_tx_unsigned.vout.append(CTxOut(int(outNValue * COIN), scriptPubKey))
    if signStakeTx:
        stake_tx_signed_raw_hex = self.node.signrawtransaction(bytes_to_hex_str(stake_tx_unsigned.serialize()))['hex']
        f = io.BytesIO(hex_str_to_bytes(stake_tx_signed_raw_hex))
        stake_tx_signed = CTransaction()
        stake_tx_signed.deserialize(f)
        block.vtx.append(stake_tx_signed)
    else:
        block.vtx.append(stake_tx_unsigned)
    block.hashMerkleRoot = block.calc_merkle_root()
    return (block, block_sig_key)
def create_block(self, prev_hash, staking_prevouts, height, node_n, s_address, fInvalid=0): api = self.nodes[node_n] # Get current time current_time = int(time.time()) nTime = current_time & 0xfffffff0 # Create coinbase TX coinbase = create_coinbase(height) coinbase.vout[0].nValue = 0 coinbase.vout[0].scriptPubKey = b"" coinbase.nTime = nTime coinbase.rehash() # Create Block with coinbase block = create_block(int(prev_hash, 16), coinbase, nTime) # Find valid kernel hash - Create a new private key used for block signing. if not block.solve_stake(staking_prevouts): raise Exception("Not able to solve for any prev_outpoint") # Create coinstake TX amount, prev_time, prevScript = staking_prevouts[block.prevoutStake] outNValue = int(amount + 250 * COIN) stake_tx_unsigned = CTransaction() stake_tx_unsigned.nTime = block.nTime stake_tx_unsigned.vin.append(CTxIn(block.prevoutStake)) stake_tx_unsigned.vin[0].nSequence = 0xffffffff stake_tx_unsigned.vout.append(CTxOut()) stake_tx_unsigned.vout.append(CTxOut(outNValue, hex_str_to_bytes(prevScript))) if fInvalid == 1: # Create a new private key and get the corresponding public key block_sig_key = CECKey() block_sig_key.set_secretbytes(hash256(pack('<I', 0xffff))) pubkey = block_sig_key.get_pubkey() stake_tx_unsigned.vout[1].scriptPubKey = CScript([pubkey, OP_CHECKSIG]) else: # Export the staking private key to sign the block with it privKey, compressed = wif_to_privkey(api.dumpprivkey(s_address)) block_sig_key = CECKey() block_sig_key.set_compressed(compressed) block_sig_key.set_secretbytes(bytes.fromhex(privKey)) # check the address addy = key_to_p2pkh(bytes_to_hex_str(block_sig_key.get_pubkey()), False, True) assert (addy == s_address) if fInvalid == 2: # add a new output with 100 coins from the pot new_key = CECKey() new_key.set_secretbytes(hash256(pack('<I', 0xffff))) pubkey = new_key.get_pubkey() stake_tx_unsigned.vout.append(CTxOut(100 * COIN, CScript([pubkey, OP_CHECKSIG]))) stake_tx_unsigned.vout[1].nValue = outNValue - 
100 * COIN # Sign coinstake TX and add it to the block stake_tx_signed_raw_hex = api.signrawtransaction(bytes_to_hex_str(stake_tx_unsigned.serialize()))['hex'] stake_tx_signed = CTransaction() stake_tx_signed.deserialize(BytesIO(hex_str_to_bytes(stake_tx_signed_raw_hex))) block.vtx.append(stake_tx_signed) # Get correct MerkleRoot and rehash block block.hashMerkleRoot = block.calc_merkle_root() block.rehash() # sign block with block signing key and return it block.sign_block(block_sig_key) return block
def get_tests(self):
    """Comparison-framework generator testing the Schnorr (Great Wall) activation.

    Drives the node across the activation boundary and checks, before and
    after: mempool acceptance, block validity, ban/no-ban behaviour for
    Schnorr CHECKSIG, Schnorr-in-CHECKMULTISIG and (optionally) fake
    64-byte-DER ECDSA signatures, plus reorg behaviour across the fork.
    """
    self.genesis_hash = int(self.nodes[0].getbestblockhash(), 16)
    self.block_heights[self.genesis_hash] = 0
    spendable_outputs = []

    # shorthand
    block = self.next_block
    node = self.nodes[0]
    node_ban = self.nodes[1]

    # save the current tip so its coinbase can be spent by a later block
    def save_spendable_output():
        spendable_outputs.append(self.tip)

    # get a coinbase that we previously marked as spendable
    def get_spendable_output():
        return PreviousSpendableOutput(spendable_outputs.pop(0).vtx[0], 0)

    # returns a test case that asserts that the current tip was accepted
    def accepted():
        return TestInstance([[self.tip, True]])

    # returns a test case that asserts that the current tip was rejected
    def rejected(reject=None):
        if reject is None:
            return TestInstance([[self.tip, False]])
        else:
            return TestInstance([[self.tip, reject]])

    # move the tip back to a previous block
    def tip(number):
        self.tip = self.blocks[number]

    # Create a new block
    block(0)
    save_spendable_output()
    yield accepted()

    # Now we need that block to mature so we can spend the coinbase.
    test = TestInstance(sync_every_block=False)
    for i in range(199):
        block(5000 + i)
        test.blocks_and_transactions.append([self.tip, True])
        save_spendable_output()
    yield test

    # collect spendable outputs now to avoid cluttering the code later on
    out = []
    for i in range(100):
        out.append(get_spendable_output())

    # Generate a key pair used for the fund/spend transactions below.
    privkeybytes = b"Schnorr!" * 4
    private_key = CECKey()
    private_key.set_secretbytes(privkeybytes)
    # get uncompressed public key serialization
    public_key = private_key.get_pubkey()

    def create_fund_and_spend_tx(spend, multi=False, sig='schnorr'):
        # Build a funding tx paying to a CHECKSIG (or 1-of-1 CHECKMULTISIG)
        # script, and a spending tx signed with the requested signature kind:
        # 'schnorr', 'ecdsa', or raw bytes used verbatim as the signature.
        if multi:
            script = CScript([OP_1, public_key, OP_1, OP_CHECKMULTISIG])
        else:
            script = CScript([public_key, OP_CHECKSIG])
        # Fund transaction
        txfund = create_transaction(spend.tx, spend.n, b'', 50 * COIN, script)
        txfund.rehash()
        # Spend transaction
        txspend = CTransaction()
        txspend.vout.append(CTxOut(50 * COIN - 1000, CScript([OP_TRUE])))
        txspend.vin.append(CTxIn(COutPoint(txfund.sha256, 0), b''))
        # Sign the transaction
        sighashtype = SIGHASH_ALL | SIGHASH_FORKID
        hashbyte = bytes([sighashtype & 0xff])
        sighash = SignatureHashForkId(script, txspend, 0, sighashtype, 50 * COIN)
        if sig == 'schnorr':
            txsig = schnorr.sign(privkeybytes, sighash) + hashbyte
        elif sig == 'ecdsa':
            txsig = private_key.sign(sighash) + hashbyte
        elif isinstance(sig, bytes):
            txsig = sig + hashbyte
        if multi:
            # CHECKMULTISIG consumes an extra (dummy) stack element.
            txspend.vin[0].scriptSig = CScript([b'', txsig])
        else:
            txspend.vin[0].scriptSig = CScript([txsig])
        txspend.rehash()
        return txfund, txspend

    def send_transaction_to_mempool(tx):
        tx_id = node.sendrawtransaction(ToHex(tx))
        assert (tx_id in set(node.getrawmempool()))
        return tx_id

    # Check we are not banned when sending a txn that node_ban rejects.
    def check_for_no_ban_on_rejected_tx(tx, reject_code, reject_reason):
        # Grab the first connection
        p2p = node_ban.p2p
        assert (p2p.state == 'connected')
        # The P2PConnection stores a public counter for each message type
        # and the last received message of each type. We use this counter to
        # identify that we received a new reject message.
        with mininode_lock:
            rejects_count = p2p.message_count['reject']
        # Send the transaction directly. We use a ping for synchronization:
        # if we have been banned, the pong message won't be received, a
        # timeout occurs and the test fails.
        p2p.send_message(msg_tx(tx))
        p2p.sync_with_ping()
        # Check we haven't been disconnected
        assert (p2p.state == 'connected')
        # Check the reject message matches what we expected
        with mininode_lock:
            assert (p2p.message_count['reject'] == rejects_count + 1)
            reject_msg = p2p.last_message['reject']
        assert (reject_msg.code == reject_code and reject_msg.reason == reject_reason and reject_msg.data == tx.sha256)

    # Check we are disconnected when sending a txn that node_ban rejects.
    # (Can't actually get banned, since bitcoind won't ban local peers.)
    def check_for_ban_on_rejected_tx(tx):
        # Take a connection
        p2p = node_ban.p2ps.pop()
        assert (p2p.state == 'connected')
        # make sure we can ping
        p2p.sync_with_ping()
        # send the naughty transaction
        p2p.send_message(msg_tx(tx))
        # if not "banned", this will timeout and raise exception.
        p2p.wait_for_disconnect()

    # Setup fundings
    fundings = []
    fund, schnorrchecksigtx = create_fund_and_spend_tx(out[0])
    fundings.append(fund)
    fund, schnorrmultisigtx = create_fund_and_spend_tx(out[1], multi=True)
    fundings.append(fund)
    fund, ecdsachecksigtx = create_fund_and_spend_tx(out[2], sig='ecdsa')
    fundings.append(fund)
    if fakeDER64:
        fund, DER64checksigtx = create_fund_and_spend_tx(out[5], sig=fakeDER64)
        fundings.append(fund)
        fund, DER64multisigtx = create_fund_and_spend_tx(out[6], multi=True, sig=fakeDER64)
        fundings.append(fund)
    for fund in fundings:
        send_transaction_to_mempool(fund)
    block(1, transactions=fundings)
    yield accepted()

    # we're now set up for the various spends; make sure the other node
    # is set up, too.
    sync_blocks(self.nodes)

    # We are before the upgrade, no Schnorrs get in the mempool.
    assert_raises_rpc_error(-26, RPC_EARLY_SCHNORR_ERROR, node.sendrawtransaction, ToHex(schnorrchecksigtx))
    assert_raises_rpc_error(-26, RPC_SCHNORR_MULTISIG_ERROR, node.sendrawtransaction, ToHex(schnorrmultisigtx))

    # And blocks containing them are rejected as well.
    block(2, transactions=[schnorrchecksigtx])
    yield rejected(RejectResult(16, b'blk-bad-inputs'))
    # Rewind bad block
    tip(1)
    block(3, transactions=[schnorrmultisigtx])
    yield rejected(RejectResult(16, b'blk-bad-inputs'))
    # Rewind bad block
    tip(1)

    # So far we were creating blocks well in advance of activation.
    # Now, start creating blocks that will move mediantime up to near
    # activation.
    bfork = block(5555, nTime=GREAT_WALL_START_TIME - 1)
    yield accepted()
    sync_blocks(self.nodes)

    # Create 5 more blocks with timestamps from GREAT_WALL_START_TIME+0 to +4
    # NOTE(review): this reuses the earlier `test` TestInstance rather than
    # creating a fresh one — presumably intentional; confirm against the
    # comparison framework's handling of already-yielded instances.
    for i in range(5):
        block(5200 + i)
        test.blocks_and_transactions.append([self.tip, True])
    yield test

    # Check we are just before the activation time.
    assert_equal(node.getblockheader(node.getbestblockhash())['mediantime'], GREAT_WALL_START_TIME - 1)

    # We are just before the upgrade, still no Schnorrs get in the mempool,
    assert_raises_rpc_error(-26, RPC_EARLY_SCHNORR_ERROR, node.sendrawtransaction, ToHex(schnorrchecksigtx))
    assert_raises_rpc_error(-26, RPC_SCHNORR_MULTISIG_ERROR, node.sendrawtransaction, ToHex(schnorrmultisigtx))

    # ... nor in blocks.
    block(10, transactions=[schnorrchecksigtx])
    yield rejected(RejectResult(16, b'blk-bad-inputs'))
    # Rewind bad block
    tip(5204)
    block(11, transactions=[schnorrmultisigtx])
    yield rejected(RejectResult(16, b'blk-bad-inputs'))
    # Rewind bad block
    tip(5204)

    # Ensure that sending future-valid schnorr txns is *non-bannable*.
    check_for_no_ban_on_rejected_tx(schnorrchecksigtx, 16, EARLY_SCHNORR_ERROR)
    # Ensure that sending schnorrs in multisig *is* bannable.
    check_for_ban_on_rejected_tx(schnorrmultisigtx)

    if fakeDER64:
        # Throw a couple of "valid" 65-byte ECDSA signatures into the
        # mempool just prior to the activation.
        faked_checksig_tx_id = send_transaction_to_mempool(DER64checksigtx)
        faked_multisig_tx_id = send_transaction_to_mempool(DER64multisigtx)

    # Put a proper ECDSA transaction into the mempool but it won't
    # be mined...
    ecdsa_tx_id = send_transaction_to_mempool(ecdsachecksigtx)

    # Activate the Schnorr!
    forkblock = block(5556)
    yield accepted()

    # We have exactly hit the activation time.
    assert_equal(node.getblockheader(node.getbestblockhash())['mediantime'], GREAT_WALL_START_TIME)

    # Make sure ECDSA is still in -- we don't want to lose uninvolved txns
    # when the upgrade happens.
    assert ecdsa_tx_id in set(node.getrawmempool())

    if fakeDER64:
        # The 64-byte DER sigs must be ejected.
        assert faked_checksig_tx_id not in set(node.getrawmempool())
        assert faked_multisig_tx_id not in set(node.getrawmempool())

        # If we try to re-add them, they fail with non-banning errors.
        # In CHECKSIG it's invalid Schnorr and hence NULLFAIL.
        assert_raises_rpc_error(-26, RPC_LATE_DER64_CHECKSIG_ERROR, node.sendrawtransaction, ToHex(DER64checksigtx))
        # In CHECKMULTISIG it's invalid length and hence BAD_LENGTH.
        assert_raises_rpc_error(-26, RPC_LATE_DER64_CHECKMULTISIG_ERROR, node.sendrawtransaction, ToHex(DER64multisigtx))

        # And they can't be mined either...
        block(14, transactions=[DER64checksigtx])
        yield rejected(RejectResult(16, b'blk-bad-inputs'))
        # Rewind bad block
        tip(5556)
        block(15, transactions=[DER64multisigtx])
        yield rejected(RejectResult(16, b'blk-bad-inputs'))
        # Rewind bad block
        tip(5556)

        # Ensure that sending past-valid DER64 txns is *non-bannable*.
        check_for_no_ban_on_rejected_tx(DER64checksigtx, 16, LATE_DER64_CHECKSIG_ERROR)
        check_for_no_ban_on_rejected_tx(DER64multisigtx, 16, LATE_DER64_CHECKMULTISIG_ERROR)

    # The multisig throws a different error now
    assert_raises_rpc_error(-26, RPC_SCHNORR_MULTISIG_ERROR, node.sendrawtransaction, ToHex(schnorrmultisigtx))

    # And it still can't be mined
    block(16, transactions=[schnorrmultisigtx])
    yield rejected(RejectResult(16, b'blk-bad-inputs'))
    # Rewind bad block
    tip(5556)

    # Sending schnorrs in multisig is STILL bannable.
    check_for_ban_on_rejected_tx(schnorrmultisigtx)

    # The Schnorr CHECKSIG is now valid
    schnorr_tx_id = send_transaction_to_mempool(schnorrchecksigtx)
    # It can also be mined
    postforkblock = block(21, transactions=[schnorrchecksigtx, ecdsachecksigtx])
    yield accepted()
    # (we mined the ecdsa tx too)
    assert schnorr_tx_id not in set(node.getrawmempool())
    assert ecdsa_tx_id not in set(node.getrawmempool())

    # Ok, now we check if a rewind works properly across the activation.
    # First, rewind the normal post-fork block.
    node.invalidateblock(postforkblock.hash)
    # txes popped back into mempool
    assert schnorr_tx_id in set(node.getrawmempool())
    assert ecdsa_tx_id in set(node.getrawmempool())

    # Deactivating upgrade.
    node.invalidateblock(forkblock.hash)
    # This should kick out the Schnorr sig, but not the valid ECDSA sig.
    assert schnorr_tx_id not in set(node.getrawmempool())
    assert ecdsa_tx_id in set(node.getrawmempool())

    # Check that we also do it properly on deeper rewind.
    node.reconsiderblock(forkblock.hash)
    node.reconsiderblock(postforkblock.hash)
    node.invalidateblock(forkblock.hash)
    assert schnorr_tx_id not in set(node.getrawmempool())
    assert ecdsa_tx_id in set(node.getrawmempool())

    # Try an actual reorg (deactivates then activates upgrade in one step)
    node.reconsiderblock(forkblock.hash)
    node.reconsiderblock(postforkblock.hash)
    tip(5204)
    test = TestInstance(sync_every_block=False)
    for i in range(3):
        block(5900 + i)
        test.blocks_and_transactions.append([self.tip, True])
    # Perform the reorg
    yield test
    # reorg finishes after the fork
    assert_equal(node.getblockheader(node.getbestblockhash())['mediantime'], GREAT_WALL_START_TIME + 2)
    # Schnorr didn't get lost!
    assert schnorr_tx_id in set(node.getrawmempool())
    assert ecdsa_tx_id in set(node.getrawmempool())
def run_test(self):
    """Exercise -assumevalid: build a chain containing one invalid-signature
    block (block 102), then check that a node without assumevalid rejects it
    while nodes started with -assumevalid pointing at it accept the chain,
    provided the assumed-valid block is buried deeply enough in the headers
    they have seen.
    """
    # Connect to node0
    node0 = BaseNode()
    connections = []
    connections.append(NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], node0))
    node0.add_connection(connections[0])

    NetworkThread().start()  # Start up network handling in another thread
    node0.wait_for_verack()

    # Build the blockchain
    self.tip = int(self.nodes[0].getbestblockhash(), 16)
    self.block_time = self.nodes[0].getblock(self.nodes[0].getbestblockhash())['time'] + 1

    self.blocks = []

    # Get a pubkey for the coinbase TXO
    coinbase_key = CECKey()
    coinbase_key.set_secretbytes(b"horsebattery")
    coinbase_pubkey = coinbase_key.get_pubkey()

    # Create the first block with a coinbase output to our key
    height = 1
    block = create_block(self.tip, create_coinbase(height, coinbase_pubkey), self.block_time)
    self.blocks.append(block)
    self.block_time += 1
    block.solve()
    # Save the coinbase for later
    self.block1 = block
    self.tip = block.sha256
    height += 1

    # Bury the block 100 deep so the coinbase output is spendable
    for i in range(100):
        block = create_block(self.tip, create_coinbase(height), self.block_time)
        block.solve()
        self.blocks.append(block)
        self.tip = block.sha256
        self.block_time += 1
        height += 1

    # Create a transaction spending the coinbase output with an invalid
    # (null) signature
    tx = CTransaction()
    tx.vin.append(CTxIn(COutPoint(self.block1.vtx[0].sha256, 0), scriptSig=b""))
    tx.vout.append(CTxOut(49 * 100000000, CScript([OP_TRUE])))
    tx.calc_sha256()

    # Block 102: contains the invalid spend — this is the block that
    # -assumevalid will point at.
    block102 = create_block(self.tip, create_coinbase(height), self.block_time)
    self.block_time += 1
    block102.vtx.extend([tx])
    block102.hashMerkleRoot = block102.calc_merkle_root()
    block102.rehash()
    block102.solve()
    self.blocks.append(block102)
    self.tip = block102.sha256
    self.block_time += 1
    height += 1

    # Bury the assumed valid block 2100 deep
    for i in range(2100):
        block = create_block(self.tip, create_coinbase(height), self.block_time)
        block.nVersion = 4
        block.solve()
        self.blocks.append(block)
        self.tip = block.sha256
        self.block_time += 1
        height += 1

    # Start node1 and node2 with assumevalid so they accept a block with a
    # bad signature.
    self.nodes.append(start_node(1, self.options.tmpdir, ["-assumevalid=" + hex(block102.sha256)]))
    node1 = BaseNode()  # connects to node1
    connections.append(NodeConn('127.0.0.1', p2p_port(1), self.nodes[1], node1))
    node1.add_connection(connections[1])
    node1.wait_for_verack()

    self.nodes.append(start_node(2, self.options.tmpdir, ["-assumevalid=" + hex(block102.sha256)]))
    node2 = BaseNode()  # connects to node2
    connections.append(NodeConn('127.0.0.1', p2p_port(2), self.nodes[2], node2))
    node2.add_connection(connections[2])
    node2.wait_for_verack()

    # send header lists to all three nodes
    # node2 only ever sees the first 200 headers, so block102 is not buried
    # deeply enough for its assumevalid to apply.
    node0.send_header_for_blocks(self.blocks[0:2000])
    node0.send_header_for_blocks(self.blocks[2000:])
    node1.send_header_for_blocks(self.blocks[0:2000])
    node1.send_header_for_blocks(self.blocks[2000:])
    node2.send_header_for_blocks(self.blocks[0:200])

    # Send 102 blocks to node0. Block 102 will be rejected.
    for i in range(101):
        node0.send_message(msg_block(self.blocks[i]))
    node0.sync_with_ping()  # make sure the most recent block is synced
    node0.send_message(msg_block(self.blocks[101]))
    assert_equal(self.nodes[0].getblock(self.nodes[0].getbestblockhash())['height'], 101)

    # Send 3102 blocks to node1. All blocks will be accepted.
    # NOTE(review): loop actually sends 2202 blocks despite the comment.
    for i in range(2202):
        node1.send_message(msg_block(self.blocks[i]))
    node1.sync_with_ping()  # make sure the most recent block is synced
    assert_equal(self.nodes[1].getblock(self.nodes[1].getbestblockhash())['height'], 2202)

    # Send 102 blocks to node2. Block 102 will be rejected.
    for i in range(101):
        node2.send_message(msg_block(self.blocks[i]))
    node2.sync_with_ping()  # make sure the most recent block is synced
    node2.send_message(msg_block(self.blocks[101]))
    assert_equal(self.nodes[2].getblock(self.nodes[2].getbestblockhash())['height'], 101)
class FullBlockTest(ComparisonTestFramework):
    """Comprehensive block-acceptance test driven through the comparison tool.

    Can either run this test as 1 node with expected answers, or two and
    compare them. Change the "outcome" variable from each TestInstance
    object to only do the comparison.
    """

    def __init__(self):
        # Fix: initialize the base ComparisonTestFramework first, matching
        # the other FullBlockTest variants in this repository. Without it,
        # base-class setup was silently skipped.
        super().__init__()
        self.num_nodes = 1
        self.block_heights = {}        # block sha256 -> chain height
        self.coinbase_key = CECKey()
        self.coinbase_key.set_secretbytes(b"horsebattery")
        self.coinbase_pubkey = self.coinbase_key.get_pubkey()
        self.block_time = int(time.time())+1
        self.tip = None                # CBlock we consider the current tip
        self.blocks = {}               # test block number -> CBlock

    def run_test(self):
        """Wire the comparison TestManager to the node(s) and run get_tests()."""
        test = TestManager(self, self.options.tmpdir)
        test.add_all_connections(self.nodes)
        NetworkThread().start()  # Start up network handling in another thread
        sync_masternodes(self.nodes)
        test.run()

    def add_transactions_to_block(self, block, tx_list):
        """Append tx_list to block.vtx, refresh the merkle root, and return block."""
        [ tx.rehash() for tx in tx_list ]
        block.vtx.extend(tx_list)
        block.hashMerkleRoot = block.calc_merkle_root()
        block.rehash()
        return block

    # Create a block on top of self.tip, and advance self.tip to point to the new block
    # if spend is specified, then 1 satoshi will be spent from that to an anyone-can-spend output,
    # and rest will go to fees.
    def next_block(self, number, spend=None, additional_coinbase_value=0, script=None):
        if self.tip == None:
            base_block_hash = self.genesis_hash
        else:
            base_block_hash = self.tip.sha256
        # First create the coinbase
        height = self.block_heights[base_block_hash] + 1
        coinbase = create_coinbase(height, self.coinbase_pubkey)
        coinbase.vout[0].nValue += additional_coinbase_value
        if (spend != None):
            coinbase.vout[0].nValue += spend.tx.vout[spend.n].nValue - 1 # all but one satoshi to fees
        coinbase.rehash()
        block = create_block(base_block_hash, coinbase, self.block_time)
        if (spend != None):
            tx = CTransaction()
            tx.vin.append(CTxIn(COutPoint(spend.tx.sha256, spend.n), b"", 0xffffffff))  # no signature yet
            # This copies the java comparison tool testing behavior: the first
            # txout has a garbage scriptPubKey, "to make sure we're not
            # pre-verifying too much" (?)
            tx.vout.append(CTxOut(0, CScript([random.randint(0,255), height & 255])))
            if script == None:
                tx.vout.append(CTxOut(1, CScript([OP_TRUE])))
            else:
                tx.vout.append(CTxOut(1, script))
            # Now sign it if necessary
            scriptSig = b""
            scriptPubKey = bytearray(spend.tx.vout[spend.n].scriptPubKey)
            if (scriptPubKey[0] == OP_TRUE):  # looks like an anyone-can-spend
                scriptSig = CScript([OP_TRUE])
            else:
                # We have to actually sign it
                (sighash, err) = SignatureHash(spend.tx.vout[spend.n].scriptPubKey, tx, 0, SIGHASH_ALL)
                scriptSig = CScript([self.coinbase_key.sign(sighash) + bytes(bytearray([SIGHASH_ALL]))])
            tx.vin[0].scriptSig = scriptSig
            # Now add the transaction to the block
            block = self.add_transactions_to_block(block, [tx])
        block.solve()
        self.tip = block
        self.block_heights[block.sha256] = height
        self.block_time += 1
        assert number not in self.blocks
        self.blocks[number] = block
        return block

    def get_tests(self):
        """Generator of TestInstances: builds blocks/forks and yields the
        expected accept/reject outcome for each."""
        self.genesis_hash = int(self.nodes[0].getbestblockhash(), 16)
        self.block_heights[self.genesis_hash] = 0
        spendable_outputs = []

        # save the current tip so it can be spent by a later block
        def save_spendable_output():
            spendable_outputs.append(self.tip)

        # get an output that we previously marked as spendable
        def get_spendable_output():
            return PreviousSpendableOutput(spendable_outputs.pop(0).vtx[0], 0)

        # returns a test case that asserts that the current tip was accepted
        def accepted():
            return TestInstance([[self.tip, True]])

        # returns a test case that asserts that the current tip was rejected
        def rejected(reject = None):
            if reject is None:
                return TestInstance([[self.tip, False]])
            else:
                return TestInstance([[self.tip, reject]])

        # move the tip back to a previous block
        def tip(number):
            self.tip = self.blocks[number]

        # add transactions to a block produced by next_block
        def update_block(block_number, new_transactions):
            block = self.blocks[block_number]
            old_hash = block.sha256
            self.add_transactions_to_block(block, new_transactions)
            block.solve()
            # Update the internal state just like in next_block
            self.tip = block
            self.block_heights[block.sha256] = self.block_heights[old_hash]
            del self.block_heights[old_hash]
            self.blocks[block_number] = block
            return block

        # creates a new block and advances the tip to that block
        block = self.next_block

        # Create a new block
        block(0)
        save_spendable_output()
        yield accepted()

        # Now we need that block to mature so we can spend the coinbase.
        test = TestInstance(sync_every_block=False)
        for i in range(99):
            block(1000 + i)
            test.blocks_and_transactions.append([self.tip, True])
            save_spendable_output()
        yield test

        # Start by building a couple of blocks on top (which output is spent is
        # in parentheses):
        #     genesis -> b1 (0) -> b2 (1)
        out0 = get_spendable_output()
        block(1, spend=out0)
        save_spendable_output()
        yield accepted()

        out1 = get_spendable_output()
        b2 = block(2, spend=out1)
        yield accepted()

        # so fork like this:
        #
        #     genesis -> b1 (0) -> b2 (1)
        #                      \-> b3 (1)
        #
        # Nothing should happen at this point. We saw b2 first so it takes priority.
        tip(1)
        b3 = block(3, spend=out1)
        txout_b3 = PreviousSpendableOutput(b3.vtx[1], 1)
        yield rejected()

        # Now we add another block to make the alternative chain longer.
        #
        #     genesis -> b1 (0) -> b2 (1)
        #                      \-> b3 (1) -> b4 (2)
        out2 = get_spendable_output()
        block(4, spend=out2)
        yield accepted()

        # ... and back to the first chain.
        #     genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3)
        #                      \-> b3 (1) -> b4 (2)
        tip(2)
        block(5, spend=out2)
        save_spendable_output()
        yield rejected()

        out3 = get_spendable_output()
        block(6, spend=out3)
        yield accepted()

        # Try to create a fork that double-spends
        #     genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3)
        #                                          \-> b7 (2) -> b8 (4)
        #                      \-> b3 (1) -> b4 (2)
        tip(5)
        block(7, spend=out2)
        yield rejected()

        out4 = get_spendable_output()
        block(8, spend=out4)
        yield rejected()

        # Try to create a block that has too much fee
        #     genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3)
        #                                                 \-> b9 (4)
        #                      \-> b3 (1) -> b4 (2)
        tip(6)
        block(9, spend=out4, additional_coinbase_value=1)
        yield rejected(RejectResult(16, b'bad-cb-amount'))

        # Create a fork that ends in a block with too much fee (the one that causes the reorg)
        #     genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3)
        #                                          \-> b10 (3) -> b11 (4)
        #                      \-> b3 (1) -> b4 (2)
        tip(5)
        block(10, spend=out3)
        yield rejected()

        block(11, spend=out4, additional_coinbase_value=1)
        yield rejected(RejectResult(16, b'bad-cb-amount'))

        # Try again, but with a valid fork first
        #     genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3)
        #                                          \-> b12 (3) -> b13 (4) -> b14 (5)
        #                                              (b12 added last)
        #                      \-> b3 (1) -> b4 (2)
        tip(5)
        b12 = block(12, spend=out3)
        save_spendable_output()
        #yield TestInstance([[b12, False]])
        b13 = block(13, spend=out4)
        # Deliver the block header for b12, and the block b13.
        # b13 should be accepted but the tip won't advance until b12 is delivered.
        yield TestInstance([[CBlockHeader(b12), None], [b13, False]])

        save_spendable_output()
        out5 = get_spendable_output()
        # b14 is invalid, but the node won't know that until it tries to connect
        # Tip still can't advance because b12 is missing
        block(14, spend=out5, additional_coinbase_value=1)
        yield rejected()

        yield TestInstance([[b12, True, b13.sha256]]) # New tip should be b13.

        # Add a block with MAX_BLOCK_SIGOPS and one with one more sigop
        #     genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3)
        #                                          \-> b12 (3) -> b13 (4) -> b15 (5) -> b16 (6)
        #                      \-> b3 (1) -> b4 (2)

        # Test that a block with a lot of checksigs is okay
        lots_of_checksigs = CScript([OP_CHECKSIG] * (1000000 // 50 - 1))
        tip(13)
        block(15, spend=out5, script=lots_of_checksigs)
        yield accepted()

        # Test that a block with too many checksigs is rejected
        out6 = get_spendable_output()
        too_many_checksigs = CScript([OP_CHECKSIG] * (1000000 // 50))
        block(16, spend=out6, script=too_many_checksigs)
        yield rejected(RejectResult(16, b'bad-blk-sigops'))

        # Attempt to spend a transaction created on a different fork
        #     genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3)
        #                                          \-> b12 (3) -> b13 (4) -> b15 (5) -> b17 (b3.vtx[1])
        #                      \-> b3 (1) -> b4 (2)
        tip(15)
        block(17, spend=txout_b3)
        yield rejected(RejectResult(16, b'bad-txns-inputs-missingorspent'))

        # Attempt to spend a transaction created on a different fork (on a fork this time)
        #     genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3)
        #                                          \-> b12 (3) -> b13 (4) -> b15 (5)
        #                                                                \-> b18 (b3.vtx[1]) -> b19 (6)
        #                      \-> b3 (1) -> b4 (2)
        tip(13)
        block(18, spend=txout_b3)
        yield rejected()

        block(19, spend=out6)
        yield rejected()

        # Attempt to spend a coinbase at depth too low
        #     genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3)
        #                                          \-> b12 (3) -> b13 (4) -> b15 (5) -> b20 (7)
        #                      \-> b3 (1) -> b4 (2)
        tip(15)
        out7 = get_spendable_output()
        block(20, spend=out7)
        yield rejected(RejectResult(16, b'bad-txns-premature-spend-of-coinbase'))

        # Attempt to spend a coinbase at depth too low (on a fork this time)
        #     genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3)
        #                                          \-> b12 (3) -> b13 (4) -> b15 (5)
        #                                                                \-> b21 (6) -> b22 (5)
        #                      \-> b3 (1) -> b4 (2)
        tip(13)
        block(21, spend=out6)
        yield rejected()

        block(22, spend=out5)
        yield rejected()

        # Create a block on either side of MAX_BLOCK_SIZE and make sure its accepted/rejected
        #     genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3)
        #                                          \-> b12 (3) -> b13 (4) -> b15 (5) -> b23 (6)
        #                                                                           \-> b24 (6) -> b25 (7)
        #                      \-> b3 (1) -> b4 (2)
        tip(15)
        b23 = block(23, spend=out6)
        old_hash = b23.sha256
        tx = CTransaction()
        script_length = MAX_BLOCK_SIZE - len(b23.serialize()) - 69
        script_output = CScript([b'\x00' * script_length])
        tx.vout.append(CTxOut(0, script_output))
        tx.vin.append(CTxIn(COutPoint(b23.vtx[1].sha256, 1)))
        b23 = update_block(23, [tx])
        # Make sure the math above worked out to produce a max-sized block
        assert_equal(len(b23.serialize()), MAX_BLOCK_SIZE)
        yield accepted()

        # Make the next block one byte bigger and check that it fails
        tip(15)
        b24 = block(24, spend=out6)
        script_length = MAX_BLOCK_SIZE - len(b24.serialize()) - 69
        script_output = CScript([b'\x00' * (script_length+1)])
        tx.vout = [CTxOut(0, script_output)]
        b24 = update_block(24, [tx])
        assert_equal(len(b24.serialize()), MAX_BLOCK_SIZE+1)
        yield rejected(RejectResult(16, b'bad-blk-length'))

        b25 = block(25, spend=out7)
        yield rejected()

        # Create blocks with a coinbase input script size out of range
        #     genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3)
        #                                          \-> b12 (3) -> b13 (4) -> b15 (5) -> b23 (6) -> b30 (7)
        #                                                                           \-> ... (6) -> ... (7)
        #                      \-> b3 (1) -> b4 (2)
        tip(15)
        b26 = block(26, spend=out6)
        b26.vtx[0].vin[0].scriptSig = b'\x00'
        b26.vtx[0].rehash()
        # update_block causes the merkle root to get updated, even with no new
        # transactions, and updates the required state.
        b26 = update_block(26, [])
        yield rejected(RejectResult(16, b'bad-cb-length'))

        # Extend the b26 chain to make sure bitcoind isn't accepting b26
        b27 = block(27, spend=out7)
        yield rejected()

        # Now try a too-large-coinbase script
        tip(15)
        b28 = block(28, spend=out6)
        b28.vtx[0].vin[0].scriptSig = b'\x00' * 101
        b28.vtx[0].rehash()
        b28 = update_block(28, [])
        yield rejected(RejectResult(16, b'bad-cb-length'))

        # Extend the b28 chain to make sure bitcoind isn't accepting b28
        b29 = block(29, spend=out7)
        # TODO: Should get a reject message back with "bad-prevblk", except
        # there's a bug that prevents this from being detected.  Just note
        # failure for now, and add the reject result later.
        yield rejected()

        # b30 has a max-sized coinbase scriptSig.
        tip(23)
        b30 = block(30)
        b30.vtx[0].vin[0].scriptSig = b'\x00' * 100
        b30.vtx[0].rehash()
        b30 = update_block(30, [])
        yield accepted()
def run_test(self): node = self.nodes[0] # Generate 6 keys. rawkeys = [] pubkeys = [] for i in range(6): raw_key = CECKey() raw_key.set_secretbytes(('privkey%d' % i).encode('ascii')) rawkeys.append(raw_key) pubkeys = [CPubKey(key.get_pubkey()) for key in rawkeys] # Create a 4-of-6 multi-sig wallet with CLTV. height = 210 redeem_script = CScript( [CScriptNum(height), OP_CHECKLOCKTIMEVERIFY, OP_DROP] + # CLTV (lock_time >= 210) [OP_4] + pubkeys + [OP_6, OP_CHECKMULTISIG]) # multi-sig hex_redeem_script = bytes_to_hex_str(redeem_script) p2sh_address = script_to_p2sh(redeem_script, main=False) # Send 1 coin to the mult-sig wallet. txid = node.sendtoaddress(p2sh_address, 1.0) raw_tx = node.getrawtransaction(txid, True) try: node.importaddress(hex_redeem_script, 'cltv', True, True) except Exception as err: pass assert_equal(sig(node.getreceivedbyaddress(p2sh_address, 0) - Decimal(1.0)), 0) # Mine one block to confirm the transaction. node.generate(1) # block 201 assert_equal(sig(node.getreceivedbyaddress(p2sh_address, 1) - Decimal(1.0)), 0) # Try to spend the coin. addr_to = node.getnewaddress('') # (1) Find the UTXO for vout in raw_tx['vout']: if vout['scriptPubKey']['addresses'] == [p2sh_address]: vout_n = vout['n'] hex_script_pubkey = raw_tx['vout'][vout_n]['scriptPubKey']['hex'] value = raw_tx['vout'][vout_n]['value'] # (2) Create a tx inputs = [{ "txid": txid, "vout": vout_n, "scriptPubKey": hex_script_pubkey, "redeemScript": hex_redeem_script, "amount": value, }] outputs = {addr_to: 0.999} lock_time = height hex_spend_raw_tx = node.createrawtransaction(inputs, outputs, lock_time) hex_funding_raw_tx = node.getrawtransaction(txid, False) # (3) Try to sign the spending tx. tx0 = CTransaction() tx0.deserialize(io.BytesIO(hex_str_to_bytes(hex_funding_raw_tx))) tx1 = CTransaction() tx1.deserialize(io.BytesIO(hex_str_to_bytes(hex_spend_raw_tx))) self.sign_tx(tx1, tx0, vout_n, redeem_script, 0, rawkeys[:4]) # Sign with key[0:4] # Mine some blocks to pass the lock time. 
node.generate(10) # Spend the CLTV multi-sig coins. raw_tx1 = tx1.serialize() hex_raw_tx1 = bytes_to_hex_str(raw_tx1) node.sendrawtransaction(hex_raw_tx1) # Check the tx is accepted by mempool but not confirmed. assert_equal(sig(node.getreceivedbyaddress(addr_to, 0) - Decimal(0.999)), 0) assert_equal(sig(node.getreceivedbyaddress(addr_to, 1)), 0) # Mine a block to confirm the tx. node.generate(1) assert_equal(sig(node.getreceivedbyaddress(addr_to, 1) - Decimal(0.999)), 0)
class FullBlockTest(ComparisonTestFramework):
    # Big-block (UAHF) acceptance test driven through the comparison tool:
    # activates the hard fork via mocktime/MTP, then exercises block-size
    # limits, per-block and per-tx sigop limits, P2SH sigop accounting, and
    # compact-block relay of >1MB blocks.
    #
    # Can either run this test as 1 node with expected answers, or two and compare them.
    # Change the "outcome" variable from each TestInstance object to only do
    # the comparison.

    def __init__(self):
        super().__init__()
        # Node's -excessiveblocksize; also used as the largest accepted size.
        self.excessive_block_size = 16 * ONE_MEGABYTE
        self.num_nodes = 1
        self.block_heights = {}   # block sha256 -> chain height
        self.coinbase_key = CECKey()
        self.coinbase_key.set_secretbytes(b"fatstacks")
        self.coinbase_pubkey = self.coinbase_key.get_pubkey()
        self.tip = None           # CBlock considered the current tip
        self.blocks = {}          # test block number -> CBlock

    def setup_network(self):
        """Start one node with relaxed mempool limits, the UAHF start time,
        and the excessive block size configured above."""
        self.extra_args = [['-debug',
                            '-norelaypriority',
                            '-whitelist=127.0.0.1',
                            '-limitancestorcount=9999',
                            '-limitancestorsize=9999',
                            '-limitdescendantcount=9999',
                            '-limitdescendantsize=9999',
                            '-maxmempool=999',
                            "-uahfstarttime=%d" % UAHF_START_TIME,
                            "-excessiveblocksize=%d" % self.excessive_block_size]]
        self.nodes = start_nodes(self.num_nodes, self.options.tmpdir,
                                 self.extra_args,
                                 binary=[self.options.testbinary])

    def add_options(self, parser):
        super().add_options(parser)
        parser.add_option("--runbarelyexpensive", dest="runbarelyexpensive", default=True)

    def run_test(self):
        self.test = TestManager(self, self.options.tmpdir)
        self.test.add_all_connections(self.nodes)
        # Start up network handling in another thread
        NetworkThread().start()
        # Set the blocksize to 2MB as initial condition
        self.nodes[0].setexcessiveblock(self.excessive_block_size)
        self.nodes[0].setmocktime(UAHF_START_TIME)
        self.test.run()

    def add_transactions_to_block(self, block, tx_list):
        # NOTE: unlike other variants in this file, this does NOT refresh
        # the merkle root; callers do that themselves.
        [tx.rehash() for tx in tx_list]
        block.vtx.extend(tx_list)

    # this is a little handier to use than the version in blocktools.py
    def create_tx(self, spend_tx, n, value, script=CScript([OP_TRUE])):
        tx = create_transaction(spend_tx, n, b"", value, script)
        return tx

    # sign a transaction, using the key we know about
    # this signs input 0 in tx, which is assumed to be spending output n in
    # spend_tx
    def sign_tx(self, tx, spend_tx, n):
        scriptPubKey = bytearray(spend_tx.vout[n].scriptPubKey)
        if (scriptPubKey[0] == OP_TRUE):  # an anyone-can-spend
            tx.vin[0].scriptSig = CScript()
            return
        # Post-fork signature: SIGHASH_FORKID commits to the spent amount.
        sighash = SignatureHashForkId(spend_tx.vout[n].scriptPubKey, tx, 0,
                                      SIGHASH_ALL | SIGHASH_FORKID,
                                      spend_tx.vout[n].nValue)
        tx.vin[0].scriptSig = CScript([
            self.coinbase_key.sign(sighash) +
            bytes(bytearray([SIGHASH_ALL | SIGHASH_FORKID]))])

    def create_and_sign_transaction(self, spend_tx, n, value,
                                    script=CScript([OP_TRUE])):
        tx = self.create_tx(spend_tx, n, value, script)
        self.sign_tx(tx, spend_tx, n)
        tx.rehash()
        return tx

    def next_block(self, number, spend=None, additional_coinbase_value=0,
                   script=None, extra_sigops=0, block_size=0, solve=True):
        """
        Create a block on top of self.tip, and advance self.tip to point to
        the new block.

        If spend is specified, then 1 satoshi will be spent from that to an
        anyone-can-spend output, and rest will go to fees.

        If block_size > 0, the block is padded with chained filler
        transactions to exactly block_size bytes, distributing extra_sigops
        CHECKSIGs among them.  If solve is False the returned block has no
        valid proof of work yet.
        """
        if self.tip == None:
            base_block_hash = self.genesis_hash
            block_time = int(time.time()) + 1
        else:
            base_block_hash = self.tip.sha256
            block_time = self.tip.nTime + 1
        # First create the coinbase
        height = self.block_heights[base_block_hash] + 1
        coinbase = create_coinbase(height, self.coinbase_pubkey)
        coinbase.vout[0].nValue += additional_coinbase_value
        if (spend != None):
            coinbase.vout[0].nValue += spend.tx.vout[
                spend.n].nValue - 1  # all but one satoshi to fees
        coinbase.rehash()
        block = create_block(base_block_hash, coinbase, block_time)
        spendable_output = None
        if (spend != None):
            tx = CTransaction()
            # no signature yet
            tx.vin.append(
                CTxIn(COutPoint(spend.tx.sha256, spend.n), b"", 0xffffffff))
            # We put some random data into the first transaction of the chain
            # to randomize ids
            tx.vout.append(
                CTxOut(0, CScript([random.randint(0, 255), OP_DROP, OP_TRUE])))
            if script == None:
                tx.vout.append(CTxOut(1, CScript([OP_TRUE])))
            else:
                tx.vout.append(CTxOut(1, script))
            spendable_output = PreviousSpendableOutput(tx, 0)
            # Now sign it if necessary
            scriptSig = b""
            scriptPubKey = bytearray(spend.tx.vout[spend.n].scriptPubKey)
            if (scriptPubKey[0] == OP_TRUE):  # looks like an anyone-can-spend
                scriptSig = CScript([OP_TRUE])
            else:
                # We have to actually sign it
                sighash = SignatureHashForkId(
                    spend.tx.vout[spend.n].scriptPubKey, tx, 0,
                    SIGHASH_ALL | SIGHASH_FORKID,
                    spend.tx.vout[spend.n].nValue)
                scriptSig = CScript([
                    self.coinbase_key.sign(sighash) +
                    bytes(bytearray([SIGHASH_ALL | SIGHASH_FORKID]))])
            tx.vin[0].scriptSig = scriptSig
            # Now add the transaction to the block
            self.add_transactions_to_block(block, [tx])
            block.hashMerkleRoot = block.calc_merkle_root()
        if spendable_output != None and block_size > 0:
            while len(block.serialize()) < block_size:
                tx = CTransaction()
                # 79 bytes: approximate per-tx overhead of the filler tx
                # around its padding script — TODO confirm against the
                # serialization format if this ever drifts.
                script_length = block_size - len(block.serialize()) - 79
                if script_length > 510000:
                    script_length = 500000
                tx_sigops = min(extra_sigops, script_length,
                                MAX_TX_SIGOPS_COUNT)
                extra_sigops -= tx_sigops
                script_pad_len = script_length - tx_sigops
                script_output = CScript([b'\x00' * script_pad_len] +
                                        [OP_CHECKSIG] * tx_sigops)
                tx.vout.append(CTxOut(0, CScript([OP_TRUE])))
                tx.vout.append(CTxOut(0, script_output))
                tx.vin.append(
                    CTxIn(COutPoint(spendable_output.tx.sha256,
                                    spendable_output.n)))
                spendable_output = PreviousSpendableOutput(tx, 0)
                self.add_transactions_to_block(block, [tx])
            block.hashMerkleRoot = block.calc_merkle_root()
            # Make sure the math above worked out to produce the correct block size
            # (the math will fail if there are too many transactions in the block)
            assert_equal(len(block.serialize()), block_size)
            # Make sure all the requested sigops have been included
            assert_equal(extra_sigops, 0)
        if solve:
            block.solve()
        self.tip = block
        self.block_heights[block.sha256] = height
        assert number not in self.blocks
        self.blocks[number] = block
        return block

    def get_tests(self):
        """Generator of TestInstances covering fork activation, block-size
        limits, sigop limits (plain and P2SH), and compact-block relay."""
        self.genesis_hash = int(self.nodes[0].getbestblockhash(), 16)
        self.block_heights[self.genesis_hash] = 0
        spendable_outputs = []

        # save the current tip so it can be spent by a later block
        def save_spendable_output():
            spendable_outputs.append(self.tip)

        # get an output that we previously marked as spendable
        def get_spendable_output():
            return PreviousSpendableOutput(spendable_outputs.pop(0).vtx[0], 0)

        # returns a test case that asserts that the current tip was accepted
        def accepted():
            return TestInstance([[self.tip, True]])

        # returns a test case that asserts that the current tip was rejected
        def rejected(reject=None):
            if reject is None:
                return TestInstance([[self.tip, False]])
            else:
                return TestInstance([[self.tip, reject]])

        # move the tip back to a previous block
        def tip(number):
            self.tip = self.blocks[number]

        # adds transactions to the block and updates state
        def update_block(block_number, new_transactions):
            block = self.blocks[block_number]
            self.add_transactions_to_block(block, new_transactions)
            old_sha256 = block.sha256
            block.hashMerkleRoot = block.calc_merkle_root()
            block.solve()
            # Update the internal state just like in next_block
            self.tip = block
            if block.sha256 != old_sha256:
                self.block_heights[
                    block.sha256] = self.block_heights[old_sha256]
                del self.block_heights[old_sha256]
            self.blocks[block_number] = block
            return block

        # shorthand for functions
        block = self.next_block

        # Create a new block
        block(0)
        save_spendable_output()
        yield accepted()

        # Now we need that block to mature so we can spend the coinbase.
        test = TestInstance(sync_every_block=False)
        for i in range(99):
            block(5000 + i)
            test.blocks_and_transactions.append([self.tip, True])
            save_spendable_output()
        yield test

        # In order to trigger the HF, we need one block past activation time
        bfork = block(5555)
        bfork.nTime = UAHF_START_TIME
        update_block(5555, [])
        save_spendable_output()
        yield accepted()

        # Then we pile 5 blocks to move MTP forward and trigger the HF
        for i in range(5):
            block(5100 + i)
            test.blocks_and_transactions.append([self.tip, True])
            save_spendable_output()
        yield test

        # Create a new block and activate the fork, the block needs
        # to be > 1MB . For more specific tests about the fork activation,
        # check abc-p2p-activation.py
        block(5556, spend=get_spendable_output(),
              block_size=LEGACY_MAX_BLOCK_SIZE + 1)
        yield accepted()

        # collect spendable outputs now to avoid cluttering the code later on
        out = []
        for i in range(100):
            out.append(get_spendable_output())

        # Let's build some blocks and test them.
        for i in range(16):
            n = i + 1
            block(n, spend=out[i], block_size=n * ONE_MEGABYTE)
            yield accepted()

        # block of maximal size
        block(17, spend=out[16], block_size=self.excessive_block_size)
        yield accepted()

        # Reject oversized blocks with bad-blk-length error
        block(18, spend=out[17], block_size=self.excessive_block_size + 1)
        yield rejected(RejectResult(16, b'bad-blk-length'))

        # Rewind bad block.
        tip(17)

        # Accept many sigops
        lots_of_checksigs = CScript(
            [OP_CHECKSIG] * (MAX_BLOCK_SIGOPS_PER_MB - 1))
        block(19, spend=out[17], script=lots_of_checksigs,
              block_size=ONE_MEGABYTE)
        yield accepted()

        too_many_blk_checksigs = CScript(
            [OP_CHECKSIG] * MAX_BLOCK_SIGOPS_PER_MB)
        block(20, spend=out[18], script=too_many_blk_checksigs,
              block_size=ONE_MEGABYTE)
        yield rejected(RejectResult(16, b'bad-blk-sigops'))

        # Rewind bad block
        tip(19)

        # Accept 40k sigops per block > 1MB and <= 2MB
        block(21, spend=out[18], script=lots_of_checksigs,
              extra_sigops=MAX_BLOCK_SIGOPS_PER_MB,
              block_size=ONE_MEGABYTE + 1)
        yield accepted()

        # Accept 40k sigops per block > 1MB and <= 2MB
        block(22, spend=out[19], script=lots_of_checksigs,
              extra_sigops=MAX_BLOCK_SIGOPS_PER_MB,
              block_size=2 * ONE_MEGABYTE)
        yield accepted()

        # Reject more than 40k sigops per block > 1MB and <= 2MB.
        block(23, spend=out[20], script=lots_of_checksigs,
              extra_sigops=MAX_BLOCK_SIGOPS_PER_MB + 1,
              block_size=ONE_MEGABYTE + 1)
        yield rejected(RejectResult(16, b'bad-blk-sigops'))

        # Rewind bad block
        tip(22)

        # Reject more than 40k sigops per block > 1MB and <= 2MB.
        block(24, spend=out[20], script=lots_of_checksigs,
              extra_sigops=MAX_BLOCK_SIGOPS_PER_MB + 1,
              block_size=2 * ONE_MEGABYTE)
        yield rejected(RejectResult(16, b'bad-blk-sigops'))

        # Rewind bad block
        tip(22)

        # Accept 60k sigops per block > 2MB and <= 3MB
        block(25, spend=out[20], script=lots_of_checksigs,
              extra_sigops=2 * MAX_BLOCK_SIGOPS_PER_MB,
              block_size=2 * ONE_MEGABYTE + 1)
        yield accepted()

        # Accept 60k sigops per block > 2MB and <= 3MB
        block(26, spend=out[21], script=lots_of_checksigs,
              extra_sigops=2 * MAX_BLOCK_SIGOPS_PER_MB,
              block_size=3 * ONE_MEGABYTE)
        yield accepted()

        # Reject more than 40k sigops per block > 1MB and <= 2MB.
        block(27, spend=out[22], script=lots_of_checksigs,
              extra_sigops=2 * MAX_BLOCK_SIGOPS_PER_MB + 1,
              block_size=2 * ONE_MEGABYTE + 1)
        yield rejected(RejectResult(16, b'bad-blk-sigops'))

        # Rewind bad block
        tip(26)

        # Reject more than 40k sigops per block > 1MB and <= 2MB.
        block(28, spend=out[22], script=lots_of_checksigs,
              extra_sigops=2 * MAX_BLOCK_SIGOPS_PER_MB + 1,
              block_size=3 * ONE_MEGABYTE)
        yield rejected(RejectResult(16, b'bad-blk-sigops'))

        # Rewind bad block
        tip(26)

        # Too many sigops in one txn
        too_many_tx_checksigs = CScript(
            [OP_CHECKSIG] * (MAX_BLOCK_SIGOPS_PER_MB + 1))
        block(29, spend=out[22], script=too_many_tx_checksigs,
              block_size=ONE_MEGABYTE + 1)
        yield rejected(RejectResult(16, b'bad-txn-sigops'))

        # Rewind bad block
        tip(26)

        # P2SH
        # Build the redeem script, hash it, use hash to create the p2sh script
        redeem_script = CScript([self.coinbase_pubkey] +
                                [OP_2DUP, OP_CHECKSIGVERIFY] * 5 +
                                [OP_CHECKSIG])
        redeem_script_hash = hash160(redeem_script)
        p2sh_script = CScript([OP_HASH160, redeem_script_hash, OP_EQUAL])

        # Create a p2sh transaction
        p2sh_tx = self.create_and_sign_transaction(out[22].tx, out[22].n, 1,
                                                   p2sh_script)

        # Add the transaction to the block
        block(30)
        update_block(30, [p2sh_tx])
        yield accepted()

        # Creates a new transaction using the p2sh transaction included in the
        # last block
        def spend_p2sh_tx(output_script=CScript([OP_TRUE])):
            # Create the transaction
            spent_p2sh_tx = CTransaction()
            spent_p2sh_tx.vin.append(CTxIn(COutPoint(p2sh_tx.sha256, 0), b''))
            spent_p2sh_tx.vout.append(CTxOut(1, output_script))
            # Sign the transaction using the redeem script
            sighash = SignatureHashForkId(redeem_script, spent_p2sh_tx, 0,
                                          SIGHASH_ALL | SIGHASH_FORKID,
                                          p2sh_tx.vout[0].nValue)
            sig = self.coinbase_key.sign(sighash) + bytes(
                bytearray([SIGHASH_ALL | SIGHASH_FORKID]))
            spent_p2sh_tx.vin[0].scriptSig = CScript([sig, redeem_script])
            spent_p2sh_tx.rehash()
            return spent_p2sh_tx

        # Sigops p2sh limit
        p2sh_sigops_limit = MAX_BLOCK_SIGOPS_PER_MB - \
            redeem_script.GetSigOpCount(True)
        # Too many sigops in one p2sh txn
        too_many_p2sh_sigops = CScript([OP_CHECKSIG] * (p2sh_sigops_limit + 1))
        block(31, spend=out[23], block_size=ONE_MEGABYTE + 1)
        update_block(31, [spend_p2sh_tx(too_many_p2sh_sigops)])
        yield rejected(RejectResult(16, b'bad-txn-sigops'))

        # Rewind bad block
        tip(30)

        # Max sigops in one p2sh txn
        max_p2sh_sigops = CScript([OP_CHECKSIG] * (p2sh_sigops_limit))
        block(32, spend=out[23], block_size=ONE_MEGABYTE + 1)
        update_block(32, [spend_p2sh_tx(max_p2sh_sigops)])
        yield accepted()

        # Check that compact block also work for big blocks
        node = self.nodes[0]
        peer = TestNode()
        peer.add_connection(NodeConn('127.0.0.1', p2p_port(0), node, peer))

        # Start up network handling in another thread and wait for connection
        # to be established
        # NOTE(review): NetworkThread().start() was already called in
        # run_test; this second call appears to restart the asyncore loop
        # for the new peer — confirm against the test framework.
        NetworkThread().start()
        peer.wait_for_verack()

        # Wait for SENDCMPCT
        def received_sendcmpct():
            return (peer.last_sendcmpct != None)
        got_sendcmpt = wait_until(received_sendcmpct, timeout=30)
        assert (got_sendcmpt)

        sendcmpct = msg_sendcmpct()
        sendcmpct.version = 1
        sendcmpct.announce = True
        peer.send_and_ping(sendcmpct)

        # Exchange headers
        def received_getheaders():
            return (peer.last_getheaders != None)
        got_getheaders = wait_until(received_getheaders, timeout=30)
        assert (got_getheaders)

        # Return the favor
        peer.send_message(peer.last_getheaders)

        # Wait for the header list
        def received_headers():
            return (peer.last_headers != None)
        got_headers = wait_until(received_headers, timeout=30)
        assert (got_headers)

        # It's like we know about the same headers !
        peer.send_message(peer.last_headers)

        # Send a block
        b33 = block(33, spend=out[24], block_size=ONE_MEGABYTE + 1)
        yield accepted()

        # Checks the node to forward it via compact block
        def received_block():
            return (peer.last_cmpctblock != None)
        got_cmpctblock = wait_until(received_block, timeout=30)
        assert (got_cmpctblock)

        # Was it our block ?
        cmpctblk_header = peer.last_cmpctblock.header_and_shortids.header
        cmpctblk_header.calc_sha256()
        assert (cmpctblk_header.sha256 == b33.sha256)

        # Send a bigger block
        peer.clear_block_data()
        b34 = block(34, spend=out[25], block_size=8 * ONE_MEGABYTE)
        yield accepted()

        # Checks the node to forward it via compact block
        got_cmpctblock = wait_until(received_block, timeout=30)
        assert (got_cmpctblock)

        # Was it our block ?
        cmpctblk_header = peer.last_cmpctblock.header_and_shortids.header
        cmpctblk_header.calc_sha256()
        assert (cmpctblk_header.sha256 == b34.sha256)

        # Let's send a compact block and see if the node accepts it.
        # First, we generate the block and send all transaction to the mempool
        b35 = block(35, spend=out[26], block_size=8 * ONE_MEGABYTE)
        for i in range(1, len(b35.vtx)):
            node.sendrawtransaction(ToHex(b35.vtx[i]), True)

        # Now we create the compact block and send it
        comp_block = HeaderAndShortIDs()
        comp_block.initialize_from_block(b35)
        peer.send_and_ping(msg_cmpctblock(comp_block.to_p2p()))

        # Check that compact block is received properly
        assert (int(node.getbestblockhash(), 16) == b35.sha256)
class FullBlockTest(ComparisonTestFramework):
    """Excessive-block-size / sigops-limit functional test.

    Builds a chain of blocks by hand (via ``next_block``) and yields
    TestInstance objects that assert whether the node accepts or rejects
    each tip. Also exercises compact-block relay for large blocks.
    """

    # Can either run this test as 1 node with expected answers, or two and compare them.
    # Change the "outcome" variable from each TestInstance object to only do
    # the comparison.
    def __init__(self):
        super().__init__()
        self.num_nodes = 1
        # maps block sha256 -> chain height, seeded with the genesis block in get_tests()
        self.block_heights = {}
        # deterministic key used to sign coinbase outputs we later spend
        self.coinbase_key = CECKey()
        self.coinbase_key.set_secretbytes(b"fatstacks")
        self.coinbase_pubkey = self.coinbase_key.get_pubkey()
        # current chain tip (CBlock) and all blocks built so far, keyed by test number
        self.tip = None
        self.blocks = {}
        self.excessive_block_size = 16 * ONE_MEGABYTE
        # relax mempool/ancestor limits so the huge test chains are accepted
        self.extra_args = [['-norelaypriority',
                            '-whitelist=127.0.0.1',
                            '-limitancestorcount=9999',
                            '-limitancestorsize=9999',
                            '-limitdescendantcount=9999',
                            '-limitdescendantsize=9999',
                            '-maxmempool=999',
                            "-excessiveblocksize=%d" % self.excessive_block_size]]

    def add_options(self, parser):
        """Add the test-specific --runbarelyexpensive command-line option."""
        super().add_options(parser)
        parser.add_option(
            "--runbarelyexpensive", dest="runbarelyexpensive", default=True)

    def run_test(self):
        """Wire up the comparison TestManager and run all yielded test cases."""
        self.test = TestManager(self, self.options.tmpdir)
        self.test.add_all_connections(self.nodes)
        # Start up network handling in another thread
        NetworkThread().start()
        # Set the blocksize to 2MB as initial condition
        # NOTE(review): the value set here is self.excessive_block_size
        # (16MB above) — the "2MB" wording looks stale; confirm intent.
        self.nodes[0].setexcessiveblock(self.excessive_block_size)
        self.test.run()

    def add_transactions_to_block(self, block, tx_list):
        """Rehash each tx and append the whole list to block.vtx (in order)."""
        [tx.rehash() for tx in tx_list]
        block.vtx.extend(tx_list)

    # this is a little handier to use than the version in blocktools.py
    def create_tx(self, spend_tx, n, value, script=CScript([OP_TRUE])):
        """Create an unsigned tx spending output n of spend_tx to `script`."""
        tx = create_transaction(spend_tx, n, b"", value, script)
        return tx

    # sign a transaction, using the key we know about
    # this signs input 0 in tx, which is assumed to be spending output n in
    # spend_tx
    def sign_tx(self, tx, spend_tx, n):
        scriptPubKey = bytearray(spend_tx.vout[n].scriptPubKey)
        if (scriptPubKey[0] == OP_TRUE):  # an anyone-can-spend
            # anyone-can-spend outputs need no signature, just an empty scriptSig
            tx.vin[0].scriptSig = CScript()
            return
        # sign with the coinbase key using the FORKID sighash algorithm
        sighash = SignatureHashForkId(
            spend_tx.vout[n].scriptPubKey, tx, 0, SIGHASH_ALL | SIGHASH_FORKID, spend_tx.vout[n].nValue)
        tx.vin[0].scriptSig = CScript(
            [self.coinbase_key.sign(sighash) + bytes(bytearray([SIGHASH_ALL | SIGHASH_FORKID]))])

    def create_and_sign_transaction(self, spend_tx, n, value, script=CScript([OP_TRUE])):
        """Convenience wrapper: create_tx + sign_tx + rehash, returning the tx."""
        tx = self.create_tx(spend_tx, n, value, script)
        self.sign_tx(tx, spend_tx, n)
        tx.rehash()
        return tx

    def next_block(self, number, spend=None, additional_coinbase_value=0, script=None, extra_sigops=0, block_size=0, solve=True):
        """
        Create a block on top of self.tip, and advance self.tip to point to the new block
        if spend is specified, then 1 satoshi will be spent from
        that to an anyone-can-spend output, and rest will go to fees.

        number: key under which the block is stored in self.blocks
        script: scriptPubKey for the 1-satoshi output of the spend tx
        extra_sigops: total OP_CHECKSIGs to spread across the padding txs
        block_size: if > 0 (and spend given), pad the block to exactly this
            serialized size with chained zero-value transactions
        solve: whether to grind a valid proof-of-work before returning
        """
        if self.tip == None:
            base_block_hash = self.genesis_hash
            block_time = int(time.time()) + 1
        else:
            base_block_hash = self.tip.sha256
            block_time = self.tip.nTime + 1
        # First create the coinbase
        height = self.block_heights[base_block_hash] + 1
        coinbase = create_coinbase(height, self.coinbase_pubkey)
        coinbase.vout[0].nValue += additional_coinbase_value
        if (spend != None):
            coinbase.vout[0].nValue += spend.tx.vout[
                spend.n].nValue - 1  # all but one satoshi to fees
        coinbase.rehash()
        block = create_block(base_block_hash, coinbase, block_time)
        spendable_output = None
        if (spend != None):
            tx = CTransaction()
            # no signature yet
            tx.vin.append(
                CTxIn(COutPoint(spend.tx.sha256, spend.n), b"", 0xffffffff))
            # We put some random data into the first transaction of the chain
            # to randomize ids
            tx.vout.append(
                CTxOut(0, CScript([random.randint(0, 255), OP_DROP, OP_TRUE])))
            if script == None:
                tx.vout.append(CTxOut(1, CScript([OP_TRUE])))
            else:
                tx.vout.append(CTxOut(1, script))
            spendable_output = PreviousSpendableOutput(tx, 0)
            # Now sign it if necessary
            scriptSig = b""
            scriptPubKey = bytearray(spend.tx.vout[spend.n].scriptPubKey)
            if (scriptPubKey[0] == OP_TRUE):  # looks like an anyone-can-spend
                scriptSig = CScript([OP_TRUE])
            else:
                # We have to actually sign it
                sighash = SignatureHashForkId(
                    spend.tx.vout[spend.n].scriptPubKey, tx, 0, SIGHASH_ALL | SIGHASH_FORKID, spend.tx.vout[spend.n].nValue)
                scriptSig = CScript(
                    [self.coinbase_key.sign(sighash) + bytes(bytearray([SIGHASH_ALL | SIGHASH_FORKID]))])
            tx.vin[0].scriptSig = scriptSig
            # Now add the transaction to the block
            self.add_transactions_to_block(block, [tx])
            block.hashMerkleRoot = block.calc_merkle_root()
        if spendable_output != None and block_size > 0:
            # Pad the block with a chain of txs, each spending the previous
            # one, until the serialized block reaches block_size exactly.
            while len(block.serialize()) < block_size:
                tx = CTransaction()
                # 79 bytes accounts for the fixed per-tx overhead around the
                # padding script — presumably derived from the serialization
                # format; TODO(review) confirm against the tx serializer.
                script_length = block_size - len(block.serialize()) - 79
                if script_length > 510000:
                    script_length = 500000
                # Put as many sigops as allowed into this tx, pad the rest
                # of the script with zero bytes.
                tx_sigops = min(
                    extra_sigops, script_length, MAX_TX_SIGOPS_COUNT)
                extra_sigops -= tx_sigops
                script_pad_len = script_length - tx_sigops
                script_output = CScript(
                    [b'\x00' * script_pad_len] + [OP_CHECKSIG] * tx_sigops)
                tx.vout.append(CTxOut(0, CScript([OP_TRUE])))
                tx.vout.append(CTxOut(0, script_output))
                tx.vin.append(
                    CTxIn(COutPoint(spendable_output.tx.sha256, spendable_output.n)))
                spendable_output = PreviousSpendableOutput(tx, 0)
                self.add_transactions_to_block(block, [tx])
            block.hashMerkleRoot = block.calc_merkle_root()
            # Make sure the math above worked out to produce the correct block size
            # (the math will fail if there are too many transactions in the block)
            assert_equal(len(block.serialize()), block_size)
            # Make sure all the requested sigops have been included
            assert_equal(extra_sigops, 0)
        if solve:
            block.solve()
        self.tip = block
        self.block_heights[block.sha256] = height
        assert number not in self.blocks
        self.blocks[number] = block
        return block

    def get_tests(self):
        """Generator of TestInstance cases consumed by the TestManager."""
        self.genesis_hash = int(self.nodes[0].getbestblockhash(), 16)
        self.block_heights[self.genesis_hash] = 0
        spendable_outputs = []

        # save the current tip so it can be spent by a later block
        def save_spendable_output():
            spendable_outputs.append(self.tip)

        # get an output that we previously marked as spendable
        def get_spendable_output():
            return PreviousSpendableOutput(spendable_outputs.pop(0).vtx[0], 0)

        # returns a test case that asserts that the current tip was accepted
        def accepted():
            return TestInstance([[self.tip, True]])

        # returns a test case that asserts that the current tip was rejected
        def rejected(reject=None):
            if reject is None:
                return TestInstance([[self.tip, False]])
            else:
                return TestInstance([[self.tip, reject]])

        # move the tip back to a previous block
        def tip(number):
            self.tip = self.blocks[number]

        # adds transactions to the block and updates state
        def update_block(block_number, new_transactions):
            block = self.blocks[block_number]
            self.add_transactions_to_block(block, new_transactions)
            old_sha256 = block.sha256
            block.hashMerkleRoot = block.calc_merkle_root()
            block.solve()
            # Update the internal state just like in next_block
            self.tip = block
            if block.sha256 != old_sha256:
                self.block_heights[
                    block.sha256] = self.block_heights[old_sha256]
                del self.block_heights[old_sha256]
            self.blocks[block_number] = block
            return block

        # shorthand for functions
        block = self.next_block

        # Create a new block
        block(0)
        save_spendable_output()
        yield accepted()

        # Now we need that block to mature so we can spend the coinbase.
        test = TestInstance(sync_every_block=False)
        for i in range(99):
            # 5000+i keys avoid colliding with the numbered test blocks below
            block(5000 + i)
            test.blocks_and_transactions.append([self.tip, True])
            save_spendable_output()
        yield test

        # collect spendable outputs now to avoid cluttering the code later on
        out = []
        for i in range(100):
            out.append(get_spendable_output())

        # Let's build some blocks and test them.
        for i in range(16):
            n = i + 1
            block(n, spend=out[i], block_size=n * ONE_MEGABYTE)
            yield accepted()

        # block of maximal size
        block(17, spend=out[16], block_size=self.excessive_block_size)
        yield accepted()

        # Reject oversized blocks with bad-blk-length error
        block(18, spend=out[17], block_size=self.excessive_block_size + 1)
        yield rejected(RejectResult(16, b'bad-blk-length'))

        # Rewind bad block.
        tip(17)

        # Accept many sigops
        lots_of_checksigs = CScript(
            [OP_CHECKSIG] * (MAX_BLOCK_SIGOPS_PER_MB - 1))
        block(
            19, spend=out[17], script=lots_of_checksigs, block_size=ONE_MEGABYTE)
        yield accepted()

        # One sigop over the per-MB limit must be rejected
        too_many_blk_checksigs = CScript(
            [OP_CHECKSIG] * MAX_BLOCK_SIGOPS_PER_MB)
        block(
            20, spend=out[18], script=too_many_blk_checksigs, block_size=ONE_MEGABYTE)
        yield rejected(RejectResult(16, b'bad-blk-sigops'))

        # Rewind bad block
        tip(19)

        # Accept 40k sigops per block > 1MB and <= 2MB
        block(21, spend=out[18], script=lots_of_checksigs,
              extra_sigops=MAX_BLOCK_SIGOPS_PER_MB, block_size=ONE_MEGABYTE + 1)
        yield accepted()

        # Accept 40k sigops per block > 1MB and <= 2MB
        block(22, spend=out[19], script=lots_of_checksigs,
              extra_sigops=MAX_BLOCK_SIGOPS_PER_MB, block_size=2 * ONE_MEGABYTE)
        yield accepted()

        # Reject more than 40k sigops per block > 1MB and <= 2MB.
        block(23, spend=out[20], script=lots_of_checksigs,
              extra_sigops=MAX_BLOCK_SIGOPS_PER_MB + 1, block_size=ONE_MEGABYTE + 1)
        yield rejected(RejectResult(16, b'bad-blk-sigops'))

        # Rewind bad block
        tip(22)

        # Reject more than 40k sigops per block > 1MB and <= 2MB.
        block(24, spend=out[20], script=lots_of_checksigs,
              extra_sigops=MAX_BLOCK_SIGOPS_PER_MB + 1, block_size=2 * ONE_MEGABYTE)
        yield rejected(RejectResult(16, b'bad-blk-sigops'))

        # Rewind bad block
        tip(22)

        # Accept 60k sigops per block > 2MB and <= 3MB
        block(25, spend=out[20], script=lots_of_checksigs, extra_sigops=2 *
              MAX_BLOCK_SIGOPS_PER_MB, block_size=2 * ONE_MEGABYTE + 1)
        yield accepted()

        # Accept 60k sigops per block > 2MB and <= 3MB
        block(26, spend=out[21], script=lots_of_checksigs,
              extra_sigops=2 * MAX_BLOCK_SIGOPS_PER_MB, block_size=3 * ONE_MEGABYTE)
        yield accepted()

        # Reject more than 60k sigops per block > 2MB and <= 3MB.
        block(27, spend=out[22], script=lots_of_checksigs, extra_sigops=2 *
              MAX_BLOCK_SIGOPS_PER_MB + 1, block_size=2 * ONE_MEGABYTE + 1)
        yield rejected(RejectResult(16, b'bad-blk-sigops'))

        # Rewind bad block
        tip(26)

        # Reject more than 60k sigops per block > 2MB and <= 3MB.
        block(28, spend=out[22], script=lots_of_checksigs, extra_sigops=2 *
              MAX_BLOCK_SIGOPS_PER_MB + 1, block_size=3 * ONE_MEGABYTE)
        yield rejected(RejectResult(16, b'bad-blk-sigops'))

        # Rewind bad block
        tip(26)

        # Too many sigops in one txn
        too_many_tx_checksigs = CScript(
            [OP_CHECKSIG] * (MAX_BLOCK_SIGOPS_PER_MB + 1))
        block(
            29, spend=out[22], script=too_many_tx_checksigs, block_size=ONE_MEGABYTE + 1)
        yield rejected(RejectResult(16, b'bad-txn-sigops'))

        # Rewind bad block
        tip(26)

        # P2SH
        # Build the redeem script, hash it, use hash to create the p2sh script
        redeem_script = CScript([self.coinbase_pubkey] + [
                                OP_2DUP, OP_CHECKSIGVERIFY] * 5 + [OP_CHECKSIG])
        redeem_script_hash = hash160(redeem_script)
        p2sh_script = CScript([OP_HASH160, redeem_script_hash, OP_EQUAL])

        # Create a p2sh transaction
        p2sh_tx = self.create_and_sign_transaction(
            out[22].tx, out[22].n, 1, p2sh_script)

        # Add the transaction to the block
        block(30)
        update_block(30, [p2sh_tx])
        yield accepted()

        # Creates a new transaction using the p2sh transaction included in the
        # last block
        def spend_p2sh_tx(output_script=CScript([OP_TRUE])):
            # Create the transaction
            spent_p2sh_tx = CTransaction()
            spent_p2sh_tx.vin.append(CTxIn(COutPoint(p2sh_tx.sha256, 0), b''))
            spent_p2sh_tx.vout.append(CTxOut(1, output_script))
            # Sign the transaction using the redeem script
            sighash = SignatureHashForkId(
                redeem_script, spent_p2sh_tx, 0, SIGHASH_ALL | SIGHASH_FORKID, p2sh_tx.vout[0].nValue)
            sig = self.coinbase_key.sign(sighash) + bytes(
                bytearray([SIGHASH_ALL | SIGHASH_FORKID]))
            spent_p2sh_tx.vin[0].scriptSig = CScript([sig, redeem_script])
            spent_p2sh_tx.rehash()
            return spent_p2sh_tx

        # Sigops p2sh limit
        p2sh_sigops_limit = MAX_BLOCK_SIGOPS_PER_MB - \
            redeem_script.GetSigOpCount(True)
        # Too many sigops in one p2sh txn
        too_many_p2sh_sigops = CScript([OP_CHECKSIG] * (p2sh_sigops_limit + 1))
        block(31, spend=out[23], block_size=ONE_MEGABYTE + 1)
        update_block(31, [spend_p2sh_tx(too_many_p2sh_sigops)])
        yield rejected(RejectResult(16, b'bad-txn-sigops'))

        # Rewind bad block
        tip(30)

        # Max sigops in one p2sh txn
        max_p2sh_sigops = CScript([OP_CHECKSIG] * (p2sh_sigops_limit))
        block(32, spend=out[23], block_size=ONE_MEGABYTE + 1)
        update_block(32, [spend_p2sh_tx(max_p2sh_sigops)])
        yield accepted()

        # Check that compact block also work for big blocks
        node = self.nodes[0]
        peer = TestNode()
        peer.add_connection(NodeConn('127.0.0.1', p2p_port(0), node, peer))

        # Start up network handling in another thread and wait for connection
        # to be established
        NetworkThread().start()
        peer.wait_for_verack()

        # Wait for SENDCMPCT
        def received_sendcmpct():
            return (peer.last_sendcmpct != None)
        got_sendcmpt = wait_until(received_sendcmpct, timeout=30)
        assert(got_sendcmpt)

        # Opt in to version-1 compact block announcements
        sendcmpct = msg_sendcmpct()
        sendcmpct.version = 1
        sendcmpct.announce = True
        peer.send_and_ping(sendcmpct)

        # Exchange headers
        def received_getheaders():
            return (peer.last_getheaders != None)
        got_getheaders = wait_until(received_getheaders, timeout=30)
        assert(got_getheaders)

        # Return the favor
        peer.send_message(peer.last_getheaders)

        # Wait for the header list
        def received_headers():
            return (peer.last_headers != None)
        got_headers = wait_until(received_headers, timeout=30)
        assert(got_headers)

        # It's like we know about the same headers !
        peer.send_message(peer.last_headers)

        # Send a block
        b33 = block(33, spend=out[24], block_size=ONE_MEGABYTE + 1)
        yield accepted()

        # Checks the node to forward it via compact block
        def received_block():
            return (peer.last_cmpctblock != None)
        got_cmpctblock = wait_until(received_block, timeout=30)
        assert(got_cmpctblock)

        # Was it our block ?
        cmpctblk_header = peer.last_cmpctblock.header_and_shortids.header
        cmpctblk_header.calc_sha256()
        assert(cmpctblk_header.sha256 == b33.sha256)

        # Send a bigger block
        peer.clear_block_data()
        b34 = block(34, spend=out[25], block_size=8 * ONE_MEGABYTE)
        yield accepted()

        # Checks the node to forward it via compact block
        got_cmpctblock = wait_until(received_block, timeout=30)
        assert(got_cmpctblock)

        # Was it our block ?
        cmpctblk_header = peer.last_cmpctblock.header_and_shortids.header
        cmpctblk_header.calc_sha256()
        assert(cmpctblk_header.sha256 == b34.sha256)

        # Let's send a compact block and see if the node accepts it.
        # First, we generate the block and send all transaction to the mempool
        b35 = block(35, spend=out[26], block_size=8 * ONE_MEGABYTE)
        for i in range(1, len(b35.vtx)):
            node.sendrawtransaction(ToHex(b35.vtx[i]), True)

        # Now we create the compact block and send it
        comp_block = HeaderAndShortIDs()
        comp_block.initialize_from_block(b35)
        peer.send_and_ping(msg_cmpctblock(comp_block.to_p2p()))

        # Check that compact block is received properly
        assert(int(node.getbestblockhash(), 16) == b35.sha256)
def run_test (self): self.nodes[2].importprivkey("cTnxkovLhGbp7VRhMhGThYt8WDwviXgaVAD8DjaVa5G5DApwC6tF") # Check that there's 100 UTXOs on each of the nodes assert_equal(len(self.nodes[0].listunspent()), 100) assert_equal(len(self.nodes[1].listunspent()), 100) assert_equal(len(self.nodes[2].listunspent()), 200) walletinfo = self.nodes[2].getbalance() assert_equal(walletinfo["CBT"], 21000000) assert_equal(walletinfo["ISSUANCE"], 500000) print("Mining blocks...") self.nodes[2].generate(101) self.sync_all() asscript = "76a914bc835aff853179fa88f2900f9003bb674e17ed4288ac"; genhash = self.nodes[2].getblockhash(0) genblock = self.nodes[2].getblock(genhash) for txid in genblock["tx"]: rawtx = self.nodes[2].getrawtransaction(txid,True) if "assetlabel" in rawtx["vout"][0]: if rawtx["vout"][0]["assetlabel"] == "ISSUANCE": asasset = rawtx["vout"][0]["asset"] astxid = txid asvalue = rawtx["vout"][0]["value"] assert_equal(self.nodes[0].getbalance("", 0, False, "CBT"), 21000000) assert_equal(self.nodes[1].getbalance("", 0, False, "CBT"), 21000000) assert_equal(self.nodes[2].getbalance("", 0, False, "CBT"), 21000000) #Set all OP_TRUE genesis outputs to single node self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), 21000000, "", "", True) self.nodes[0].generate(101) self.sync_all() assert_equal(self.nodes[0].getbalance("", 0, False, "CBT"), 21000000) assert_equal(self.nodes[1].getbalance("", 0, False, "CBT"), 0) assert_equal(self.nodes[2].getbalance("", 0, False, "CBT"), 0) #self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 1000000) #self.nodes[0].generate(1) #self.sync_all() #self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 100000) #self.nodes[0].generate(101) #self.sync_all() #assert_equal(self.nodes[0].getbalance(), 21000000-1100000) #assert_equal(self.nodes[1].getbalance(), 1000000) #assert_equal(self.nodes[2].getbalance(), 100000) # Send 21 BTC from 0 to 2 using sendtoaddress call. 
txid1 = self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 11) txout1v0 = self.nodes[0].gettxout(txid1, 0) rawtx1 = self.nodes[0].getrawtransaction(txid1, 1) #amountcommit1 = rawtx1["vout"][0]["amountcommitment"] assert_equal(txout1v0['confirmations'], 0) assert(not txout1v0['coinbase']) #assert_equal(amountcommit1, txout1v0['amountcommitment']) txid2 = self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 10) txout2v0 = self.nodes[0].gettxout(txid2, 0) rawtx2 = self.nodes[0].getrawtransaction(txid2, 1) #amountcommit2 = rawtx2["vout"][0]["amountcommitment"] assert_equal(txout2v0['confirmations'], 0) assert(not txout2v0['coinbase']) #assert_equal(amountcommit2, txout2v0['amountcommitment']) walletinfo = self.nodes[0].getwalletinfo("CBT") assert_equal(walletinfo['immature_balance'], 0) # Have node0 mine a block, thus it will collect its own fee. Confirm previous transactions. self.nodes[0].generate(1) self.sync_all() # Exercise locking of unspent outputs unspent_0 = self.nodes[2].listunspent(1, 9999999, [], True, "CBT")[0] unspent_0 = {"txid": unspent_0["txid"], "vout": unspent_0["vout"]} self.nodes[2].lockunspent(False, [unspent_0]) assert_raises_message(JSONRPCException, "Insufficient funds", self.nodes[2].sendtoaddress, self.nodes[2].getnewaddress(), 20) assert_equal([unspent_0], self.nodes[2].listlockunspent()) self.nodes[2].lockunspent(True, [unspent_0]) assert_equal(len(self.nodes[2].listlockunspent()), 0) # Have node1 generate 100 blocks (so node0 can recover the fee) self.nodes[1].generate(100) self.sync_all() # node0 should end up with 100 btc in block rewards plus fees, but # minus the 21 plus fees sent to node2 assert_equal(self.nodes[0].getbalance("", 0, False, "CBT"), 21000000-21) assert_equal(self.nodes[2].getbalance("", 0, False, "CBT"), 21) # Node0 should have three spendable outputs since 0-value coinbase outputs will be OP_RETURN. 
# Create a couple of transactions to send them to node2, submit them through # node1, and make sure both node0 and node2 pick them up properly: node0utxos = self.nodes[0].listunspent(1, 9999999, [], True, "CBT") assert_equal(len(node0utxos), 3) # create both transactions txns_to_send = [] for utxo in node0utxos: if utxo["amount"] <= 3: # arbitrary value of 3? continue inputs = [] outputs = {} inputs.append({ "txid" : utxo["txid"], "vout" : utxo["vout"]}) outputs = {self.nodes[2].getnewaddress("from1"): utxo["amount"] - Decimal('1'), "fee": Decimal('1')} raw_tx = self.nodes[0].createrawtransaction(inputs, outputs) raw_tx = self.nodes[0].blindrawtransaction(raw_tx) txns_to_send.append(self.nodes[0].signrawtransaction(raw_tx)) # Have node 1 (miner) send the transaction txid = self.nodes[1].sendrawtransaction(txns_to_send[0]["hex"], True) # Have node1 mine a block to confirm transaction: self.nodes[1].generate(1) self.sync_all() #test creatation of raw multisig issuance transactions #get a new address and public and private key for each node address_node1 = self.nodes[0].getnewaddress() val_addr_node1 = self.nodes[0].validateaddress(address_node1) privkey_node1 = self.nodes[0].dumpprivkey(address_node1) address_node2 =self.nodes[1].getnewaddress() val_addr_node2 = self.nodes[1].validateaddress(address_node2) privkey_node2 =self.nodes[1].dumpprivkey(address_node2) address_node3 =self.nodes[2].getnewaddress() val_addr_node3 = self.nodes[2].validateaddress(address_node3) privkey_node3 =self.nodes[2].dumpprivkey(address_node3) #create 2 of 3 multisig P2SH script and address multisig = self.nodes[0].createmultisig(2,[val_addr_node1["pubkey"],val_addr_node2["pubkey"],val_addr_node3["pubkey"]]) #send some policy asset to the P2SH address pa_txid = self.nodes[2].sendtoaddress(multisig["address"],1,"","",False,asasset) self.nodes[1].generate(1) self.sync_all() #get the vout and scriptPubKey of the multisig output vout = 0 pa_tx = self.nodes[1].getrawtransaction(pa_txid,1) for 
val in pa_tx["vout"]: for i,j in val.items(): if i == "n": vout_t = j for i,j in val.items(): if i == "scriptPubKey": for i2,j2 in j.items(): if i2 == "hex": script_t = j2 for i2,j2 in j.items(): if(i2 == "type" and j2 == "scripthash"): script_pk = script_t vout = vout_t #get address to send tokens and re-issuance tokens asset_addr = self.nodes[1].getnewaddress() token_addr = self.nodes[1].getnewaddress() #create an unsigned raw issuance transaction issuance_tx = self.nodes[1].createrawissuance(asset_addr,10.0,token_addr,1.0,multisig["address"],1.0000,'1',pa_txid,str(vout)) #node1 partially sign transaction partial_signed = self.nodes[0].signrawtransaction(issuance_tx["rawtx"],[{"txid":pa_txid,"vout":vout,"scriptPubKey":script_pk,"redeemScript":multisig["redeemScript"]}],[privkey_node1]) assert(not partial_signed["complete"]) #node1 partially sign transaction signed_tx = self.nodes[1].signrawtransaction(partial_signed["hex"],[{"txid":pa_txid,"vout":vout,"scriptPubKey":script_pk,"redeemScript":multisig["redeemScript"]}],[privkey_node2]) assert(signed_tx["complete"]) self.nodes[1].generate(2) self.sync_all() #submit signed transaction to network submit = self.nodes[1].sendrawtransaction(signed_tx["hex"]) #confirm transaction accepted by mempool mempool_tx = self.nodes[1].getrawmempool() assert_equal(mempool_tx[0],submit) self.nodes[1].generate(10) self.sync_all() #confirm asset can be spent by node2 wallet asset_addr2 = self.nodes[0].getnewaddress() asset_tx = self.nodes[1].sendtoaddress(asset_addr2,5,' ',' ',False,issuance_tx["asset"],True) mempool1 = self.nodes[1].getrawmempool() assert_equal(mempool1[0],asset_tx) # Test address prefix values returned by getsidechaininfo rpc addr_prefixes = self.nodes[0].getsidechaininfo()["addr_prefixes"] for prefix in addr_prefixes: assert_greater_than_or_equal(int(addr_prefixes[prefix]), 0) assert_greater_than(255, int(addr_prefixes[prefix])) # Test address reconstruction using address prefixes # p2pkh address correctly formed 
addr = self.nodes[0].getnewaddress() pubkey = self.nodes[0].validateaddress(addr)['pubkey'] pubkey = hex_str_to_bytes(pubkey) assert_equal(addr,byte_to_base58(hash160(pubkey), addr_prefixes['PUBKEY_ADDRESS'])) # p2sh address isvalid? p2sh = byte_to_base58(hash160(CScript([OP_TRUE])), addr_prefixes['SCRIPT_ADDRESS']) assert(self.nodes[0].validateaddress(p2sh)['isvalid']) # priv key = generate new and test if import successful with SECRET_KEY prefix k = CECKey() k.set_compressed(True) pk_bytes = hashlib.sha256(str(random.getrandbits(256)).encode('utf-8')).digest() pk_bytes = pk_bytes + b'\x01' k.set_secretbytes(pk_bytes) key = byte_to_base58(pk_bytes, addr_prefixes['SECRET_KEY']) assert_equal(self.nodes[0].importprivkey(key), None) # ensure import is successful # test blind prefix - construct expected createblindedaddress() return value and compare multisig_addr = self.nodes[2].createmultisig(2,["0222c31615e457119c2cb33821c150585c8b6a571a511d3cd07d27e7571e02c76e", "039bac374a8cd040ed137d0ce837708864e70012ad5766030aee1eb2f067b43d7f"])['address'] # blinding pubkey blinded_pubkey = self.nodes[2].validateaddress(self.nodes[2].getnewaddress())['pubkey'] blinded_addr = self.nodes[2].createblindedaddress(multisig_addr,blinded_pubkey) conf_addr_prefix = hex(addr_prefixes['BLINDED_ADDRESS'])[2:] if len(hex(addr_prefixes['BLINDED_ADDRESS'])[2:]) == 2 else '0' + str(hex(addr_prefixes['BLINDED_ADDRESS'])[2:]) secret_key_prefix = hex(addr_prefixes['SCRIPT_ADDRESS'])[2:] if len(hex(addr_prefixes['SCRIPT_ADDRESS'])[2:]) == 2 else '0' + str(hex(addr_prefixes['SCRIPT_ADDRESS'])[:2]) # construct expected createblindedaddress() return value expected_addr_bytes = \ str(conf_addr_prefix) + \ str(secret_key_prefix) + \ str(blinded_pubkey) + \ base58_to_bytes(multisig_addr)[2:] assert_equal(expected_addr_bytes,base58_to_bytes(blinded_addr)) ###################################################################### #################### END OF WORKING TESTS ########################### 
###################################################################### return #TODO fix the rest txoutv0 = self.nodes[0].gettxout(txid, 0) assert_equal(txoutv0['confirmations'], 1) assert(not txoutv0['coinbase']) assert_equal(self.nodes[0].getbalance(), 0) assert_equal(self.nodes[2].getbalance(), 94) assert_equal(self.nodes[2].getbalance("from1"), 94-21) # Send 10 BTC normal address = self.nodes[0].getnewaddress("test") fee_per_byte = Decimal('0.001') / 1000 self.nodes[2].settxfee(fee_per_byte * 1000) txid = self.nodes[2].sendtoaddress(address, 10, "", "", False) self.nodes[2].generate(1) self.sync_all() node_2_bal = self.check_fee_amount(self.nodes[2].getbalance(), Decimal('84'), fee_per_byte, count_bytes(self.nodes[2].getrawtransaction(txid))) assert_equal(self.nodes[0].getbalance(), Decimal('10')) # Send 10 BTC with subtract fee from amount txid = self.nodes[2].sendtoaddress(address, 10, "", "", True) self.nodes[2].generate(1) self.sync_all() node_2_bal -= Decimal('10') assert_equal(self.nodes[2].getbalance(), node_2_bal) node_0_bal = self.check_fee_amount(self.nodes[0].getbalance(), Decimal('20'), fee_per_byte, count_bytes(self.nodes[2].getrawtransaction(txid))) # Sendmany 10 BTC txid = self.nodes[2].sendmany('from1', {address: 10}, 0, "", [], {'fee': 'CBT'}) self.nodes[2].generate(1) self.sync_all() node_0_bal += Decimal('10') node_2_bal = self.check_fee_amount(self.nodes[2].getbalance(), node_2_bal - Decimal('10'), fee_per_byte, count_bytes(self.nodes[2].getrawtransaction(txid))) assert_equal(self.nodes[0].getbalance(), node_0_bal) # Sendmany 10 BTC with subtract fee from amount txid = self.nodes[2].sendmany('from1', {address: 10}, 0, "", [address], {'fee': 'CBT'}) self.nodes[2].generate(1) self.sync_all() node_2_bal -= Decimal('10') assert_equal(self.nodes[2].getbalance(), node_2_bal) node_0_bal = self.check_fee_amount(self.nodes[0].getbalance(), node_0_bal + Decimal('10'), fee_per_byte, count_bytes(self.nodes[2].getrawtransaction(txid))) # Test 
ResendWalletTransactions: # Create a couple of transactions, then start up a fourth # node (nodes[3]) and ask nodes[0] to rebroadcast. # EXPECT: nodes[3] should have those transactions in its mempool. txid1 = self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 1) txid2 = self.nodes[1].sendtoaddress(self.nodes[0].getnewaddress(), 1) sync_mempools(self.nodes) self.nodes.append(start_node(3, self.options.tmpdir, self.extra_args[3])) connect_nodes_bi(self.nodes, 0, 3) sync_blocks(self.nodes) relayed = self.nodes[0].resendwallettransactions() assert_equal(set(relayed), {txid1, txid2}) sync_mempools(self.nodes) assert(txid1 in self.nodes[3].getrawmempool()) # Exercise balance rpcs assert_equal(self.nodes[0].getwalletinfo()["unconfirmed_balance"], 1) assert_equal(self.nodes[0].getunconfirmedbalance(), 1) #check if we can list zero value tx as available coins #1. create rawtx #2. hex-changed one output to 0.0 #3. sign and send #4. check if recipient (node0) can list the zero value tx usp = self.nodes[1].listunspent() inputs = [{"txid":usp[0]['txid'], "vout":usp[0]['vout']}] outputs = {self.nodes[1].getnewaddress(): 49.998, self.nodes[0].getnewaddress(): 11.11} rawTx = self.nodes[1].createrawtransaction(inputs, outputs).replace("c0833842", "00000000") #replace 11.11 with 0.0 (int32) decRawTx = self.nodes[1].decoderawtransaction(rawTx) signedRawTx = self.nodes[1].signrawtransaction(rawTx) decRawTx = self.nodes[1].decoderawtransaction(signedRawTx['hex']) zeroValueTxid= decRawTx['txid'] sendResp = self.nodes[1].sendrawtransaction(signedRawTx['hex']) self.sync_all() self.nodes[1].generate(1) #mine a block self.sync_all() unspentTxs = self.nodes[0].listunspent() #zero value tx must be in listunspents output found = False for uTx in unspentTxs: if uTx['txid'] == zeroValueTxid: found = True assert_equal(uTx['amount'], Decimal('0')) assert(found) #do some -walletbroadcast tests stop_nodes(self.nodes) self.nodes = start_nodes(3, self.options.tmpdir, 
[["-walletbroadcast=0"],["-walletbroadcast=0"],["-walletbroadcast=0"]]) connect_nodes_bi(self.nodes,0,1) connect_nodes_bi(self.nodes,1,2) connect_nodes_bi(self.nodes,0,2) self.sync_all() txIdNotBroadcasted = self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 2) txObjNotBroadcasted = self.nodes[0].gettransaction(txIdNotBroadcasted) self.nodes[1].generate(1) #mine a block, tx should not be in there self.sync_all() assert_equal(self.nodes[2].getbalance(), node_2_bal) #should not be changed because tx was not broadcasted #now broadcast from another node, mine a block, sync, and check the balance self.nodes[1].sendrawtransaction(txObjNotBroadcasted['hex']) self.nodes[1].generate(1) self.sync_all() node_2_bal += 2 txObjNotBroadcasted = self.nodes[0].gettransaction(txIdNotBroadcasted) assert_equal(self.nodes[2].getbalance(), node_2_bal) #create another tx txIdNotBroadcasted = self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 2) #restart the nodes with -walletbroadcast=1 stop_nodes(self.nodes) self.nodes = start_nodes(3, self.options.tmpdir) connect_nodes_bi(self.nodes,0,1) connect_nodes_bi(self.nodes,1,2) connect_nodes_bi(self.nodes,0,2) sync_blocks(self.nodes) self.nodes[0].generate(1) sync_blocks(self.nodes) node_2_bal += 2 #tx should be added to balance because after restarting the nodes tx should be broadcastet assert_equal(self.nodes[2].getbalance(), node_2_bal) #send a tx with value in a string (PR#6380 +) txId = self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), "2") txObj = self.nodes[0].gettransaction(txId) assert_equal(txObj['amount'], Decimal('-2')) txId = self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), "0.0001") txObj = self.nodes[0].gettransaction(txId) assert_equal(txObj['amount'], Decimal('-0.0001')) #check if JSON parser can handle scientific notation in strings txId = self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), "1e-4") txObj = self.nodes[0].gettransaction(txId) assert_equal(txObj['amount'], 
Decimal('-0.0001')) try: txId = self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), "1f-4") except JSONRPCException as e: assert("Invalid amount" in e.error['message']) else: raise AssertionError("Must not parse invalid amounts") try: self.nodes[0].generate("2") raise AssertionError("Must not accept strings as numeric") except JSONRPCException as e: assert("not an integer" in e.error['message']) # Import address and private key to check correct behavior of spendable unspents # 1. Send some coins to generate new UTXO address_to_import = self.nodes[2].getnewaddress() txid = self.nodes[0].sendtoaddress(address_to_import, 1) self.nodes[0].generate(1) self.sync_all() # 2. Import address from node2 to node1 self.nodes[1].importaddress(address_to_import) # 3. Validate that the imported address is watch-only on node1 assert(self.nodes[1].validateaddress(address_to_import)["iswatchonly"]) # 4. Check that the unspents after import are not spendable assert_array_result(self.nodes[1].listunspent(), {"address": address_to_import}, {"spendable": False}) # 5. Import private key of the previously imported address on node1 priv_key = self.nodes[2].dumpprivkey(address_to_import) self.nodes[1].importprivkey(priv_key) # 6. Check that the unspents are now spendable on node1 assert_array_result(self.nodes[1].listunspent(), {"address": address_to_import}, {"spendable": True}) # Mine a block from node0 to an address from node1 cbAddr = self.nodes[1].getnewaddress() blkHash = self.nodes[0].generatetoaddress(1, cbAddr)[0] cbTxId = self.nodes[0].getblock(blkHash)['tx'][0] self.sync_all() # Check that the txid and balance is found by node1 self.nodes[1].gettransaction(cbTxId) # check if wallet or blockchain maintenance changes the balance self.sync_all() blocks = self.nodes[0].generate(2) self.sync_all() balance_nodes = [self.nodes[i].getbalance() for i in range(3)] block_count = self.nodes[0].getblockcount() # Check modes: # - True: unicode escaped as \u.... 
# - False: unicode directly as UTF-8 for mode in [True, False]: self.nodes[0].ensure_ascii = mode # unicode check: Basic Multilingual Plane, Supplementary Plane respectively for s in [u'ббаБаА', u'№ Ё']: addr = self.nodes[0].getaccountaddress(s) label = self.nodes[0].getaccount(addr) assert_equal(label, s) assert(s in self.nodes[0].listaccounts().keys()) self.nodes[0].ensure_ascii = True # restore to default # maintenance tests maintenance = [ '-rescan', '-reindex', '-zapwallettxes=1', '-zapwallettxes=2', # disabled until issue is fixed: https://github.com/bitcoin/bitcoin/issues/7463 # '-salvagewallet', ] chainlimit = 6 for m in maintenance: print("check " + m) stop_nodes(self.nodes) # set lower ancestor limit for later self.nodes = start_nodes(3, self.options.tmpdir, [[m, "-limitancestorcount="+str(chainlimit)]] * 3) while m == '-reindex' and [block_count] * 3 != [self.nodes[i].getblockcount() for i in range(3)]: # reindex will leave rpc warm up "early"; Wait for it to finish time.sleep(0.1) assert_equal(balance_nodes, [self.nodes[i].getbalance() for i in range(3)]) # Exercise listsinceblock with the last two blocks coinbase_tx_1 = self.nodes[0].listsinceblock(blocks[0]) assert_equal(coinbase_tx_1["lastblock"], blocks[1]) assert_equal(len(coinbase_tx_1["transactions"]), 1) assert_equal(coinbase_tx_1["transactions"][0]["blockhash"], blocks[1]) assert_equal(len(self.nodes[0].listsinceblock(blocks[1])["transactions"]), 0) # ==Check that wallet prefers to use coins that don't exceed mempool limits ===== # Get all non-zero utxos together chain_addrs = [self.nodes[0].getnewaddress(), self.nodes[0].getnewaddress()] singletxid = self.nodes[0].sendtoaddress(chain_addrs[0], self.nodes[0].getbalance(), "", "", True) self.nodes[0].generate(1) node0_balance = self.nodes[0].getbalance() # Split into two chains rawtx = self.nodes[0].createrawtransaction([{"txid":singletxid, "vout":0}], {chain_addrs[0]:node0_balance/2-Decimal('0.01'), 
chain_addrs[1]:node0_balance/2-Decimal('0.01')}) signedtx = self.nodes[0].signrawtransaction(rawtx) singletxid = self.nodes[0].sendrawtransaction(signedtx["hex"]) self.nodes[0].generate(1) # Make a long chain of unconfirmed payments without hitting mempool limit # Each tx we make leaves only one output of change on a chain 1 longer # Since the amount to send is always much less than the outputs, we only ever need one output # So we should be able to generate exactly chainlimit txs for each original output sending_addr = self.nodes[1].getnewaddress() txid_list = [] for i in range(chainlimit*2): txid_list.append(self.nodes[0].sendtoaddress(sending_addr, Decimal('0.0001'))) assert_equal(self.nodes[0].getmempoolinfo()['size'], chainlimit*2) assert_equal(len(txid_list), chainlimit*2) # Without walletrejectlongchains, we will still generate a txid # The tx will be stored in the wallet but not accepted to the mempool extra_txid = self.nodes[0].sendtoaddress(sending_addr, Decimal('0.0001')) assert(extra_txid not in self.nodes[0].getrawmempool()) assert(extra_txid in [tx["txid"] for tx in self.nodes[0].listtransactions()]) self.nodes[0].abandontransaction(extra_txid) total_txs = len(self.nodes[0].listtransactions("*",99999)) # Try with walletrejectlongchains # Double chain limit but require combining inputs, so we pass SelectCoinsMinConf stop_node(self.nodes[0],0) self.nodes[0] = start_node(0, self.options.tmpdir, ["-walletrejectlongchains", "-limitancestorcount="+str(2*chainlimit)]) # wait for loadmempool timeout = 10 while (timeout > 0 and len(self.nodes[0].getrawmempool()) < chainlimit*2): time.sleep(0.5) timeout -= 0.5 assert_equal(len(self.nodes[0].getrawmempool()), chainlimit*2) node0_balance = self.nodes[0].getbalance() # With walletrejectlongchains we will not create the tx and store it in our wallet. 
assert_raises_message(JSONRPCException, "mempool chain", self.nodes[0].sendtoaddress, sending_addr, node0_balance - Decimal('0.01')) # Verify nothing new in wallet assert_equal(total_txs, len(self.nodes[0].listtransactions("*",99999)))
def get_tests(self):
    """Generator of comparison-test cases exercising replay-protection activation.

    Yields TestInstance objects that the comparison TestManager feeds to the
    node: blocks expected to be accepted/rejected before, at, and after the
    replay-protection fork (driven by REPLAY_PROTECTION_START_TIME), plus
    mempool checks via direct RPC. The yield order is significant; do not
    reorder statements.
    """
    self.genesis_hash = int(self.nodes[0].getbestblockhash(), 16)
    self.block_heights[self.genesis_hash] = 0
    spendable_outputs = []

    # save the current tip so it can be spent by a later block
    def save_spendable_output():
        spendable_outputs.append(self.tip)

    # get an output that we previously marked as spendable
    def get_spendable_output():
        return PreviousSpendableOutput(spendable_outputs.pop(0).vtx[0], 0)

    # returns a test case that asserts that the current tip was accepted
    def accepted():
        return TestInstance([[self.tip, True]])

    # returns a test case that asserts that the current tip was rejected
    def rejected(reject=None):
        if reject is None:
            return TestInstance([[self.tip, False]])
        else:
            return TestInstance([[self.tip, reject]])

    # move the tip back to a previous block
    def tip(number):
        self.tip = self.blocks[number]

    # adds transactions to the block and updates state
    def update_block(block_number, new_transactions):
        [tx.rehash() for tx in new_transactions]
        block = self.blocks[block_number]
        block.vtx.extend(new_transactions)
        old_sha256 = block.sha256
        block.hashMerkleRoot = block.calc_merkle_root()
        block.solve()
        # Update the internal state just like in next_block
        self.tip = block
        if block.sha256 != old_sha256:
            # Re-key the height map only when re-solving actually changed
            # the block hash (it may not, e.g. for an empty tx list).
            self.block_heights[
                block.sha256] = self.block_heights[old_sha256]
            del self.block_heights[old_sha256]
        self.blocks[block_number] = block
        return block

    # shorthand for functions
    block = self.next_block
    node = self.nodes[0]

    # Create a new block
    block(0)
    save_spendable_output()
    yield accepted()

    # Now we need that block to mature so we can spend the coinbase.
    test = TestInstance(sync_every_block=False)
    for i in range(99):
        block(5000 + i)
        test.blocks_and_transactions.append([self.tip, True])
        save_spendable_output()
    yield test

    # collect spendable outputs now to avoid cluttering the code later on
    out = []
    for i in range(100):
        out.append(get_spendable_output())

    # Generate a key pair to test P2SH sigops count
    private_key = CECKey()
    private_key.set_secretbytes(b"replayprotection")
    public_key = private_key.get_pubkey()

    # This is a little handier to use than the version in blocktools.py
    def create_fund_and_spend_tx(spend, forkvalue=0):
        # Fund transaction: pays `spend` into a P2PK output for private_key.
        script = CScript([public_key, OP_CHECKSIG])
        txfund = create_transaction(
            spend.tx, spend.n, b'', 50 * COIN, script)
        txfund.rehash()

        # Spend transaction
        txspend = CTransaction()
        txspend.vout.append(CTxOut(50 * COIN - 1000, CScript([OP_TRUE])))
        txspend.vin.append(CTxIn(COutPoint(txfund.sha256, 0), b''))

        # Sign the transaction.  A non-zero `forkvalue` shifted into the
        # high bits of the sighash type makes the signature replay protected.
        sighashtype = (forkvalue << 8) | SIGHASH_ALL | SIGHASH_FORKID
        sighash = SignatureHashForkId(
            script, txspend, 0, sighashtype, 50 * COIN)
        sig = private_key.sign(sighash) + \
            bytes(bytearray([SIGHASH_ALL | SIGHASH_FORKID]))
        txspend.vin[0].scriptSig = CScript([sig])
        txspend.rehash()

        return [txfund, txspend]

    def send_transaction_to_mempool(tx):
        # Broadcast via RPC and confirm the node actually accepted it.
        tx_id = node.sendrawtransaction(ToHex(tx))
        assert(tx_id in set(node.getrawmempool()))
        return tx_id

    # Before the fork, no replay protection required to get in the mempool.
    txns = create_fund_and_spend_tx(out[0])
    send_transaction_to_mempool(txns[0])
    send_transaction_to_mempool(txns[1])

    # And txns get mined in a block properly.
    block(1)
    update_block(1, txns)
    yield accepted()

    # Replay protected transactions are rejected.
    replay_txns = create_fund_and_spend_tx(out[1], 0xffdead)
    send_transaction_to_mempool(replay_txns[0])
    assert_raises_rpc_error(-26, RPC_INVALID_SIGNATURE_ERROR,
                            node.sendrawtransaction, ToHex(replay_txns[1]))

    # And block containing them are rejected as well.
    block(2)
    update_block(2, replay_txns)
    yield rejected(RejectResult(16, b'blk-bad-inputs'))

    # Rewind bad block
    tip(1)

    # Create a block that would activate the replay protection.
    bfork = block(5555)
    bfork.nTime = REPLAY_PROTECTION_START_TIME - 1
    update_block(5555, [])
    yield accepted()

    # NOTE: `test` is deliberately reused here, so previously appended
    # blocks are re-sent along with the five new ones.
    for i in range(5):
        block(5100 + i)
        test.blocks_and_transactions.append([self.tip, True])
    yield test

    # Check we are just before the activation time
    assert_equal(node.getblockheader(node.getbestblockhash())['mediantime'],
                 REPLAY_PROTECTION_START_TIME - 1)

    # We are just before the fork, replay protected txns still are rejected
    assert_raises_rpc_error(-26, RPC_INVALID_SIGNATURE_ERROR,
                            node.sendrawtransaction, ToHex(replay_txns[1]))

    block(3)
    update_block(3, replay_txns)
    yield rejected(RejectResult(16, b'blk-bad-inputs'))

    # Rewind bad block
    tip(5104)

    # Send some non replay protected txns in the mempool to check
    # they get cleaned at activation.
    txns = create_fund_and_spend_tx(out[2])
    send_transaction_to_mempool(txns[0])
    tx_id = send_transaction_to_mempool(txns[1])

    # Activate the replay protection
    block(5556)
    yield accepted()

    # Non replay protected transactions are not valid anymore,
    # so they should be removed from the mempool.
    assert(tx_id not in set(node.getrawmempool()))

    # Good old transactions are now invalid.
    send_transaction_to_mempool(txns[0])
    assert_raises_rpc_error(-26, RPC_INVALID_SIGNATURE_ERROR,
                            node.sendrawtransaction, ToHex(txns[1]))

    # They also cannot be mined
    block(4)
    update_block(4, txns)
    yield rejected(RejectResult(16, b'blk-bad-inputs'))

    # Rewind bad block
    tip(5556)

    # The replay protected transaction is now valid
    send_transaction_to_mempool(replay_txns[0])
    replay_tx_id = send_transaction_to_mempool(replay_txns[1])

    # They also can also be mined
    b5 = block(5)
    update_block(5, replay_txns)
    yield accepted()

    # Ok, now we check if a reorg work properly accross the activation.
    postforkblockid = node.getbestblockhash()
    node.invalidateblock(postforkblockid)
    assert(replay_tx_id in set(node.getrawmempool()))

    # Deactivating replay protection.
    forkblockid = node.getbestblockhash()
    node.invalidateblock(forkblockid)
    assert(replay_tx_id not in set(node.getrawmempool()))

    # Check that we also do it properly on deeper reorg.
    node.reconsiderblock(forkblockid)
    node.reconsiderblock(postforkblockid)
    node.invalidateblock(forkblockid)
    assert(replay_tx_id not in set(node.getrawmempool()))
class FullBlockTest(ComparisonTestFramework):
    '''
    Can either run this test as 1 node with expected answers, or two and
    compare them.  Change the "outcome" variable from each TestInstance
    object to only do the comparison.
    '''

    def __init__(self):
        # Fix: call the parent constructor first so framework defaults are
        # initialized, matching the other ComparisonTestFramework subclasses
        # in this file; our overrides below then take effect.
        super().__init__()
        self.num_nodes = 1
        self.block_heights = {}        # block sha256 -> chain height
        self.coinbase_key = CECKey()
        self.coinbase_key.set_secretbytes(b"horsebattery")
        self.coinbase_pubkey = self.coinbase_key.get_pubkey()
        self.block_time = int(time.time()) + 1
        self.tip = None                # CBlock currently considered the tip
        self.blocks = {}               # test block number -> CBlock

    def run_test(self):
        """Wire the comparison TestManager to the node(s) and run get_tests()."""
        test = TestManager(self, self.options.tmpdir)
        test.add_all_connections(self.nodes)
        NetworkThread().start()  # Start up network handling in another thread
        test.run()

    def add_transactions_to_block(self, block, tx_list):
        """Append tx_list to block and refresh its merkle root and hash."""
        [tx.rehash() for tx in tx_list]
        block.vtx.extend(tx_list)
        block.hashMerkleRoot = block.calc_merkle_root()
        block.rehash()
        return block

    # Create a block on top of self.tip, and advance self.tip to point to
    # the new block.  If spend is specified, then 1 satoshi will be spent
    # from that to an anyone-can-spend output, and the rest will go to fees.
    def next_block(self, number, spend=None, additional_coinbase_value=0, script=None):
        if self.tip is None:
            base_block_hash = self.genesis_hash
        else:
            base_block_hash = self.tip.sha256
        # First create the coinbase
        height = self.block_heights[base_block_hash] + 1
        coinbase = create_coinbase(height, self.coinbase_pubkey)
        coinbase.vout[0].nValue += additional_coinbase_value
        if spend is not None:
            # all but one satoshi to fees
            coinbase.vout[0].nValue += spend.tx.vout[spend.n].nValue - 1
        coinbase.rehash()
        block = create_block(base_block_hash, coinbase, self.block_time)
        if spend is not None:
            tx = CTransaction()
            # no signature yet
            tx.vin.append(
                CTxIn(COutPoint(spend.tx.sha256, spend.n), b"", 0xffffffff))
            # This copies the java comparison tool testing behavior: the first
            # txout has a garbage scriptPubKey, "to make sure we're not
            # pre-verifying too much" (?)
            tx.vout.append(
                CTxOut(0, CScript([random.randint(0, 255), height & 255])))
            if script is None:
                tx.vout.append(CTxOut(1, CScript([OP_TRUE])))
            else:
                tx.vout.append(CTxOut(1, script))
            # Now sign it if necessary
            scriptSig = b""
            scriptPubKey = bytearray(spend.tx.vout[spend.n].scriptPubKey)
            if (scriptPubKey[0] == OP_TRUE):  # looks like an anyone-can-spend
                scriptSig = CScript([OP_TRUE])
            else:
                # We have to actually sign it
                (sighash, err) = SignatureHash(
                    spend.tx.vout[spend.n].scriptPubKey, tx, 0, SIGHASH_ALL)
                scriptSig = CScript(
                    [self.coinbase_key.sign(sighash) + bytes(bytearray([SIGHASH_ALL]))])
            tx.vin[0].scriptSig = scriptSig
            # Now add the transaction to the block
            block = self.add_transactions_to_block(block, [tx])
        block.solve()
        self.tip = block
        self.block_heights[block.sha256] = height
        self.block_time += 1
        assert number not in self.blocks
        self.blocks[number] = block
        return block

    def get_tests(self):
        """Generator of comparison-test cases for block consensus rules.

        Exercises reorgs, double spends, coinbase subsidy checks, sigop
        limits, block-size limits and coinbase scriptSig length rules.
        The yield order is significant; do not reorder statements.
        """
        self.genesis_hash = int(self.nodes[0].getbestblockhash(), 16)
        self.block_heights[self.genesis_hash] = 0
        spendable_outputs = []

        # save the current tip so it can be spent by a later block
        def save_spendable_output():
            spendable_outputs.append(self.tip)

        # get an output that we previous marked as spendable
        def get_spendable_output():
            return PreviousSpendableOutput(spendable_outputs.pop(0).vtx[0], 0)

        # returns a test case that asserts that the current tip was accepted
        def accepted():
            return TestInstance([[self.tip, True]])

        # returns a test case that asserts that the current tip was rejected
        def rejected(reject=None):
            if reject is None:
                return TestInstance([[self.tip, False]])
            else:
                return TestInstance([[self.tip, reject]])

        # move the tip back to a previous block
        def tip(number):
            self.tip = self.blocks[number]

        # add transactions to a block produced by next_block
        def update_block(block_number, new_transactions):
            block = self.blocks[block_number]
            old_hash = block.sha256
            self.add_transactions_to_block(block, new_transactions)
            block.solve()
            # Update the internal state just like in next_block.
            self.tip = block
            # Fix: only re-key the height map when re-solving actually
            # changed the hash; the unconditional version deleted the
            # freshly-written entry when the hash was unchanged (matches
            # the sibling update_block implementation in this file).
            if block.sha256 != old_hash:
                self.block_heights[block.sha256] = self.block_heights[old_hash]
                del self.block_heights[old_hash]
            self.blocks[block_number] = block
            return block

        # creates a new block and advances the tip to that block
        block = self.next_block

        # Create a new block
        block(0)
        save_spendable_output()
        yield accepted()

        # Now we need that block to mature so we can spend the coinbase.
        test = TestInstance(sync_every_block=False)
        for i in range(99):
            block(1000 + i)
            test.blocks_and_transactions.append([self.tip, True])
            save_spendable_output()
        yield test

        # Start by building a couple of blocks on top (which output is spent
        # is in parentheses):
        #     genesis -> b1 (0) -> b2 (1)
        out0 = get_spendable_output()
        block(1, spend=out0)
        save_spendable_output()
        yield accepted()

        out1 = get_spendable_output()
        b2 = block(2, spend=out1)
        yield accepted()

        # so fork like this:
        #
        #     genesis -> b1 (0) -> b2 (1)
        #                      \-> b3 (1)
        #
        # Nothing should happen at this point. We saw b2 first so it takes
        # priority.
        tip(1)
        b3 = block(3, spend=out1)
        txout_b3 = PreviousSpendableOutput(b3.vtx[1], 1)
        yield rejected()

        # Now we add another block to make the alternative chain longer.
        #
        #     genesis -> b1 (0) -> b2 (1)
        #                      \-> b3 (1) -> b4 (2)
        out2 = get_spendable_output()
        block(4, spend=out2)
        yield accepted()

        # ... and back to the first chain.
        #     genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3)
        #                      \-> b3 (1) -> b4 (2)
        tip(2)
        block(5, spend=out2)
        save_spendable_output()
        yield rejected()

        out3 = get_spendable_output()
        block(6, spend=out3)
        yield accepted()

        # Try to create a fork that double-spends
        #     genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3)
        #                                          \-> b7 (2) -> b8 (4)
        #                      \-> b3 (1) -> b4 (2)
        tip(5)
        block(7, spend=out2)
        yield rejected()

        out4 = get_spendable_output()
        block(8, spend=out4)
        yield rejected()

        # Try to create a block that has too much fee
        #     genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3)
        #                                                    \-> b9 (4)
        #                      \-> b3 (1) -> b4 (2)
        tip(6)
        block(9, spend=out4, additional_coinbase_value=1)
        yield rejected(RejectResult(16, b'bad-cb-amount'))

        # Create a fork that ends in a block with too much fee (the one that
        # causes the reorg)
        #     genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6  (3)
        #                                          \-> b10 (3) -> b11 (4)
        #                      \-> b3 (1) -> b4 (2)
        tip(5)
        block(10, spend=out3)
        yield rejected()

        block(11, spend=out4, additional_coinbase_value=1)
        yield rejected(RejectResult(16, b'bad-cb-amount'))

        # Try again, but with a valid fork first
        #     genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6  (3)
        #                                          \-> b12 (3) -> b13 (4) -> b14 (5)
        #                                              (b12 added last)
        #                      \-> b3 (1) -> b4 (2)
        tip(5)
        b12 = block(12, spend=out3)
        save_spendable_output()
        #yield TestInstance([[b12, False]])
        b13 = block(13, spend=out4)
        # Deliver the block header for b12, and the block b13.
        # b13 should be accepted but the tip won't advance until b12 is
        # delivered.
        yield TestInstance([[CBlockHeader(b12), None], [b13, False]])

        save_spendable_output()
        out5 = get_spendable_output()
        # b14 is invalid, but the node won't know that until it tries to
        # connect.  Tip still can't advance because b12 is missing.
        block(14, spend=out5, additional_coinbase_value=1)
        yield rejected()

        yield TestInstance([[b12, True, b13.sha256]])  # New tip should be b13.

        # Add a block with MAX_BLOCK_SIGOPS and one with one more sigop
        #     genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6  (3)
        #                                          \-> b12 (3) -> b13 (4) -> b15 (5) -> b16 (6)
        #                      \-> b3 (1) -> b4 (2)

        # Test that a block with a lot of checksigs is okay
        lots_of_checksigs = CScript([OP_CHECKSIG] * (1000000 // 50 - 1))
        tip(13)
        block(15, spend=out5, script=lots_of_checksigs)
        yield accepted()

        # Test that a block with too many checksigs is rejected
        out6 = get_spendable_output()
        too_many_checksigs = CScript([OP_CHECKSIG] * (1000000 // 50))
        block(16, spend=out6, script=too_many_checksigs)
        yield rejected(RejectResult(16, b'bad-blk-sigops'))

        # Attempt to spend a transaction created on a different fork
        #     genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6  (3)
        #                                          \-> b12 (3) -> b13 (4) -> b15 (5) -> b17 (b3.vtx[1])
        #                      \-> b3 (1) -> b4 (2)
        tip(15)
        block(17, spend=txout_b3)
        yield rejected(RejectResult(16, b'bad-txns-inputs-missingorspent'))

        # Attempt to spend a transaction created on a different fork (on a
        # fork this time)
        #     genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6  (3)
        #                                          \-> b12 (3) -> b13 (4) -> b15 (5)
        #                                                                \-> b18 (b3.vtx[1]) -> b19 (6)
        #                      \-> b3 (1) -> b4 (2)
        tip(13)
        block(18, spend=txout_b3)
        yield rejected()

        block(19, spend=out6)
        yield rejected()

        # Attempt to spend a coinbase at depth too low
        #     genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6  (3)
        #                                          \-> b12 (3) -> b13 (4) -> b15 (5) -> b20 (7)
        #                      \-> b3 (1) -> b4 (2)
        tip(15)
        out7 = get_spendable_output()
        block(20, spend=out7)
        yield rejected(RejectResult(16, b'bad-txns-premature-spend-of-coinbase'))

        # Attempt to spend a coinbase at depth too low (on a fork this time)
        #     genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6  (3)
        #                                          \-> b12 (3) -> b13 (4) -> b15 (5)
        #                                                                \-> b21 (6) -> b22 (5)
        #                      \-> b3 (1) -> b4 (2)
        tip(13)
        block(21, spend=out6)
        yield rejected()

        block(22, spend=out5)
        yield rejected()

        # Create a block on either side of MAX_BLOCK_SIZE and make sure its
        # accepted/rejected
        #     genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6  (3)
        #                                          \-> b12 (3) -> b13 (4) -> b15 (5) -> b23 (6)
        #                                                                           \-> b24 (6) -> b25 (7)
        #                      \-> b3 (1) -> b4 (2)
        tip(15)
        b23 = block(23, spend=out6)
        old_hash = b23.sha256
        tx = CTransaction()
        # 69 accounts for the serialization overhead of the added tx;
        # the assert below verifies the arithmetic.
        script_length = MAX_BLOCK_SIZE - len(b23.serialize()) - 69
        script_output = CScript([b'\x00' * script_length])
        tx.vout.append(CTxOut(0, script_output))
        tx.vin.append(CTxIn(COutPoint(b23.vtx[1].sha256, 1)))
        b23 = update_block(23, [tx])
        # Make sure the math above worked out to produce a max-sized block
        assert_equal(len(b23.serialize()), MAX_BLOCK_SIZE)
        yield accepted()

        # Make the next block one byte bigger and check that it fails
        tip(15)
        b24 = block(24, spend=out6)
        script_length = MAX_BLOCK_SIZE - len(b24.serialize()) - 69
        script_output = CScript([b'\x00' * (script_length + 1)])
        # `tx` from b23 is deliberately reused here with a replaced vout.
        tx.vout = [CTxOut(0, script_output)]
        b24 = update_block(24, [tx])
        assert_equal(len(b24.serialize()), MAX_BLOCK_SIZE + 1)
        yield rejected(RejectResult(16, b'bad-blk-length'))

        b25 = block(25, spend=out7)
        yield rejected()

        # Create blocks with a coinbase input script size out of range
        #     genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6  (3)
        #                                          \-> b12 (3) -> b13 (4) -> b15 (5) -> b23 (6) -> b30 (7)
        #                                                                           \-> ... (6) -> ... (7)
        #                      \-> b3 (1) -> b4 (2)
        tip(15)
        b26 = block(26, spend=out6)
        b26.vtx[0].vin[0].scriptSig = b'\x00'
        b26.vtx[0].rehash()
        # update_block causes the merkle root to get updated, even with no new
        # transactions, and updates the required state.
        b26 = update_block(26, [])
        yield rejected(RejectResult(16, b'bad-cb-length'))

        # Extend the b26 chain to make sure bitcreditd isn't accepting b26
        b27 = block(27, spend=out7)
        yield rejected()

        # Now try a too-large-coinbase script
        tip(15)
        b28 = block(28, spend=out6)
        b28.vtx[0].vin[0].scriptSig = b'\x00' * 101
        b28.vtx[0].rehash()
        b28 = update_block(28, [])
        yield rejected(RejectResult(16, b'bad-cb-length'))

        # Extend the b28 chain to make sure bitcreditd isn't accepted b28
        b29 = block(29, spend=out7)
        # TODO: Should get a reject message back with "bad-prevblk", except
        # there's a bug that prevents this from being detected.  Just note
        # failure for now, and add the reject result later.
        yield rejected()

        # b30 has a max-sized coinbase scriptSig.
        tip(23)
        b30 = block(30)
        b30.vtx[0].vin[0].scriptSig = b'\x00' * 100
        b30.vtx[0].rehash()
        b30 = update_block(30, [])
        yield accepted()
def get_tests(self):
    """Generator of comparison-test cases for excessive block size and sigop limits.

    Uses the shared chain helper (self.chain) to build blocks up to and past
    self.excessive_block_size, verifies the node disconnects a peer sending
    an oversized block, and checks the per-MB sigop budget
    (MAX_BLOCK_SIGOPS_PER_MB) including P2SH sigop counting.  The yield
    order is significant; do not reorder statements.
    """
    node = self.nodes[0]
    self.chain.set_genesis_hash(int(node.getbestblockhash(), 16))

    # shorthand for functions
    block = self.chain.next_block

    # Create a new block
    block(0)
    self.chain.save_spendable_output()
    yield self.accepted()

    # Now we need that block to mature so we can spend the coinbase.
    test = TestInstance(sync_every_block=False)
    for i in range(99):
        block(5000 + i)
        test.blocks_and_transactions.append([self.chain.tip, True])
        self.chain.save_spendable_output()
    yield test

    # collect spendable outputs now to avoid cluttering the code later on
    out = []
    for i in range(100):
        out.append(self.chain.get_spendable_output())

    # Let's build some blocks and test them.
    for i in range(16):
        n = i + 1
        block(n, spend=out[i], block_size=n * ONE_MEGABYTE // 2)
        yield self.accepted()

    # block of maximal size
    block(17, spend=out[16], block_size=self.excessive_block_size)
    yield self.accepted()

    # Oversized blocks will cause us to be disconnected
    assert (not self.test.test_nodes[0].closed)
    block(18, spend=out[17], block_size=self.excessive_block_size + 1)
    self.test.connections[0].send_message(msg_block((self.chain.tip)))
    self.test.wait_for_disconnections()
    assert (self.test.test_nodes[0].closed)

    # Rewind bad block and remake connection to node
    self.chain.set_tip(17)
    self.restart_network()
    self.test.wait_for_verack()

    # Accept many sigops
    lots_of_checksigs = CScript([OP_CHECKSIG] * MAX_BLOCK_SIGOPS_PER_MB)
    block(19, spend=out[17], script=lots_of_checksigs,
          block_size=ONE_MEGABYTE)
    yield self.accepted()

    # One sigop over the per-MB budget must be rejected.
    block(20, spend=out[18], script=lots_of_checksigs,
          block_size=ONE_MEGABYTE, extra_sigops=1)
    yield self.rejected(RejectResult(16, b'bad-blk-sigops'))

    # Rewind bad block
    self.chain.set_tip(19)

    # Accept 40k sigops per block > 1MB and <= 2MB
    block(21, spend=out[18], script=lots_of_checksigs,
          extra_sigops=MAX_BLOCK_SIGOPS_PER_MB, block_size=ONE_MEGABYTE + 1)
    yield self.accepted()

    # Accept 40k sigops per block > 1MB and <= 2MB
    block(22, spend=out[19], script=lots_of_checksigs,
          extra_sigops=MAX_BLOCK_SIGOPS_PER_MB, block_size=2 * ONE_MEGABYTE)
    yield self.accepted()

    # Reject more than 40k sigops per block > 1MB and <= 2MB.
    block(23, spend=out[20], script=lots_of_checksigs,
          extra_sigops=MAX_BLOCK_SIGOPS_PER_MB + 1, block_size=ONE_MEGABYTE + 1)
    yield self.rejected(RejectResult(16, b'bad-blk-sigops'))

    # Rewind bad block
    self.chain.set_tip(22)

    # Reject more than 40k sigops per block > 1MB and <= 2MB.
    block(24, spend=out[20], script=lots_of_checksigs,
          extra_sigops=MAX_BLOCK_SIGOPS_PER_MB + 1, block_size=2 * ONE_MEGABYTE)
    yield self.rejected(RejectResult(16, b'bad-blk-sigops'))

    # Rewind bad block
    self.chain.set_tip(22)

    # Accept 60k sigops per block > 2MB and <= 3MB
    block(25, spend=out[20], script=lots_of_checksigs,
          extra_sigops=2 * MAX_BLOCK_SIGOPS_PER_MB, block_size=2 * ONE_MEGABYTE + 1)
    yield self.accepted()

    # Accept 60k sigops per block > 2MB and <= 3MB
    block(26, spend=out[21], script=lots_of_checksigs,
          extra_sigops=2 * MAX_BLOCK_SIGOPS_PER_MB, block_size=3 * ONE_MEGABYTE)
    yield self.accepted()

    # Reject more than 40k sigops per block > 1MB and <= 2MB.
    block(27, spend=out[22], script=lots_of_checksigs,
          extra_sigops=2 * MAX_BLOCK_SIGOPS_PER_MB + 1, block_size=2 * ONE_MEGABYTE + 1)
    yield self.rejected(RejectResult(16, b'bad-blk-sigops'))

    # Rewind bad block
    self.chain.set_tip(26)

    # Reject more than 40k sigops per block > 1MB and <= 2MB.
    block(28, spend=out[22], script=lots_of_checksigs,
          extra_sigops=2 * MAX_BLOCK_SIGOPS_PER_MB + 1, block_size=3 * ONE_MEGABYTE)
    yield self.rejected(RejectResult(16, b'bad-blk-sigops'))

    # Rewind bad block
    self.chain.set_tip(26)

    # Too many sigops in one txn
    too_many_tx_checksigs = CScript(
        [OP_CHECKSIG] * (MAX_BLOCK_SIGOPS_PER_MB + 1))
    block(29, spend=out[22], script=too_many_tx_checksigs,
          block_size=ONE_MEGABYTE + 1)
    yield self.rejected(RejectResult(16, b'bad-txn-sigops'))

    # Rewind bad block
    self.chain.set_tip(26)

    # Generate a key pair to test P2SH sigops count
    private_key = CECKey()
    private_key.set_secretbytes(b"fatstacks")
    public_key = private_key.get_pubkey()

    # P2SH
    # Build the redeem script, hash it, use hash to create the p2sh script
    redeem_script = CScript(
        [public_key] + [OP_2DUP, OP_CHECKSIGVERIFY] * 5 + [OP_CHECKSIG])
    redeem_script_hash = hash160(redeem_script)
    p2sh_script = CScript([OP_HASH160, redeem_script_hash, OP_EQUAL])

    # Create a p2sh transaction
    p2sh_tx = self.chain.create_tx_with_script(out[22], 1, p2sh_script)

    # Add the transaction to the block
    block(30)
    self.chain.update_block(30, [p2sh_tx])
    yield self.accepted()

    # Creates a new transaction using the p2sh transaction included in the
    # last block
    # NOTE(review): the CScript default argument is mutable but is only
    # read here, never mutated, so the shared-default pitfall does not bite.
    def spend_p2sh_tx(output_script=CScript([OP_TRUE])):
        # Create the transaction
        spent_p2sh_tx = CTransaction()
        spent_p2sh_tx.vin.append(CTxIn(COutPoint(p2sh_tx.sha256, 0), b''))
        spent_p2sh_tx.vout.append(CTxOut(1, output_script))
        # Sign the transaction using the redeem script
        sighash = SignatureHashForkId(
            redeem_script, spent_p2sh_tx, 0, SIGHASH_ALL | SIGHASH_FORKID, p2sh_tx.vout[0].nValue)
        sig = private_key.sign(sighash) + \
            bytes(bytearray([SIGHASH_ALL | SIGHASH_FORKID]))
        spent_p2sh_tx.vin[0].scriptSig = CScript([sig, redeem_script])
        spent_p2sh_tx.rehash()
        return spent_p2sh_tx

    # Sigops p2sh limit: budget left after the redeem script's own sigops.
    p2sh_sigops_limit = MAX_BLOCK_SIGOPS_PER_MB - \
        redeem_script.GetSigOpCount(True)
    # Too many sigops in one p2sh txn
    too_many_p2sh_sigops = CScript([OP_CHECKSIG] * (p2sh_sigops_limit + 1))
    block(31, spend=out[23], block_size=ONE_MEGABYTE + 1)
    self.chain.update_block(31, [spend_p2sh_tx(too_many_p2sh_sigops)])
    yield self.rejected(RejectResult(16, b'bad-txn-sigops'))

    # Rewind bad block
    self.chain.set_tip(30)

    # Max sigops in one p2sh txn
    max_p2sh_sigops = CScript([OP_CHECKSIG] * (p2sh_sigops_limit))
    block(32, spend=out[23], block_size=ONE_MEGABYTE + 1)
    self.chain.update_block(32, [spend_p2sh_tx(max_p2sh_sigops)])
    yield self.accepted()

    # Submit a very large block via RPC
    large_block = block(
        33, spend=out[24], block_size=self.excessive_block_size)
    node.submitblock(ToHex(large_block))
def run_test(self):
    """Build a 2202-block chain with one invalid signature at height 102,
    then verify -assumevalid behavior.

    node0 (no assumevalid) must reject block 102 and stop at height 101.
    node1 (assumevalid=block102, enough headers past it) accepts the whole
    chain.  node2 (assumevalid=block102 but only 200 headers sent) must
    still validate and reject block 102, stopping at height 101.
    """
    p2p0 = self.nodes[0].add_p2p_connection(BaseNode())

    # Build the blockchain
    self.tip = int(self.nodes[0].getbestblockhash(), 16)
    self.block_time = self.nodes[0].getblock(
        self.nodes[0].getbestblockhash())['time'] + 1

    self.blocks = []

    # Get a pubkey for the coinbase TXO
    coinbase_key = CECKey()
    coinbase_key.set_secretbytes(b"horsebattery")
    coinbase_pubkey = coinbase_key.get_pubkey()

    # Create the first block with a coinbase output to our key
    height = 1
    block = create_block(self.tip, create_coinbase(
        height, coinbase_pubkey), self.block_time)
    self.blocks.append(block)
    self.block_time += 1
    block.solve()
    # Save the coinbase for later
    self.block1 = block
    self.tip = block.sha256
    height += 1

    # Bury the block 100 deep so the coinbase output is spendable
    for i in range(100):
        block = create_block(
            self.tip, create_coinbase(height), self.block_time)
        block.solve()
        self.blocks.append(block)
        self.tip = block.sha256
        self.block_time += 1
        height += 1

    # Create a transaction spending the coinbase output with an invalid
    # (null) signature
    tx = CTransaction()
    tx.vin.append(
        CTxIn(COutPoint(self.block1.vtx[0].sha256, 0), scriptSig=b""))
    tx.vout.append(CTxOut(49 * 100000000, CScript([OP_TRUE])))
    tx.calc_sha256()

    # Block 102 carries the invalid-signature transaction.
    block102 = create_block(
        self.tip, create_coinbase(height), self.block_time)
    self.block_time += 1
    block102.vtx.extend([tx])
    block102.hashMerkleRoot = block102.calc_merkle_root()
    block102.rehash()
    block102.solve()
    self.blocks.append(block102)
    self.tip = block102.sha256
    self.block_time += 1
    height += 1

    # Bury the assumed valid block 2100 deep
    for i in range(2100):
        block = create_block(
            self.tip, create_coinbase(height), self.block_time)
        block.nVersion = 4
        block.solve()
        self.blocks.append(block)
        self.tip = block.sha256
        self.block_time += 1
        height += 1

    self.nodes[0].disconnect_p2ps()

    # Start node1 and node2 with assumevalid so they accept a block with a
    # bad signature.
    self.start_node(1, extra_args=["-assumevalid=" + hex(block102.sha256)])
    self.start_node(2, extra_args=["-assumevalid=" + hex(block102.sha256)])

    p2p0 = self.nodes[0].add_p2p_connection(BaseNode())
    p2p1 = self.nodes[1].add_p2p_connection(BaseNode())
    p2p2 = self.nodes[2].add_p2p_connection(BaseNode())

    # send header lists to all three nodes
    p2p0.send_header_for_blocks(self.blocks[0:2000])
    p2p0.send_header_for_blocks(self.blocks[2000:])
    p2p1.send_header_for_blocks(self.blocks[0:2000])
    p2p1.send_header_for_blocks(self.blocks[2000:])
    # node2 deliberately only gets 200 headers, so its best header does not
    # reach far enough past the assumevalid block.
    p2p2.send_header_for_blocks(self.blocks[0:200])

    # Send blocks to node0. Block 102 will be rejected.
    self.send_blocks_until_disconnected(p2p0)
    self.assert_blockchain_height(self.nodes[0], 101)

    # Send all blocks to node1. All blocks will be accepted.
    for i in range(2202):
        p2p1.send_message(msg_block(self.blocks[i]))
    # Syncing 2200 blocks can take a while on slow systems. Give it plenty
    # of time to sync.
    p2p1.sync_with_ping(120)
    assert_equal(self.nodes[1].getblock(
        self.nodes[1].getbestblockhash())['height'], 2202)

    # Send blocks to node2. Block 102 will be rejected.
    self.send_blocks_until_disconnected(p2p2)
    self.assert_blockchain_height(self.nodes[2], 101)
def get_tests(self):
    """Generator for the comparison-test framework.

    Yields TestInstance objects exercising the configurable excessive block
    size (-excessiveblocksize) and the per-megabyte sigop limits:
    blocks up to self.excessive_block_size are accepted, one byte over
    causes disconnection, and sigop counts are capped at
    MAX_BLOCK_SIGOPS_PER_MB per started megabyte of block size.
    """
    node = self.nodes[0]
    self.genesis_hash = int(node.getbestblockhash(), 16)
    self.block_heights[self.genesis_hash] = 0
    spendable_outputs = []

    # save the current tip so it can be spent by a later block
    def save_spendable_output():
        spendable_outputs.append(self.tip)

    # get an output that we previously marked as spendable
    def get_spendable_output():
        return PreviousSpendableOutput(spendable_outputs.pop(0).vtx[0], 0)

    # returns a test case that asserts that the current tip was accepted
    def accepted():
        return TestInstance([[self.tip, True]])

    # returns a test case that asserts that the current tip was rejected
    def rejected(reject=None):
        if reject is None:
            return TestInstance([[self.tip, False]])
        else:
            return TestInstance([[self.tip, reject]])

    # move the tip back to a previous block
    def tip(number):
        self.tip = self.blocks[number]

    # adds transactions to the block and updates state
    def update_block(block_number, new_transactions):
        block = self.blocks[block_number]
        self.add_transactions_to_block(block, new_transactions)
        old_sha256 = block.sha256
        block.hashMerkleRoot = block.calc_merkle_root()
        block.solve()
        # Update the internal state just like in next_block
        self.tip = block
        if block.sha256 != old_sha256:
            self.block_heights[
                block.sha256] = self.block_heights[old_sha256]
            del self.block_heights[old_sha256]
        self.blocks[block_number] = block
        return block

    # shorthand for functions
    block = self.next_block

    # Create a new block
    block(0)
    save_spendable_output()
    yield accepted()

    # Now we need that block to mature so we can spend the coinbase.
    test = TestInstance(sync_every_block=False)
    for i in range(99):
        block(5000 + i)
        test.blocks_and_transactions.append([self.tip, True])
        save_spendable_output()
    yield test

    # collect spendable outputs now to avoid cluttering the code later on
    out = []
    for i in range(100):
        out.append(get_spendable_output())

    # Let's build some blocks and test them.
    # Blocks 1..16 grow in 0.5 MB steps up to 8 MB; all are within the
    # 16 MB excessive size and must be accepted.
    for i in range(16):
        n = i + 1
        block(n, spend=out[i], block_size=n * ONE_MEGABYTE // 2)
        yield accepted()

    # block of maximal size
    block(17, spend=out[16], block_size=self.excessive_block_size)
    yield accepted()

    # Oversized blocks will cause us to be disconnected
    assert (not self.test.test_nodes[0].closed)
    block(18, spend=out[17], block_size=self.excessive_block_size + 1)
    self.test.connections[0].send_message(msg_block((self.tip)))
    self.test.wait_for_disconnections()
    assert (self.test.test_nodes[0].closed)

    # Rewind bad block and remake connection to node
    tip(17)
    self.test.clear_all_connections()
    self.test.add_all_connections(self.nodes)
    NetworkThread().start()
    self.test.wait_for_verack()

    # Accept many sigops: exactly MAX_BLOCK_SIGOPS_PER_MB in a 1 MB block
    lots_of_checksigs = CScript(
        [OP_CHECKSIG] * MAX_BLOCK_SIGOPS_PER_MB)
    block(19, spend=out[17], script=lots_of_checksigs,
          block_size=ONE_MEGABYTE)
    yield accepted()

    # One sigop over the 1 MB limit is rejected
    block(20, spend=out[18], script=lots_of_checksigs,
          block_size=ONE_MEGABYTE, extra_sigops=1)
    yield rejected(RejectResult(16, b'bad-blk-sigops'))

    # Rewind bad block
    tip(19)

    # Accept 40k sigops per block > 1MB and <= 2MB
    block(21, spend=out[18], script=lots_of_checksigs,
          extra_sigops=MAX_BLOCK_SIGOPS_PER_MB,
          block_size=ONE_MEGABYTE + 1)
    yield accepted()

    # Accept 40k sigops per block > 1MB and <= 2MB
    block(22, spend=out[19], script=lots_of_checksigs,
          extra_sigops=MAX_BLOCK_SIGOPS_PER_MB,
          block_size=2 * ONE_MEGABYTE)
    yield accepted()

    # Reject more than 40k sigops per block > 1MB and <= 2MB.
    block(23, spend=out[20], script=lots_of_checksigs,
          extra_sigops=MAX_BLOCK_SIGOPS_PER_MB + 1,
          block_size=ONE_MEGABYTE + 1)
    yield rejected(RejectResult(16, b'bad-blk-sigops'))

    # Rewind bad block
    tip(22)

    # Reject more than 40k sigops per block > 1MB and <= 2MB.
    block(24, spend=out[20], script=lots_of_checksigs,
          extra_sigops=MAX_BLOCK_SIGOPS_PER_MB + 1,
          block_size=2 * ONE_MEGABYTE)
    yield rejected(RejectResult(16, b'bad-blk-sigops'))

    # Rewind bad block
    tip(22)

    # Accept 60k sigops per block > 2MB and <= 3MB
    block(25, spend=out[20], script=lots_of_checksigs,
          extra_sigops=2 * MAX_BLOCK_SIGOPS_PER_MB,
          block_size=2 * ONE_MEGABYTE + 1)
    yield accepted()

    # Accept 60k sigops per block > 2MB and <= 3MB
    block(26, spend=out[21], script=lots_of_checksigs,
          extra_sigops=2 * MAX_BLOCK_SIGOPS_PER_MB,
          block_size=3 * ONE_MEGABYTE)
    yield accepted()

    # Reject more than 60k sigops per block > 2MB and <= 3MB.
    # (extra_sigops is 2 * MAX_BLOCK_SIGOPS_PER_MB + 1 on top of the
    # base script, in a 2-3 MB block.)
    block(27, spend=out[22], script=lots_of_checksigs,
          extra_sigops=2 * MAX_BLOCK_SIGOPS_PER_MB + 1,
          block_size=2 * ONE_MEGABYTE + 1)
    yield rejected(RejectResult(16, b'bad-blk-sigops'))

    # Rewind bad block
    tip(26)

    # Reject more than 60k sigops per block > 2MB and <= 3MB.
    block(28, spend=out[22], script=lots_of_checksigs,
          extra_sigops=2 * MAX_BLOCK_SIGOPS_PER_MB + 1,
          block_size=3 * ONE_MEGABYTE)
    yield rejected(RejectResult(16, b'bad-blk-sigops'))

    # Rewind bad block
    tip(26)

    # Too many sigops in one txn
    too_many_tx_checksigs = CScript(
        [OP_CHECKSIG] * (MAX_BLOCK_SIGOPS_PER_MB + 1))
    block(29, spend=out[22], script=too_many_tx_checksigs,
          block_size=ONE_MEGABYTE + 1)
    yield rejected(RejectResult(16, b'bad-txn-sigops'))

    # Rewind bad block
    tip(26)

    # Generate a key pair to test P2SH sigops count
    private_key = CECKey()
    private_key.set_secretbytes(b"fatstacks")
    public_key = private_key.get_pubkey()

    # P2SH
    # Build the redeem script, hash it, use hash to create the p2sh script
    redeem_script = CScript(
        [public_key] + [OP_2DUP, OP_CHECKSIGVERIFY] * 5 + [OP_CHECKSIG])
    redeem_script_hash = hash160(redeem_script)
    p2sh_script = CScript([OP_HASH160, redeem_script_hash, OP_EQUAL])

    # Create a p2sh transaction
    p2sh_tx = self.create_tx(out[22], 1, p2sh_script)

    # Add the transaction to the block
    block(30)
    update_block(30, [p2sh_tx])
    yield accepted()

    # Creates a new transaction using the p2sh transaction included in the
    # last block
    def spend_p2sh_tx(output_script=CScript([OP_TRUE])):
        # Create the transaction
        spent_p2sh_tx = CTransaction()
        spent_p2sh_tx.vin.append(
            CTxIn(COutPoint(p2sh_tx.sha256, 0), b''))
        spent_p2sh_tx.vout.append(CTxOut(1, output_script))
        # Sign the transaction using the redeem script
        sighash = SignatureHashForkId(
            redeem_script, spent_p2sh_tx, 0,
            SIGHASH_ALL | SIGHASH_FORKID, p2sh_tx.vout[0].nValue)
        sig = private_key.sign(sighash) + \
            bytes(bytearray([SIGHASH_ALL | SIGHASH_FORKID]))
        spent_p2sh_tx.vin[0].scriptSig = CScript([sig, redeem_script])
        spent_p2sh_tx.rehash()
        return spent_p2sh_tx

    # Sigops p2sh limit: remaining budget after the redeem script's own
    # (accurate, GetSigOpCount(True)) sigops are counted
    p2sh_sigops_limit = MAX_BLOCK_SIGOPS_PER_MB - \
        redeem_script.GetSigOpCount(True)
    # Too many sigops in one p2sh txn
    too_many_p2sh_sigops = CScript([OP_CHECKSIG] * (p2sh_sigops_limit + 1))
    block(31, spend=out[23], block_size=ONE_MEGABYTE + 1)
    update_block(31, [spend_p2sh_tx(too_many_p2sh_sigops)])
    yield rejected(RejectResult(16, b'bad-txn-sigops'))

    # Rewind bad block
    tip(30)

    # Max sigops in one p2sh txn
    max_p2sh_sigops = CScript([OP_CHECKSIG] * (p2sh_sigops_limit))
    block(32, spend=out[23], block_size=ONE_MEGABYTE + 1)
    update_block(32, [spend_p2sh_tx(max_p2sh_sigops)])
    yield accepted()

    # Submit a very large block via RPC
    large_block = block(33, spend=out[24],
                        block_size=self.excessive_block_size)
    node.submitblock(ToHex(large_block))
def get_tests(self):
    """Generator for the comparison-test framework.

    Exercises replay-protection activation at REPLAY_PROTECTION_START_TIME:
    before activation, transactions signed with a non-zero fork value are
    rejected from mempool and blocks; at activation the mempool is wiped;
    after activation, ordinary (pre-fork-style) transactions remain valid
    while the replay-protected ones stay invalid. Also checks mempool
    consistency across reorgs over the activation boundary.
    """
    self.genesis_hash = int(self.nodes[0].getbestblockhash(), 16)
    self.block_heights[self.genesis_hash] = 0
    spendable_outputs = []

    # save the current tip so it can be spent by a later block
    def save_spendable_output():
        spendable_outputs.append(self.tip)

    # get an output that we previously marked as spendable
    def get_spendable_output():
        return PreviousSpendableOutput(spendable_outputs.pop(0).vtx[0], 0)

    # returns a test case that asserts that the current tip was accepted
    def accepted():
        return TestInstance([[self.tip, True]])

    # returns a test case that asserts that the current tip was rejected
    def rejected(reject=None):
        if reject is None:
            return TestInstance([[self.tip, False]])
        else:
            return TestInstance([[self.tip, reject]])

    # move the tip back to a previous block
    def tip(number):
        self.tip = self.blocks[number]

    # adds transactions to the block and updates state
    def update_block(block_number, new_transactions):
        [tx.rehash() for tx in new_transactions]
        block = self.blocks[block_number]
        block.vtx.extend(new_transactions)
        old_sha256 = block.sha256
        block.hashMerkleRoot = block.calc_merkle_root()
        block.solve()
        # Update the internal state just like in next_block
        self.tip = block
        if block.sha256 != old_sha256:
            self.block_heights[
                block.sha256] = self.block_heights[old_sha256]
            del self.block_heights[old_sha256]
        self.blocks[block_number] = block
        return block

    # shorthand for functions
    block = self.next_block
    node = self.nodes[0]

    # Create a new block
    block(0)
    save_spendable_output()
    yield accepted()

    # Now we need that block to mature so we can spend the coinbase.
    test = TestInstance(sync_every_block=False)
    for i in range(99):
        block(5000 + i)
        test.blocks_and_transactions.append([self.tip, True])
        save_spendable_output()
    yield test

    # collect spendable outputs now to avoid cluttering the code later on
    out = []
    for i in range(100):
        out.append(get_spendable_output())

    # Generate a key pair for the fund/spend transactions below
    private_key = CECKey()
    private_key.set_secretbytes(b"replayprotection")
    public_key = private_key.get_pubkey()

    # This is a little handier to use than the version in blocktools.py
    def create_fund_and_spend_tx(spend, forkvalue=0):
        # Fund transaction
        script = CScript([public_key, OP_CHECKSIG])
        txfund = create_transaction(
            spend.tx, spend.n, b'', 50 * COIN, script)
        txfund.rehash()

        # Spend transaction
        txspend = CTransaction()
        txspend.vout.append(
            CTxOut(50 * COIN - 1000, CScript([OP_TRUE])))
        txspend.vin.append(
            CTxIn(COutPoint(txfund.sha256, 0), b''))

        # Sign the transaction. A non-zero forkvalue goes into the sighash
        # type used for signing but NOT into the appended hash-type byte,
        # which makes the signature "replay protected" (invalid pre-fork).
        sighashtype = (forkvalue << 8) | SIGHASH_ALL | SIGHASH_FORKID
        sighash = SignatureHashForkId(
            script, txspend, 0, sighashtype, 50 * COIN)
        sig = private_key.sign(sighash) + \
            bytes(bytearray([SIGHASH_ALL | SIGHASH_FORKID]))
        txspend.vin[0].scriptSig = CScript([sig])
        txspend.rehash()

        return [txfund, txspend]

    def send_transaction_to_mempool(tx):
        tx_id = node.sendrawtransaction(ToHex(tx))
        assert (tx_id in set(node.getrawmempool()))
        return tx_id

    # Before the fork, no replay protection required to get in the mempool.
    txns = create_fund_and_spend_tx(out[0])
    send_transaction_to_mempool(txns[0])
    send_transaction_to_mempool(txns[1])

    # And txns get mined in a block properly.
    block(1)
    update_block(1, txns)
    yield accepted()

    # Replay protected transactions are rejected.
    replay_txns = create_fund_and_spend_tx(out[1], 0xffdead)
    send_transaction_to_mempool(replay_txns[0])
    assert_raises_rpc_error(-26, RPC_INVALID_SIGNATURE_ERROR,
                            node.sendrawtransaction, ToHex(replay_txns[1]))

    # And block containing them are rejected as well.
    block(2)
    update_block(2, replay_txns)
    yield rejected(RejectResult(16, b'blk-bad-inputs'))

    # Rewind bad block
    tip(1)

    # Create a block that would activate the replay protection.
    bfork = block(5555)
    bfork.nTime = REPLAY_PROTECTION_START_TIME - 1
    update_block(5555, [])
    yield accepted()

    # Mine five more blocks to move the median-time-past right up to the
    # activation threshold. NOTE(review): this reuses (and appends to) the
    # earlier `test` instance, so its accumulated entries are yielded again.
    for i in range(5):
        block(5100 + i)
        test.blocks_and_transactions.append([self.tip, True])
    yield test

    # Check we are just before the activation time
    assert_equal(
        node.getblockheader(node.getbestblockhash())['mediantime'],
        REPLAY_PROTECTION_START_TIME - 1)

    # We are just before the fork, replay protected txns still are rejected
    assert_raises_rpc_error(-26, RPC_INVALID_SIGNATURE_ERROR,
                            node.sendrawtransaction, ToHex(replay_txns[1]))

    block(3)
    update_block(3, replay_txns)
    yield rejected(RejectResult(16, b'blk-bad-inputs'))

    # Rewind bad block
    tip(5104)

    # Send some non replay protected txns in the mempool to check
    # they get cleaned at activation.
    txns = create_fund_and_spend_tx(out[2])
    send_transaction_to_mempool(txns[0])
    tx_id = send_transaction_to_mempool(txns[1])
    assert (tx_id in set(node.getrawmempool()))

    # Activate the replay protection
    block(5556)
    yield accepted()

    # At activation the entire mempool is cleared, so the txn we inserted
    # earlier will have gone.
    assert (tx_id not in set(node.getrawmempool()))

    # Good old transactions are still valid
    tx_id = send_transaction_to_mempool(txns[0])
    assert (tx_id in set(node.getrawmempool()))

    # They also can still be mined
    block(4)
    update_block(4, txns)
    yield accepted()

    # The replay protected transaction is still invalid
    send_transaction_to_mempool(replay_txns[0])
    assert_raises_rpc_error(-26, RPC_INVALID_SIGNATURE_ERROR,
                            node.sendrawtransaction, ToHex(replay_txns[1]))

    # They also still can't be mined
    b5 = block(5)
    update_block(5, replay_txns)
    yield rejected(RejectResult(16, b'blk-bad-inputs'))

    # Rewind bad block
    tip(5556)

    # These next few tests look a bit pointless to me since over the
    # activation we completely wipe the mempool, but hey-ho I guess they're
    # only temporary.
    # Ok, now we check if a reorg works properly across the activation.
    postforkblockid = node.getbestblockhash()
    node.invalidateblock(postforkblockid)
    assert (tx_id in set(node.getrawmempool()))

    # Deactivating replay protection.
    forkblockid = node.getbestblockhash()
    node.invalidateblock(forkblockid)
    assert (tx_id not in set(node.getrawmempool()))

    # Check that we also do it properly on deeper reorg.
    node.reconsiderblock(forkblockid)
    node.reconsiderblock(postforkblockid)
    node.invalidateblock(forkblockid)
    assert (tx_id not in set(node.getrawmempool()))
def run_test(self):
    """Exercise the validateblocktemplate RPC.

    Builds a series of deliberately malformed block templates (bad parent,
    bad timestamps, bad versions, bad coinbase, bad merkle root, missing /
    duplicate / overspending / double-spending transactions, oversize,
    excessive sigops) and asserts each one is rejected with the expected
    "invalid block: ..." message, interleaved with templates that must
    validate cleanly and can then be mined and submitted.
    """
    # Generate enough blocks to trigger certain block votes
    self.nodes[0].generate(1150)
    self.sync_all()

    logging.info("not on chain tip")
    badtip = int(self.nodes[0].getblockhash(
        self.nodes[0].getblockcount() - 1), 16)
    height = self.nodes[0].getblockcount()
    tip = int(self.nodes[0].getblockhash(height), 16)

    coinbase = create_coinbase(height + 1)
    cur_time = int(time.time())
    # Mock time on both nodes so the time-too-old / time-too-new checks
    # below are deterministic.
    self.nodes[0].setmocktime(cur_time)
    self.nodes[1].setmocktime(cur_time)

    block = create_block(badtip, coinbase, cur_time + 600)
    block.nVersion = 0x20000000
    block.rehash()
    hexblk = ToHex(block)
    expectException(lambda: self.nodes[0].validateblocktemplate(hexblk),
                    JSONRPCException,
                    "invalid block: does not build on chain tip")

    logging.info("time too far in the past")
    block = create_block(tip, coinbase, cur_time)
    block.nVersion = 0x20000000
    block.rehash()
    hexblk = ToHex(block)
    expectException(lambda: self.nodes[0].validateblocktemplate(
        hexblk), JSONRPCException, "invalid block: time-too-old")

    logging.info("time too far in the future")
    block = create_block(tip, coinbase, cur_time + 10000000)
    block.nVersion = 0x20000000
    block.rehash()
    hexblk = ToHex(block)
    expectException(lambda: self.nodes[0].validateblocktemplate(
        hexblk), JSONRPCException, "invalid block: time-too-new")

    # Versions 1-3 are obsolete once the corresponding soft forks have
    # activated; all three must be rejected as bad-version.
    logging.info("bad version 1")
    block = create_block(tip, coinbase, cur_time + 600)
    block.nVersion = 1
    block.rehash()
    hexblk = ToHex(block)
    expectException(lambda: self.nodes[0].validateblocktemplate(
        hexblk), JSONRPCException, "invalid block: bad-version")
    logging.info("bad version 2")
    block = create_block(tip, coinbase, cur_time + 600)
    block.nVersion = 2
    block.rehash()
    hexblk = ToHex(block)
    expectException(lambda: self.nodes[0].validateblocktemplate(
        hexblk), JSONRPCException, "invalid block: bad-version")
    logging.info("bad version 3")
    block = create_block(tip, coinbase, cur_time + 600)
    block.nVersion = 3
    block.rehash()
    hexblk = ToHex(block)
    expectException(lambda: self.nodes[0].validateblocktemplate(
        hexblk), JSONRPCException, "invalid block: bad-version")

    logging.info("bad coinbase height")
    tip = int(self.nodes[0].getblockhash(height), 16)
    # Coinbase encodes `height` instead of the required height + 1.
    block = create_block(tip, create_coinbase(height), cur_time + 600)
    block.nVersion = 0x20000000
    block.rehash()
    hexblk = ToHex(block)
    expectException(lambda: self.nodes[0].validateblocktemplate(
        hexblk), JSONRPCException, "invalid block: bad-cb-height")

    logging.info("bad merkle root")
    block = create_block(tip, coinbase, cur_time + 600)
    block.nVersion = 0x20000000
    block.hashMerkleRoot = 0x12345678
    block.rehash()
    hexblk = ToHex(block)
    expectException(lambda: self.nodes[0].validateblocktemplate(hexblk),
                    JSONRPCException, "invalid block: bad-txnmrklroot")

    logging.info("no tx")
    block = create_block(tip, None, cur_time + 600)
    block.nVersion = 0x20000000
    block.rehash()
    hexblk = ToHex(block)
    expectException(lambda: self.nodes[0].validateblocktemplate(hexblk),
                    JSONRPCException, "invalid block: bad-blk-length")

    logging.info("good block")
    block = create_block(tip, coinbase, cur_time + 600)
    block.nVersion = 0x20000000
    block.rehash()
    hexblk = ToHex(block)

    # ------
    # A valid template passes without proof of work; solve it to actually
    # submit it to the chain.
    self.nodes[0].validateblocktemplate(hexblk)
    block.solve()
    hexblk = ToHex(block)
    self.nodes[0].submitblock(hexblk)
    self.sync_all()

    prev_block = block
    # out_value is less than 50BTC because regtest halvings happen every
    # 150 blocks, and is in Satoshis
    out_value = block.vtx[0].vout[0].nValue
    tx1 = create_transaction(prev_block.vtx[0], 0, b'\x51',
                             [int(out_value / 2), int(out_value / 2)])
    height = self.nodes[0].getblockcount()
    tip = int(self.nodes[0].getblockhash(height), 16)
    coinbase = create_coinbase(height + 1)
    next_time = cur_time + 1200

    logging.info("no coinbase")
    block = create_block(tip, None, next_time, [tx1])
    block.nVersion = 0x20000000
    block.rehash()
    hexblk = ToHex(block)
    expectException(lambda: self.nodes[0].validateblocktemplate(hexblk),
                    JSONRPCException, "invalid block: bad-cb-missing")

    logging.info("double coinbase")
    coinbase_key = CECKey()
    coinbase_key.set_secretbytes(b"horsebattery")
    coinbase_pubkey = coinbase_key.get_pubkey()
    coinbase2 = create_coinbase(height + 1, coinbase_pubkey)
    block = create_block(tip, coinbase, next_time, [coinbase2, tx1])
    block.nVersion = 0x20000000
    block.rehash()
    hexblk = ToHex(block)
    expectException(lambda: self.nodes[0].validateblocktemplate(hexblk),
                    JSONRPCException, "invalid block: bad-cb-multiple")

    logging.info("premature coinbase spend")
    # tx1 spends a coinbase that has not matured (100 confirmations) yet.
    block = create_block(tip, coinbase, next_time, [tx1])
    block.nVersion = 0x20000000
    block.rehash()
    hexblk = ToHex(block)
    expectException(
        lambda: self.nodes[0].validateblocktemplate(hexblk),
        JSONRPCException,
        "invalid block: bad-txns-premature-spend-of-coinbase")

    # Mature the coinbase so prev_block.vtx[0] becomes spendable.
    self.nodes[0].generate(100)
    self.sync_all()
    height = self.nodes[0].getblockcount()
    tip = int(self.nodes[0].getblockhash(height), 16)
    coinbase = create_coinbase(height + 1)
    next_time = cur_time + 1200

    logging.info("inputs below outputs")
    tx6 = create_transaction(prev_block.vtx[0], 0, b'\x51',
                             [out_value + 1000])
    block = create_block(tip, coinbase, next_time, [tx6])
    block.nVersion = 0x20000000
    block.rehash()
    hexblk = ToHex(block)
    expectException(lambda: self.nodes[0].validateblocktemplate(hexblk),
                    JSONRPCException, "invalid block: bad-txns-in-belowout")

    # Output above the 21M coin supply cap.
    tx5 = create_transaction(prev_block.vtx[0], 0, b'\x51',
                             [int(21000001 * COIN)])
    logging.info("money range")
    block = create_block(tip, coinbase, next_time, [tx5])
    block.nVersion = 0x20000000
    block.rehash()
    hexblk = ToHex(block)
    expectException(lambda: self.nodes[0].validateblocktemplate(hexblk),
                    JSONRPCException,
                    "invalid block: bad-txns-vout-toolarge")

    logging.info("bad tx offset")
    tx_bad = create_broken_transaction(prev_block.vtx[0], 1, b'\x51',
                                       [int(out_value / 4)])
    block = create_block(tip, coinbase, next_time, [tx_bad])
    block.nVersion = 0x20000000
    block.rehash()
    hexblk = ToHex(block)
    expectException(lambda: self.nodes[0].validateblocktemplate(hexblk),
                    JSONRPCException,
                    "invalid block: bad-txns-inputs-missingorspent")

    logging.info("bad tx offset largest number")
    tx_bad = create_broken_transaction(prev_block.vtx[0], 0xffffffff,
                                       b'\x51', [int(out_value / 4)])
    block = create_block(tip, coinbase, next_time, [tx_bad])
    block.nVersion = 0x20000000
    block.rehash()
    hexblk = ToHex(block)
    expectException(lambda: self.nodes[0].validateblocktemplate(hexblk),
                    JSONRPCException,
                    "invalid block: bad-txns-inputs-missingorspent")

    logging.info("double tx")
    tx2 = create_transaction(prev_block.vtx[0], 0, b'\x51',
                             [int(out_value / 4)])
    block = create_block(tip, coinbase, next_time, [tx2, tx2])
    block.nVersion = 0x20000000
    block.rehash()
    hexblk = ToHex(block)
    expectException(lambda: self.nodes[0].validateblocktemplate(hexblk),
                    JSONRPCException,
                    "invalid block: bad-txns-inputs-missingorspent")

    # tx3 and tx4 both spend the same output: a double spend within one
    # block.
    tx3 = create_transaction(prev_block.vtx[0], 0, b'\x51',
                             [int(out_value / 9), int(out_value / 10)])
    tx4 = create_transaction(prev_block.vtx[0], 0, b'\x51',
                             [int(out_value / 8), int(out_value / 7)])
    logging.info("double spend")
    block = create_block(tip, coinbase, next_time, [tx3, tx4])
    block.nVersion = 0x20000000
    block.rehash()
    hexblk = ToHex(block)
    expectException(lambda: self.nodes[0].validateblocktemplate(hexblk),
                    JSONRPCException,
                    "invalid block: bad-txns-inputs-missingorspent")

    tx_good = create_transaction(prev_block.vtx[0], 0, b'\x51',
                                 [int(out_value / 50)] * 50)
    logging.info("good tx")
    block = create_block(tip, coinbase, next_time, [tx_good])
    block.nVersion = 0x20000000
    block.rehash()
    block.solve()
    hexblk = ToHex(block)
    self.nodes[0].validateblocktemplate(hexblk)
    self.nodes[0].submitblock(hexblk)
    self.sync_all()

    height = self.nodes[0].getblockcount()
    tip = int(self.nodes[0].getblockhash(height), 16)
    coinbase = create_coinbase(height + 1)
    next_time = next_time + 600

    # NOTE(review): coinbase3 is built here but never placed in a block
    # below — looks vestigial; confirm before removing.
    coinbase_key = CECKey()
    coinbase_key.set_secretbytes(b"horsebattery")
    coinbase_pubkey = coinbase_key.get_pubkey()
    coinbase3 = create_coinbase(height + 1, coinbase_pubkey)

    # Fan tx_good's 50 outputs into 50 transactions of 50 outputs each.
    txl = []
    for i in range(0, 50):
        ov = block.vtx[1].vout[i].nValue
        txl.append(create_transaction(block.vtx[1], i, b'\x51',
                                      [int(ov / 50)] * 50))
    block = create_block(tip, coinbase, next_time, txl)
    block.nVersion = 0x20000000
    block.rehash()
    block.solve()
    hexblk = ToHex(block)
    for n in self.nodes:
        n.validateblocktemplate(hexblk)

    logging.info("excessive")
    # Shrink the node's excessive-block limit so the same template now
    # fails, then restore generous limits.
    self.nodes[0].setminingmaxblock(1000)
    self.nodes[0].setexcessiveblock(1000, 12)
    expectException(lambda: self.nodes[0].validateblocktemplate(hexblk),
                    JSONRPCException, "invalid block: excessive")
    self.nodes[0].setexcessiveblock(16 * 1000 * 1000, 12)
    self.nodes[0].setminingmaxblock(1000 * 1000)

    # Fuzz the hex template: flip one nibble at a random position and make
    # sure the RPC fails gracefully (error codes -1 / -22) instead of
    # crashing.
    for it in range(0, 100):
        # if (it&1023)==0: print(it)
        h2 = hexblk
        pos = random.randint(0, len(hexblk))
        val = random.randint(0, 15)
        h3 = h2[:pos] + ('%x' % val) + h2[pos + 1:]
        try:
            self.nodes[0].validateblocktemplate(h3)
        except JSONRPCException as e:
            if not (e.error["code"] == -1 or e.error["code"] == -22):
                print(str(e))
            # its ok we expect garbage

    self.nodes[1].submitblock(hexblk)
    self.sync_all()

    height = self.nodes[0].getblockcount()
    tip = int(self.nodes[0].getblockhash(height), 16)
    coinbase = create_coinbase(height + 1)
    next_time = next_time + 600
    prev_block = block
    # Spend every output of the previous block with scripts of 100
    # CHECKSIGs each, pushing the block over the sigop limit.
    txl = []
    for tx in prev_block.vtx:
        for outp in range(0, len(tx.vout)):
            ov = tx.vout[outp].nValue
            txl.append(create_transaction(
                tx, outp, CScript([OP_CHECKSIG] * 100), [int(ov / 2)] * 2))
    block = create_block(tip, coinbase, next_time, txl)
    block.nVersion = 0x20000000
    block.rehash()
    block.solve()
    hexblk = ToHex(block)
    for n in self.nodes:
        expectException(lambda: n.validateblocktemplate(hexblk),
                        JSONRPCException,
                        "invalid block: bad-blk-sigops")