Example #1
0
 def transact_and_mine(self, numblocks, mining_node):
     """Mine `numblocks` blocks, generating roughly 100 random-fee
     transactions before each one, and keep `self.confutxo` /
     `self.memutxo` (confirmed / unconfirmed txout sets) up to date.
     """
     min_fee = Decimal("0.0100000")
     # Between blocks we fire off a randomized batch of transactions.
     # The confirmed txout set is shuffled before each batch so that
     # small_tx_puzzle_rand_fee prefers chain-backed inputs in a random
     # order, only falling back to mempool-dependent txouts once the
     # confirmed ones run out.
     for _ in range(numblocks):
         random.shuffle(self.confutxo)
         batch_size = random.randrange(50, 150)
         for _ in range(batch_size):
             sender = self.nodes[random.randint(1, 2)]
             txhex, fee = small_tx_puzzle_rand_fee(sender,
                                                   self.confutxo,
                                                   self.memutxo,
                                                   Decimal("0.005"),
                                                   min_fee, min_fee)
             # txhex is hex, so two characters per byte.
             tx_kbytes = (len(txhex) // 2) / 1000.0
             self.fees_per_kb.append(float(fee) / tx_kbytes)
         sync_mempools(self.nodes[0:3], wait=.1)
         block_hash = mining_node.generate(1)[0]
         mined = mining_node.getblock(block_hash, True)["tx"]
         sync_blocks(self.nodes[0:3], wait=.1)
         # Split the old mempool txout set: anything that made it into
         # the new block is now confirmed, the rest stays unconfirmed.
         self.confutxo.extend(utx for utx in self.memutxo
                              if utx["txid"] in mined)
         self.memutxo = [utx for utx in self.memutxo
                         if utx["txid"] not in mined]
    def test_getblocktxn_handler(node, test_node, version):
        """Exercise the node's getblocktxn handler.

        For each of the most recent MAX_GETBLOCKTXN_DEPTH blocks, request a
        random subset of the block's transactions via a getblocktxn message
        and check the blocktxn response: block hash, transaction hashes, and
        witness handling according to `version` (version 1 responses must
        have witnesses stripped).  One block deeper than the allowed depth,
        the node must answer with a full block instead of a blocktxn message.
        """
        # Krond will not send blocktxn responses for blocks whose height is
        # more than 10 blocks deep.
        MAX_GETBLOCKTXN_DEPTH = 10
        chain_height = node.getblockcount()
        current_height = chain_height
        while current_height >= chain_height - MAX_GETBLOCKTXN_DEPTH:
            block_hash = node.getblockhash(current_height)
            block = from_hex(CBlock(), node.getblock(block_hash, False))

            msg = MsgGetBlockTxn()
            msg.block_txn_request = BlockTransactionsRequest(
                int(block_hash, 16), [])
            num_to_request = random.randint(1, len(block.vtx))
            msg.block_txn_request.from_absolute(
                sorted(random.sample(range(len(block.vtx)), num_to_request)))
            test_node.send_message(msg)
            wait_until(lambda: "blocktxn" in test_node.last_message,
                       timeout=10,
                       lock=mininode_lock,
                       err_msg="test_getblocktxn_handler")

            # Pre-compute the hashes of the block's own transactions so they
            # can be compared against the response below.  (A plain loop —
            # calc_x16r is called for its side effect, so a list
            # comprehension would be misleading here.)
            for block_tx in block.vtx:
                block_tx.calc_x16r()
            with mininode_lock:
                assert_equal(
                    test_node.last_message["blocktxn"].block_transactions.
                    blockhash, int(block_hash, 16))
                all_indices = msg.block_txn_request.to_absolute()
                for index in all_indices:
                    tx = test_node.last_message[
                        "blocktxn"].block_transactions.transactions.pop(0)
                    tx.calc_x16r()
                    assert_equal(tx.sha256, block.vtx[index].sha256)
                    if version == 1:
                        # Witnesses should have been stripped
                        assert tx.wit.is_null()
                    else:
                        # Check that the witness matches
                        assert_equal(tx.calc_x16r(True),
                                     block.vtx[index].calc_x16r(True))
                test_node.last_message.pop("blocktxn", None)
            current_height -= 1

        # Next request should send a full block response, as we're past the
        # allowed depth for a blocktxn response.
        # `msg` is guaranteed to be bound here: the loop above always runs at
        # least once because current_height starts equal to chain_height.
        block_hash = node.getblockhash(current_height)
        msg.block_txn_request = BlockTransactionsRequest(
            int(block_hash, 16), [0])
        with mininode_lock:
            test_node.last_message.pop("block", None)
            test_node.last_message.pop("blocktxn", None)
        test_node.send_and_ping(msg)
        with mininode_lock:
            test_node.last_message["block"].block.calc_x16r()
            assert_equal(test_node.last_message["block"].block.sha256,
                         int(block_hash, 16))
            assert "blocktxn" not in test_node.last_message
Example #3
0
def small_tx_puzzle_rand_fee(from_node, conflist, unconflist, amount, min_fee,
                             fee_increment):
    """
    Create and send a transaction with a random fee.
    The transaction pays to a trivial P2SH script, and assumes that its inputs
    are of the same form.
    Inputs are drawn from `conflist` (confirmed outputs) first, and only from
    `unconflist` (unconfirmed outputs) once the confirmed list is exhausted.
    Both new outputs of the created transaction are appended to `unconflist`.
    Returns (raw transaction, fee)
    Raises RuntimeError if both lists together cannot cover amount + fee.
    """
    # Fees are drawn from an exponential distribution because the fee
    # estimation buckets are exponentially spaced: 1.1892 ~= 2**0.25, so
    # 1.1892**k for k in 0..28 spans a factor of 1 up to 128.
    rand_fee = float(fee_increment) * (1.1892**random.randint(0, 28))
    # Total fee therefore ranges from min_fee to min_fee + 127*fee_increment.
    fee = min_fee - fee_increment + satoshi_round(rand_fee)

    tx = CTransaction()
    total_in = Decimal("0.00000000")

    def consume(utxo_list):
        # Pop outputs off the front of utxo_list until the accumulated
        # inputs cover amount + fee, or the list runs dry.
        nonlocal total_in
        while total_in <= (amount + fee) and utxo_list:
            utxo = utxo_list.pop(0)
            total_in += utxo["amount"]
            tx.vin.append(
                CTxIn(COutPoint(int(utxo["txid"], 16), utxo["vout"]), b""))

    consume(conflist)
    if total_in <= amount + fee:
        consume(unconflist)
        if total_in <= amount + fee:
            raise RuntimeError("Insufficient funds: need %d, have %d" %
                               (amount + fee, total_in))

    change = total_in - amount - fee
    tx.vout.append(CTxOut(int(change * COIN), P2SH_1))
    tx.vout.append(CTxOut(int(amount * COIN), P2SH_2))
    # These transactions don't need real signatures, but we still have to
    # insert the canned ScriptSig that satisfies the trivial ScriptPubKey.
    for txin in tx.vin:
        txin.scriptSig = SCRIPT_SIG[txin.prevout.n]
    txid = from_node.sendrawtransaction(to_hex(tx), True)
    unconflist.append({"txid": txid, "vout": 0, "amount": change})
    unconflist.append({"txid": txid, "vout": 1, "amount": amount})

    return to_hex(tx), fee
Example #4
0
    def run_test(self):
        """End-to-end test of the Merkle proof RPCs.

        Covers: getmerkleproof / getmerkleproof2 error paths and success
        paths, verifymerkleproof argument validation, behaviour with and
        without -txindex, proof persistence across node restarts, on-disk
        Merkle Tree data-file size checks, and index recreation after the
        index directory is deleted.
        """
        self.log.info("Mining 500 blocks...")
        self.nodes[0].generate(500)
        self.sync_all()

        assert_equal(self.nodes[1].getblockcount(), 500)
        assert_equal(self.nodes[1].getbalance(), 0)

        # Create and send two transactions
        tx1_in = self.nodes[0].listunspent().pop()
        tx1_out = tx1_in["amount"] - Decimal("0.01")
        tx1 = self.nodes[0].createrawtransaction([tx1_in], {self.nodes[1].getnewaddress(): tx1_out})
        txid1 = self.nodes[0].sendrawtransaction(self.nodes[0].signrawtransaction(tx1)["hex"])
        tx2_in = self.nodes[0].listunspent().pop()
        tx2_out = tx2_in["amount"] - Decimal("0.01")
        tx2 = self.nodes[0].createrawtransaction([tx2_in], {self.nodes[1].getnewaddress(): tx2_out})
        txid2 = self.nodes[0].sendrawtransaction(self.nodes[0].signrawtransaction(tx2)["hex"])

        # Try to get a proof for one of the transactions - should fail because the transaction is not yet in a block
        assert_raises_rpc_error(-5, "Transaction not yet in block", self.nodes[0].getmerkleproof, txid1)
        assert_raises_rpc_error(-5, "Transaction not yet in block", self.nodes[0].getmerkleproof2, "", txid1)

        # Mine a new block
        self.log.info("Mining 501st block...")
        self.nodes[0].generate(1)
        self.sync_all()
        height_of_block_501 = self.nodes[1].getblockcount()

        # Check some negative tests on verifymerkleproof
        assert_raises_rpc_error(-8, "\"flags\" must be a numeric value", self.nodes[0].verifymerkleproof, {'flags': '2'})
        assert_raises_rpc_error(-8, "verifymerkleproof only supports \"flags\" with value 2", self.nodes[0].verifymerkleproof, {'flags': 1})
        assert_raises_rpc_error(-8, "\"nodes\" must be a Json array", self.nodes[0].verifymerkleproof, 
            {'flags':2,
             'index':4,
             'txOrId':'abcdef1234567890abcdef1234567890abcdef1234567890abcdef1234567890',
             'target':{'merkleroot':'abcdef1234567890abcdef1234567890abcdef1234567890abcdef1234567890'},
             'nodes':'*'})
        assert_raises_rpc_error(-8, "\"node\" must be a \"hash\" or \"*\"", self.nodes[0].verifymerkleproof, 
            {'flags':2,
             'index':4,
             'txOrId':'abcdef1234567890abcdef1234567890abcdef1234567890abcdef1234567890',
             'target':{'merkleroot':'abcdef1234567890abcdef1234567890abcdef1234567890abcdef1234567890'},
             'nodes':[2]})
        assert_raises_rpc_error(-8, "node must be of length 64 (not 10)", self.nodes[0].verifymerkleproof, 
            {'flags':2,
             'index':4,
             'txOrId':'abcdef1234567890abcdef1234567890abcdef1234567890abcdef1234567890',
             'target':{'merkleroot':'abcdef1234567890abcdef1234567890abcdef1234567890abcdef1234567890'},
             'nodes':['*','abcdef1234']})

        # Get proof for 1st and 2nd transaction and verify that calculated roots are the same as block's merkle root
        hash_of_block_501 = self.nodes[0].getblockhash(height_of_block_501)
        self.verify_merkle_proof(txid1, hash_of_block_501, 0)
        self.verify_merkle_proof(txid2, hash_of_block_501, 0)

        # Create and send 3rd transaction
        # (node 1 spends one of the outputs it received above, so one of
        # txid1/txid2 becomes a spent transaction)
        tx_spent = self.nodes[1].listunspent().pop()
        tx3_out = tx_spent["amount"] - Decimal("0.01")
        tx3 = self.nodes[1].createrawtransaction([tx_spent], {self.nodes[0].getnewaddress(): tx3_out})
        txid3 = self.nodes[0].sendrawtransaction(self.nodes[1].signrawtransaction(tx3)["hex"])

        # Mine a new block
        self.log.info("Mining 502nd block...")
        self.nodes[0].generate(1)
        self.sync_all()

        # Get id of spent and unspent transaction
        txid_spent = tx_spent["txid"]
        txid_unspent = txid1 if txid_spent != txid1 else txid2

        # We can't find the block if transaction was spent because -txindex is not set on node[0]
        assert_raises_rpc_error(-5, "Transaction not yet in block", self.nodes[0].getmerkleproof, txid_spent)
        assert_raises_rpc_error(-5, "Transaction not yet in block", self.nodes[0].getmerkleproof2, "", txid_spent)

        # We can get the proof if we specify proper block hash
        a = self.nodes[0].getmerkleproof(txid_spent, hash_of_block_501)
        b = self.nodes[0].getmerkleproof2(hash_of_block_501, txid_spent)
        assert self.nodes[0].verifymerkleproof(a)
        # getmerkleproof and getmerkleproof2 must describe the same proof
        assert(self.check_equivalence(a,b))

        # We can't get the proof if we specify a non-existent block
        assert_raises_rpc_error(-5, "Block not found", self.nodes[0].getmerkleproof,  txid_spent, "1234567890abcdef1234567890abcdef")
        assert_raises_rpc_error(-5, "Block not found", self.nodes[0].getmerkleproof2,  "1234567890abcdef1234567890abcdef", txid_spent)

        # We can get the proof if the transaction is unspent
        self.verify_merkle_proof(txid_unspent, hash_of_block_501, 0)

        # We can get a proof of a spent transaction without block hash if node runs with -txindex (nodes[1] in this case)
        self.verify_merkle_proof(txid_spent, hash_of_block_501, 1)

        # Restart nodes
        self.log.info("Restarting nodes...")
        self.stop_nodes()
        self.start_nodes(self.extra_args)

        # Repeat tests after nodes restart - proofs must survive a restart
        self.verify_merkle_proof(txid_unspent, hash_of_block_501, 0)
        self.verify_merkle_proof(txid_spent, hash_of_block_501, 1)
        hash_of_block_502 = self.nodes[0].getblockhash(height_of_block_501 + 1)
        self.verify_merkle_proof(txid3, hash_of_block_502, 0)

        # Create more blocks to get utxos
        self.log.info("Mining additional 1500 blocks...")
        self.nodes[0].generate(1500)
        # NOTE(review): self.nodes[0:1] is a one-element slice, so this sync
        # is effectively a no-op - confirm whether [0:2] was intended.
        sync_blocks(self.nodes[0:1])

        # Use all utxos and create more Merkle Trees
        # We create blocks with max 400 transactions (~25 kB for biggest Merkle Tree)
        self.log.info("Mining blocks with random transactions using all utxos...")
        utxos = self.nodes[0].listunspent()
        calculated_merkle_tree_disk_size = 0
        verifyData = {}
        while len(utxos) > 0:
            # Choose random number of transactions
            send_transactions = random.randint(1, 400)
            if len(utxos) < send_transactions:
                send_transactions = len(utxos)
            # Send transactions
            # NOTE(review): `i` and `txid` are assigned but never used; the
            # loop is executed purely for the sendrawtransaction side effect.
            for i in range(send_transactions):
                tx_in = utxos.pop()
                tx_out = tx_in["amount"] - Decimal("0.01")
                tx = self.nodes[0].createrawtransaction([tx_in], {self.nodes[1].getnewaddress(): tx_out})
                txid = self.nodes[0].sendrawtransaction(self.nodes[0].signrawtransaction(tx)["hex"])
            # Mine a block
            self.nodes[0].generate(1)
            # NOTE(review): one-element slice again - see comment above.
            sync_blocks(self.nodes[0:1])
            # Verify proofs of some random transactions in each block
            hash_of_this_block = self.nodes[0].getblockhash(self.nodes[0].getblockcount())
            transactions_of_this_block = self.nodes[0].getblock(hash_of_this_block, True)["tx"]
            calculated_merkle_tree_disk_size += self.merkle_tree_size(len(transactions_of_this_block))
            verifyData[hash_of_this_block] = transactions_of_this_block
        # Verify merkle proofs of all transactions in all blocks
        self.verify_stored_data(verifyData, 0)
        
        # Data files checks
        number_of_data_files = 0
        disk_size = 0
        # Trailing "" keeps a path separator at the end so plain string
        # concatenation below produces valid file paths.
        node0_data_dir = os.path.join(self.options.tmpdir, "node0", "regtest", "merkle", "")
        for data_file in os.listdir(node0_data_dir):
            data_file_name = node0_data_dir + data_file
            if os.path.isfile(data_file_name):
                data_file_size = os.path.getsize(data_file_name)
                # No file should be bigger than 30 kB since no Merkle Tree takes more than 25 kB
                assert_greater_than(30 * 1024, data_file_size)
                disk_size += data_file_size
                number_of_data_files += 1
        # Verify that Merkle Tree disk size is at least the size of Merkle Trees we just stored
        assert_greater_than(disk_size, calculated_merkle_tree_disk_size)
        # Number of data files should be at least calculated_merkle_tree_disk_size/preferred_file_size
        assert_greater_than(number_of_data_files, calculated_merkle_tree_disk_size/(30 * 1024))

        # Delete index to test recreation of index when node is started again
        self.log.info("Restarting nodes to remove Merkle Trees index...")
        self.stop_nodes()
        node0_index_dir = os.path.join(node0_data_dir, "index", "")
        shutil.rmtree(node0_index_dir)
        self.start_nodes(self.extra_args)
        # Repeat merkle proof checks
        self.verify_stored_data(verifyData, 0)
        # Since index was recreated from data files, requesting existing merkle trees shouldn't create any new data
        new_disk_size = 0
        for data_file in os.listdir(node0_data_dir):
            data_file_name = node0_data_dir + data_file
            if os.path.isfile(data_file_name):
                new_disk_size += os.path.getsize(data_file_name)
        assert_equal(disk_size, new_disk_size)