Example #1
    def test_bip68_not_consensus(self):
        assert (get_bip9_status(self.nodes[0], 'csv')['status'] != 'active')
        txid = self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), 2)

        tx1 = from_hex(CTransaction(), self.nodes[0].getrawtransaction(txid))
        tx1.rehash()

        # Make an anyone-can-spend transaction
        tx2 = CTransaction()
        tx2.nVersion = 1
        tx2.vin = [CTxIn(COutPoint(tx1.x16r, 0), n_sequence=0)]
        tx2.vout = [
            CTxOut(int(tx1.vout[0].nValue - self.relayfee * COIN),
                   CScript([b'a']))
        ]

        # sign tx2
        tx2_raw = self.nodes[0].signrawtransaction(to_hex(tx2))["hex"]
        tx2 = from_hex(tx2, tx2_raw)
        tx2.rehash()

        self.nodes[0].sendrawtransaction(to_hex(tx2))

        # Now make an invalid spend of tx2 according to BIP68
        sequence_value = 100  # 100 block relative locktime

        tx3 = CTransaction()
        tx3.nVersion = 2
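        # nVersion >= 2 opts this transaction in to BIP68 relative lock-time
        # enforcement; the version-1 tx2 above is exempt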
        tx3.vin = [CTxIn(COutPoint(tx2.x16r, 0), n_sequence=sequence_value)]
        tx3.vout = [
            CTxOut(int(tx2.vout[0].nValue - self.relayfee * COIN),
                   CScript([b'a']))
        ]
        tx3.rehash()

        assert_raises_rpc_error(-26, NOT_FINAL_ERROR,
                                self.nodes[0].sendrawtransaction, to_hex(tx3))

        # make a block that violates bip68; ensure that the tip updates
        tip = int(self.nodes[0].getbestblockhash(), 16)
        block = create_block(
            tip, create_coinbase(self.nodes[0].getblockcount() + 1))
        block.nVersion = 3
        block.vtx.extend([tx1, tx2, tx3])
        block.hashMerkleRoot = block.calc_merkle_root()
        block.rehash()
        block.solve()

        self.nodes[0].submitblock(to_hex(block))
        assert_equal(self.nodes[0].getbestblockhash(), block.hash)
Example #2
    def get_tests(self):
        # shorthand for functions
        block = self.chain.next_block

        node = self.nodes[0]
        self.chain.set_genesis_hash(int(node.getbestblockhash(), 16))

        # Now we need that block to mature so we can spend the coinbase.
        test = TestInstance(sync_every_block=False)
        for i in range(105):
            block(5000 + i)
            test.blocks_and_transactions.append([self.chain.tip, True])
            self.chain.save_spendable_output()
        yield test

        # collect spendable outputs now to avoid cluttering the code later on
        out = []
        for i in range(105):
            out.append(self.chain.get_spendable_output())

        assert_equal(node.getblock(node.getbestblockhash())['height'], 105)

        block(1)

        redeem_script = CScript([OP_TRUE, OP_RETURN, b"a" * 5000])

        spend_tx1 = CTransaction()
        spend_tx1.vin.append(
            CTxIn(COutPoint(out[2].tx.sha256, out[2].n), CScript(),
                  0xffffffff))
        spend_tx1.vout.append(CTxOut(500, redeem_script))
        spend_tx1.vout.append(CTxOut(500, redeem_script))
        spend_tx1.calc_sha256()
        self.log.info(spend_tx1.hash)

        self.chain.update_block(1, [spend_tx1])
        yield self.accepted()

        tx1 = CTransaction()
        tx1.vout = [CTxOut(499, CScript([OP_TRUE]))]
        tx1.vin.append(
            CTxIn(COutPoint(spend_tx1.sha256, 0), CScript(), 0xfffffff))
        tx1.vin.append(
            CTxIn(COutPoint(spend_tx1.sha256, 1), CScript(), 0xfffffff))
        tx1.calc_sha256()
        self.log.info(tx1.hash)
        yield TestInstance(
            [[tx1, RejectResult(16, b'bad-txns-inputs-too-large')]])
Example #3
        def test_nonzero_locks(orig_tx, node, relayfee, use_height_lock):
            sequence_value = 1
            if not use_height_lock:
                sequence_value |= SEQUENCE_LOCKTIME_TYPE_FLAG
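                # Setting the type flag makes BIP68 treat the sequence value
                # as a relative time lock (512-second units) rather than a
                # block-height lock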

            tx = CTransaction()
            tx.nVersion = 2
            tx.vin = [
                CTxIn(COutPoint(orig_tx.x16r, 0), n_sequence=sequence_value)
            ]
            tx.vout = [
                CTxOut(int(orig_tx.vout[0].nValue - relayfee * COIN),
                       CScript([b'a']))
            ]
            tx.rehash()

            if orig_tx.hash in node.getrawmempool():
                # sendrawtransaction should fail if the tx is in the mempool
                assert_raises_rpc_error(-26, NOT_FINAL_ERROR,
                                        node.sendrawtransaction, to_hex(tx))
            else:
                # sendrawtransaction should succeed if the tx is not in the mempool
                node.sendrawtransaction(to_hex(tx))

            return tx
Example #4
    def test_disable_flag(self):
        # Create some unconfirmed inputs
        new_addr = self.nodes[0].getnewaddress()
        self.nodes[0].sendtoaddress(new_addr, 2)  # send 2 RVN

        utxos = self.nodes[0].listunspent(0, 0)
        assert (len(utxos) > 0)

        utxo = utxos[0]

        tx1 = CTransaction()
        value = int(satoshi_round(utxo["amount"] - self.relayfee) * COIN)

        # Check that the disable flag disables relative locktime.
        # If sequence locks were used, this would require 1 block for the
        # input to mature.
        sequence_value = SEQUENCE_LOCKTIME_DISABLE_FLAG | 1
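        # The disable flag is the most significant bit (1 << 31); when it is
        # set, BIP68 ignores the sequence field for that input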
        tx1.vin = [
            CTxIn(COutPoint(int(utxo["txid"], 16), utxo["vout"]),
                  n_sequence=sequence_value)
        ]
        tx1.vout = [CTxOut(value, CScript([b'a']))]

        tx1_signed = self.nodes[0].signrawtransaction(to_hex(tx1))["hex"]
        tx1_id = self.nodes[0].sendrawtransaction(tx1_signed)
        tx1_id = int(tx1_id, 16)

        # This transaction enables sequence locks, so it should fail
        tx2 = CTransaction()
        tx2.nVersion = 2
        sequence_value = sequence_value & 0x7fffffff
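        # Masking off bit 31 clears SEQUENCE_LOCKTIME_DISABLE_FLAG, so the
        # 1-block relative lock now applies to the unconfirmed tx1 input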
        tx2.vin = [CTxIn(COutPoint(tx1_id, 0), n_sequence=sequence_value)]
        tx2.vout = [CTxOut(int(value - self.relayfee * COIN), CScript([b'a']))]
        tx2.rehash()

        assert_raises_rpc_error(-26, NOT_FINAL_ERROR,
                                self.nodes[0].sendrawtransaction, to_hex(tx2))

        # Setting the version back down to 1 should disable the sequence lock,
        # so this should be accepted.
        tx2.nVersion = 1

        self.nodes[0].sendrawtransaction(to_hex(tx2))
Example #5
    def process(self, tx, height):
        is_coinbase = tx.get_type() == TxType.COINBASE

        start_index = 1 if is_coinbase else 0
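        # A coinbase's first input has no spendable prevout in this model, so
        # skip it when resolving spent UTXOs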
        for tx_in in tx.vin[start_index:]:
            outpoints_equal = lambda coin: (
                coin.outpoint.hash == tx_in.prevout.hash
                and coin.outpoint.n == tx_in.prevout.n)
            prevout_utxo = next(filter(
                outpoints_equal, self.available_outputs + self.spent_outputs))
            self.current_inputs.append(UTXO(
                prevout_utxo.height, prevout_utxo.tx_type,
                prevout_utxo.outpoint, prevout_utxo.txOut))

        self.current_outputs.extend(
            [UTXO(height, tx.get_type(), COutPoint(tx.sha256, i), tx.vout[i])
             for i in range(len(tx.vout))])
Example #6
    def get_tests(self):
        # shorthand for functions
        block = self.chain.next_block

        node = self.nodes[0]
        self.chain.set_genesis_hash(int(node.getbestblockhash(), 16))

        test, out, _ = prepare_init_chain(self.chain, 105, 105, block_0=False)

        yield test

        assert_equal(node.getblock(node.getbestblockhash())['height'], 105)

        block(1)

        redeem_script = CScript([OP_TRUE, OP_RETURN, b"a" * 5000])

        spend_tx1 = CTransaction()
        spend_tx1.vin.append(
            CTxIn(COutPoint(out[2].tx.sha256, out[2].n), CScript(),
                  0xffffffff))
        spend_tx1.vout.append(CTxOut(500, redeem_script))
        spend_tx1.vout.append(CTxOut(500, redeem_script))
        spend_tx1.calc_sha256()
        self.log.info(spend_tx1.hash)

        self.chain.update_block(1, [spend_tx1])
        yield self.accepted()

        tx1 = CTransaction()
        tx1.vout = [CTxOut(499, CScript([OP_TRUE]))]
        tx1.vin.append(
            CTxIn(COutPoint(spend_tx1.sha256, 0), CScript(), 0xfffffff))
        tx1.vin.append(
            CTxIn(COutPoint(spend_tx1.sha256, 1), CScript(), 0xfffffff))
        tx1.calc_sha256()
        self.log.info(tx1.hash)
        yield TestInstance(
            [[tx1, RejectResult(16, b'bad-txns-inputs-too-large')]])
Example #7
    def run_test(self):
        self.setup_stake_coins(*self.nodes)

        # Setup the p2p connections
        # test_node connects to node0 (not whitelisted)
        test_node = self.nodes[0].add_p2p_connection(P2PInterface())
        # min_work_node connects to node1 (whitelisted)
        min_work_node = self.nodes[1].add_p2p_connection(P2PInterface())

        fork_snapshot_meta = get_tip_snapshot_meta(self.nodes[0])
        utxo_manager = UTXOManager(self.nodes[0], fork_snapshot_meta)
        genesis_coin = get_unspent_coins(self.nodes[0], 1)[0]
        genesis_txout = CTxOut(
            int(genesis_coin['amount'] * UNIT),
            CScript(hex_str_to_bytes(genesis_coin['scriptPubKey'])))
        genesis_utxo = [
            UTXO(
                0, TxType.COINBASE,
                COutPoint(int(genesis_coin['txid'], 16), genesis_coin['vout']),
                genesis_txout)
        ]
        utxo_manager.available_outputs = genesis_utxo

        self.log.info("1. Have nodes mine a block (leave IBD)")
        [n.generate(1) for n in self.nodes]
        tips = [int("0x" + n.getbestblockhash(), 0) for n in self.nodes]
        tip_snapshot_meta = get_tip_snapshot_meta(self.nodes[0])

        self.log.info(
            "2. Send one block that builds on each tip. This should be accepted by node0."
        )
        blocks_h2 = []  # the height 2 blocks on each node's chain
        block_time = int(time.time()) + 1
        coin = get_unspent_coins(self.nodes[0], 1)[0]
        for i in range(2):
            coinbase = sign_coinbase(
                self.nodes[0], create_coinbase(2, coin,
                                               tip_snapshot_meta.hash))
            blocks_h2.append(create_block(tips[i], coinbase, block_time))
            blocks_h2[i].solve()
            block_time += 1
        test_node.send_message(msg_block(blocks_h2[0]))
        min_work_node.send_message(msg_block(blocks_h2[1]))

        for x in [test_node, min_work_node]:
            x.sync_with_ping()
        assert_equal(self.nodes[0].getblockcount(), 2)
        assert_equal(self.nodes[1].getblockcount(), 1)
        self.log.info(
            "First height 2 block accepted by node0; correctly rejected by node1"
        )

        self.log.info("3. Send another block that builds on genesis.")
        coinbase = utxo_manager.get_coinbase(1, n_pieces=300)
        block_h1f = create_block(int("0x" + self.nodes[0].getblockhash(0), 0),
                                 coinbase, block_time)
        block_time += 1
        block_h1f.solve()
        test_node.send_message(msg_block(block_h1f))
        utxo_manager.process(coinbase, 1)

        test_node.sync_with_ping()
        tip_entry_found = False
        for x in self.nodes[0].getchaintips():
            if x['hash'] == block_h1f.hash:
                assert_equal(x['status'], "headers-only")
                tip_entry_found = True
        assert tip_entry_found
        assert_raises_rpc_error(-1, "Block not found on disk",
                                self.nodes[0].getblock, block_h1f.hash)

        self.log.info("4. Send another two block that build on the fork.")
        coinbase = utxo_manager.get_coinbase(2)
        block_h2f = create_block(block_h1f.sha256, coinbase, block_time)
        block_time += 1
        block_h2f.solve()
        test_node.send_message(msg_block(block_h2f))

        utxo_manager.process(coinbase, 2)

        test_node.sync_with_ping()
        # Since the earlier block was not processed by the node, the new block
        # can't be fully validated.
        tip_entry_found = False
        for x in self.nodes[0].getchaintips():
            if x['hash'] == block_h2f.hash:
                assert_equal(x['status'], "headers-only")
                tip_entry_found = True
        assert tip_entry_found

        # But this block should be accepted by the node since it has equal
        # work.
        self.nodes[0].getblock(block_h2f.hash)
        self.log.info("Second height 2 block accepted, but not reorg'ed to")

        self.log.info(
            "4b. Now send another block that builds on the forking chain.")
        coinbase = utxo_manager.get_coinbase(3)
        block_h3 = create_block(block_h2f.sha256, coinbase,
                                block_h2f.nTime + 1)
        block_h3.solve()
        test_node.send_message(msg_block(block_h3))
        utxo_manager.process(coinbase, 3)

        test_node.sync_with_ping()
        # Since the earlier block was not processed by the node, the new block
        # can't be fully validated.
        tip_entry_found = False
        for x in self.nodes[0].getchaintips():
            if x['hash'] == block_h3.hash:
                assert_equal(x['status'], "headers-only")
                tip_entry_found = True
        assert tip_entry_found
        self.nodes[0].getblock(block_h3.hash)

        # But this block should be accepted by the node since it has more
        # work.
        self.nodes[0].getblock(block_h3.hash)
        self.log.info("Unrequested more-work block accepted")

        self.log.info("4c. Now mine 288 more blocks and deliver")
        # All of these should be processed by the node except the last one
        # (height too high), as long as it is not missing any headers
        tip = block_h3
        all_blocks = []
        for height in range(4, 292):
            coinbase = utxo_manager.get_coinbase(height)
            next_block = create_block(tip.sha256, coinbase, tip.nTime + 1)
            next_block.solve()
            all_blocks.append(next_block)
            tip = next_block
            utxo_manager.process(coinbase, height)

        # Now send the block at height 5 and check that it wasn't accepted (missing header)
        test_node.send_message(msg_block(all_blocks[1]))
        test_node.sync_with_ping()
        assert_raises_rpc_error(-5, "Block not found", self.nodes[0].getblock,
                                all_blocks[1].hash)
        assert_raises_rpc_error(-5, "Block not found",
                                self.nodes[0].getblockheader,
                                all_blocks[1].hash)

        # The block at height 5 should be accepted if we provide the missing header, though
        headers_message = msg_headers()
        headers_message.headers.append(CBlockHeader(all_blocks[0]))
        test_node.send_message(headers_message)
        test_node.send_message(msg_block(all_blocks[1]))
        test_node.sync_with_ping()
        self.nodes[0].getblock(all_blocks[1].hash)

        # Now send the blocks in all_blocks
        for i in range(288):
            test_node.send_message(msg_block(all_blocks[i]))
        test_node.sync_with_ping()

        # Blocks 1-287 should be accepted, block 288 should be ignored because it's too far ahead
        for x in all_blocks[:-1]:
            self.nodes[0].getblock(x.hash)
        assert_raises_rpc_error(-1, "Block not found on disk",
                                self.nodes[0].getblock, all_blocks[-1].hash)

        self.log.info(
            "5. Test handling of an unrequested block on the node that didn't "
            "process it")
        # Should still not be processed (even though it has a child that has more
        # work).

        # The node should have requested the blocks at some point, so
        # disconnect/reconnect first

        self.nodes[0].disconnect_p2ps()
        self.nodes[1].disconnect_p2ps()

        test_node = self.nodes[0].add_p2p_connection(P2PInterface())

        test_node.send_message(msg_block(block_h1f))

        test_node.sync_with_ping()
        assert_equal(self.nodes[0].getblockcount(), 2)
        self.log.info(
            "Unrequested block that would complete more-work chain was ignored"
        )

        self.log.info("6. Try to get node to request the missing block.")
        # Poke the node with an inv for block at height 3 and see if that
        # triggers a getdata on block 2 (it should if block 2 is missing).
        with mininode_lock:
            # Clear state so we can check the getdata request
            test_node.last_message.pop("getdata", None)
            test_node.send_message(msg_inv([CInv(2, block_h3.sha256)]))

        test_node.sync_with_ping()
        with mininode_lock:
            getdata = test_node.last_message["getdata"]

        # Check that the getdata includes the right block
        assert_equal(getdata.inv[0].hash, block_h1f.sha256)
        self.log.info("Inv at tip triggered getdata for unprocessed block")

        self.log.info(
            "7. Send the missing block for the third time (now it is requested)"
        )
        test_node.send_message(msg_block(block_h1f))

        test_node.sync_with_ping()
        assert_equal(self.nodes[0].getblockcount(), 290)
        self.nodes[0].getblock(all_blocks[286].hash)
        assert_equal(self.nodes[0].getbestblockhash(), all_blocks[286].hash)
        assert_raises_rpc_error(-1, "Block not found on disk",
                                self.nodes[0].getblock, all_blocks[287].hash)
        self.log.info(
            "Successfully reorged to longer chain from non-whitelisted peer")

        self.log.info(
            "8. Create a chain which is invalid at a height longer than the "
            "current chain, but which has more blocks on top of that")

        # Reset utxo managers to current state
        utxo_fork_manager = UTXOManager(self.nodes[0],
                                        get_tip_snapshot_meta(self.nodes[0]))
        utxo_fork_manager.available_outputs = utxo_manager.available_outputs
        utxo_manager = UTXOManager(self.nodes[0],
                                   get_tip_snapshot_meta(self.nodes[0]))
        utxo_manager.available_outputs = utxo_fork_manager.available_outputs

        # Create one block on top of the valid chain
        coinbase = utxo_manager.get_coinbase(291)
        valid_block = create_block(all_blocks[286].sha256, coinbase,
                                   all_blocks[286].nTime + 1)
        valid_block.solve()
        test_node.send_and_ping(msg_block(valid_block))
        assert_equal(self.nodes[0].getblockcount(), 291)

        # Create three blocks on a fork, but make the second one invalid
        coinbase = utxo_fork_manager.get_coinbase(291)
        block_291f = create_block(all_blocks[286].sha256, coinbase,
                                  all_blocks[286].nTime + 1)
        block_291f.solve()
        utxo_fork_manager.process(coinbase, 291)
        coinbase = utxo_fork_manager.get_coinbase(292)
        block_292f = create_block(block_291f.sha256, coinbase,
                                  block_291f.nTime + 1)
        # block_292f spends a coinbase below maturity!
        block_292f.vtx.append(
            create_tx_with_script(block_291f.vtx[0],
                                  0,
                                  script_sig=b"42",
                                  amount=1))
        block_292f.compute_merkle_trees()
        block_292f.solve()
        utxo_fork_manager.process(coinbase, 292)
        utxo_fork_manager.process(block_292f.vtx[1], 292)
        coinbase = utxo_fork_manager.get_coinbase(293)
        block_293f = create_block(block_292f.sha256, coinbase,
                                  block_292f.nTime + 1)
        block_293f.solve()
        utxo_fork_manager.process(coinbase, 293)

        # Now send all the headers on the chain and enough blocks to trigger reorg
        headers_message = msg_headers()
        headers_message.headers.append(CBlockHeader(block_291f))
        headers_message.headers.append(CBlockHeader(block_292f))
        headers_message.headers.append(CBlockHeader(block_293f))
        test_node.send_message(headers_message)

        test_node.sync_with_ping()
        tip_entry_found = False
        for x in self.nodes[0].getchaintips():
            if x['hash'] == block_293f.hash:
                assert_equal(x['status'], "headers-only")
                tip_entry_found = True
        assert tip_entry_found
        assert_raises_rpc_error(-1, "Block not found on disk",
                                self.nodes[0].getblock, block_293f.hash)

        test_node.send_message(msg_block(block_291f))

        test_node.sync_with_ping()
        self.nodes[0].getblock(block_291f.hash)

        test_node.send_message(msg_block(block_292f))

        # At this point we've sent an obviously-bogus block, wait for full processing
        # without assuming whether we will be disconnected or not
        try:
            # Only wait a short while so the test doesn't take forever if we do get
            # disconnected
            test_node.sync_with_ping(timeout=1)
        except AssertionError:
            test_node.wait_for_disconnect()

            self.nodes[0].disconnect_p2ps()
            test_node = self.nodes[0].add_p2p_connection(P2PInterface())

        # We should have failed reorg and switched back to 290 (but have block 291)
        assert_equal(self.nodes[0].getblockcount(), 291)
        assert_equal(self.nodes[0].getbestblockhash(), valid_block.hash)
        assert_equal(self.nodes[0].getblock(block_292f.hash)["confirmations"],
                     -1)

        # Now send a new header on the invalid chain, indicating we're forked off, and expect to get disconnected
        coinbase = utxo_fork_manager.get_coinbase(294)
        block_294f = create_block(block_293f.sha256, coinbase,
                                  block_293f.nTime + 1)
        block_294f.solve()
        headers_message = msg_headers()
        headers_message.headers.append(CBlockHeader(block_294f))
        test_node.send_message(headers_message)
        test_node.wait_for_disconnect()

        self.log.info(
            "9. Connect node1 to node0 and ensure it is able to sync")
        connect_nodes(self.nodes[0], 1)
        sync_blocks([self.nodes[0], self.nodes[1]])
        self.log.info("Successfully synced nodes 1 and 0")
Example #8
    def test_sequence_lock_confirmed_inputs(self):
        # Create lots of confirmed utxos, and use them to generate lots of random
        # transactions.
        max_outputs = 50
        addresses = []
        while len(addresses) < max_outputs:
            addresses.append(self.nodes[0].getnewaddress())
        while len(self.nodes[0].listunspent()) < 200:
            random.shuffle(addresses)
            num_outputs = random.randint(1, max_outputs)
            outputs = {}
            for i in range(num_outputs):
                outputs[addresses[i]] = random.randint(1, 20) * 0.01
            self.nodes[0].sendmany("", outputs)
            self.nodes[0].generate(1)

        utxos = self.nodes[0].listunspent()

        # Try creating a lot of random transactions.
        # Each time, choose a random number of inputs, and randomly set
        # some of those inputs to be sequence locked (and randomly choose
        # between height/time locking). Small random chance of making the locks
        # all pass.
        for i in range(400):
            # Randomly choose up to 10 inputs
            num_inputs = random.randint(1, 10)
            random.shuffle(utxos)

            # Track whether any sequence locks used should fail
            should_pass = True

            # Track whether this transaction was built with sequence locks
            using_sequence_locks = False

            tx = CTransaction()
            tx.nVersion = 2
            value = 0
            for j in range(num_inputs):
                sequence_value = 0xfffffffe  # this disables sequence locks

                # 50% chance we enable sequence locks
                if random.randint(0, 1):
                    using_sequence_locks = True

                    # 10% of the time, make the input sequence value pass
                    input_will_pass = (random.randint(1, 10) == 1)
                    sequence_value = utxos[j]["confirmations"]
                    if not input_will_pass:
                        sequence_value += 1
                        should_pass = False

                    # Figure out what the median-time-past was for the confirmed input
                    # Note that if an input has N confirmations, we're going back N blocks
                    # from the tip so that we're looking up MTP of the block
                    # PRIOR to the one the input appears in, as per the BIP68 spec.
                    orig_time = self.get_median_time_past(
                        utxos[j]["confirmations"])
                    cur_time = self.get_median_time_past(0)  # MTP of the tip

                    # can only timelock this input if it's not too old -- otherwise use height
                    can_time_lock = True
                    if ((cur_time - orig_time) >> SEQUENCE_LOCKTIME_GRANULARITY
                        ) >= SEQUENCE_LOCKTIME_MASK:
                        can_time_lock = False

                    # if time-lockable, then 50% chance we make this a time lock
                    if random.randint(0, 1) and can_time_lock:
                        # Find first time-lock value that fails, or latest one that succeeds
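                        # Per BIP68, time-based locks count in 512-second
                        # units; shifting by SEQUENCE_LOCKTIME_GRANULARITY
                        # converts between seconds and sequence-lock units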
                        time_delta = sequence_value << SEQUENCE_LOCKTIME_GRANULARITY
                        if input_will_pass and time_delta > cur_time - orig_time:
                            sequence_value = ((cur_time - orig_time) >>
                                              SEQUENCE_LOCKTIME_GRANULARITY)
                        elif not input_will_pass and time_delta <= cur_time - orig_time:
                            sequence_value = (
                                (cur_time - orig_time) >>
                                SEQUENCE_LOCKTIME_GRANULARITY) + 1
                        sequence_value |= SEQUENCE_LOCKTIME_TYPE_FLAG
                tx.vin.append(
                    CTxIn(COutPoint(int(utxos[j]["txid"], 16),
                                    utxos[j]["vout"]),
                          n_sequence=sequence_value))
                value += utxos[j]["amount"] * COIN
            # Overestimate the size of the tx - signatures should be less than 120 bytes, and leave 50 for the output
            tx_size = len(to_hex(tx)) // 2 + 120 * num_inputs + 50
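            # (hex length // 2 is the current serialized size in bytes)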
            tx.vout.append(
                CTxOut(int(value - self.relayfee * tx_size * COIN / 1000),
                       CScript([b'a'])))
            rawtx = self.nodes[0].signrawtransaction(to_hex(tx))["hex"]

            if using_sequence_locks and not should_pass:
                # This transaction should be rejected
                assert_raises_rpc_error(-26, NOT_FINAL_ERROR,
                                        self.nodes[0].sendrawtransaction,
                                        rawtx)
            else:
                # This raw transaction should be accepted
                self.nodes[0].sendrawtransaction(rawtx)
                utxos = self.nodes[0].listunspent()
Example #9
    def test_sequence_lock_unconfirmed_inputs(self):
        # Store height so we can easily reset the chain at the end of the test
        cur_height = self.nodes[0].getblockcount()

        # Create a mempool tx.
        txid = self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), 2)
        tx1 = from_hex(CTransaction(), self.nodes[0].getrawtransaction(txid))
        tx1.rehash()

        # Anyone-can-spend mempool tx.
        # Sequence lock of 0 should pass.
        tx2 = CTransaction()
        tx2.nVersion = 2
        tx2.vin = [CTxIn(COutPoint(tx1.x16r, 0), n_sequence=0)]
        tx2.vout = [
            CTxOut(int(tx1.vout[0].nValue - self.relayfee * COIN),
                   CScript([b'a']))
        ]
        tx2_raw = self.nodes[0].signrawtransaction(to_hex(tx2))["hex"]
        tx2 = from_hex(tx2, tx2_raw)
        tx2.rehash()

        self.nodes[0].sendrawtransaction(tx2_raw)

        # Create a spend of the 0th output of orig_tx with a sequence lock
        # of 1, and test what happens when submitting.
        # orig_tx.vout[0] must be an anyone-can-spend output
        def test_nonzero_locks(orig_tx, node, relayfee, use_height_lock):
            sequence_value = 1
            if not use_height_lock:
                sequence_value |= SEQUENCE_LOCKTIME_TYPE_FLAG

            tx = CTransaction()
            tx.nVersion = 2
            tx.vin = [
                CTxIn(COutPoint(orig_tx.x16r, 0), n_sequence=sequence_value)
            ]
            tx.vout = [
                CTxOut(int(orig_tx.vout[0].nValue - relayfee * COIN),
                       CScript([b'a']))
            ]
            tx.rehash()

            if orig_tx.hash in node.getrawmempool():
                # sendrawtransaction should fail if the tx is in the mempool
                assert_raises_rpc_error(-26, NOT_FINAL_ERROR,
                                        node.sendrawtransaction, to_hex(tx))
            else:
                # sendrawtransaction should succeed if the tx is not in the mempool
                node.sendrawtransaction(to_hex(tx))

            return tx

        test_nonzero_locks(tx2,
                           self.nodes[0],
                           self.relayfee,
                           use_height_lock=True)
        test_nonzero_locks(tx2,
                           self.nodes[0],
                           self.relayfee,
                           use_height_lock=False)

        # Now mine some blocks, but make sure tx2 doesn't get mined.
        # Use prioritisetransaction to lower the effective feerate to 0
        self.nodes[0].prioritisetransaction(txid=tx2.hash,
                                            fee_delta=int(-self.relayfee *
                                                          COIN))
        cur_time = int(time.time())
        for _ in range(10):
            self.nodes[0].setmocktime(cur_time + 600)
            self.nodes[0].generate(1)
            cur_time += 600

        assert (tx2.hash in self.nodes[0].getrawmempool())

        test_nonzero_locks(tx2,
                           self.nodes[0],
                           self.relayfee,
                           use_height_lock=True)
        test_nonzero_locks(tx2,
                           self.nodes[0],
                           self.relayfee,
                           use_height_lock=False)

        # Mine tx2, and then try again
        self.nodes[0].prioritisetransaction(txid=tx2.hash,
                                            fee_delta=int(self.relayfee *
                                                          COIN))

        # Advance the time on the node so that we can test timelocks
        self.nodes[0].setmocktime(cur_time + 600)
        self.nodes[0].generate(1)
        assert (tx2.hash not in self.nodes[0].getrawmempool())

        # Now that tx2 is not in the mempool, a sequence locked spend should
        # succeed
        tx3 = test_nonzero_locks(tx2,
                                 self.nodes[0],
                                 self.relayfee,
                                 use_height_lock=False)
        assert (tx3.hash in self.nodes[0].getrawmempool())

        self.nodes[0].generate(1)
        assert (tx3.hash not in self.nodes[0].getrawmempool())

        # One more test, this time using height locks
        tx4 = test_nonzero_locks(tx3,
                                 self.nodes[0],
                                 self.relayfee,
                                 use_height_lock=True)
        assert (tx4.hash in self.nodes[0].getrawmempool())

        # Now try combining confirmed and unconfirmed inputs
        tx5 = test_nonzero_locks(tx4,
                                 self.nodes[0],
                                 self.relayfee,
                                 use_height_lock=True)
        assert (tx5.hash not in self.nodes[0].getrawmempool())

        utxos = self.nodes[0].listunspent()
        tx5.vin.append(
            CTxIn(COutPoint(int(utxos[0]["txid"], 16), utxos[0]["vout"]),
                  n_sequence=1))
        tx5.vout[0].nValue += int(utxos[0]["amount"] * COIN)
        raw_tx5 = self.nodes[0].signrawtransaction(to_hex(tx5))["hex"]

        assert_raises_rpc_error(-26, NOT_FINAL_ERROR,
                                self.nodes[0].sendrawtransaction, raw_tx5)

        # Test mempool-BIP68 consistency after reorg
        #
        # State of the transactions in the last blocks:
        # ... -> [ tx2 ] ->  [ tx3 ]
        #         tip-1        tip
        # And currently tx4 is in the mempool.
        #
        # If we invalidate the tip, tx3 should get added to the mempool, causing
        # tx4 to be removed (fails sequence-lock).
        self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash())
        assert (tx4.hash not in self.nodes[0].getrawmempool())
        assert (tx3.hash in self.nodes[0].getrawmempool())

        # Now mine 2 empty blocks to reorg out the current tip (labeled tip-1 in
        # diagram above).
        # This would cause tx2 to be added back to the mempool, which in turn causes
        # tx3 to be removed.
        tip = int(
            self.nodes[0].getblockhash(self.nodes[0].getblockcount() - 1), 16)
        height = self.nodes[0].getblockcount()
        for _ in range(2):
            block = create_block(tip, create_coinbase(height), cur_time)
            block.nVersion = 3
            block.rehash()
            block.solve()
            tip = block.x16r
            height += 1
            self.nodes[0].submitblock(to_hex(block))
            cur_time += 1

        mempool = self.nodes[0].getrawmempool()
        assert (tx3.hash not in mempool)
        assert (tx2.hash in mempool)

        # Reset the chain and get rid of the mocktimed-blocks
        self.nodes[0].setmocktime(0)
        self.nodes[0].invalidateblock(self.nodes[0].getblockhash(cur_height +
                                                                 1))
        self.nodes[0].generate(10)
Example #10
    def test_p2p_schema(self):
        """
        This test creates the following nodes:
        1. serving_node - full node that has the snapshot
        2. syncing_p2p - mini node that downloads snapshot from serving_node and tests the protocol
        3. syncing_node - the node which starts with fast sync
        4. serving_p2p - mini node that sends snapshot to syncing_node and tests the protocol
        """
        serving_node = self.nodes[0]
        syncing_node = self.nodes[1]

        self.start_node(serving_node.index)
        self.start_node(syncing_node.index)

        self.setup_stake_coins(serving_node)

        # generate 2 epochs + 1 block to create the first finalized snapshot
        serving_node.generatetoaddress(
            5 + 5 + 1, serving_node.getnewaddress('', 'bech32'))
        assert_equal(serving_node.getblockcount(), 11)
        wait_until(lambda: has_valid_snapshot(serving_node, 4), timeout=10)

        syncing_p2p = serving_node.add_p2p_connection(BaseNode())
        serving_p2p = syncing_node.add_p2p_connection(
            BaseNode(), services=SERVICE_FLAGS_WITH_SNAPSHOT)

        # configure serving_p2p to have snapshot header and parent block
        serving_p2p.update_snapshot_from(serving_node)
        serving_p2p.update_headers_and_blocks_from(serving_node)

        network_thread_start()
        syncing_p2p.wait_for_verack()

        # test snapshot downloading in chunks
        syncing_p2p.send_message(msg_getsnaphead())
        wait_until(lambda: syncing_p2p.snapshot_header.total_utxo_subsets > 0,
                   timeout=10)
        chunks = math.ceil(syncing_p2p.snapshot_header.total_utxo_subsets / 2)
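        # Each getsnapshot request asks for two more UTXO subsets, passing the
        # number of subsets received so far as the starting index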
        for i in range(1, chunks + 1):
            getsnapshot = GetSnapshot(
                syncing_p2p.snapshot_header.snapshot_hash,
                len(syncing_p2p.snapshot_data), 2)
            syncing_p2p.send_message(msg_getsnapshot(getsnapshot))

            snapshot_size = min(i * 2,
                                syncing_p2p.snapshot_header.total_utxo_subsets)
            wait_until(lambda: len(syncing_p2p.snapshot_data) == snapshot_size,
                       timeout=10)
        assert_equal(len(syncing_p2p.snapshot_data),
                     syncing_p2p.snapshot_header.total_utxo_subsets)

        self.log.info('Snapshot was downloaded successfully')

        # validate the snapshot hash
        utxos = []
        for subset in syncing_p2p.snapshot_data:
            for n in subset.outputs:
                out = COutPoint(subset.tx_id, n)
                utxo = UTXO(subset.height, subset.tx_type, out,
                            subset.outputs[n])
                utxos.append(utxo)
        inputs = bytes_to_hex_str(ser_vector([]))
        outputs = bytes_to_hex_str(ser_vector(utxos))
        stake_modifier = "%064x" % syncing_p2p.snapshot_header.stake_modifier
        chain_work = bytes_to_hex_str(
            ser_uint256(syncing_p2p.snapshot_header.chain_work))
        res = self.nodes[0].calcsnapshothash(inputs, outputs, stake_modifier,
                                             chain_work)
        snapshot_hash = uint256_from_hex(res['hash'])
        assert_equal(snapshot_hash, syncing_p2p.snapshot_header.snapshot_hash)

        self.log.info('Snapshot was validated successfully')

        # test snapshot serving
        wait_until(lambda: serving_p2p.snapshot_requested, timeout=10)
        snapshot = Snapshot(
            snapshot_hash=serving_p2p.snapshot_header.snapshot_hash,
            utxo_subset_index=0,
            utxo_subsets=syncing_p2p.snapshot_data,
        )
        serving_p2p.send_message(msg_snapshot(snapshot))
        wait_until(lambda: syncing_node.getblockcount() == 11, timeout=10)
        assert_equal(serving_node.gettxoutsetinfo(),
                     syncing_node.gettxoutsetinfo())

        self.log.info('Snapshot was sent successfully')

        # clean up test
        serving_node.disconnect_p2ps()
        syncing_node.disconnect_p2ps()
        network_thread_join()
        self.stop_node(serving_node.index)
        self.stop_node(syncing_node.index)
        self.log.info('test_p2p_schema passed')