Example #1
    def run_test(self):
        with self.run_node_with_connections(
                "Scenario 1",
                0, [
                    '-banscore=100000', '-genesisactivationheight=110',
                    '-maxstdtxvalidationduration=100'
                ],
                number_of_connections=1) as (conn, ):

            coinbase1 = make_new_block(conn)

            for _ in range(110):
                make_new_block(conn)

            tx_parent = create_transaction(coinbase1, 0, CScript(),
                                           coinbase1.vout[0].nValue - 1000,
                                           CScript([OP_TRUE]))
            tx_parent.rehash()
            tx_orphan = make_big_orphan(
                tx_parent, DEFAULT_MAX_TX_SIZE_POLICY_AFTER_GENESIS)
            assert_equal(len(tx_orphan.serialize()),
                         DEFAULT_MAX_TX_SIZE_POLICY_AFTER_GENESIS)

            conn.send_message(msg_tx(tx_orphan))
            # Make sure the parent is not sent right away so that bitcoind detects an orphan
            time.sleep(1)
            conn.send_message(msg_tx(tx_parent))
            check_mempool_equals(conn.rpc, [tx_parent, tx_orphan])
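These examples repeatedly call a check_mempool_equals helper that is not shown here. A minimal polling sketch of what such a helper might look like, assuming the framework's transactions expose a hex txid via .hash and that the RPC proxy offers the standard getrawmempool call:

import time

def check_mempool_equals(rpc, expected_txs, timeout=60, check_interval=0.5):
    # Poll getrawmempool until it contains exactly the expected txids,
    # failing with an assertion if the timeout elapses first.
    expected_ids = {tx.hash for tx in expected_txs}
    deadline = time.time() + timeout
    while time.time() < deadline:
        if set(rpc.getrawmempool()) == expected_ids:
            return
        time.sleep(check_interval)
    raise AssertionError("mempool did not converge to the expected transaction set")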
Example #2
    def create_fund_txn(self,
                        conn,
                        noutput,
                        tx_fee,
                        locking_script,
                        pubkey=None):
        # create a new block with coinbase
        last_block_info = conn.rpc.getblock(conn.rpc.getbestblockhash())
        coinbase = create_coinbase(height=last_block_info["height"] + 1,
                                   pubkey=pubkey)
        new_block = create_block(int(last_block_info["hash"], 16),
                                 coinbase=coinbase,
                                 nTime=last_block_info["time"] + 1)
        new_block.nVersion = last_block_info["version"]
        new_block.solve()
        conn.send_message(msg_block(new_block))
        wait_until(lambda: conn.rpc.getbestblockhash() == new_block.hash,
                   check_interval=0.3)
        # mature the coinbase
        conn.rpc.generate(100)
        # create and send a funding txn
        funding_tx = self.create_tx([(coinbase, 0)], 2, 1.5, locking_script)
        conn.send_message(msg_tx(funding_tx))
        check_mempool_equals(conn.rpc, [funding_tx])
        conn.rpc.generate(1)
        # create a new txn which pays the specified tx_fee
        new_tx = self.create_tx([(funding_tx, 0)], noutput, tx_fee,
                                locking_script)
        last_block_info = conn.rpc.getblock(conn.rpc.getbestblockhash())
        new_block = create_block(
            int(last_block_info["hash"], 16),
            coinbase=create_coinbase(height=last_block_info["height"] + 1),
            nTime=last_block_info["time"] + 1)
        new_block.nVersion = last_block_info["version"]
        new_block.vtx.append(new_tx)
        new_block.hashMerkleRoot = new_block.calc_merkle_root()
        new_block.calc_sha256()
        new_block.solve()

        conn.send_message(msg_block(new_block))
        wait_until(lambda: conn.rpc.getbestblockhash() == new_block.hash,
                   check_interval=0.3)

        return new_tx
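A hypothetical call site for create_fund_txn, assuming the run_node_with_connections fixture and the CScript/OP_TRUE imports used elsewhere in these examples; the method name, locking script, and fee below are illustrative only:

    def example_usage(self):
        # Hypothetical: fund four outputs locked with OP_TRUE, paying an
        # illustrative 1.5 sat/byte fee; the returned transaction has already
        # been mined into a block by create_fund_txn.
        with self.run_node_with_connections(
                "Funding", 0, [], number_of_connections=1) as (conn, ):
            funded_tx = self.create_fund_txn(conn,
                                             noutput=4,
                                             tx_fee=1.5,
                                             locking_script=CScript([OP_TRUE]))
            return funded_tx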
Example #3
    def _prepare_node(self):

        with self.run_node_with_connections(
                "Prepare utxos", 0, [], number_of_connections=1) as (conn, ):
            # create block with coinbase
            coinbase = create_coinbase(height=1)
            first_block = create_block(int(conn.rpc.getbestblockhash(), 16),
                                       coinbase=coinbase)
            first_block.solve()
            conn.send_message(msg_block(first_block))
            wait_until(lambda: conn.rpc.getbestblockhash() == first_block.hash,
                       check_interval=0.3)

            # mature the coinbase
            conn.rpc.generate(150)

            funding_tx = self.create_tx([(coinbase, 0)], 4, 1.5)

            conn.send_message(msg_tx(funding_tx))
            check_mempool_equals(conn.rpc, [funding_tx])
            conn.rpc.generate(1)

            return funding_tx
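wait_until is used throughout these snippets as a polling primitive. A behavioral sketch of such a helper (the real test-framework version likely accepts additional options, but the calls above only pass timeout and check_interval):

import time

def wait_until(predicate, timeout=60, check_interval=0.3):
    # Repeatedly evaluate the predicate until it returns True,
    # raising if it never does within the timeout.
    deadline = time.time() + timeout
    while time.time() < deadline:
        if predicate():
            return
        time.sleep(check_interval)
    raise AssertionError("wait_until: predicate never became true")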
Example #4
    def run_invalid_orphans_scenarios(self):

        with self.run_node_with_connections("Scenario 1: Valid orphans",
                                            0, [],
                                            number_of_connections=1,
                                            ip=get_lan_ip()) as (conn, ):

            parent_tx1, parent_tx2, orphans, rejected_txs = self.prepare_parents_and_children(
                conn, parent_invalidity=None, children_invalidity=None)

            # sending orphans
            for tx in orphans:
                conn.send_message(msg_tx(tx))

            assert conn.rpc.getmempoolinfo(
            )["size"] == 0, "No transactions should be in the mempool!"
            assert len(
                rejected_txs) == 0, "No transactions should be rejected!"

            # sending parents
            conn.send_message(msg_tx(parent_tx1))
            conn.send_message(msg_tx(parent_tx2))

            # all transactions should be accepted to mempool
            check_mempool_equals(conn.rpc, orphans + [parent_tx1, parent_tx2])
            assert len(
                rejected_txs) == 0, "No transactions should be rejected!"

        with self.run_node_with_connections("Scenario 2: low fee orphans",
                                            0, [],
                                            number_of_connections=1,
                                            ip=get_lan_ip()) as (conn, ):

            parent_tx1, parent_tx2, orphans, rejected_txs = self.prepare_parents_and_children(
                conn, parent_invalidity=None, children_invalidity="low_fee")

            # sending orphans
            for tx in orphans:
                conn.send_message(msg_tx(tx))

            sleep(1)
            assert conn.rpc.getmempoolinfo(
            )["size"] == 0, "No transactions should be in the mempool!"
            assert len(
                rejected_txs) == 0, "No transactions should be rejected yet!"

            # sending first parent
            conn.send_message(msg_tx(parent_tx1))
            check_mempool_equals(conn.rpc, [parent_tx1])
            assert len(
                rejected_txs) == 0, "No transactions should be rejected yet!"

            # sending second parent
            conn.send_message(msg_tx(parent_tx2))
            check_mempool_equals(conn.rpc, [parent_tx1, parent_tx2])

            # check that all orphans are rejected, but we are not banned
            self.check_rejected(rejected_txs, orphans)
            assert conn.connected, "We should still be connected (not banned)."

        with self.run_node_with_connections(
                "Scenario 3: invalid signature orphans",
                0, [],
                number_of_connections=1,
                ip=get_lan_ip()) as (conn, ):

            parent_tx1, parent_tx2, orphans, rejected_txs = self.prepare_parents_and_children(
                conn,
                parent_invalidity=None,
                children_invalidity="bad_signature")

            # sending orphans
            for tx in orphans:
                conn.send_message(msg_tx(tx))

            sleep(1)
            assert conn.rpc.getmempoolinfo(
            )["size"] == 0, "No transactions should be in the mempool!"
            assert len(
                rejected_txs) == 0, "No transactions should be rejected yet!"

            conn.send_message(msg_tx(parent_tx1))
            check_mempool_equals(
                conn.rpc,
                [parent_tx1])  # Only parent_tx1 should be in the mempool
            assert len(
                rejected_txs) == 0, "No transactions should be rejected yet!"

            conn.send_message(msg_tx(parent_tx2))
            check_mempool_equals(
                conn.rpc,
                [parent_tx1, parent_tx2
                 ])  # Only parent_tx1 and parent_tx2 should be in the mempool

            # we must be banned
            conn.cb.wait_for_disconnect(timeout=20)  # will be disconnected
            assert len(conn.rpc.listbanned()) == 1  # and banned
            conn.rpc.clearbanned()

        # the banscore is set to 101 because rejection of a tx with an invalid signature brings 100 points;
        # we don't want to be banned as a result of only one tx
        with self.run_node_with_connections("Scenario 4: bad signature parent",
                                            0, ['-banscore=101'],
                                            number_of_connections=1,
                                            ip=get_lan_ip()) as (conn, ):

            valid_parent_tx, invalid_parent_tx, orphans, rejected_txs = self.prepare_parents_and_children(
                conn,
                parent_invalidity="bad_signature",
                children_invalidity=None)

            for tx in orphans:
                conn.send_message(msg_tx(tx))

            sleep(1)
            assert conn.rpc.getmempoolinfo(
            )["size"] == 0, "No transactions should be in the mempool!"
            assert len(
                rejected_txs) == 0, "No transactions should be rejected yet!"

            conn.send_message(msg_tx(valid_parent_tx))
            check_mempool_equals(
                conn.rpc, [valid_parent_tx
                           ])  # Only valid_parent_tx should be in the mempool
            assert len(
                rejected_txs) == 0, "No transactions should be rejected yet!"

            conn.send_message(msg_tx(invalid_parent_tx))
            check_mempool_equals(
                conn.rpc,
                [valid_parent_tx
                 ])  # Still only valid_parent_tx should be in the mempool
            self.check_rejected(
                rejected_txs,
                [invalid_parent_tx])  # And only invalid parent is rejected
            sleep(1)
            assert len(conn.rpc.listbanned()) == 0  # not banned
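The scenarios above rely on a check_rejected helper and a rejected_txs list populated from reject messages. A minimal sketch of what such a check might do, assuming each collected reject message carries the offending txid in its .data field (as in the later examples that compare rejected_txs[0].data against tx.sha256):

    def check_rejected(self, rejected_txs, expected_txs, timeout=60):
        # Wait until every expected transaction appears among the collected
        # reject messages; comparison is by the 256-bit txid.
        expected_ids = {tx.sha256 for tx in expected_txs}
        wait_until(lambda: expected_ids <= {msg.data for msg in rejected_txs},
                   timeout=timeout)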
Example #5
    def run_test(self):

        limitancestorcount = 20
        limitcpfpgroupmemberscount = 10

        with self.run_node_with_connections(
                "Tests for ancestors count limit, for primary and secondary mempool",
                0,
            [
                "-blockmintxfee=0.00001",
                f"-limitancestorcount={limitancestorcount}",
                f"-limitcpfpgroupmemberscount={limitcpfpgroupmemberscount}",
                "-checkmempool=1",
            ],
                number_of_connections=1) as (conn, ):

            mining_fee = 1.01  # in satoshi per byte
            relayfee = float(conn.rpc.getnetworkinfo()["relayfee"] * COIN /
                             1000) + 0.01  # in satoshi per byte

            # create block with coinbase
            coinbase = create_coinbase(height=1)
            first_block = create_block(int(conn.rpc.getbestblockhash(), 16),
                                       coinbase=coinbase)
            first_block.solve()
            conn.send_message(msg_block(first_block))
            wait_until(lambda: conn.rpc.getbestblockhash() == first_block.hash,
                       check_interval=0.3)

            # mature the coinbase
            conn.rpc.generate(150)

            funding_tx = self.create_tx([(coinbase, 0)], 2, 1.5)

            conn.send_message(msg_tx(funding_tx))
            check_mempool_equals(conn.rpc, [funding_tx])
            conn.rpc.generate(1)

            rejected_txs = []

            def on_reject(conn, msg):
                rejected_txs.append(msg)

            conn.cb.on_reject = on_reject

            # create oversized primary mempool chain, the last tx in the chain will be over the limit
            last_outpoint = (funding_tx, 0)
            primary_mempool_chain = []
            for _ in range(limitancestorcount + 1):
                tx = self.create_tx([last_outpoint], 1, mining_fee)
                primary_mempool_chain.append(tx)
                last_outpoint = (tx, 0)

            # create oversized secondary mempool chain, the last tx in the chain will be over the limit
            last_outpoint = (funding_tx, 1)
            secondary_mempool_chain = []
            for _ in range(limitcpfpgroupmemberscount + 1):
                tx = self.create_tx([last_outpoint], 1, relayfee)
                secondary_mempool_chain.append(tx)
                last_outpoint = (tx, 0)

            # send transactions to the node
            for tx in primary_mempool_chain[:-1]:
                conn.send_message(msg_tx(tx))

            for tx in secondary_mempool_chain[:-1]:
                conn.send_message(msg_tx(tx))

            # all transactions that are sent should end up in the mempool, chains are at the limit
            check_mempool_equals(
                conn.rpc,
                primary_mempool_chain[:-1] + secondary_mempool_chain[:-1])

            # now send transactions that try to extend the chains over the limit; they should be rejected
            for tx_to_reject in [
                    primary_mempool_chain[-1], secondary_mempool_chain[-1]
            ]:
                conn.send_message(msg_tx(tx_to_reject))
                wait_until(lambda: len(rejected_txs) == 1)
                assert_equal(rejected_txs[0].data, tx_to_reject.sha256)
                assert_equal(rejected_txs[0].reason, b'too-long-mempool-chain')
                rejected_txs.clear()

            # let's mine transactions from the beginning of the chain, this will shorten the chains
            last_block_info = conn.rpc.getblock(conn.rpc.getbestblockhash())
            block = create_block(
                int(last_block_info["hash"], 16),
                coinbase=create_coinbase(height=last_block_info["height"] + 1),
                nTime=last_block_info["time"] + 1)
            block.vtx.append(primary_mempool_chain[0])
            block.vtx.append(secondary_mempool_chain[0])
            block.hashMerkleRoot = block.calc_merkle_root()
            block.calc_sha256()
            block.solve()

            conn.send_message(msg_block(block))
            wait_until(lambda: conn.rpc.getbestblockhash() == block.hash,
                       check_interval=0.3)

            # try to send the transactions again, now the chains are shorter and the transactions will be accepted
            for tx_to_reject in [
                    primary_mempool_chain[-1], secondary_mempool_chain[-1]
            ]:
                conn.send_message(msg_tx(tx_to_reject))
            check_mempool_equals(
                conn.rpc,
                primary_mempool_chain[1:] + secondary_mempool_chain[1:])

            # invalidate the block, this will force mined transactions back to mempool
            # as we do not check chain length after reorg we will end up with long chains in the mempool
            conn.rpc.invalidateblock(block.hash)
            check_mempool_equals(
                conn.rpc, primary_mempool_chain + secondary_mempool_chain)
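The relayfee computation above converts the node's relay fee, reported by getnetworkinfo in coins per 1000 bytes, into satoshi per byte and adds a 0.01 safety margin. A worked example of the conversion, using a purely illustrative relay-fee value:

COIN = 100000000  # satoshi per coin, as in the test framework

relayfee_per_kb = 0.0000025          # illustrative getnetworkinfo()["relayfee"]
relayfee = float(relayfee_per_kb * COIN / 1000) + 0.01
# 0.0000025 coin/kB * 1e8 sat/coin / 1000 B/kB = 0.25 sat/byte, plus the margin
assert abs(relayfee - 0.26) < 1e-9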
Example #6
    def run_test(self):
        with self.run_node_with_connections(
                "Preparation", 0, [], number_of_connections=1) as (conn, ):

            self.start_node(1)
            self.start_node(2)

            connect_nodes_bi(self.nodes, 0, 1)
            connect_nodes_bi(self.nodes, 0, 2)

            # create block with coinbase
            coinbase = create_coinbase(height=1)
            first_block = create_block(int(conn.rpc.getbestblockhash(), 16),
                                       coinbase=coinbase)
            first_block.solve()
            conn.send_message(msg_block(first_block))
            wait_until(lambda: conn.rpc.getbestblockhash() == first_block.hash,
                       check_interval=0.3)

            # mature the coinbase
            conn.rpc.generate(101)

            N_TX_IN_BLOCK = 30
            funding_tx = self.create_tx([(coinbase, 0)], 3, 1000, 0)
            funding_tx_1 = self.create_tx([(funding_tx, 0)], N_TX_IN_BLOCK,
                                          1000, 0)
            funding_tx_2 = self.create_tx([(funding_tx, 1)], N_TX_IN_BLOCK,
                                          1000, 0)
            funding_tx_3 = self.create_tx([(funding_tx, 2)], N_TX_IN_BLOCK,
                                          1000, 0)

            conn.send_message(msg_tx(funding_tx))
            conn.send_message(msg_tx(funding_tx_1))
            conn.send_message(msg_tx(funding_tx_2))
            conn.send_message(msg_tx(funding_tx_3))
            check_mempool_equals(
                conn.rpc,
                [funding_tx, funding_tx_1, funding_tx_2, funding_tx_3])
            conn.rpc.generate(1)
            check_mempool_equals(conn.rpc, [])

            last_block_info = conn.rpc.getblock(conn.rpc.getbestblockhash())

            wait_until(lambda: self.nodes[1].getbestblockhash() ==
                       last_block_info["hash"])
            wait_until(lambda: self.nodes[2].getbestblockhash() ==
                       last_block_info["hash"])

            self.stop_node(1)
            self.stop_node(2)

        block = self.create_block(funding_tx=funding_tx_1,
                                  block_target_size=10 * ONE_MEGABYTE,
                                  prev_hash=int(last_block_info["hash"], 16),
                                  prev_height=last_block_info["height"],
                                  prev_time=last_block_info["time"])

        block_bytes = block.serialize()
        block_size = len(block_bytes)
        block_bytes = None

        TARGET_SENDING_TIME_FAST = 50
        TOTAL_BLOCK_DOWNLOAD_TIMEOUT = 60
        TARGET_SENDING_TIME_SLOW = 70
        target_send_rate_fast = block_size / TARGET_SENDING_TIME_FAST
        cmd_total_timeout = 100 * TOTAL_BLOCK_DOWNLOAD_TIMEOUT // (
            10 * 60)  # 100% * timeout / 10 min
        target_send_rate_slow = block_size / TARGET_SENDING_TIME_SLOW
        """ CASE 1 """
        self.test_send_block_to_node(
            label=
            "Single connection, sending slowly, setting only base timeout, timeout occures",
            node_index=0,
            block=block,
            send_rate=target_send_rate_slow,
            expected_time_to_send=TARGET_SENDING_TIME_SLOW,
            cmd_timeout_base=cmd_total_timeout,
            cmd_timeout_base_ibd=
            1000000,  # we are not in IBD so this param is not relevant
            cmd_timeout_per_peer=
            1000000,  # we are sending on only one connection so this param is not relevant
            expect_timeout=True,
        )

        self.test_send_block_to_node(
            label=
            "Single connection, sending fast enough, setting only base timeout, timeout does not occure",
            node_index=0,
            block=block,
            send_rate=target_send_rate_fast,
            expected_time_to_send=TARGET_SENDING_TIME_FAST,
            cmd_timeout_base=cmd_total_timeout,
            cmd_timeout_base_ibd=
            1,  # we are not in IBD so this param is not relevant
            cmd_timeout_per_peer=
            1,  # we are sending on only one connection so this param is not relevant
            expect_timeout=False,
        )
        """ CASE 2 """
        self.test_send_block_to_node(
            label=
            "Single connection, node in the initial block download, setting only base IBD timeout, sending slowly, timeout occures",
            node_index=1,
            block=block,
            send_rate=target_send_rate_slow,
            expected_time_to_send=TARGET_SENDING_TIME_SLOW,
            cmd_timeout_base=
            100000,  # we are in IBD so this param is not relevant
            cmd_timeout_base_ibd=cmd_total_timeout,  # we are in IBD
            cmd_timeout_per_peer=
            100000,  # we are sending on only one connection so this param is not relevant
            mocktime=int(
                time() + 48 * 60 *
                60),  # make node go 48 hours into future to put it in the IBD
            expect_timeout=True,
        )

        self.test_send_block_to_node(
            label=
            "Single connection, node in the initial block download, setting only base IBD timeout, sending fast enough, timeout does not occure",
            node_index=1,
            block=block,
            send_rate=target_send_rate_fast,
            expected_time_to_send=TARGET_SENDING_TIME_FAST,
            cmd_timeout_base=1,  # we are in IBD so this param is not relevant
            cmd_timeout_base_ibd=cmd_total_timeout,  # we are in IBD
            cmd_timeout_per_peer=
            1,  # we are sending on only one connection so this param is not relevant
            mocktime=int(
                time() + 48 * 60 *
                60),  # make node go 48 hours into future to put it in the IBD
            expect_timeout=False,
        )
        """ CASE 3 """
        block_2 = self.create_block(funding_tx=funding_tx_2,
                                    block_target_size=10 * ONE_MEGABYTE,
                                    prev_hash=int(last_block_info["hash"], 16),
                                    prev_height=last_block_info["height"],
                                    prev_time=last_block_info["time"])
        block_3 = self.create_block(funding_tx=funding_tx_3,
                                    block_target_size=10 * ONE_MEGABYTE,
                                    prev_hash=int(block_2.hash, 16),
                                    prev_height=last_block_info["height"] + 1,
                                    prev_time=last_block_info["time"] + 1)

        self.test_send_block_to_node(
            label=
            "Two connections, on both connections we are sending blocks, sending slowly, timeout occures",
            node_index=2,
            block=block,
            send_rate=target_send_rate_slow,
            expected_time_to_send=TARGET_SENDING_TIME_SLOW,
            cmd_timeout_base=cmd_total_timeout //
            2,  # half of the timeout is contributed by the base
            cmd_timeout_base_ibd=
            100000,  # we are not in IBD so this param is not relevant
            cmd_timeout_per_peer=cmd_total_timeout //
            2,  # another half of the timeout is contributed by the additional (single) per peer
            additional_conn_blocks=[
                block_2, block_3
            ],  # blocks to send through the additional connection
            additional_conn_send_rate=target_send_rate_slow,
            expect_timeout=True,
        )

        self.test_send_block_to_node(
            label=
            "Two connections, on both connections we are sending blocks, sending slowly, sending fast enough, timeout does not occure",
            node_index=2,
            block=block,
            send_rate=target_send_rate_fast,
            expected_time_to_send=TARGET_SENDING_TIME_FAST,
            cmd_timeout_base=cmd_total_timeout //
            2,  # half of the timeout is contributed by the base
            cmd_timeout_base_ibd=
            1,  # we are not in IBD so this param is not relevant
            cmd_timeout_per_peer=cmd_total_timeout //
            2,  # another half of the timeout is contributed by the additional (single) per peer
            additional_conn_blocks=[
                block_2, block_3
            ],  # blocks to send through the additional connection
            additional_conn_send_rate=target_send_rate_slow,
            expect_timeout=False,
        )
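The timeout options in this scenario are expressed as a percentage of the node's 10-minute block-download window, and the send rates are derived from the serialized block size. A short worked example of that arithmetic; the 10 MB figure mirrors block_target_size above, and ONE_MEGABYTE is assumed to be 1,000,000 bytes:

ONE_MEGABYTE = 1000000  # assumed value

TOTAL_BLOCK_DOWNLOAD_TIMEOUT = 60
# 100% * 60 s / 600 s  ->  the timeout options are passed as 10 (percent)
cmd_total_timeout = 100 * TOTAL_BLOCK_DOWNLOAD_TIMEOUT // (10 * 60)
assert cmd_total_timeout == 10

block_size = 10 * ONE_MEGABYTE          # roughly the serialized block size
TARGET_SENDING_TIME_FAST = 50
target_send_rate_fast = block_size / TARGET_SENDING_TIME_FAST
assert target_send_rate_fast == 200000  # bytes per second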
Example #7
    def run_test(self):
        with self.run_node_with_connections(
                "Scenario 1: Low fee, non-whitelisted peer",
                0, ["-blockmintxfee=0.00001"],
                number_of_connections=1) as (conn, ):

            mining_fee = 1.01  # in satoshi per byte
            relayfee = float(conn.rpc.getnetworkinfo()["relayfee"] * COIN /
                             1000) + 0.01  # in satoshi per byte

            # create block with coinbase
            coinbase = create_coinbase(height=1)
            first_block = create_block(int(conn.rpc.getbestblockhash(), 16),
                                       coinbase=coinbase)
            first_block.solve()
            conn.send_message(msg_block(first_block))
            wait_until(lambda: conn.rpc.getbestblockhash() == first_block.hash,
                       check_interval=0.3)

            # mature the coinbase
            conn.rpc.generate(150)

            funding_tx = self.create_tx([(coinbase, 0)], 10, 1.5)

            conn.send_message(msg_tx(funding_tx))
            check_mempool_equals(conn.rpc, [funding_tx])
            conn.rpc.generate(1)

            last_block_info = conn.rpc.getblock(conn.rpc.getbestblockhash())
            block = create_block(
                int(last_block_info["hash"], 16),
                coinbase=create_coinbase(height=last_block_info["height"] + 1),
                nTime=last_block_info["time"] + 1)
            low_fee_tx = self.create_tx([(funding_tx, 0)], 2, relayfee)
            block.vtx.append(low_fee_tx)
            block.hashMerkleRoot = block.calc_merkle_root()
            block.calc_sha256()
            block.solve()

            conn.send_message(msg_block(block))
            wait_until(lambda: conn.rpc.getbestblockhash() == block.hash,
                       check_interval=0.3)

            tx_pays_relay1 = self.create_tx([(low_fee_tx, 0)], 2, relayfee)
            tx_pays_relay2 = self.create_tx([(tx_pays_relay1, 0)], 1, relayfee)
            tx_pays_enough_for_itself = self.create_tx([(tx_pays_relay1, 1)],
                                                       1, mining_fee)
            tx_pays_for_ancestors = self.create_tx([(tx_pays_relay2, 0)], 1,
                                                   3.5 * mining_fee)
            tx_pays_relay3 = self.create_tx([(tx_pays_for_ancestors, 0)], 1,
                                            relayfee)

            conn.send_message(msg_tx(tx_pays_relay1))
            check_mempool_equals(conn.rpc, [tx_pays_relay1])
            wait_until(lambda: conn.rpc.getminingcandidate()["num_tx"] == 1
                       )  #"should be coinbase only"

            conn.send_message(msg_tx(tx_pays_relay2))
            check_mempool_equals(conn.rpc, [tx_pays_relay1, tx_pays_relay2])
            wait_until(lambda: conn.rpc.getminingcandidate()["num_tx"] == 1
                       )  #"should be coinbase only"

            conn.send_message(msg_tx(tx_pays_enough_for_itself))
            check_mempool_equals(
                conn.rpc,
                [tx_pays_relay1, tx_pays_relay2, tx_pays_enough_for_itself])
            wait_until(lambda: conn.rpc.getminingcandidate()["num_tx"] == 1
                       )  #"should be coinbase only"

            # try to rebuild the journal, the mining candidate should stay the same
            conn.rpc.rebuildjournal()
            wait_until(lambda: conn.rpc.getminingcandidate()["num_tx"] == 1)

            # this will trigger CPFP for the two non-paying ancestors (tx_pays_relay1 and tx_pays_relay2);
            # after that tx_pays_enough_for_itself will be free of ancestor debt and will be accepted as well
            conn.send_message(msg_tx(tx_pays_for_ancestors))
            check_mempool_equals(conn.rpc, [
                tx_pays_relay1, tx_pays_relay2, tx_pays_enough_for_itself,
                tx_pays_for_ancestors
            ])
            wait_until(lambda: conn.rpc.getminingcandidate()["num_tx"] == 5
                       )  # "all tx plus coinbase"

            # still, the non-paying child of tx_pays_for_ancestors will not make it into the mining candidate
            conn.send_message(msg_tx(tx_pays_relay3))
            check_mempool_equals(conn.rpc, [
                tx_pays_relay1, tx_pays_relay2, tx_pays_enough_for_itself,
                tx_pays_for_ancestors, tx_pays_relay3
            ])
            wait_until(lambda: conn.rpc.getminingcandidate()["num_tx"] == 5
                       )  # "all txs except tx_pays_relay3, plus coinbase"

            # try to rebuild the journal, the mining candidate should stay the same
            conn.rpc.rebuildjournal()
            wait_until(lambda: conn.rpc.getminingcandidate()["num_tx"] == 5)

            # we will mine a new block, all transactions from the journal will end up in the new block
            # and the mempool will contain only tx_pays_relay3
            conn.rpc.generate(1)
            check_mempool_equals(conn.rpc, [tx_pays_relay3])

            # now we invalidate the block mined two blocks ago; at this point tx_pays_for_ancestors does not pay
            # enough for all of its non-paying ancestors, so nothing will end up in the journal.
            # the non-paying ancestors are: low_fee_tx, tx_pays_relay1, tx_pays_relay2
            conn.rpc.invalidateblock(block.hash)
            check_mempool_equals(conn.rpc, [
                low_fee_tx, tx_pays_relay1, tx_pays_relay2,
                tx_pays_enough_for_itself, tx_pays_for_ancestors,
                tx_pays_relay3
            ])
            wait_until(lambda: conn.rpc.getminingcandidate()["num_tx"] == 1
                       )  #"should be coinbase only"

            # when we reconsider the invalidated block, everything should return to the previous state
            conn.rpc.reconsiderblock(block.hash)
            check_mempool_equals(conn.rpc, [tx_pays_relay3])
            wait_until(lambda: conn.rpc.getminingcandidate()["num_tx"] == 1
                       )  #"should be coinbase only"
Example #8
    def run_test(self):
        with self.run_node_with_connections(
                "Preparation",
                0, [
                    "-blockmintxfee=0.00001",
                    "-relayfee=0.000005",
                    "-checkmempool=0",
                ],
                number_of_connections=1) as (conn, ):
            mining_fee = 1.1

            # create block with coinbase
            coinbase = create_coinbase(pubkey=self.public_key, height=1)
            first_block = create_block(int(conn.rpc.getbestblockhash(), 16),
                                       coinbase=coinbase)
            first_block.solve()
            conn.send_message(msg_block(first_block))
            wait_until(lambda: conn.rpc.getbestblockhash() == first_block.hash,
                       check_interval=0.3)

            # mature the coinbase
            conn.rpc.generate(150)

            funding_tx = self.create_tx([(coinbase, 0)], 2006, mining_fee)
            conn.send_message(msg_tx(funding_tx))
            check_mempool_equals(conn.rpc, [funding_tx])

            # generates a root block with our funding transaction
            conn.rpc.generate(1)

            # create 2000 standard p2pk transactions
            a1_txs = []
            for m in range(2000):
                a1_txs.append(self.create_tx([(funding_tx, m)], 1, mining_fee))

            a1_spends = []
            for a1_tx in a1_txs:
                a1_spends.append(self.create_tx([(a1_tx, 0)], 1, mining_fee))

            # create 2000 standard p2pk transactions which are spending the same outputs as a1_txs
            double_spend_txs = []
            for m in range(2000):
                double_spend_txs.append(
                    self.create_tx([(funding_tx, m)], 1, mining_fee))

            TX_COUNT = 8
            # create six chains of long-evaluating transactions, one each for blocks b1, b2, c1, c2, d1, and d2
            long_eval_txs = []
            for m in range(2000, 2006):
                long_eval_txs.append(
                    self.create_tx([(funding_tx, m)],
                                   1,
                                   0.0001,
                                   make_long_eval_script=True))
                for _ in range(TX_COUNT - 1):
                    long_eval_txs.append(
                        self.create_tx([(long_eval_txs[-1], 0)],
                                       1,
                                       0.0001,
                                       make_long_eval_script=True))

            root_block_info = conn.rpc.getblock(conn.rpc.getbestblockhash())
            root_hash = root_block_info["hash"]
            root_height = root_block_info["height"]
            root_time = root_block_info["time"]

            # create all blocks needed for this test
            block_a1 = self.make_block(a1_txs, root_hash, root_height,
                                       root_time)
            block_b1 = self.make_block(
                long_eval_txs[0 * TX_COUNT:1 * TX_COUNT], root_hash,
                root_height, root_time)
            block_b2 = self.make_block(
                long_eval_txs[1 * TX_COUNT:2 * TX_COUNT], block_b1.hash,
                root_height + 1, root_time + 100)
            block_c1 = self.make_block(
                long_eval_txs[2 * TX_COUNT:3 * TX_COUNT], root_hash,
                root_height, root_time)
            block_c2 = self.make_block(
                long_eval_txs[3 * TX_COUNT:4 * TX_COUNT], block_c1.hash,
                root_height + 1, root_time + 101)
            block_d1 = self.make_block(
                long_eval_txs[4 * TX_COUNT:5 * TX_COUNT], root_hash,
                root_height, root_time)
            block_d2 = self.make_block(
                long_eval_txs[5 * TX_COUNT:6 * TX_COUNT], block_d1.hash,
                root_height + 1, root_time + 102)

            conn.send_message(msg_block(block_a1))
            wait_until(lambda: conn.rpc.getbestblockhash() == block_a1.hash,
                       check_interval=0.3)

        with self.run_node_with_connections(
                "1. Try sending the same transaction that are in the disconnected block during the reorg",
                0, [
                    "-blockmintxfee=0.00001",
                    "-relayfee=0.000005",
                    "-maxtxsizepolicy=0",
                    "-maxstdtxnsperthreadratio=1",
                    "-maxnonstdtxnsperthreadratio=1",
                    '-maxnonstdtxvalidationduration=100000',
                    '-maxtxnvalidatorasynctasksrunduration=100001',
                    '-genesisactivationheight=1',
                    '-maxstackmemoryusageconsensus=2GB',
                    "-maxscriptsizepolicy=2GB",
                    "-acceptnonstdoutputs=1",
                ],
                number_of_connections=1) as (conn, ):

            # send all transactions from block a1 at once and flood the PTV
            for tx in a1_txs:
                conn.send_message(msg_tx(tx))

            # announce blocks b1, and b2 and send them triggering the reorg
            headers = msg_headers()
            headers.headers.append(block_b1)
            headers.headers.append(block_b2)
            conn.send_message(headers)

            conn.send_message(msg_block(block_b1))
            conn.send_message(msg_block(block_b2))

            # here the PTV and PBV are working at the same time, filling the mempool while
            # block a1 is being disconnected

            # check if everything is as expected
            wait_until(lambda: conn.rpc.getbestblockhash() == block_b2.hash,
                       timeout=60,
                       check_interval=1)
            check_mempool_equals(conn.rpc, a1_txs)

            # now prepare for next scenario
            conn.rpc.invalidateblock(block_b1.hash)
            wait_until(lambda: conn.rpc.getbestblockhash() == block_a1.hash,
                       check_interval=1)

            # transactions from the disconnected blocks b1 and b2 will not be added to mempool because of
            # the insufficient priority (zero fee)
            check_mempool_equals(conn.rpc, [], timeout=60, check_interval=1)

        with self.run_node_with_connections(
                "2. Try sending transaction that are spending same inputs as transactions in the disconnected block during the reorg",
                0, [
                    "-blockmintxfee=0.00001",
                    "-relayfee=0.000005",
                    "-maxtxsizepolicy=0",
                    "-maxstdtxnsperthreadratio=1",
                    "-maxnonstdtxnsperthreadratio=1",
                    '-maxnonstdtxvalidationduration=100000',
                    '-maxtxnvalidatorasynctasksrunduration=100001',
                    '-genesisactivationheight=1',
                    '-maxstackmemoryusageconsensus=2GB',
                    "-maxscriptsizepolicy=2GB",
                    "-acceptnonstdoutputs=1",
                ],
                number_of_connections=1) as (conn, ):

            # see if everything is still as expected
            wait_until(lambda: conn.rpc.getbestblockhash() == block_a1.hash,
                       check_interval=1)
            check_mempool_equals(conn.rpc, [], timeout=60, check_interval=1)

            # send all transactions that are double-spends of txs from block a1
            for double_spend_tx in double_spend_txs:
                conn.send_message(msg_tx(double_spend_tx))

            # announce and send blocks c1 and c2
            headers = msg_headers()
            headers.headers.append(block_c1)
            headers.headers.append(block_c2)
            conn.send_message(headers)

            conn.send_message(msg_block(block_c1))
            conn.send_message(msg_block(block_c2))

            # here the PTV and PBV are working at the same time, filling the mempool with double-spends
            # while block a1 is being disconnected

            # see if everything is as expected
            wait_until(lambda: conn.rpc.getbestblockhash() == block_c2.hash,
                       timeout=60,
                       check_interval=1)
            # the mempool should contain all transactions from block a1,
            # while none of the double_spend_txs should be present
            check_mempool_equals(conn.rpc,
                                 a1_txs,
                                 timeout=60,
                                 check_interval=1)

            # now prepare for next scenario
            conn.rpc.invalidateblock(block_c1.hash)
            wait_until(lambda: conn.rpc.getbestblockhash() == block_a1.hash,
                       check_interval=1)

            # transactions from the disconnected blocks c1 and c2 will not be added to mempool because of
            # the insufficient priority (zero fee)
            check_mempool_equals(conn.rpc, [], timeout=60, check_interval=1)

        with self.run_node_with_connections(
                "3. Submit transactions that are spending ouputs from disconnecting block and try to mine a block during the reorg",
                0, [
                    "-blockmintxfee=0.00001",
                    "-relayfee=0.000005",
                    "-maxtxsizepolicy=0",
                    '-maxnonstdtxvalidationduration=100000',
                    '-maxtxnvalidatorasynctasksrunduration=100001',
                    '-genesisactivationheight=1',
                    '-maxstackmemoryusageconsensus=2GB',
                    "-maxscriptsizepolicy=2GB",
                    "-acceptnonstdoutputs=1",
                ],
                number_of_connections=1) as (conn, ):

            # see if everything is still as expected
            wait_until(lambda: conn.rpc.getbestblockhash() == block_a1.hash,
                       check_interval=1)
            check_mempool_equals(conn.rpc, [], timeout=60, check_interval=1)

            # send transactions that are spending outputs from the soon-to-be-disconnected block (a1)
            for tx in a1_spends:
                conn.send_message(msg_tx(tx))

            check_mempool_equals(conn.rpc, a1_spends, timeout=100)

            # announce blocks d1, and d2 and send them triggering the reorg
            headers = msg_headers()
            headers.headers.append(block_d1)
            headers.headers.append(block_d2)
            conn.send_message(headers)

            conn.send_message(msg_block(block_d1))
            conn.send_message(msg_block(block_d2))

            # let's give the reorg a chance to start
            sleep(0.5)

            # we are in the middle of the reorg, let's try to mine a block;
            # if we were in an inconsistent state this call would fail
            conn.rpc.generate(1)
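The make_block helper used in this example is not shown. A plausible sketch, following the same create_block/create_coinbase pattern that Examples #2 and #5 use inline; the exact signature is assumed from the call sites:

    def make_block(self, txs, parent_hash, parent_height, parent_time):
        # Build and solve a block containing the given transactions on top of
        # the supplied parent (hash given as a hex string, as returned by RPC).
        block = create_block(int(parent_hash, 16),
                             coinbase=create_coinbase(height=parent_height + 1),
                             nTime=parent_time + 1)
        block.vtx.extend(txs)
        block.hashMerkleRoot = block.calc_merkle_root()
        block.calc_sha256()
        block.solve()
        return block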
Example #9
    def _test_chain(self, outpoint1, outpoint2):

        limitancestorcount = 20
        limitcpfpgroupmemberscount = 10

        with self.run_node_with_connections(
                "Tests for ancestors count limit for primary and secondary mempool. "
                "Transactions arranged in chain.",
                0,
            [
                "-blockmintxfee=0.00001",
                "-relayfee=0.000005",
                f"-limitancestorcount={limitancestorcount}",
                f"-limitcpfpgroupmemberscount={limitcpfpgroupmemberscount}",
                "-checkmempool=1",
            ],
                number_of_connections=1) as (conn, ):

            mining_fee = 1.001  # in satoshi per byte
            relayfee = 0.501  # in satoshi per byte

            rejected_txs = []

            def on_reject(conn, msg):
                rejected_txs.append(msg)

            conn.cb.on_reject = on_reject

            # create oversized primary mempool chain, the last tx in the chain will be over the limit
            last_outpoint = outpoint1
            primary_mempool_chain = []
            for _ in range(limitancestorcount + 1):
                tx = self.create_tx([last_outpoint], 1, mining_fee)
                primary_mempool_chain.append(tx)
                last_outpoint = (tx, 0)

            # create oversized secondary mempool chain, the last tx in the chain will be over the limit
            last_outpoint = outpoint2
            secondary_mempool_chain = []
            for _ in range(limitcpfpgroupmemberscount + 1):
                tx = self.create_tx([last_outpoint], 1, relayfee)
                secondary_mempool_chain.append(tx)
                last_outpoint = (tx, 0)

            # send transactions to the node
            for tx in primary_mempool_chain[:-1]:
                conn.send_message(msg_tx(tx))

            for tx in secondary_mempool_chain[:-1]:
                conn.send_message(msg_tx(tx))

            # all transactions that are sent should end up in the mempool, chains are at the limit
            check_mempool_equals(
                conn.rpc,
                primary_mempool_chain[:-1] + secondary_mempool_chain[:-1])

            # now send transactions that try to extend the chains over the limit; they should be rejected
            for tx_to_reject in [
                    primary_mempool_chain[-1], secondary_mempool_chain[-1]
            ]:
                conn.send_message(msg_tx(tx_to_reject))
                wait_until(lambda: len(rejected_txs) == 1)
                assert_equal(rejected_txs[0].data, tx_to_reject.sha256)
                assert_equal(rejected_txs[0].reason, b'too-long-mempool-chain')
                rejected_txs.clear()

            # let's mine transactions from the beginning of the chain, this will shorten the chains
            block = self.mine_transactions(
                conn, [primary_mempool_chain[0], secondary_mempool_chain[0]])

            # try to send the transactions again, now the chains are shorter and the transactions will be accepted
            for tx_to_reject in [
                    primary_mempool_chain[-1], secondary_mempool_chain[-1]
            ]:
                conn.send_message(msg_tx(tx_to_reject))
            check_mempool_equals(
                conn.rpc,
                primary_mempool_chain[1:] + secondary_mempool_chain[1:])

            # invalidate the block, this will force mined transactions back to mempool
            # as we do not check chain length after reorg we will end up with long chains in the mempool
            conn.rpc.invalidateblock(block.hash)
            check_mempool_equals(
                conn.rpc, primary_mempool_chain + secondary_mempool_chain)

            # mine all txs from mempool to ensure empty mempool for the next test case
            self.mine_transactions(
                conn, primary_mempool_chain + secondary_mempool_chain)
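mine_transactions is used here and in the next example but is not shown. A sketch that mirrors the inline block-building code from Example #5, assuming it mines the given transactions on top of the current tip and returns the block:

    def mine_transactions(self, conn, txs):
        # Put the transactions into a block on top of the current tip, submit
        # it, and wait for it to become the new best block.
        tip = conn.rpc.getblock(conn.rpc.getbestblockhash())
        block = create_block(int(tip["hash"], 16),
                             coinbase=create_coinbase(height=tip["height"] + 1),
                             nTime=tip["time"] + 1)
        block.vtx.extend(txs)
        block.hashMerkleRoot = block.calc_merkle_root()
        block.calc_sha256()
        block.solve()
        conn.send_message(msg_block(block))
        wait_until(lambda: conn.rpc.getbestblockhash() == block.hash,
                   check_interval=0.3)
        return block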
Example #10
    def _test_graph(self, outpoint, mempool_type):

        if mempool_type == "primary":
            limitancestorcount = 3
            limitcpfpgroupmemberscount = 1000
        elif mempool_type == "secondary":
            limitancestorcount = 1000
            limitcpfpgroupmemberscount = 7
        else:
            raise Exception("Unsupported mempool type")

        with self.run_node_with_connections(
                f"Tests for ancestors count limit in {mempool_type} mempool."
                "Transactions arranged in graph.",
                0,
            [
                "-blockmintxfee=0.00001",
                "-relayfee=0.000005",
                f"-limitancestorcount={limitancestorcount}",
                f"-limitcpfpgroupmemberscount={limitcpfpgroupmemberscount}",
                "-checkmempool=1",
            ],
                number_of_connections=1) as (conn, ):
            # ensure that the mempool is empty
            check_mempool_equals(conn.rpc, [])

            rejected_txs = []

            def on_reject(conn, msg):
                rejected_txs.append(msg)

            conn.cb.on_reject = on_reject

            mining_fee = 1.001  # in satoshi per byte
            relayfee = 0.501  # in satoshi per byte

            # create transactions

            # <transaction_name> (<prim_count>, <sec_count>)
            #
            # prim_count is the ancestor count in the primary mempool, algorithm == max
            # sec_count is the ancestor count in the secondary mempool, algorithm == sum
            #
            #
            #                      tx1 (0, 0)
            #         +------------+   |   +------------+
            #         |                |                |
            #    tx2 (1, 1)        tx3 (1, 1)       tx4 (1, 1)
            #         |                |                |
            #         +------------+   |   +------------+
            #                      tx5 (2, 6)
            #                          |
            #                          |
            #                      tx6 (3, 7)

            fee = mining_fee if mempool_type == "primary" else relayfee

            tx1 = self.create_tx([outpoint], 3, fee)
            tx2 = self.create_tx([(tx1, 0)], 1, fee)
            tx3 = self.create_tx([(tx1, 1)], 1, fee)
            tx4 = self.create_tx([(tx1, 2)], 1, fee)
            tx5 = self.create_tx([(tx2, 0), (tx3, 0), (tx4, 0)], 1, fee)
            tx6 = self.create_tx([(tx5, 0)], 1, fee)

            conn.send_message(msg_tx(tx1))
            conn.send_message(msg_tx(tx2))
            conn.send_message(msg_tx(tx3))
            conn.send_message(msg_tx(tx4))
            conn.send_message(msg_tx(tx5))

            # up to now all txs are accepted
            check_mempool_equals(conn.rpc, [tx1, tx2, tx3, tx4, tx5])

            # tx6 will be rejected because it exceeds the ancestor count limit
            conn.send_message(msg_tx(tx6))
            wait_until(lambda: len(rejected_txs) == 1)
            assert_equal(rejected_txs[0].data, tx6.sha256)
            assert_equal(rejected_txs[0].reason, b'too-long-mempool-chain')

            # now mine tx1 to shorten the chain
            block = self.mine_transactions(conn, [tx1])

            # now we can add tx6 to mempool
            conn.send_message(msg_tx(tx6))
            check_mempool_equals(conn.rpc, [tx2, tx3, tx4, tx5, tx6])

            # invalidate the block, this will force mined transactions back to mempool
            # as we do not check chain length after reorg we will end up with long chains in the mempool
            conn.rpc.invalidateblock(block.hash)
            check_mempool_equals(conn.rpc, [tx1, tx2, tx3, tx4, tx5, tx6])

            # mine all txs from mempool to ensure empty mempool for the next test case
            self.mine_transactions(conn, [tx1, tx2, tx3, tx4, tx5, tx6])
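The (prim_count, sec_count) annotations in the diagram suggest two different ways of combining parent counts: the primary mempool takes the longest ancestor path (max), while the secondary mempool counts ancestors over every path (sum). A small self-contained check of those rules against the diagram, under that assumption:

parents = {"tx1": [], "tx2": ["tx1"], "tx3": ["tx1"], "tx4": ["tx1"],
           "tx5": ["tx2", "tx3", "tx4"], "tx6": ["tx5"]}

def prim_count(tx):  # primary mempool: longest ancestor chain
    return 0 if not parents[tx] else 1 + max(prim_count(p) for p in parents[tx])

def sec_count(tx):   # secondary mempool: ancestors summed over all paths
    return sum(1 + sec_count(p) for p in parents[tx])

assert (prim_count("tx5"), sec_count("tx5")) == (2, 6)
assert (prim_count("tx6"), sec_count("tx6")) == (3, 7)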
Example #11
    def run_test(self):

        with self.run_node_with_connections(
                "Eviction order test; fill the memppol over its size and see what txs will be evicted.",
                0,
            [
                "-blockmintxfee=0.00001",  # 1 satoshi/byte
                "-minrelaytxfee=0",
                "-maxmempool=300MB",
                "-maxmempoolsizedisk=0",
                "-genesisactivationheight=1",
                '-maxstdtxvalidationduration=5000',
                '-maxnonstdtxvalidationduration=5001',
                '-maxstackmemoryusageconsensus=5MB',
                '-maxstackmemoryusagepolicy=5MB',
                '-maxscriptsizepolicy=5MB',
                '-checkmempool=0',
            ],
                number_of_connections=1) as (conn, ):

            mining_fee = 1.01  # in satoshi per byte

            # create block with coinbase
            coinbase1 = create_coinbase(height=1)
            first_block = create_block(int(conn.rpc.getbestblockhash(), 16),
                                       coinbase=coinbase1)
            first_block.solve()
            conn.send_message(msg_block(first_block))
            wait_until(lambda: conn.rpc.getbestblockhash() == first_block.hash,
                       check_interval=1)

            coinbase2 = create_coinbase(height=2)
            second_block = create_block(int(conn.rpc.getbestblockhash(), 16),
                                        coinbase=coinbase2)
            second_block.solve()
            conn.send_message(msg_block(second_block))
            wait_until(
                lambda: conn.rpc.getbestblockhash() == second_block.hash,
                check_interval=1)

            # mature the coinbase
            conn.rpc.generate(100)

            funding_tx = self.create_tx([(coinbase1, 0), (coinbase2, 0)], 16,
                                        mining_fee, 0)

            conn.send_message(msg_tx(funding_tx))
            check_mempool_equals(conn.rpc, [funding_tx])
            conn.rpc.generate(1)

            #        (funding_tx, 0)          (funding_tx, 1)  (funding_tx, 2)  (funding_tx, 3)   (funding_tx, 4-14)       (funding_tx, 15)
            # ---------------------------------------------------------------------------------------------------------------------
            #           group1tx1                lowPaying3           tx1            tx3            (long chain of      (chain of high
            #               |                         |                |              |             high paying txs     paying txs used to
            #           group1tx2                lowPaying4           tx2          lowPaying5     to fill the mempool)  push low paying txs
            #          /          \                                                                                      out of the mempool)
            #    group1paying   group2tx1
            #         |             |
            #    lowPaying1     group2tx2
            #                       |
            #                   group2paying
            #                       |
            #                   lowPaying2

            group1tx1 = self.create_tx([(funding_tx, 0)],
                                       noutput=1,
                                       feerate=0,
                                       totalSize=ONE_MEGABYTE)
            group1tx2 = self.create_tx([(group1tx1, 0)],
                                       noutput=2,
                                       feerate=0,
                                       totalSize=ONE_MEGABYTE)
            group1paying = self.create_tx(
                [(group1tx2, 0)],
                noutput=1,
                feerate=1.4,
                totalSize=ONE_MEGABYTE,
                size_of_nonpayin_txs=self.tx_size(group1tx1) +
                self.tx_size(group1tx2))

            group2tx1 = self.create_tx([(group1tx2, 1)],
                                       noutput=1,
                                       feerate=0,
                                       totalSize=ONE_MEGABYTE)
            group2tx2 = self.create_tx([(group2tx1, 0)],
                                       noutput=1,
                                       feerate=0,
                                       totalSize=ONE_MEGABYTE)
            group2paying = self.create_tx(
                [(group2tx2, 0)],
                noutput=1,
                feerate=1.6,
                totalSize=ONE_MEGABYTE,
                size_of_nonpayin_txs=self.tx_size(group2tx1) +
                self.tx_size(group2tx2))

            tx1 = self.create_tx([(funding_tx, 2)],
                                 noutput=1,
                                 feerate=1.1,
                                 totalSize=ONE_MEGABYTE)
            tx2 = self.create_tx([(tx1, 0)],
                                 noutput=1,
                                 feerate=1.8,
                                 totalSize=ONE_MEGABYTE)
            tx3 = self.create_tx([(funding_tx, 3)],
                                 noutput=1,
                                 feerate=1.1,
                                 totalSize=ONE_MEGABYTE)

            lowPaying1 = self.create_tx([(group1paying, 0)],
                                        noutput=1,
                                        feerate=0.1,
                                        totalSize=ONE_MEGABYTE)
            lowPaying2 = self.create_tx([(group2paying, 0)],
                                        noutput=1,
                                        feerate=0.2,
                                        totalSize=ONE_MEGABYTE)
            lowPaying3 = self.create_tx([(funding_tx, 1)],
                                        noutput=1,
                                        feerate=0.3,
                                        totalSize=ONE_MEGABYTE)
            lowPaying4 = self.create_tx([(lowPaying3, 0)],
                                        noutput=1,
                                        feerate=0.4,
                                        totalSize=ONE_MEGABYTE)
            lowPaying5 = self.create_tx([(tx3, 0)],
                                        noutput=1,
                                        feerate=0.5,
                                        totalSize=ONE_MEGABYTE)

            primaryMempoolTxs = [
                group1tx1, group1tx2, group1paying, group2tx1, group2tx2,
                group2paying, tx1, tx2, tx3
            ]
            secondaryMempoolTxs = [
                lowPaying1, lowPaying2, lowPaying3, lowPaying4, lowPaying5
            ]

            for tx in primaryMempoolTxs + secondaryMempoolTxs:
                conn.send_message(msg_tx(tx))
            check_mempool_equals(conn.rpc,
                                 primaryMempoolTxs + secondaryMempoolTxs)
            wait_until(lambda: conn.rpc.getminingcandidate()["num_tx"] == len(
                primaryMempoolTxs) + 1)

            txs_in_mempool = set(primaryMempoolTxs + secondaryMempoolTxs)
            outpoints_to_spend = [(funding_tx, n) for n in range(4, 15)]

            while len(txs_in_mempool) < 299:
                tx = self.create_tx([
                    outpoints_to_spend.pop(0),
                ],
                                    noutput=2,
                                    feerate=5,
                                    totalSize=ONE_MEGABYTE)
                outpoints_to_spend.append((tx, 0))
                outpoints_to_spend.append((tx, 1))
                conn.send_message(msg_tx(tx))
                txs_in_mempool.add(tx)

            check_mempool_equals(conn.rpc,
                                 txs_in_mempool,
                                 timeout=600,
                                 check_interval=2)

            eviction_order = [
                lowPaying1, lowPaying2, lowPaying4, lowPaying3, lowPaying5,
                tx3, group2paying, group2tx2, group2tx1, group1paying,
                group1tx2, group1tx1, tx2, tx1
            ]

            conn.rpc.log.info(f"lowPaying1 = {lowPaying1.hash}")
            conn.rpc.log.info(f"lowPaying2 = {lowPaying2.hash}")
            conn.rpc.log.info(f"lowPaying3 = {lowPaying3.hash}")
            conn.rpc.log.info(f"lowPaying4 = {lowPaying4.hash}")
            conn.rpc.log.info(f"lowPaying5 = {lowPaying5.hash}")
            conn.rpc.log.info(f"tx1 = {tx1.hash}")
            conn.rpc.log.info(f"tx2 = {tx2.hash}")
            conn.rpc.log.info(f"tx3 = {tx3.hash}")
            conn.rpc.log.info(f"group2paying = {group2paying.hash}")
            conn.rpc.log.info(f"group2tx2 = {group2tx2.hash}")
            conn.rpc.log.info(f"group2tx1 = {group2tx1.hash}")
            conn.rpc.log.info(f"group1paying = {group1paying.hash}")
            conn.rpc.log.info(f"group1tx2 = {group1tx2.hash}")
            conn.rpc.log.info(f"group1tx1 = {group1tx1.hash}")

            outpoint_to_spend = (funding_tx, 15)

            for evicting in eviction_order:
                tx = self.create_tx([
                    outpoint_to_spend,
                ],
                                    noutput=1,
                                    feerate=30,
                                    totalSize=ONE_MEGABYTE)
                outpoint_to_spend = (tx, 0)
                conn.send_message(msg_tx(tx))
                txs_in_mempool.add(tx)
                txs_in_mempool.remove(evicting)
                check_mempool_equals(conn.rpc,
                                     txs_in_mempool,
                                     check_interval=0.5,
                                     timeout=60)

                # While some secondary mempool transactions are still in the
                # mempool, the mempoolminfee should not exceed blockmintxfee.
                if len(txs_in_mempool & set(secondaryMempoolTxs)) != 0:
                    assert (conn.rpc.getmempoolinfo()['mempoolminfee'] <=
                            conn.rpc.getsettings()['blockmintxfee'])

        with self.run_node_with_connections(
                "Restart the node with using the disk for storing transactions.",
                0,
            [
                "-blockmintxfee=0.00001",  # 1 satoshi/byte
                "-minrelaytxfee=0",
                "-maxmempool=300MB",
                "-maxmempoolsizedisk=10MB",
                "-genesisactivationheight=1",
                '-maxstdtxvalidationduration=5000',
                '-maxnonstdtxvalidationduration=5001',
                '-maxstackmemoryusageconsensus=5MB',
                '-maxstackmemoryusagepolicy=5MB',
                '-maxscriptsizepolicy=5MB',
                '-checkmempool=0',
            ],
                number_of_connections=1) as (conn, ):
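            # With -maxmempoolsizedisk=10MB the mempool gets roughly 10 MB of
            # extra, disk-backed capacity on top of the 300 MB in-memory limit;
            # the usagedisk checks below rely on this.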

            # check that we have all txs in the mempool
            check_mempool_equals(conn.rpc,
                                 txs_in_mempool,
                                 check_interval=1,
                                 timeout=60)

            # check that we are not using the tx database
            assert conn.rpc.getmempoolinfo()['usagedisk'] == 0
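            # The in-memory mempool is already close to its 300 MB limit, so
            # the extra ~3 MB of txs added below are expected to overflow into
            # the disk-backed store.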

            # now we have room for some more txs
            for _ in range(3):
                tx = self.create_tx([
                    outpoint_to_spend,
                ],
                                    noutput=1,
                                    feerate=1,
                                    totalSize=ONE_MEGABYTE)
                outpoint_to_spend = (tx, 0)
                conn.send_message(msg_tx(tx))
                txs_in_mempool.add(tx)
                check_mempool_equals(conn.rpc,
                                     txs_in_mempool,
                                     check_interval=0.5,
                                     timeout=60)

            # make sure that we are using the tx database now
            assert conn.rpc.getmempoolinfo()['usagedisk'] != 0

        with self.run_node_with_connections(
                "Restart the node once again to see if transaction were stored in the db.",
                0,
            [
                "-blockmintxfee=0.00001",  # 1 satoshi/byte
                "-minrelaytxfee=0",
                "-maxmempool=300MB",
                "-maxmempoolsizedisk=10MB",
                "-genesisactivationheight=1",
                '-maxstdtxvalidationduration=5000',
                '-maxnonstdtxvalidationduration=5001',
                '-maxstackmemoryusageconsensus=5MB',
                '-maxstackmemoryusagepolicy=5MB',
                '-maxscriptsizepolicy=5MB',
                '-checkmempool=0',
            ],
                number_of_connections=1) as (conn, ):

            # check that we have all txs in the mempool
            check_mempool_equals(conn.rpc,
                                 txs_in_mempool,
                                 check_interval=1,
                                 timeout=60)

            # make sure that we are using the tx database
            assert conn.rpc.getmempoolinfo()['usagedisk'] != 0
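
# For reference only: a minimal sketch of how the check_mempool_equals helper
# used throughout these examples might be implemented. The framework's real
# helper may differ; this sketch assumes that wait_until accepts timeout and
# check_interval keyword arguments and that every tx object exposes its txid
# through the .hash attribute.
def check_mempool_equals_sketch(rpc, expected_txs, timeout=20, check_interval=0.1):
    # getrawmempool returns the txids currently in the mempool
    expected_txids = {tx.hash for tx in expected_txs}
    wait_until(lambda: set(rpc.getrawmempool()) == expected_txids,
               timeout=timeout, check_interval=check_interval)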
Exemplo n.º 12
0
    def run_cpfp_scenario1(self,
                           conn,
                           txchains,
                           last_descendant_from_each_txchain,
                           chain_length,
                           num_of_chains,
                           mining_fee,
                           locking_script,
                           rpcsend=None,
                           timeout=240):
        #
        # Send low fee (paying relay_fee) txs to the node.
        #
        exp_mempool_size = conn.rpc.getmempoolinfo()['size'] + len(txchains)
        elapsed1 = self.send_txs(rpcsend, conn, txchains, exp_mempool_size,
                                 timeout)
        # Check if mempool contains all low fee txs.
        check_mempool_equals(conn.rpc, txchains, timeout)

        #
        # Check getminingcandidate result: There should be no cpfp txs in the block template, due to low fees.
        #
        # there should be the coinbase tx only
        wait_until(lambda: conn.rpc.getminingcandidate()["num_tx"] == 1)

        #
        # Create and send cpfp txs (paying mining_fee).
        #
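        # Each cpfp tx pays (chain_length + 1) * mining_fee, i.e. enough to
        # cover the mining fee for itself and for all chain_length ancestors
        # in its chain.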
        cpfp_txs_pay_for_ancestors = []
        for tx in last_descendant_from_each_txchain:
            cpfp_txs_pay_for_ancestors.append(
                self.create_tx([(tx, 0)], 2, (chain_length + 1) * (mining_fee),
                               locking_script))
        # Send cpfp txs.
        exp_mempool_size = conn.rpc.getmempoolinfo()['size'] + len(
            cpfp_txs_pay_for_ancestors)
        elapsed2 = self.send_txs(rpcsend, conn, cpfp_txs_pay_for_ancestors,
                                 exp_mempool_size, timeout)
        # Check if there is a required number of txs in the mempool.
        check_mempool_equals(conn.rpc, cpfp_txs_pay_for_ancestors + txchains,
                             timeout)

        #
        # Check getminingcandidate result: There should be all cpfp txs (+ ancestor txs) in the block template.
        #
        # +1 for the coinbase tx
        wait_until(lambda: conn.rpc.getminingcandidate()["num_tx"] ==
                   len(cpfp_txs_pay_for_ancestors + txchains) + 1)

        #
        # Collect stats.
        #
        interface_name = "p2p" if rpcsend is None else rpcsend._service_name
        self.log.info(
            "[%s]: Submit and process %d txchains of length %d (%d relay_fee std txs) [time duration: %.6f sec]",
            interface_name, num_of_chains, chain_length,
            num_of_chains * chain_length, elapsed1)
        self.log.info(
            "[%s]: Submit and process %d cpfp std txs (each pays mining_fee) [time duration: %.6f sec]",
            interface_name, len(cpfp_txs_pay_for_ancestors), elapsed2)
        self.log.info(
            "[%s]: Total time to submit and process %d std txs took %.6f sec",
            interface_name,
            num_of_chains * chain_length + len(cpfp_txs_pay_for_ancestors),
            elapsed1 + elapsed2)

        return elapsed1 + elapsed2
Exemplo n.º 13
0
    def run_test(self):
        with self.run_node_with_connections(
                "Scenario 1: Create complex graph of txs, doublespend some fo them and check mempool after",
                0, ["-minrelaytxfee=0"],
                number_of_connections=1) as (conn, ):

            # create block with coinbase
            coinbase = create_coinbase(height=1)
            first_block = create_block(int(conn.rpc.getbestblockhash(), 16),
                                       coinbase=coinbase)
            first_block.solve()
            conn.send_message(msg_block(first_block))
            wait_until(lambda: conn.rpc.getbestblockhash() == first_block.hash,
                       check_interval=0.3)

            # mature the coinbase
            conn.rpc.generate(150)

            funding_tx = self.create_tx([(coinbase, 0)], 2)

            conn.send_message(msg_tx(funding_tx))
            check_mempool_equals(conn.rpc, [funding_tx])
            conn.rpc.generate(1)

            last_block_info = conn.rpc.getblock(conn.rpc.getbestblockhash())
            block = create_block(
                int(last_block_info["hash"], 16),
                coinbase=create_coinbase(height=last_block_info["height"] + 1),
                nTime=last_block_info["time"] + 1)
            low_fee_tx = self.create_tx([(funding_tx, 0)], 2)
            block.vtx.append(low_fee_tx)
            block.hashMerkleRoot = block.calc_merkle_root()
            block.calc_sha256()
            block.solve()

            conn.send_message(msg_block(block))
            wait_until(lambda: conn.rpc.getbestblockhash() == block.hash,
                       check_interval=0.3)
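            # low_fee_tx was mined directly in the block above without ever
            # being relayed, so its outputs are now confirmed and can fund the
            # tx graph sketched below.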

            #   tx_double_spend_mempool            tx_to_be_mined
            #          |      |                      |        |
            #     -------------------------------------------------------
            #          |      |                      |        |
            #          |      +-------+     +--------+        |
            #          |              |     |                 |
            #          |     tx_descedant_of_conflict_1   tx_stay_in_mempool_1
            #          |                 |                    |        |
            #          |                 |                    |        |
            #          |        tx_descedant_of_conflict_2    |    tx_stay_in_mempool_2
            #          |                          |           |               |
            #          |                          |           |               |
            #          |                      tx_descedant_of_conflict_3      |
            #          |                                                      |
            #          +---------+           +--------------------------------+
            #                    |           |
            #                tx_descedant_of_conflict_4

            tx_double_spend_mempool = self.create_tx([(low_fee_tx, 0)], 2)
            tx_double_spend_block = self.create_tx([(low_fee_tx, 0)], 1)
            tx_to_be_mined = self.create_tx([(low_fee_tx, 1)], 2)
            tx_descedant_of_conflict_1 = self.create_tx(
                [(tx_double_spend_mempool, 0), (tx_to_be_mined, 0)], 1)
            tx_descedant_of_conflict_2 = self.create_tx(
                [(tx_descedant_of_conflict_1, 0)], 1)
            tx_stay_in_mempool_1 = self.create_tx([(tx_to_be_mined, 1)], 2)
            tx_descedant_of_conflict_3 = self.create_tx(
                [(tx_descedant_of_conflict_2, 0),
                 (tx_stay_in_mempool_1, 0)], 2)
            tx_stay_in_mempool_2 = self.create_tx([(tx_stay_in_mempool_1, 1)],
                                                  1)
            tx_descedant_of_conflict_4 = self.create_tx(
                [(tx_double_spend_mempool, 1), (tx_stay_in_mempool_2, 0)], 1)

            conn.send_message(msg_tx(tx_double_spend_mempool))
            conn.send_message(msg_tx(tx_to_be_mined))
            conn.send_message(msg_tx(tx_descedant_of_conflict_1))
            conn.send_message(msg_tx(tx_descedant_of_conflict_2))
            conn.send_message(msg_tx(tx_stay_in_mempool_1))
            conn.send_message(msg_tx(tx_descedant_of_conflict_3))
            conn.send_message(msg_tx(tx_stay_in_mempool_2))
            conn.send_message(msg_tx(tx_descedant_of_conflict_4))

            check_mempool_equals(conn.rpc, [
                tx_double_spend_mempool, tx_to_be_mined,
                tx_descedant_of_conflict_1, tx_descedant_of_conflict_2,
                tx_stay_in_mempool_1, tx_descedant_of_conflict_3,
                tx_stay_in_mempool_2, tx_descedant_of_conflict_4
            ])
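            # block2 mines tx_to_be_mined together with a conflicting spend of
            # (low_fee_tx, 0). Connecting it should remove tx_to_be_mined (now
            # confirmed), evict tx_double_spend_mempool and every descendant of
            # the conflict, and leave only the two tx_stay_in_mempool_* txs.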

            block2 = create_block(
                block.sha256,
                coinbase=create_coinbase(height=last_block_info["height"] + 2),
                nTime=last_block_info["time"] + 2)
            block2.vtx.append(tx_double_spend_block)
            block2.vtx.append(tx_to_be_mined)
            block2.hashMerkleRoot = block2.calc_merkle_root()
            block2.calc_sha256()
            block2.solve()

            conn.send_message(msg_block(block2))
            wait_until(lambda: conn.rpc.getbestblockhash() == block2.hash,
                       check_interval=0.3)

            check_mempool_equals(conn.rpc,
                                 [tx_stay_in_mempool_1, tx_stay_in_mempool_2])