def test_parallelchains(self):
        """
        Check that preciousblock cannot activate a soft rejected block and
        that softrejectblock marks blocks correctly on two parallel chains.
        """
        node0, node1 = self.nodes[0], self.nodes[1]

        b1_hash = node0.generate(1)[0]
        sync_blocks(self.nodes)

        disconnect_nodes_bi(self.nodes, 0, 1)

        # Build two competing two-block branches, one per node.
        b2_hash, b3_hash = node0.generate(2)
        b2a_hash, b3a_hash = node1.generate(2)

        connect_nodes_bi(self.nodes, 0, 1)
        self.wait_for_chain_tips(node0, {b3_hash, b3a_hash})

        # 1. preciousblock must not activate a soft rejected block.
        node0.softrejectblock(b2_hash, 1)
        node0.waitforblockheight(3)
        assert_equal(node0.getbestblockhash(), b3a_hash)
        node0.preciousblock(b3_hash)
        assert_equal(node0.getbestblockhash(), b3a_hash)

        node0.acceptblock(b2_hash)

        # 2. softrejectblock must mark descendants on both parallel chains.
        node0.softrejectblock(b1_hash, 3)
        assert_equal(self.soft_rej_blocks_hashes(node0),
                     {b1_hash, b2_hash, b2a_hash, b3_hash, b3a_hash})
        node1.generate(2)
        sync_blocks(self.nodes)
    def run_test(self):
        """Verify node2 triggers a parallel block download when a faster
        source for an in-flight large block becomes available."""
        # Mine some coinbases we can spend later.
        self.nodes[0].generate(150)
        sync_blocks(self.nodes[0:3])

        # Leave node2 connected only to slow-sending node1 so it is forced
        # to request the next blocks from that peer.
        disconnect_nodes_bi(self.nodes, 0, 2)
        disconnect_nodes_bi(self.nodes, 1, 2)
        connect_nodes(self.nodes, 2, 1)

        # Extend the chain: one big block followed by a few small ones.
        utxo_pool = []
        mine_large_block(self.nodes[0], utxo_pool)
        large_block_hash = self.nodes[0].getbestblockhash()
        self.nodes[0].generate(5)
        sync_blocks(self.nodes[0:2])

        # Wait until node2 has started requesting the big block from node1.
        def block_in_flight(height):
            return height in self.nodes[2].getpeerinfo()[0]["inflight"]

        wait_until(lambda: block_in_flight(151), check_interval=1)

        # Give node2 an alternative peer from which to fetch blocks.
        connect_nodes(self.nodes, 2, 0)
        sync_blocks(self.nodes[0:3])

        # node2 should have kicked off a parallel fetch from node0.
        assert check_for_log_msg(
            self, "Triggering parallel block download for {}".format(
                large_block_hash), "/node2")
    def test_with_preciousblock(self):
        """
        Exercise softrejectblock in combination with preciousblock.
        """
        node0, node1 = self.nodes[0], self.nodes[1]

        node0.generate(1)
        sync_blocks(self.nodes)

        disconnect_nodes_bi(self.nodes, 0, 1)

        # Competing branches: four blocks on node0, three on node1.
        hashes_node0 = node0.generate(4)
        b2_hash = hashes_node0[0]
        b5_hash = hashes_node0[3]
        b4a_hash = node1.generate(3)[-1]

        connect_nodes_bi(self.nodes, 0, 1)
        self.wait_for_chain_tips(node0, {b5_hash, b4a_hash})

        node0.softrejectblock(b2_hash, 3)
        node0.waitforblockheight(4)
        assert_equal(node0.getbestblockhash(), b4a_hash)

        # preciousblock must not delete block candidates that carry the
        # soft rejected status.
        node0.preciousblock(b4a_hash)
        # Reconsidering b2 must make the previously longest chain reachable again.
        node0.acceptblock(b2_hash)
        assert_equal(node0.getbestblockhash(), b5_hash)
# Beispiel #4
# 0
    def run_test(self):
        """Verify a >4GB block is only relayed to peers on the new protocol
        version; the old-version peer is refused it (P2P message limit), and
        bitcoind peers still obtain it via compact blocks.
        """
        # Get out of IBD
        self.nodes[0].generate(1)
        self.sync_all()

        # Stop node so we can restart it with our connections
        self.stop_node(0)

        # Disconnect node1 and node2 for now
        disconnect_nodes_bi(self.nodes, 1, 2)

        # One connection on the current protocol version, one pinned to the
        # older version 70015.
        connArgs = [ { "versionNum":MY_VERSION }, { "versionNum":70015 } ]
        with self.run_node_with_connections("Test old and new protocol versions", 0, self.nodeArgs, number_of_connections=2,
                                            connArgs=connArgs, cb_class=MyConnCB) as (newVerConn,oldVerConn):
            assert newVerConn.connected
            assert oldVerConn.connected

            # Generate small block, verify we get it over both connections
            self.nodes[0].generate(1)
            wait_until(lambda: newVerConn.cb.block_count == 1, timeout=int(30 * self.options.timeoutfactor))
            wait_until(lambda: oldVerConn.cb.block_count == 1, timeout=int(30 * self.options.timeoutfactor))

            # Get us a spendable output
            coinbase_tx = self.make_coinbase(newVerConn)
            self.nodes[0].generate(100)

            # Put some large txns into the nodes mempool until it exceeds 4GB in size
            self.create_and_send_transactions(newVerConn, coinbase_tx, 5)

            # Reconnect node0 and node2 and sync their blocks. Node2 will end up receiving the
            # large block via compact blocks
            connect_nodes(self.nodes, 0, 2)
            sync_blocks(itemgetter(0,2)(self.nodes))

            # Mine a >4GB block, verify we only get it over the new connection
            old_block_count = newVerConn.cb.block_count
            logger.info("Mining a big block")
            self.nodes[0].generate(1)
            # All the large transactions must have been drained into the block.
            assert(self.nodes[0].getmempoolinfo()['size'] == 0)
            logger.info("Waiting for block to arrive at test")
            wait_until(lambda: newVerConn.cb.block_count == old_block_count+1, timeout=int(1200 * self.options.timeoutfactor))

            # Look for log message saying we won't send to old peer
            wait_until(lambda: check_for_log_msg(self, "cannot be sent because it exceeds max P2P message limit", "/node0"))

            # Verify node2 gets the big block via a (not very) compact block
            wait_until(lambda: self.nodes[0].getbestblockhash() == self.nodes[2].getbestblockhash())
            peerinfo = self.nodes[2].getpeerinfo()
            assert(peerinfo[0]['bytesrecv_per_msg']['cmpctblock'] > 0)
            assert(peerinfo[0]['bytesrecv_per_msg']['blocktxn'] > 0)

            # Reconnect node0 to node1
            logger.info("Syncing bitcoind nodes to big block")
            connect_nodes(self.nodes, 0, 1)
            self.sync_all(timeout=int(1200 * self.options.timeoutfactor))

            # Verify node1 also got the big block
            assert(self.nodes[0].getbestblockhash() == self.nodes[1].getbestblockhash())
# Beispiel #5
# 0
    def run_test(self):
        """Check sidechain (SC) block forging against mainchain (MC) forks:
        generated SC blocks must carry the expected MainchainHeaders,
        MainchainRefData and (recursive) ommers after each MC reorg.
        """
        # Synchronize mc_node1, mc_node2 and mc_node3, then disconnect them.
        self.sync_all()
        disconnect_nodes_bi(self.nodes, 0, 1)
        disconnect_nodes_bi(self.nodes, 0, 2)
        mc_node1 = self.nodes[0]
        mc_node2 = self.nodes[1]
        mc_node3 = self.nodes[2]
        sc_node1 = self.sc_nodes[0]

        # Test 1: Generate SC block, when all MC blocks already synchronized.
        # Generate 1 SC block
        scblock_id0 = generate_next_blocks(sc_node1, "first node", 1)[0]
        # Verify that SC block has no MC headers, ref data, ommers
        check_mcheaders_amount(0, scblock_id0, sc_node1)
        check_mcreferencedata_amount(0, scblock_id0, sc_node1)
        check_ommers_amount(0, scblock_id0, sc_node1)

        # Test 2: Generate SC block, when new MC blocks following the same Tip appear.
        # Generate 1 MC block on the first MC node
        mcblock_hash1 = mc_node1.generate(1)[0]
        # Generate 1 SC block
        scblock_id1 = generate_next_blocks(sc_node1, "first node", 1)[0]
        check_scparent(scblock_id0, scblock_id1, sc_node1)
        # Verify that SC block contains MC block as a MainchainReference
        check_mcheaders_amount(1, scblock_id1, sc_node1)
        check_mcreferencedata_amount(1, scblock_id1, sc_node1)
        check_mcreference_presence(mcblock_hash1, scblock_id1, sc_node1)
        check_ommers_amount(0, scblock_id1, sc_node1)

        # Test 3: Generate SC block, when new MC blocks following different Tip appear. Ommers expected.
        # Generate another 2 MC blocks on the second MC node
        fork_mcblock_hash1 = mc_node2.generate(1)[0]
        fork_mcblock_hash2 = mc_node2.generate(1)[0]

        # Connect and synchronize MC node 1 to MC node 2
        connect_nodes_bi(self.nodes, 0, 1)
        self.sync_nodes([mc_node1, mc_node2])
        # MC Node 1 should replace mcblock_hash1 Tip with [fork_mcblock_hash1, fork_mcblock_hash2]
        assert_equal(fork_mcblock_hash2, mc_node1.getbestblockhash())

        # Generate 1 SC block
        scblock_id2 = generate_next_blocks(sc_node1, "first node", 1)[0]
        check_scparent(scblock_id0, scblock_id2, sc_node1)
        # Verify that SC block contains newly created MC blocks as a MainchainHeaders and no MainchainRefData
        check_mcheaders_amount(2, scblock_id2, sc_node1)
        check_mcreferencedata_amount(0, scblock_id2, sc_node1)
        check_mcheader_presence(fork_mcblock_hash1, scblock_id2, sc_node1)
        check_mcheader_presence(fork_mcblock_hash2, scblock_id2, sc_node1)
        # Verify that SC block contains 1 Ommer with 1 MainchainHeader
        check_ommers_amount(1, scblock_id2, sc_node1)
        check_ommers_cumulative_score(1, scblock_id2, sc_node1)
        check_ommer(scblock_id1, [mcblock_hash1], scblock_id2, sc_node1)

        # Test 4: Generate SC block, when new MC blocks following the same Tip appear + 2 previous RefData expecting to be synchronized.
        # Generate 2 more mc blocks in MC node 1
        mcblock_hash3 = mc_node1.generate(1)[0]
        mcblock_hash4 = mc_node1.generate(1)[0]

        # Generate SC block
        scblock_id3 = generate_next_blocks(sc_node1, "first node", 1)[0]
        check_scparent(scblock_id2, scblock_id3, sc_node1)
        # Verify that SC block MainchainHeaders and MainchainRefData
        check_mcheaders_amount(2, scblock_id3, sc_node1)
        check_mcreferencedata_amount(3, scblock_id3, sc_node1)
        check_mcheader_presence(mcblock_hash3, scblock_id3, sc_node1)
        check_mcheader_presence(mcblock_hash4, scblock_id3, sc_node1)
        check_mcreferencedata_presence(fork_mcblock_hash1, scblock_id3,
                                       sc_node1)
        check_mcreferencedata_presence(fork_mcblock_hash2, scblock_id3,
                                       sc_node1)
        check_mcreferencedata_presence(mcblock_hash3, scblock_id3, sc_node1)
        check_ommers_amount(0, scblock_id3, sc_node1)

        # Generate SC block to synchronize the rest of MC blocks
        scblock_id4 = generate_next_blocks(sc_node1, "first node", 1)[0]
        check_scparent(scblock_id3, scblock_id4, sc_node1)
        # Verify that SC block MainchainHeaders and MainchainRefData
        check_mcheaders_amount(0, scblock_id4, sc_node1)
        check_mcreferencedata_amount(1, scblock_id4, sc_node1)
        check_mcreferencedata_presence(mcblock_hash4, scblock_id4, sc_node1)
        check_ommers_amount(0, scblock_id4, sc_node1)

        # Generate SC block with no MC data. Needed for further test
        scblock_id5 = generate_next_blocks(sc_node1, "first node", 1)[0]
        check_scparent(scblock_id4, scblock_id5, sc_node1)
        check_mcheaders_amount(0, scblock_id5, sc_node1)
        check_mcreferencedata_amount(0, scblock_id5, sc_node1)
        check_ommers_amount(0, scblock_id5, sc_node1)

        # Test 5: MC node 3 (disconnected since the start of the test) builds a
        #         longer competing fork. After MC synchronization the SC node
        #         should forge a block containing recursive ommers.
        # Generate another 5 blocks on MC node 3
        another_fork_mcblocks_hashes = mc_node3.generate(5)
        another_fork_tip_hash = another_fork_mcblocks_hashes[-1]
        connect_nodes_bi(self.nodes, 0, 2)
        self.sync_all()
        # MC Node 1 should replace mcblock_hash4 Tip with another_fork_tip_hash
        assert_equal(another_fork_tip_hash, mc_node1.getbestblockhash())

        # Generate SC block
        scblock_id6 = generate_next_blocks(sc_node1, "first node", 1)[0]
        # print(json.dumps(sc_node1.block_findById(blockId=scblock_id6), indent=4))
        check_scparent(scblock_id0, scblock_id6, sc_node1)
        # Verify that SC block contains newly created MC blocks as a MainchainHeaders and no MainchainRefData
        check_mcheaders_amount(5, scblock_id6, sc_node1)
        check_mcreferencedata_amount(0, scblock_id6, sc_node1)
        for mchash in another_fork_mcblocks_hashes:
            check_mcheader_presence(mchash, scblock_id6, sc_node1)
        # Verify that SC block contains 4 Ommers
        check_ommers_amount(4, scblock_id6, sc_node1)
        # Verify Ommers cumulative score, that must also count 1 subommer
        check_ommers_cumulative_score(5, scblock_id6, sc_node1)
        expected_ommers_ids = [
            scblock_id2, scblock_id3, scblock_id4, scblock_id5
        ]
        for ommer_id in expected_ommers_ids:
            check_ommer(ommer_id, [], scblock_id6, sc_node1)
        check_subommer(scblock_id2, scblock_id1, [mcblock_hash1], scblock_id6,
                       sc_node1)
    def run_test(self):
        """Check that an SC block built on an orphaned MC block is ommered
        after an MC reorg, and that a second SC node syncs to the same tip.
        """
        mc_node1 = self.nodes[0]
        mc_node2 = self.nodes[1]
        sc_node1 = self.sc_nodes[0]
        sc_node2 = self.sc_nodes[1]

        # Synchronize mc_node1 and mc_node2
        self.sync_all()

        # Remember the SC genesis tip so we can verify sc_node2 stayed idle.
        genesis_sc_block_id = sc_node1.block_best()["result"]

        # Generate 1 SC block without any MC block info
        scblock_id0 = generate_next_blocks(sc_node1, "first node", 1)[0]
        # Verify that SC block has no MC headers, ref data, ommers
        check_mcheaders_amount(0, scblock_id0, sc_node1)
        check_mcreferencedata_amount(0, scblock_id0, sc_node1)
        check_ommers_amount(0, scblock_id0, sc_node1)

        # Generate 1 MC block on the first MC node
        mcblock_hash1 = mc_node1.generate(1)[0]
        # Synchronize mc_node1 and mc_node2, then disconnect them.
        self.sync_all()
        disconnect_nodes_bi(self.nodes, 0, 1)

        # Generate 1 more MC block on the first MC node
        mcblock_hash2 = mc_node1.generate(1)[0]

        # Generate 1 SC block, that should put 2 MC blocks inside
        # SC block contains MC `mcblock_hash1` that is common for MC Nodes 1,2 and `mcblock_hash2` that is known only by MC Node 1.
        scblock_id1 = generate_next_blocks(sc_node1, "first node", 1)[0]
        check_scparent(scblock_id0, scblock_id1, sc_node1)
        # Verify that SC block contains MC block as a MainchainReference
        check_mcheaders_amount(2, scblock_id1, sc_node1)
        check_mcreferencedata_amount(2, scblock_id1, sc_node1)
        check_mcreference_presence(mcblock_hash1, scblock_id1, sc_node1)
        check_mcreference_presence(mcblock_hash2, scblock_id1, sc_node1)
        check_ommers_amount(0, scblock_id1, sc_node1)

        # Generate another 2 MC blocks on the second MC node
        fork_mcblock_hash1 = mc_node2.generate(1)[0]
        fork_mcblock_hash2 = mc_node2.generate(1)[0]

        # Connect and synchronize MC node 1 to MC node 2
        connect_nodes_bi(self.nodes, 0, 1)
        self.sync_all()
        # MC Node 1 should replace mcblock_hash2 Tip with [fork_mcblock_hash1, fork_mcblock_hash2]
        assert_equal(fork_mcblock_hash2, mc_node1.getbestblockhash())

        # Generate 1 SC block
        # SC block must contains `mcblock_hash1` again and add fork_mcblock_hash1,2
        # Ommered block also contains common `mcblock_hash1`, but moreover an orphaned `mcblock_hash2`
        scblock_id2 = generate_next_blocks(sc_node1, "first node", 1)[0]
        check_scparent(scblock_id0, scblock_id2, sc_node1)
        # Verify that SC block contains newly created MC blocks as a MainchainHeaders and no MainchainRefData
        check_mcheaders_amount(3, scblock_id2, sc_node1)
        check_mcreferencedata_amount(0, scblock_id2, sc_node1)
        check_mcheader_presence(mcblock_hash1, scblock_id2, sc_node1)
        check_mcheader_presence(fork_mcblock_hash1, scblock_id2, sc_node1)
        check_mcheader_presence(fork_mcblock_hash2, scblock_id2, sc_node1)
        # Verify that SC block contains 1 Ommer with 1 MainchainHeader
        check_ommers_amount(1, scblock_id2, sc_node1)
        check_ommers_cumulative_score(1, scblock_id2, sc_node1)
        check_ommer(scblock_id1, [mcblock_hash1, mcblock_hash2], scblock_id2, sc_node1)

        # sc_node2 was never connected, so it must still be at the SC genesis tip.
        assert_equal(genesis_sc_block_id, sc_node2.block_best()["result"])
        connect_sc_nodes(self.sc_nodes[0], 1)
        self.sc_sync_all()

        # After connecting, both SC nodes must agree on the best block.
        assert_equal(sc_node1.block_best()["result"], sc_node2.block_best()["result"])
    def _zmq_test(self):
        """Exercise ZMQ mempool-removal notifications.

        Case 1: txs mined into a block -> "removedfrommempoolblock" with
                reason "included-in-block".
        Case 2: block invalidation -> same topic with reason "reorg".
        Case 3: double-spend already in a peer's block ->
                "discardedfrommempool" with reason "collision-in-block-tx".
        Case 4: same collision, but triggered by a reorg to a longer chain.
        """
        block_hashes = self.nodes[0].generate(101)
        """Test case 1"""
        tx_hash1 = self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(),
                                               1.0)
        tx_hash2 = self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(),
                                               1.0)

        block_hash1 = self.nodes[0].generate(1)[0]
        # sync blocks so we are synchronized later in test
        sync_blocks(self.nodes)

        # receive notifications for txs to be included in block
        msg1 = self.zmqSubSocket.recv_multipart()
        assert_equal(msg1[0], b"removedfrommempoolblock")
        msg1_body = json.loads(msg1[1])
        assert_equal(msg1_body["reason"], "included-in-block")
        msg2 = self.zmqSubSocket.recv_multipart()
        assert_equal(msg2[0], b"removedfrommempoolblock")
        msg2_body = json.loads(msg2[1])
        assert_equal(msg2_body["reason"], "included-in-block")

        # Notification order is not guaranteed, so check as a pair.
        removed_tx = [msg1_body["txid"], msg2_body["txid"]]
        assert_equal(tx_hash1 in removed_tx and tx_hash2 in removed_tx, True)
        """Test case 2"""
        # bring txs back to mempool
        self.nodes[0].invalidateblock(block_hash1)
        # invalidate again so the coins that txs uses are immature
        self.nodes[0].invalidateblock(block_hashes[-2])

        # receive notifications for txs about reorg mempool removal reason
        msg1 = self.zmqSubSocket.recv_multipart()
        assert_equal(msg1[0], b"removedfrommempoolblock")
        msg1_body = json.loads(msg1[1])
        assert_equal(msg1_body["reason"], "reorg")
        msg2 = self.zmqSubSocket.recv_multipart()
        assert_equal(msg2[0], b"removedfrommempoolblock")
        msg2_body = json.loads(msg2[1])
        assert_equal(msg2_body["reason"], "reorg")

        removed_tx = [msg1_body["txid"], msg2_body["txid"]]
        assert_equal(tx_hash1 in removed_tx and tx_hash2 in removed_tx, True)
        """Test case 3"""
        # bring both nodes on same height
        self.nodes[1].invalidateblock(block_hashes[-2])
        self.nodes[0].generate(4)
        sync_blocks(self.nodes)
        unspent = self.nodes[0].listunspent()[0]

        # create tx with spendable output for both nodes to use
        tx_spendable_output = CTransaction()
        tx_outs = [CTxOut(4500000000, CScript([OP_TRUE]))]
        tx_spendable_output.vout = tx_outs
        tx_spendable_output.vin = [
            CTxIn(COutPoint(int(unspent["txid"], 16), 0))
        ]

        tx_hex = self.nodes[0].signrawtransaction(
            ToHex(tx_spendable_output))['hex']
        self.nodes[0].sendrawtransaction(tx_hex, True)
        tx_spendable_output = FromHex(CTransaction(), tx_hex)
        tx_spendable_output.rehash()

        self.nodes[0].generate(1)
        # ignore included in block message
        _ = self.zmqSubSocket.recv_multipart()
        sync_blocks(self.nodes)

        # disconnect nodes and create transaction tx2 on node1 and mine a block
        # then create tx1 on node0 that use same output as tx2.
        disconnect_nodes_bi(self.nodes, 0, 1)

        tx2 = CTransaction()
        tx_outs = [CTxOut(4400000000, CScript([OP_TRUE]))]
        tx2.vout = tx_outs
        tx2.vin = [CTxIn(COutPoint(int(tx_spendable_output.hash, 16), 0))]

        tx_hex = self.nodes[1].signrawtransaction(ToHex(tx2))['hex']
        # hex length halved gives the serialized size in bytes
        tx2_size = len(tx_hex) // 2
        tx2 = FromHex(CTransaction(), tx_hex)
        tx2.rehash()
        self.nodes[1].sendrawtransaction(tx_hex, True)
        blockhash = self.nodes[1].generate(1)[0]

        tx1 = CTransaction()
        tx_outs = [CTxOut(4300000000, CScript([OP_TRUE]))]
        tx1.vout = tx_outs
        tx1.vin = [CTxIn(COutPoint(int(tx_spendable_output.hash, 16), 0))]

        tx_hex = self.nodes[0].signrawtransaction(ToHex(tx1))['hex']
        tx1 = FromHex(CTransaction(), tx_hex)
        tx1.rehash()
        self.nodes[0].sendrawtransaction(tx_hex, True)

        # connect nodes again and sync blocks, we now expect to get conflict for tx1
        # because tx2 that uses same output as tx1 is already in block.
        connect_nodes_bi(self.nodes, 0, 1)
        sync_blocks(self.nodes)

        msg = self.zmqSubSocket.recv_multipart()
        assert_equal(msg[0], b"discardedfrommempool")
        body = json.loads(msg[1])
        assert_equal(body["reason"], "collision-in-block-tx")
        assert_equal(body["txid"], tx1.hash)
        assert_equal(body["collidedWith"]["txid"], tx2.hash)
        assert_equal(body["collidedWith"]["size"], tx2_size)
        assert_equal(body["blockhash"], blockhash)
        """Test case 4"""
        # create tx with spendable output for both nodes to use
        unspent = self.nodes[0].listunspent()[0]
        tx_spendable_output = CTransaction()
        tx_outs = [CTxOut(4500000000, CScript([OP_TRUE]))]
        tx_spendable_output.vout = tx_outs
        tx_spendable_output.vin = [
            CTxIn(COutPoint(int(unspent["txid"], 16), 0))
        ]

        tx_hex = self.nodes[0].signrawtransaction(
            ToHex(tx_spendable_output))['hex']
        self.nodes[0].sendrawtransaction(tx_hex, True)
        tx_spendable_output = FromHex(CTransaction(), tx_hex)
        tx_spendable_output.rehash()

        self.nodes[0].generate(5)
        # ignore included in block message
        _ = self.zmqSubSocket.recv_multipart()
        sync_blocks(self.nodes)

        # disconnect nodes; mine few blocks on n1; create transaction tx2 on node1 and mine a block
        # then create tx1 on node0 that use same output as tx2.
        disconnect_nodes_bi(self.nodes, 0, 1)

        self.nodes[1].generate(5)
        tx2 = CTransaction()
        tx_outs = [CTxOut(4400000000, CScript([OP_TRUE]))]
        tx2.vout = tx_outs
        tx2.vin = [CTxIn(COutPoint(int(tx_spendable_output.hash, 16), 0))]

        tx_hex = self.nodes[1].signrawtransaction(ToHex(tx2))['hex']
        tx2_size = len(tx_hex) // 2
        tx2 = FromHex(CTransaction(), tx_hex)
        tx2.rehash()
        self.nodes[1].sendrawtransaction(tx_hex, True)
        blockhash_tx2 = self.nodes[1].generate(1)[0]

        tx1 = CTransaction()
        tx_outs = [CTxOut(4300000000, CScript([OP_TRUE]))]
        tx1.vout = tx_outs
        tx1.vin = [CTxIn(COutPoint(int(tx_spendable_output.hash, 16), 0))]

        tx_hex = self.nodes[0].signrawtransaction(ToHex(tx1))['hex']
        tx1 = FromHex(CTransaction(), tx_hex)
        tx1.rehash()
        self.nodes[0].sendrawtransaction(tx_hex, True)

        self.nodes[0].generate(1)
        # ignore included in block message
        _ = self.zmqSubSocket.recv_multipart()

        # connect nodes again to cause reorg to n1 chain, we now expect to
        # get conflict for tx1, because tx2 that uses same input as tx1 is already
        # in block on longer chain.
        connect_nodes_bi(self.nodes, 0, 1)
        sync_blocks(self.nodes)

        msg = self.zmqSubSocket.recv_multipart()
        assert_equal(msg[0], b"discardedfrommempool")
        body = json.loads(msg[1])
        assert_equal(body["reason"], "collision-in-block-tx")
        assert_equal(body["txid"], tx1.hash)
        assert_equal(body["collidedWith"]["txid"], tx2.hash)
        assert_equal(body["collidedWith"]["size"], tx2_size)
        assert_equal(body["blockhash"], blockhash_tx2)
# Beispiel #8
# 0
    def run_test(self):
        """Check SC block forging across repeated MC chain switches: ommers
        and subommers must track each orphaned SC branch.
        """
        # Synchronize mc_node1, mc_node2 and mc_node3, then disconnect them.
        self.sync_all()
        disconnect_nodes_bi(self.nodes, 0, 1)
        disconnect_nodes_bi(self.nodes, 0, 2)
        mc_node1 = self.nodes[0]
        mc_node2 = self.nodes[1]
        mc_node3 = self.nodes[2]
        sc_node1 = self.sc_nodes[0]

        # Test 1: Generate SC block, when all MC blocks already synchronized.
        # Generate 1 SC block
        scblock_id0 = generate_next_blocks(sc_node1, "first node", 1)[0]
        # Verify that SC block has no MC headers, ref data, ommers
        check_mcheaders_amount(0, scblock_id0, sc_node1)
        check_mcreferencedata_amount(0, scblock_id0, sc_node1)
        check_ommers_amount(0, scblock_id0, sc_node1)

        # Test 2: Generate SC block, when new MC block following the same Tip appear.
        # Generate 1 MC block on the first MC node
        mcblock_hash1 = mc_node1.generate(1)[0]

        # Sync MC nodes 1 and 3 once, then split them again so node 3 can
        # later build its own fork on top of mcblock_hash1.
        connect_nodes_bi(self.nodes, 0, 2)
        self.sync_nodes([mc_node1, mc_node3])
        disconnect_nodes_bi(self.nodes, 0, 2)

        # Generate 1 SC block
        scblock_id1 = generate_next_blocks(sc_node1, "first node", 1)[0]
        check_scparent(scblock_id0, scblock_id1, sc_node1)
        # Verify that SC block contains MC block as a MainchainReference
        check_mcheaders_amount(1, scblock_id1, sc_node1)
        check_mcreferencedata_amount(1, scblock_id1, sc_node1)
        check_mcreference_presence(mcblock_hash1, scblock_id1, sc_node1)
        check_ommers_amount(0, scblock_id1, sc_node1)

        # Test 3: Generate SC block, when new MC blocks following different Tip appear. Ommers expected.
        # Generate another 2 MC blocks on the second MC node
        fork_mcblock_hash1 = mc_node2.generate(1)[0]
        fork_mcblock_hash2 = mc_node2.generate(1)[0]

        # Connect and synchronize MC node 1 to MC node 2
        connect_nodes_bi(self.nodes, 0, 1)
        self.sync_nodes([mc_node1, mc_node2])
        # MC Node 1 should replace mcblock_hash1 Tip with [fork_mcblock_hash1, fork_mcblock_hash2]
        assert_equal(fork_mcblock_hash2, mc_node1.getbestblockhash())

        # Generate 1 SC block
        scblock_id2 = generate_next_blocks(sc_node1, "first node", 1)[0]
        check_scparent(scblock_id0, scblock_id2, sc_node1)
        # Verify that SC block contains newly created MC blocks as a MainchainHeaders and no MainchainRefData
        check_mcheaders_amount(2, scblock_id2, sc_node1)
        check_mcreferencedata_amount(0, scblock_id2, sc_node1)
        check_mcheader_presence(fork_mcblock_hash1, scblock_id2, sc_node1)
        check_mcheader_presence(fork_mcblock_hash2, scblock_id2, sc_node1)
        # Verify that SC block contains 1 Ommer with 1 MainchainHeader
        check_ommers_amount(1, scblock_id2, sc_node1)
        check_ommers_cumulative_score(1, scblock_id2, sc_node1)
        check_ommer(scblock_id1, [mcblock_hash1], scblock_id2, sc_node1)

        # Test 4: Generate SC block, when new MC blocks following previous Tip appear and lead to chain switching again.
        # Ommers expected. Subommers expected with mc blocks for the same MC branch as current SC block,
        # but orphaned to parent Ommer MC headers.

        # Generate 2 more mc blocks in MC node 3
        mcblock_hash2 = mc_node3.generate(1)[0]
        mcblock_hash3 = mc_node3.generate(1)[0]

        # Sync MC nodes 1 and 3 once
        connect_nodes_bi(self.nodes, 0, 2)
        self.sync_nodes([mc_node1, mc_node3])
        disconnect_nodes_bi(self.nodes, 0, 2)
        # MC Node 1 should replace back fork_mcblock_hash2 Tip with [mcblock_hash1, mcblock_hash2, mcblock_hash3]
        assert_equal(mcblock_hash3, mc_node1.getbestblockhash())

        # Generate SC block
        scblock_id3 = generate_next_blocks(sc_node1, "first node", 1)[0]
        check_scparent(scblock_id0, scblock_id3, sc_node1)
        # Verify that SC block contains newly created MC blocks as a MainchainHeaders and no MainchainRefData
        check_mcheaders_amount(3, scblock_id3, sc_node1)
        check_mcreferencedata_amount(0, scblock_id3, sc_node1)
        check_mcheader_presence(mcblock_hash1, scblock_id3, sc_node1)
        check_mcheader_presence(mcblock_hash2, scblock_id3, sc_node1)
        check_mcheader_presence(mcblock_hash3, scblock_id3, sc_node1)
        # Verify Ommers cumulative score, that must also count 1 subommer
        check_ommers_cumulative_score(2, scblock_id3, sc_node1)
        # Verify that SC block contains 1 Ommer with 2 MainchainHeader
        check_ommers_amount(1, scblock_id3, sc_node1)
        check_ommer(scblock_id2, [fork_mcblock_hash1, fork_mcblock_hash2],
                    scblock_id3, sc_node1)
        # Verify that Ommer contains 1 subommer with 1 MainchainHeader
        check_subommer(scblock_id2, scblock_id1, [mcblock_hash1], scblock_id3,
                       sc_node1)
# Beispiel #9
# 0
    def run_test(self):
        """Test consolidation-transaction acceptance rules and the deprecated
        -minconsolidationinputmaturity option.

        For each node: a consolidation tx must be rejected when it has too few
        inputs or insufficiently confirmed inputs, and accepted otherwise.
        Then verify the deprecated option is an alias for
        -minconfconsolidationinput, logs a deprecation warning, and cannot be
        combined with the new option.
        """
        for node in self.nodes:
            self.consolidation_factor = int(
                node.getnetworkinfo()['minconsolidationfactor'])
            self.minConfirmations = int(
                node.getnetworkinfo()['minconfconsolidationinput'])
            self.log.info("consolidation factor: {}".format(
                self.consolidation_factor))
            self.log.info("minimum input confirmations: {}".format(
                self.minConfirmations))

            # Disconnect nodes before each generate RPC. On a busy environment generate
            # RPC might not create the provided number of blocks. While nodes are communicating
            # P2P messages can cause generateBlocks function to skip a block. Check the comment
            # in generateBlocks function for details.
            disconnect_nodes_bi(self.nodes, 0, 1)
            node.generate(300)
            connect_nodes_bi(self.nodes, 0, 1)

            # test ratio between size of input script and size of output script
            tx_hex = self.create_and_sign_tx(node, 1, min_confirmations=1)
            tx = FromHex(CTransaction(), tx_hex)
            tx.rehash()
            sin = len(getInputScriptPubKey(node, tx.vin[0], 0))
            sout = len(tx.vout[0].scriptPubKey)

            # Minimum number of inputs for the consolidation ratio to hold.
            enough_inputs = sout * self.consolidation_factor // sin
            enough_inputs = max(enough_inputs, 2)
            enough_confirmations = self.minConfirmations

            # FAILING CONDITION: input_sizes <= consolidation_factor * output_size
            # We assume scriptSig ~ 4 * scriptPubKey
            tx_hex = self.create_and_sign_tx(
                node,
                in_count=enough_inputs - 1,
                min_confirmations=enough_confirmations)
            assert_raises_rpc_error(-26, "66: insufficient priority",
                                    node.sendrawtransaction, tx_hex)
            self.log.info("test 1: PASS")

            # FAILING CONDITION: not enough input confirmations
            tx_hex = self.create_and_sign_tx(
                node,
                in_count=enough_inputs,
                min_confirmations=enough_confirmations - 1)
            assert_raises_rpc_error(-26, "66: insufficient priority",
                                    node.sendrawtransaction, tx_hex)
            self.log.info("test 2: PASS")

            # ALL CONDITIONS MET: must succeed
            tx_hex = self.create_and_sign_tx(
                node,
                in_count=enough_inputs,
                min_confirmations=enough_confirmations)
            txid = node.sendrawtransaction(tx_hex)
            node.generate(1)
            tx = node.getrawtransaction(txid, 1)
            confirmations = tx.get('confirmations', 0)
            assert_equal(confirmations, 1)
            self.log.info("test 3: PASS")
            # Blocks must be synced because we do not want to start generating new blocks on node1 in the next loop iteration
            # before node1 has received all blocks generated on node0 and all pending P2P block requests have completed.
            sync_blocks(self.nodes)

        # Verify deprecated -minconsolidationinputmaturity is an alias to -minconfconsolidationinput
        self.log.info("Restarting nodes to test config options...")
        self.stop_nodes()
        self.extra_args[0].append("-minconsolidationinputmaturity=99")
        self.start_nodes(self.extra_args)
        sync_blocks(self.nodes)
        assert_equal(
            99, self.nodes[0].getnetworkinfo()['minconfconsolidationinput'])
        assert_equal(
            99,
            self.nodes[0].getnetworkinfo()['minconsolidationinputmaturity'])

        # Verify deprecation warning is logged.
        # Use a context manager so the log file handle is closed deterministically
        # (the original `for line in open(...)` leaked the file object).
        self.stop_nodes()
        deprecation_log = False
        log_path = glob.glob(self.options.tmpdir + "/node0" +
                             "/regtest/bitcoind.log")[0]
        with open(log_path) as log_file:
            for line in log_file:
                if "Option -minconsolidationinputmaturity is deprecated, use -minconfconsolidationinput instead" in line:
                    deprecation_log = True
                    #self.log.info("Found line: %s", line.strip())
                    break
        assert deprecation_log

        # Verify init error when deprecated and new option are used together
        self.extra_args[0].append("-minconfconsolidationinput=99")
        self.assert_start_raises_init_error(
            0, self.extra_args[0],
            'Cannot use both -minconfconsolidationinput and -minconsolidationinputmaturity (deprecated) at the same time'
        )
# Beispiel #10
    def run_test(self):
        """Exercise double-spend behaviour across a chain fork and safe mode.

        Test 1: fund an attacker, fork at FORK_ROOT_HEIGHT, spend the funds
                on node0 and double-spend them on node1; assert both chains
                contain the conflicting spends.
        Test 2: extend the two competing chains to (configurable) different
                lengths, reconnect the nodes to force a reorg, and assert the
                double-spends disappeared from the winning chain.
        Test 3: assert node0 entered safe mode because of the large fork.
        Test 4: assert safe mode is exited once the offending chain is
                invalidated and ignored.
        """

        # Test 1:
        # 1. fund an attacker for the test on node0
        # 2. progress to block height 200
        # 3. sync all nodes
        # 4. disconnect the two nodes forking at block height 200
        # 5. spend attackers fund in node0 and double spend them in node1
        # 6. Assert that the two chains actually contain the attackers double-spends

        attacker = User(b"horsebattery")
        friend0_of_attacker = User(b"fatstack")
        friend1_of_attacker = User(b"fatheap")
        node0 = self.nodes[0]  # victim node
        node1 = self.nodes[1]  # node under control of attacker

        self.log.info("fund attacker. We fund him at height 200 -2")
        self.log.info(
            "just for debugging convenience. We plan to fork at height 200")
        coinbase_tx = self.make_coinbase(node0)
        node0.generate(self.FORK_ROOT_HEIGHT - 2)
        assert (node0.getblockcount() == self.FORK_ROOT_HEIGHT - 1)

        self.log.info("fund attacker")
        funding_tx = self.send_funds_to_attacker(node0, attacker, coinbase_tx)
        node0.generate(1)
        assert (node0.getblockcount() == self.FORK_ROOT_HEIGHT + 0)

        self.log.info(
            "sync nodes. All nodes have the same chain and funding transactions after syncing"
        )
        connect_nodes_bi(self.nodes, 0, 1)
        sync_blocks(self.nodes)
        disconnect_nodes_bi(self.nodes, 0, 1)

        # fork from here
        assert (node0.getblockcount() == node1.getblockcount())

        self.log.info("spends attackers funds in node0")
        for i in range(self.nbDoubleSpends):
            attacker.spend_to_pkh(node0, funding_tx, i,
                                  funding_tx.vout[i].nValue,
                                  friend0_of_attacker.pubkey)
        node0.generate(1)
        assert (node0.getblockcount() == self.FORK_ROOT_HEIGHT + 1)

        self.log.info("double spend attacker funds in node1")
        for i in range(self.nbDoubleSpends):
            attacker.spend_to_pkh(node1, funding_tx, i,
                                  funding_tx.vout[i].nValue,
                                  friend1_of_attacker.pubkey)

        node1.generate(1)
        # Remember the first block of the attacker's chain so Test 4 can
        # invalidate it later.
        first_bad_block = node1.getbestblockhash()

        assert (node1.getblockcount() == self.FORK_ROOT_HEIGHT + 1)

        self.log.info(
            "check that funds have been double spent to different addresses")
        assert (self.contains_double_spends() == self.nbDoubleSpends)

        # Test 2.
        # 1. Progress the two competing chains in node0 and node1 to different lengths (configurable).
        #    node1 shall hold the longer chain and is the one controlled by the attacker.
        #    The two nodes are not connected to each other directly or indirectly and at this point
        #    contain the double-spends we have prepared.
        # 2. connect the nodes and sync them to force a reorg
        # 3. Assert that all double-spends disappeared - which nonetheless means the attack succeeded.
        assert (self.lenChain0 <= self.lenChain1)
        self.log.info("Mine lenChain0 blocks on node0")

        node0.generate(self.lenChain0 - 1)
        assert (node0.getblockcount() == self.FORK_ROOT_HEIGHT +
                self.lenChain0)

        self.log.info("Mine competing lenChain1 blocks on node1")
        node1.generate(self.lenChain1 - 1)
        assert (node1.getblockcount() == self.FORK_ROOT_HEIGHT +
                self.lenChain1)

        self.log.info("Connect nodes to force a reorg")
        connect_nodes(self.nodes, 1, 0)
        sync_blocks(self.nodes[0:2])
        # Whichever chain is longer must have won the reorg.
        if self.lenChain1 > self.lenChain0:
            assert (node0.getblockcount() == self.FORK_ROOT_HEIGHT +
                    self.lenChain1)
        else:
            assert (node1.getblockcount() == self.FORK_ROOT_HEIGHT +
                    self.lenChain0)

        self.log.info("check that both nodes have the same chains")
        lastblock0 = node0.getbestblockhash()
        lastblock1 = node1.getbestblockhash()
        assert (lastblock0 == lastblock1)

        self.log.info("check that double-spends have been removed")
        assert (self.contains_double_spends() == 0)

        # Test 3: Assert that safemode has been reached.
        # Wallet RPCs are disabled in safe mode, so getbalance must raise.
        try:
            node0.rpc.getbalance()
            assert False, "Should not come to here, should raise exception in line above."
        except JSONRPCException as e:
            assert e.error[
                "message"] == "Safe mode: Warning: The network does not appear to fully agree! Some miners appear to be experiencing issues. A large valid fork has been detected."

        # Test 4: Assert that safemode is exited if the offending chain is invalidated
        node0.invalidateblock(first_bad_block)
        node0.ignoresafemodeforblock(first_bad_block)
        balance = node0.rpc.getbalance()
        # Use identity comparison with None per PEP 8; succeeding at all
        # proves safe mode was exited.
        assert (balance is not None)
    def run_test(self):
        """Exercise double-spend detection with a mock dsdetector/exchange.

        Same fork scenario as the sibling test, but a MockDsdetector reports
        double-spends and a webhook-listening Exchange reacts to the
        notification by invalidating the offending chain, which exits safe
        mode.
        """

        # Test 1:
        # 1. fund an attacker for the test on node0
        # 2. progress to block height 200
        # 3. sync all nodes
        # 4. disconnect the two nodes forking at block height 200
        # 5. spend attackers fund in node0 and double spend them in node1
        # 6. Assert that the two chains actually contain the attackers double spends

        attacker = User(b"horsebattery")
        friend0_of_attacker = User(b"fatstack")
        friend1_of_attacker = User(b"fatheap")
        node0 = self.nodes[0]  # victim node
        node1 = self.nodes[1]  # node under control of attacker

        # The dsdetector is connected to node0, to which it sends ds messages
        dsdetector = MockDsdetector(self, node0)
        # The exchange is connected to a node0 in which it can invalidate chains
        # Additionally the Exchange listens to webhook messages on port 8888.
        # node0 will send webhook messages because node0 is connected to the dsdetector
        exchange = Exchange(self, node0)
        exchange.start_webhook_server()

        self.log.info("fund attacker. We fund him at height 200 -2")
        self.log.info(
            "just for debugging convenience. We plan to fork at height 200")
        coinbase_tx = self.make_coinbase(node0)
        node0.generate(self.FORK_ROOT_HEIGHT - 2)
        assert (node0.getblockcount() == self.FORK_ROOT_HEIGHT - 1)

        self.log.info("fund attacker")
        funding_tx = self.send_funds_to_attacker(node0, attacker, coinbase_tx)
        node0.generate(1)
        assert (node0.getblockcount() == self.FORK_ROOT_HEIGHT + 0)

        self.log.info(
            "sync nodes. All nodes have the same chain and funding transactions after syncing"
        )
        connect_nodes_bi(self.nodes, 0, 1)
        sync_blocks(self.nodes)
        disconnect_nodes_bi(self.nodes, 0, 1)

        # fork from here
        assert (node0.getblockcount() == node1.getblockcount())

        self.log.info("spends attackers funds in node0")
        for i in range(self.nbDoubleSpends):
            attacker.spend_to_pkh(node0, funding_tx, i,
                                  funding_tx.vout[i].nValue,
                                  friend0_of_attacker.pubkey)
        node0.generate(1)
        assert (node0.getblockcount() == self.FORK_ROOT_HEIGHT + 1)

        self.log.info("double spend attacker funds in node1")
        for i in range(self.nbDoubleSpends):
            attacker.spend_to_pkh(node1, funding_tx, i,
                                  funding_tx.vout[i].nValue,
                                  friend1_of_attacker.pubkey)

        node1.generate(1)
        # Remember the first block of the attacker's chain so Test 4 can
        # invalidate it later.
        first_bad_block = node1.getbestblockhash()

        assert (node1.getblockcount() == self.FORK_ROOT_HEIGHT + 1)

        self.log.info(
            "check that funds have been double spent to different addresses")
        assert (dsdetector.CheckForDoubleSpends(
            self.nodes[0:2]) == self.nbDoubleSpends)

        # Test 2.
        # 1. Progress the two competing chains in node0 and node1 to different lengths (configurable).
        #    node1 shall hold the longer chain and is the one controlled by the attacker.
        #    The two nodes are not connected to each other directly or indirectly and at this point
        #    contain the double spends we have prepared.
        # 2. connect the nodes and sync them to force a reorg
        # 3. Assert that all double spends disappeared - which nonetheless means the attack succeeded.
        self.log.info("Mine lenChain0 blocks on node0")

        node0.generate(self.lenChain0 - 1)
        assert (node0.getblockcount() == self.FORK_ROOT_HEIGHT +
                self.lenChain0)

        self.log.info("Mine competing lenChain1 blocks on node1")
        node1.generate(self.lenChain1 - 1)
        assert (node1.getblockcount() == self.FORK_ROOT_HEIGHT +
                self.lenChain1)

        self.log.info("Connect nodes to force a reorg")
        # note that reorg may not happen with next safe mode specification and
        # and safe mode may be entered
        connect_nodes(self.nodes, 1, 0)
        sync_blocks(self.nodes[0:2])
        # Whichever chain is longer must have won the reorg.
        if self.lenChain1 > self.lenChain0:
            assert (node0.getblockcount() == self.FORK_ROOT_HEIGHT +
                    self.lenChain1)
        else:
            assert (node1.getblockcount() == self.FORK_ROOT_HEIGHT +
                    self.lenChain0)

        self.log.info("check that both nodes have the same chains")
        lastblock0 = node0.getbestblockhash()
        lastblock1 = node1.getbestblockhash()
        assert (lastblock0 == lastblock1)

        self.log.info("check that double spends have been removed")
        assert (dsdetector.CheckForDoubleSpends(self.nodes[0:2]) == 0)

        # Test 3: Assert that safemode has been reached.
        # Wallet RPCs are disabled in safe mode, so getbalance must raise.
        try:
            node0.rpc.getbalance()
            assert False, "Should not come to here, should raise exception in line above."
        except JSONRPCException as e:
            assert e.error[
                "message"] == "Safe mode: Warning: The network does not appear to fully agree! Some miners appear to be experiencing issues. A large valid fork has been detected."

        # Test 4: Assert that safemode is exited if the offending chain is invalidated
        # We know a ds notification has been sent hence the exchange will react to this
        # notification by invalidating the bad chain which also makes the node exit safemode.
        dsdetector.SendDsNotification()
        while not exchange.CatchNotification_InvalidateIfRequired(
                node0, first_bad_block):
            time.sleep(1)
        balance = node0.rpc.getbalance()
        # Use identity comparison with None per PEP 8; succeeding at all
        # proves safe mode was exited.
        assert (balance is not None)

        exchange.stop_webhook_server()