Example #1
0
    def run_test(self):
        """Exercise every wallet-import RPC variant and verify rescan behavior.

        Phase 1: fund one unique labelled address per import variant from node 0.
        Phase 2: mine the funding txs, then jump node clocks past the rescan
        window and mine another block, so timestamp-based rescans are testable.
        Phase 3: run each import RPC on its matching node and check
        getbalance/listtransactions for rescanning vs. non-rescanning cases.
        Phase 4: send fresh coins to every imported address and re-check.
        """
        # Create one transaction on node 0 with a unique amount and label for
        # each possible type of wallet import RPC.
        for i, variant in enumerate(IMPORT_VARIANTS):
            variant.label = "label {} {}".format(i, variant)
            variant.address = self.nodes[1].validateaddress(
                self.nodes[1].getnewaddress(variant.label))
            variant.key = self.nodes[1].dumpprivkey(variant.address["address"])
            # Unique amount per variant so results can be told apart.
            variant.initial_amount = 10 - (i + 1) / 4.0
            variant.initial_txid = self.nodes[0].sendtoaddress(
                variant.address["address"], variant.initial_amount)

        # Generate a block containing the initial transactions, then another
        # block further in the future (past the rescan window).
        self.nodes[0].generate(1)
        assert_equal(self.nodes[0].getrawmempool(), [])
        timestamp = self.nodes[0].getblockheader(
            self.nodes[0].getbestblockhash())["time"]
        set_node_times(self.nodes, timestamp + TIMESTAMP_WINDOW + 1)
        self.nodes[0].generate(1)
        sync_blocks(self.nodes)

        # For each variation of wallet key import, invoke the import RPC and
        # check the results from getbalance and listtransactions.
        for variant in IMPORT_VARIANTS:
            # Single-call imports requesting a rescan are expected to be
            # rejected on pruned nodes.
            variant.expect_disabled = variant.rescan == Rescan.yes and variant.prune and variant.call == Call.single
            expect_rescan = variant.rescan == Rescan.yes and not variant.expect_disabled
            variant.node = self.nodes[
                2 + IMPORT_NODES.index(ImportNode(variant.prune, expect_rescan))]
            variant.do_import(timestamp)
            if expect_rescan:
                variant.expected_balance = variant.initial_amount
                variant.expected_txs = 1
                variant.check(variant.initial_txid, variant.initial_amount, 2)
            else:
                variant.expected_balance = 0
                variant.expected_txs = 0
                variant.check()

        # Create new transactions sending to each address.
        # (A leftover `fee = getnetworkinfo()["relayfee"]` lookup was removed
        # here; the value was never read.)
        for i, variant in enumerate(IMPORT_VARIANTS):
            variant.sent_amount = 10 - (2 * i + 1) / 8.0
            variant.sent_txid = self.nodes[0].sendtoaddress(
                variant.address["address"], variant.sent_amount)

        # Generate a block containing the new transactions.
        self.nodes[0].generate(1)
        assert_equal(self.nodes[0].getrawmempool(), [])
        sync_blocks(self.nodes)

        # Check the latest results from getbalance and listtransactions.
        for variant in IMPORT_VARIANTS:
            if not variant.expect_disabled:
                variant.expected_balance += variant.sent_amount
                variant.expected_txs += 1
                variant.check(variant.sent_txid, variant.sent_amount, 1)
            else:
                variant.check()
Example #2
0
    def run_test(self):
        """Fund, import, and verify each wallet-import RPC variant."""
        # Phase 1: give every import variant its own labelled address on
        # node 1 and fund it from node 0 with a distinguishable amount.
        for idx, variant in enumerate(IMPORT_VARIANTS):
            variant.label = "label {} {}".format(idx, variant)
            variant.address = self.nodes[1].validateaddress(
                self.nodes[1].getnewaddress(variant.label))
            variant.key = self.nodes[1].dumpprivkey(variant.address["address"])
            variant.initial_amount = 10 - (idx + 1) / 4.0
            variant.initial_txid = self.nodes[0].sendtoaddress(
                variant.address["address"], variant.initial_amount)

        # Phase 2: confirm the funding transactions, then advance the node
        # clocks beyond the rescan window and mine one more block.
        self.nodes[0].generate(1)
        assert_equal(self.nodes[0].getrawmempool(), [])
        best_header = self.nodes[0].getblockheader(self.nodes[0].getbestblockhash())
        timestamp = best_header["time"]
        set_node_times(self.nodes, timestamp + TIMESTAMP_WINDOW + 1)
        self.nodes[0].generate(1)
        sync_blocks(self.nodes)

        # Phase 3: perform the import on the matching node and validate
        # getbalance/listtransactions, covering both the rescanning and the
        # non-rescanning (or disabled) outcomes.
        for variant in IMPORT_VARIANTS:
            variant.expect_disabled = (variant.rescan == Rescan.yes
                                       and variant.prune
                                       and variant.call == Call.single)
            expect_rescan = variant.rescan == Rescan.yes and not variant.expect_disabled
            node_index = 2 + IMPORT_NODES.index(ImportNode(variant.prune, expect_rescan))
            variant.node = self.nodes[node_index]
            variant.do_import(timestamp)
            if expect_rescan:
                variant.expected_balance = variant.initial_amount
                variant.expected_txs = 1
                variant.check(variant.initial_txid, variant.initial_amount, 2)
            else:
                variant.expected_balance = 0
                variant.expected_txs = 0
                variant.check()

        # Phase 4: send a second, again-unique payment to every address.
        for idx, variant in enumerate(IMPORT_VARIANTS):
            variant.sent_amount = 10 - (2 * idx + 1) / 8.0
            variant.sent_txid = self.nodes[0].sendtoaddress(
                variant.address["address"], variant.sent_amount)

        # Confirm the second round of payments in a block.
        self.nodes[0].generate(1)
        assert_equal(self.nodes[0].getrawmempool(), [])
        sync_blocks(self.nodes)

        # Re-check balances: only non-disabled variants see the new funds.
        for variant in IMPORT_VARIANTS:
            if not variant.expect_disabled:
                variant.expected_balance += variant.sent_amount
                variant.expected_txs += 1
                variant.check(variant.sent_txid, variant.sent_amount, 1)
            else:
                variant.check()
Example #3
0
    def test_islock_overrides_nonchainlock(self):
        """An ISLOCK must invalidate blocks that mined a conflicting TX."""
        # Build two raw transactions spending the same input so they conflict.
        rawtx1 = self.create_raw_tx(self.nodes[0], self.nodes[0], 1, 1, 100)['hex']
        rawtx2 = self.create_raw_tx(self.nodes[0], self.nodes[0], 1, 1, 100)['hex']

        # txid = double-SHA256 of the raw bytes, reversed into display order.
        txid1 = encode(hash256(hex_str_to_bytes(rawtx1))[::-1], 'hex_codec').decode('ascii')
        txid2 = encode(hash256(hex_str_to_bytes(rawtx2))[::-1], 'hex_codec').decode('ascii')

        # Prepare an ISLOCK for tx2; it is broadcast later in the test.
        islock = self.create_islock(rawtx2)

        # Shut down three masternodes so ChainLocks can no longer form.
        for _ in range(3):
            self.stop_node(len(self.nodes) - 1)
            self.nodes.pop(len(self.nodes) - 1)
            self.mninfo.pop(len(self.mninfo) - 1)

        # Relay tx1, the transaction that will conflict with the ISLOCK.
        self.nodes[0].sendrawtransaction(rawtx1)

        # Advance mocktime by 11 minutes so the TX counts as safe to mine.
        self.bump_mocktime(int(60 * 11))
        set_node_times(self.nodes, self.mocktime)

        # Remember the tip, then mine tx1 into the chain.
        good_tip = self.nodes[0].getbestblockhash()
        self.nodes[0].generate(2)
        self.sync_all()

        # tx1 is confirmed, which makes tx2 unspendable for now.
        assert self.nodes[0].getrawtransaction(txid1, True)['confirmations'] > 0
        assert_raises_rpc_error(-25, "Missing inputs", self.nodes[0].sendrawtransaction, rawtx2)

        # Broadcasting the ISLOCK should roll back the last 2 blocks even
        # though the nodes have not seen the locked transaction itself yet.
        self.test_node.send_islock(islock)
        time.sleep(5)

        assert self.nodes[0].getbestblockhash() == good_tip
        assert self.nodes[1].getbestblockhash() == good_tip

        # Now relay the locked transaction and mine it.
        self.nodes[0].sendrawtransaction(rawtx2)
        self.nodes[0].generate(1)
        self.sync_all()

        # tx2 is confirmed, instant-locked, and the chain has moved past
        # the pre-ISLOCK tip on both nodes.
        assert self.nodes[0].getrawtransaction(txid2, True)['confirmations'] > 0
        assert self.nodes[1].getrawtransaction(txid2, True)['confirmations'] > 0
        assert self.nodes[0].getrawtransaction(txid2, True)['instantlock']
        assert self.nodes[1].getrawtransaction(txid2, True)['instantlock']
        assert self.nodes[0].getbestblockhash() != good_tip
        assert self.nodes[1].getbestblockhash() != good_tip
Example #4
0
    def run_test(self):
        """Validate assetallocationmint SPV-proof handling end to end.

        Drives the mint RPC through its rejection paths using a captured
        Ethereum burn transaction and its merkle/receipt proofs (the hex
        blobs below): insufficient confirmations, mismatched value,
        duplicate transfer, already-mined ("mint-exists"), and finally
        proof-too-old after advancing mocktime by roughly one week.
        """
        self.nodes[0].generate(200)
        self.sync_blocks()
        # Asset under test plus the captured Ethereum SPV proof components.
        self.asset = '2615707979'
        spv_tx_root = "a0842ab40a9c4770c8ec74158aadcf943e8158128fdd1ba8cef9c7cb8eda732692"
        spv_tx_parent_nodes = "f9039cf871a04442f3f69add48df0531fe3c0025103b53fcf3fe38060e5f29366caec8855e4fa0229f7b7e69c0b5793f8a61c06f5cc09b0f4938561856c632ee56c3b2c4d6d153808080808080a07720fff5e8eabef55fa129ee55b3b0d82875e2b25b8f26e22cf6b5c4f9cec7ab8080808080808080f901f180a03ee147749c5b769bc5d1a53e4f37567506d417de4ec4e67722130eda4638427da043caa62b40dad61bce4d50fb62ea485729a6687c3aa13895cf4ba234b92afe82a0b79958e4aa63104da4599ebb91e712375e6adfc89abc14b9533c5778f107e7d8a01bc7f80f81a8d281253ac882bb89aca6131e5794bfcbdccde990bb6d5be6cb2fa0aedad62f1426b68e395a59e06bf242fb28b882af67589bce3495a99650058ec4a0c21a7e0b9d0948bb6b65a5e73f5f01173064d20e4819ca4884d1eabc22bf737da090087708c533b10af8925eebf398c005fc16cb6a515111f2be4f328f762949d0a02827daacd6a52ae6c74a78791ff0c5e33a7a85f5ca0a47cdfbcd5219f75f705ca0af7ecf31d56575155d272cd813bf7d7ac435f62b0538c31771e407dafef6be53a09b74707c3abdbfa305cb61f23c940f063f553f17d0bd3013126aad357193353ea067a52ed59820bb48f8010d2b2bb0ee92803b1a00a8341fd4c3269b065ed070d9a0bf0e9b45955283e6e04b71eda63bfc7b55d9f54527943aa1c159b4161b1e1daea0ecabd4c00deacf9a7ff25be942c9f468628eb776fbec23a9ca0d8fc256f14a31a0df406c7ac7f38c2ea1d9bdb06c2e51db3de8cf0e655a8e0e683e19ca1ddf83d3a08360ec6c5e26614f144520ed9d0b577640381f0f38b5429b67422f75d603ad5a80f9013220b9012ef9012b82051f843b9aca008307a120940765efb302d504751c652c5b1d65e8e9edf2e70f80b8c454c988ff00000000000000000000000000000000000000000000000000000002540be400000000000000000000000000000000000000000000000000000000009be8894b0000000000000000000000000000000000000000000000000000000000000060000000000000000000000000000000000000000000000000000000000000002c62637274317130667265323430737939326d716b386b6b377073616561366b74366d3537323570377964636a00000000000000000000000000000000000000002ca0dccb6e077c3f6252d199202113893407119d4ba09667113f2d20c63a31487b87a01e0a059e50f08f2772781691f2c9e43a9503a167c98cf467b1afc177b74d84e6"
        spv_tx_value = "f9012b82051f843b9aca008307a120940765efb302d504751c652c5b1d65e8e9edf2e70f80b8c454c988ff00000000000000000000000000000000000000000000000000000002540be400000000000000000000000000000000000000000000000000000000009be8894b0000000000000000000000000000000000000000000000000000000000000060000000000000000000000000000000000000000000000000000000000000002c62637274317130667265323430737939326d716b386b6b377073616561366b74366d3537323570377964636a00000000000000000000000000000000000000002ca0dccb6e077c3f6252d199202113893407119d4ba09667113f2d20c63a31487b87a01e0a059e50f08f2772781691f2c9e43a9503a167c98cf467b1afc177b74d84e6"
        spv_tx_path = "0b"
        spv_receipt_root = "a0a958499bf48fcce17672b58aa9037bd3dafeb6231880722d909c60bacfaaa8d4"
        spv_receipt_parent_nodes = "f90551f871a0cab13def05783d763febde31920bd234d0486c26955c2937e0486db909a28eeea09cf564a668a29a5f1cc5d6ef8e19988dfd2b30d290672f0ffc4200e608cb65ac808080808080a029b8ed2258c53562954c87bcd7f60671029680d2a19ef8bcd3ad470ea48d57d18080808080808080f901f180a07c21ca39872e6b8f611bc6b1b295c24f988b5cf944625eabf5236b37ea3b9f01a0edb9e63fdc31ba41f11a8b2fb8000ad1357b3c0b27a8483968d75e93e7b488a1a02231847aa3c5dde2f2a1851a66aabec65e5eaae8c28110756f122c72be1fba05a08fa87809e5b7f989e78ccbe1a6bc4924115d5747529af879f2fe196f959b64fca091f1bf748061eba21a413b72d70afccb8daebb5906d5cd9dda06d5f877065d5ba0d7e6c82dd1c25eb2f90b02f038beaff98c260d46992d0b3c1eac7d51552c7417a01d5c43deb2e3794292cdffb04f82ab25bc4e75f5e0cab928b66582e08026f5b1a0d7323a87dc8fbc66c7b34810d2cad92fc0da168d962b4556e825a3266a148b74a0af31f0b7cdcd6a855ac7678ef2b8fcb1afeda918b0c8e4696a4013f2b75ca402a0f9d63f2db8ab6d3c3e12073ac2910ee575832bde3e4586f18e59dd26a16adb7ca0f0c91e059c43780617d304fe8992511f096ccc35232da1f25127db53ba4fb05aa052030932d0a9026efd2a3ada67f33d401cd9a97ddb24c606af3a0a0c24e432aba0142af9b4686c6ca30b0ac39133fa76d8682b7bbbec488e62e652d3f25419777da0940f31617e91cfbabaa9d0d1638949f8125f80a43027122778522675194a4e65a0edc4c7d2cf30150fdf7e502d0ef06c80c85fc37260134a112493c6183f62f4b580f902e720b902e3f902e00183192ee2b9010000000000000000000000000000002000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000200000000000200000000000000008000000000000000000000100200000000000000000010000000000000200000000000000000000000000000000000010000000000000000000000000000004000000000000000000000000400004001000000000020000000000000000000000000080000000000000408000000040000000000000000002000000000000000000000000000000000000000000000000000000000010000000000000000010000000000000000000000000000000000000000000f901d5f89b94f2bb7bfa19e7c4b6bb333ee1afdf8e5e8f9b3561f863a0ddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3efa0000000000000000000000000b0ea8c9ee8aa87efd28a12de8c034f947c144053a00000000000000000000000000765efb302d504751c652c5b1d65e8e9edf2e70fa000000000000000000000000000000000000000000000000000000002540be400f89b94f2bb7bfa19e7c4b6bb333ee1afdf8e5e8f9b3561f863a08c5be1e5ebec7d5bd14f71427d1e84f3dd0314c0f7b2291e5b200ac8c7c3b925a0000000000000000000000000b0ea8c9ee8aa87efd28a12de8c034f947c144053a00000000000000000000000000765efb302d504751c652c5b1d65e8e9edf2e70fa00000000000000000000000000000000000000000000000000000000000000000f899940765efb302d504751c652c5b1d65e8e9edf2e70fe1a09c6dea23fe3b510bb5d170df49dc74e387692eaa3258c691918cd3aa94f5fb74b860000000000000000000000000b0ea8c9ee8aa87efd28a12de8c034f947c14405300000000000000000000000000000000000000000000000000000002540be4000000000000000000000000000000000000000000000000000000080800000002"
        spv_receipt_value = "f902e00183192ee2b9010000000000000000000000000000002000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000200000000000200000000000000008000000000000000000000100200000000000000000010000000000000200000000000000000000000000000000000010000000000000000000000000000004000000000000000000000000400004001000000000020000000000000000000000000080000000000000408000000040000000000000000002000000000000000000000000000000000000000000000000000000000010000000000000000010000000000000000000000000000000000000000000f901d5f89b94f2bb7bfa19e7c4b6bb333ee1afdf8e5e8f9b3561f863a0ddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3efa0000000000000000000000000b0ea8c9ee8aa87efd28a12de8c034f947c144053a00000000000000000000000000765efb302d504751c652c5b1d65e8e9edf2e70fa000000000000000000000000000000000000000000000000000000002540be400f89b94f2bb7bfa19e7c4b6bb333ee1afdf8e5e8f9b3561f863a08c5be1e5ebec7d5bd14f71427d1e84f3dd0314c0f7b2291e5b200ac8c7c3b925a0000000000000000000000000b0ea8c9ee8aa87efd28a12de8c034f947c144053a00000000000000000000000000765efb302d504751c652c5b1d65e8e9edf2e70fa00000000000000000000000000000000000000000000000000000000000000000f899940765efb302d504751c652c5b1d65e8e9edf2e70fe1a09c6dea23fe3b510bb5d170df49dc74e387692eaa3258c691918cd3aa94f5fb74b860000000000000000000000000b0ea8c9ee8aa87efd28a12de8c034f947c14405300000000000000000000000000000000000000000000000000000002540be4000000000000000000000000000000000000000000000000000000080800000002"
        # Ethereum block metadata the proof was captured from.
        height = 6816449
        blockhash = '0xee524852fb7df5a6c27106f4bc47e740e6a6751e66bce1f98363ff2eecbf8c0d'
        prevblockhash = '0x5f41930a021d1c48a8add5d35aac63f18e085c2ee862990660603058fd6216c0'
        bridgetransferid = 2

        # Create the asset and verify it exists on chain.
        self.basic_asset()
        self.nodes[0].generate(1)
        assetInfo = self.nodes[0].assetinfo(self.asset)
        assert_equal(assetInfo['asset_guid'], self.asset)
        self.sync_blocks()

        # Add eth root to DB so it can validate this SPV proof, do it on both nodes so they can verify the tx
        self.nodes[0].syscoinsetethheaders([[6816449, blockhash, prevblockhash, spv_tx_root, spv_receipt_root, 1594359054]])
        self.nodes[0].syscoinsetethstatus('synced', 6816449)
        self.nodes[1].syscoinsetethheaders([[6816449, blockhash, prevblockhash, spv_tx_root, spv_receipt_root, 1594359054]])
        self.nodes[1].syscoinsetethstatus('synced', 6816449)

        newaddress = self.nodes[0].getnewaddress()
        # must wait an hour first
        assert_raises_rpc_error(-26, 'mint-insufficient-confirmations', self.nodes[0].assetallocationmint, self.asset, newaddress, 100, height, bridgetransferid, spv_tx_value, spv_tx_root, spv_tx_parent_nodes, spv_tx_path, spv_receipt_value, spv_receipt_root, spv_receipt_parent_nodes)
        set_node_times(self.nodes, self.nodes[0].getblockheader(self.nodes[0].getbestblockhash())["time"] + 3600)
        self.nodes[0].generate(50)
        # try to enable aux fee which should throw an error of invalid value
        assert_raises_rpc_error(-26, 'mint-mismatch-value', self.nodes[0].assetallocationmint, self.asset, newaddress, 100, height, bridgetransferid, spv_tx_value, spv_tx_root, spv_tx_parent_nodes, spv_tx_path, spv_receipt_value, spv_receipt_root, spv_receipt_parent_nodes, True)
        # the valid mint: correct value, no aux fee
        self.nodes[0].assetallocationmint(self.asset, newaddress, 100, height, bridgetransferid, spv_tx_value, spv_tx_root, spv_tx_parent_nodes, spv_tx_path, spv_receipt_value, spv_receipt_root, spv_receipt_parent_nodes)
        time.sleep(0.25)
        # cannot mint twice
        assert_raises_rpc_error(-26, 'mint-duplicate-transfer', self.nodes[0].assetallocationmint, self.asset, newaddress, 100, height, bridgetransferid, spv_tx_value, spv_tx_root, spv_tx_parent_nodes, spv_tx_path, spv_receipt_value, spv_receipt_root, spv_receipt_parent_nodes)
        self.nodes[0].generate(1)
        self.sync_blocks()
        # after a block it should show a different exists error
        assert_raises_rpc_error(-26, 'mint-exists', self.nodes[0].assetallocationmint, self.asset, newaddress, 100, height, bridgetransferid, spv_tx_value, spv_tx_root, spv_tx_parent_nodes, spv_tx_path, spv_receipt_value, spv_receipt_root, spv_receipt_parent_nodes)

        # increase time by ~1 week and assetallocationmint should throw timeout error, must send to network by 0.5 week at least
        numBlocks = int(604800 / (2*60*59))
        # need to bump 2 hours at a time max
        for block in range(numBlocks):
            set_node_times(self.nodes, self.nodes[0].getblockheader(self.nodes[0].getbestblockhash())["time"] + (2*60*59))
            self.nodes[0].generate(1)
        assert_raises_rpc_error(-26, 'mint-too-old', self.nodes[0].assetallocationmint, self.asset, newaddress, 100, height, bridgetransferid, spv_tx_value, spv_tx_root, spv_tx_parent_nodes, spv_tx_path, spv_receipt_value, spv_receipt_root, spv_receipt_parent_nodes)
    def test_dip8_quorum_merkle_root_activation(self, with_initial_quorum):
        """Check cbTx quorum merkle root behaviour around DIP8 activation.

        Returns the quorum mined after activation so callers can reuse it.
        """
        if with_initial_quorum:
            # Enable DKG and mine one quorum before dip8 activates.
            self.nodes[0].spork("SPORK_17_QUORUM_DKG_ENABLED", 0)
            self.wait_for_sporks_same()

            self.mine_quorum()

        # Disable DKG again (far-future timestamp means "off").
        self.nodes[0].spork("SPORK_17_QUORUM_DKG_ENABLED", 4070908800)
        self.wait_for_sporks_same()

        # Pre-activation coinbase special TX must still be version 1.
        coinbase_tx = self.nodes[0].getblock(self.nodes[0].getbestblockhash(), 2)["tx"][0]
        assert coinbase_tx["cbTx"]["version"] == 1

        assert self.nodes[0].getblockchaininfo()["bip9_softforks"]["dip0008"]["status"] != "active"

        # Mine until the dip0008 softfork reports active.
        while self.nodes[0].getblockchaininfo()["bip9_softforks"]["dip0008"]["status"] != "active":
            self.nodes[0].generate(4)
            self.sync_all()
        self.nodes[0].generate(1)
        sync_blocks(self.nodes)

        # merkleRootQuorums must now be present in the version-2 cbTx.
        coinbase_tx = self.nodes[0].getblock(self.nodes[0].getbestblockhash(), 2)["tx"][0]
        assert_equal(coinbase_tx["cbTx"]["version"], 2)
        assert "merkleRootQuorums" in coinbase_tx["cbTx"]
        merkle_root_quorums = int(coinbase_tx["cbTx"]["merkleRootQuorums"], 16)

        # Non-zero only if a quorum was mined before activation.
        if with_initial_quorum:
            assert merkle_root_quorums != 0
        else:
            assert_equal(merkle_root_quorums, 0)

        # Re-enable DKG so a fresh quorum can form.
        self.bump_mocktime(1)
        set_node_times(self.nodes, self.mocktime)
        self.nodes[0].spork("SPORK_17_QUORUM_DKG_ENABLED", 0)
        self.wait_for_sporks_same()

        # Mining a new quorum must change the quorum merkle root.
        quorum = self.mine_quorum()
        coinbase_tx = self.nodes[0].getblock(self.nodes[0].getbestblockhash(), 2)["tx"][0]
        assert int(coinbase_tx["cbTx"]["merkleRootQuorums"], 16) != merkle_root_quorums

        return quorum
Example #6
0
    def test_mempool_doublespend(self):
        """Verify that a mempool-only double-spend on an isolated node is
        dropped once the competing TX receives an InstantSend lock.

        The double-spend is fed only to the isolated node; after it rejoins,
        the IS-locked competing TX must win and the double-spend must vanish
        from the isolated node's mempool as well.
        """
        sender = self.nodes[self.sender_idx]
        receiver = self.nodes[self.receiver_idx]
        isolated = self.nodes[self.isolated_idx]

        # feed the sender with some balance
        sender_addr = sender.getnewaddress()
        self.nodes[0].sendtoaddress(sender_addr, 1)
        self.bump_mocktime(1)
        set_node_times(self.nodes, self.mocktime)
        self.nodes[0].generate(2)
        self.sync_all()

        # create doublespending transaction, but don't relay it
        dblspnd_tx = self.create_raw_tx(sender, isolated, 0.5, 1, 100)
        # txid = double-SHA256 of the raw tx, byte-reversed into display order
        dblspnd_txid = bytes_to_hex_str(
            hash256(hex_str_to_bytes(dblspnd_tx['hex']))[::-1])
        # isolate one node from network
        isolate_node(isolated)
        # send doublespend transaction to isolated node
        isolated.sendrawtransaction(dblspnd_tx['hex'])
        # let isolated node rejoin the network
        # The previously isolated node should NOT relay the doublespending TX
        reconnect_isolated_node(isolated, 0)
        for node in self.nodes:
            if node is not isolated:
                assert_raises_rpc_error(
                    -5, "No such mempool or blockchain transaction",
                    node.getrawtransaction, dblspnd_txid)
        # instantsend to receiver. The previously isolated node should prune the doublespend TX and request the correct
        # TX from other nodes.
        receiver_addr = receiver.getnewaddress()
        is_id = sender.sendtoaddress(receiver_addr, 0.9)
        # wait for the transaction to propagate
        sync_mempools(self.nodes)
        for node in self.nodes:
            self.wait_for_instantlock(is_id, node)
        # the doublespend must now be gone from the isolated node as well
        assert_raises_rpc_error(-5,
                                "No such mempool or blockchain transaction",
                                isolated.getrawtransaction, dblspnd_txid)
        # send coins back to the controller node without waiting for confirmations
        receiver.sendtoaddress(self.nodes[0].getnewaddress(), 0.9, "", "",
                               True)
        assert_equal(receiver.getwalletinfo()["balance"], 0)
        # mine more blocks
        self.bump_mocktime(1)
        set_node_times(self.nodes, self.mocktime)
        self.nodes[0].generate(2)
        self.sync_all()
Example #7
0
    def run_test(self):
        """Fund an RBF wallet, then run the full suite of bumpfee sub-tests.

        Sub-test ordering matters: the last two tests deliberately consume
        UTXOs the earlier tests rely on, so they run at the end.
        """
        # Encrypt wallet for test_locked_wallet_fails test
        self.nodes[1].encryptwallet(WALLET_PASSPHRASE)
        self.nodes[1].walletpassphrase(WALLET_PASSPHRASE,
                                       WALLET_PASSPHRASE_TIMEOUT)

        connect_nodes_bi(self.nodes, 0, 1)
        self.sync_all()

        peer_node, rbf_node = self.nodes
        rbf_node_address = rbf_node.getnewaddress()

        # fund rbf node with 25 payments of 0.001 coin each
        self.log.info("Mining blocks...")
        peer_node.generate(110)
        self.sync_all()
        for i in range(25):
            peer_node.sendtoaddress(rbf_node_address, 0.001)

        # we need to move time to sync
        set_node_times([rbf_node], TestNode.Mocktime)
        self.sync_all()
        peer_node.generate(1)
        self.sync_all()
        # 25 payments of 0.001 each = 0.025 total
        assert_equal(rbf_node.getbalance(), Decimal("0.025"))

        self.log.info("Running tests")
        dest_address = peer_node.getnewaddress()
        test_simple_bumpfee_succeeds(self, rbf_node, peer_node, dest_address)
        test_segwit_bumpfee_succeeds(rbf_node, dest_address)
        test_nonrbf_bumpfee_fails(peer_node, dest_address)
        test_notmine_bumpfee_fails(rbf_node, peer_node, dest_address)
        test_bumpfee_with_descendant_fails(rbf_node, rbf_node_address,
                                           dest_address)
        test_small_output_fails(rbf_node, dest_address)
        test_dust_to_fee(rbf_node, dest_address)
        test_settxfee(rbf_node, dest_address)
        test_rebumping(rbf_node, dest_address)
        test_rebumping_not_replaceable(rbf_node, dest_address)
        test_unconfirmed_not_spendable(rbf_node, rbf_node_address)
        test_bumpfee_metadata(rbf_node, dest_address)
        test_locked_wallet_fails(rbf_node, dest_address)
        test_change_script_match(rbf_node, dest_address)
        test_maxtxfee_fails(self, rbf_node, dest_address)
        # These tests wipe out a number of utxos that are expected in other tests
        test_small_output_with_feerate_succeeds(rbf_node, dest_address)
        test_no_more_inputs_fails(rbf_node, dest_address)
        self.log.info("Success")
Example #8
0
    def do_one_round(self):
        """Cross-send coins between nodes 0-2, then have node 3 mine a block.

        Each of the three wallet nodes sends to the other two, the mempools
        are synced (required before mining so the block picks up every TX),
        and the miner node confirms everything with a single block.
        """
        a0 = self.nodes[0].getnewaddress()
        a1 = self.nodes[1].getnewaddress()
        a2 = self.nodes[2].getnewaddress()

        self.one_send(0, a1)
        self.one_send(0, a2)
        self.one_send(1, a0)
        self.one_send(1, a2)
        self.one_send(2, a0)
        self.one_send(2, a1)

        # Have the miner (node3) mine a block.
        # Must sync mempools before mining.
        # (Leftover debug print() statements were removed here.)
        set_node_times(self.nodes, 0)
        self.sync_mempools()
        self.nodes[3].generate(1)
        self.sync_blocks()
Example #9
0
    def run_test(self):
        """Mine PoW blocks up to PoS activation, then run sub-tests 1-3."""
        # Custom fields used by the sub-tests.
        self.mocktime -= (131 * 60)
        self.recipient_0 = self.nodes[0].getnewaddress()
        self.recipient_1 = self.nodes[1].getnewaddress()
        self.init_dummy_key()

        # Announce the test and align node clocks with mocktime.
        self.log_title()
        set_node_times(self.nodes, self.mocktime)

        # nodes[0] mines 50 blocks (201-250) to reach PoS activation.
        self.log.info("Mining 50 blocks to reach PoS phase...")
        for _ in range(50):
            self.mocktime = self.generate_pow(0, self.mocktime)
        sync_blocks(self.nodes)

        # Run sub-tests 1 through 3.
        self.test_1()
        self.test_2()
        self.test_3()
 def test_single_node_session_timeout(self, do_cycle_llmqs):
     """Verify that an isolated node whose IS signing session times out
     cannot contribute to the lock, so no IS lock forms at first.

     Node 3 is isolated, receives the TX alone, and its signing session is
     aged past the timeout via mocktime before it rejoins. With only the
     remaining signers available, the instant lock must not appear (also
     after optional LLMQ cycling); the TX is finally confirmed via a
     ChainLocked block instead.
     """
     set_node_times(self.nodes, get_mocktime())
     isolate_node(self.nodes[3])
     # Build, fund, and sign a 1-coin TX on node 0, then relay it only on
     # the isolated node 3.
     rawtx = self.nodes[0].createrawtransaction(
         [], {self.nodes[0].getnewaddress(): 1})
     rawtx = self.nodes[0].fundrawtransaction(rawtx)['hex']
     rawtx = self.nodes[0].signrawtransaction(rawtx)['hex']
     txid = self.nodes[3].sendrawtransaction(rawtx)
     time.sleep(2)  # make sure signing is done on node 2 (it's async)
     # Make the signing session for the IS lock timeout on node 3
     set_mocktime(get_mocktime() + 61)
     set_node_times(self.nodes, get_mocktime())
     time.sleep(2)  # make sure Cleanup() is called
     reconnect_isolated_node(self.nodes[3], 0)
     self.wait_for_mnauth(self.nodes[3], 2)
     self.nodes[0].sendrawtransaction(rawtx)
     # Make sure nodes 1 and 2 received the TX
     self.wait_for_tx(txid, self.nodes[1])
     self.wait_for_tx(txid, self.nodes[2])
     # Make sure signing is done on nodes 1 and 2 (it's async)
     time.sleep(5)
     # node 3 fully reconnected but the signing session is already timed out on it, so no IS lock
     self.wait_for_instantlock(txid,
                               self.nodes[0],
                               False,
                               1,
                               do_assert=True)
     if do_cycle_llmqs:
         self.cycle_llmqs()
         self.wait_for_instantlock(txid,
                                   self.nodes[0],
                                   False,
                                   5,
                                   do_assert=True)
     # Make node 0 consider the TX as safe
     set_mocktime(get_mocktime() + 10 * 60 + 1)
     self.nodes[0].setmocktime(get_mocktime())
     block = self.nodes[0].generate(1)[0]
     self.wait_for_chainlocked_block_all_nodes(block)
Example #11
0
    def test_block_doublespend(self):
        """Verify that a block mined by an isolated node containing a
        double-spend of an IS-locked TX is rejected by the network.

        The competing TX gets its instant lock while one node is isolated;
        that node then mines the double-spend into a block, and the rest of
        the network must refuse to follow that block after reconnection.
        """
        sender = self.nodes[self.sender_idx]
        receiver = self.nodes[self.receiver_idx]
        isolated = self.nodes[self.isolated_idx]

        # feed the sender with some balance
        sender_addr = sender.getnewaddress()
        self.nodes[0].sendtoaddress(sender_addr, 1)
        self.bump_mocktime(1)
        set_node_times(self.nodes, self.mocktime)
        self.nodes[0].generate(2)
        self.sync_all()

        # create doublespending transaction, but don't relay it
        dblspnd_tx = self.create_raw_tx(sender, isolated, 0.5, 1, 100)
        # isolate one node from network
        isolate_node(isolated)
        # instantsend to receiver
        receiver_addr = receiver.getnewaddress()
        is_id = sender.sendtoaddress(receiver_addr, 0.9)
        # wait for the transaction to propagate (isolated node excluded)
        connected_nodes = self.nodes.copy()
        del connected_nodes[self.isolated_idx]
        sync_mempools(connected_nodes)
        for node in connected_nodes:
            self.wait_for_instantlock(is_id, node)
        # send doublespend transaction to isolated node
        isolated.sendrawtransaction(dblspnd_tx['hex'])
        # generate block on isolated node with doublespend transaction
        self.bump_mocktime(1)
        set_node_times(self.nodes, self.mocktime)
        isolated.generate(1)
        wrong_block = isolated.getbestblockhash()
        # connect isolated block to network
        reconnect_isolated_node(isolated, 0)
        # check doublespend block is rejected by other nodes
        timeout = 10
        for i in range(0, self.num_nodes):
            if i == self.isolated_idx:
                continue
            res = self.nodes[i].waitforblock(wrong_block, timeout)
            assert (res['hash'] != wrong_block)
            # wait for long time only for first node
            timeout = 1
        # send coins back to the controller node without waiting for confirmations
        receiver.sendtoaddress(self.nodes[0].getnewaddress(), 0.9, "", "",
                               True)
        assert_equal(receiver.getwalletinfo()["balance"], 0)
        # mine more blocks
        # TODO: mine these blocks on an isolated node
        self.bump_mocktime(1)
        set_node_times(self.nodes, self.mocktime)
        self.nodes[0].generate(2)
        self.sync_all()
Example #12
0
    def run_test(self):
        """Exercise spork activation, deactivation, custom values, and
        persistence of the spork value across a node restart.

        Mocktime is bumped by one second before each state change so the
        spork update carries a strictly newer timestamp and propagates.
        """
        self.log_title()
        set_node_times(self.nodes, self.mocktime)
        sporkName = "SPORK_8_MASTERNODE_PAYMENT_ENFORCEMENT"

        # 0 - check SPORK 8 status from node 1 (must be inactive)
        assert_equal(False, self.is_spork_active(1, sporkName))

        # 1 - activate SPORK 8 with nodes[0]
        assert_equal("success", self.activate_spork(0, sporkName))
        sleep(1)
        # check SPORK 8 status from nodes[1] (must be active)
        assert_equal(True, self.is_spork_active(1, sporkName))

        # 2 - Adjust time to 1 sec in the future and deactivate SPORK 8 with node[0]
        self.mocktime += 1
        set_node_times(self.nodes, self.mocktime)
        assert_equal("success", self.deactivate_spork(0, sporkName))
        sleep(1)
        # check SPORK 8 value from nodes[1] (must be inactive again)
        assert_equal(False, self.is_spork_active(1, sporkName))

        # 3 - Adjust time to 1 sec in the future and set new value (mocktime) for SPORK 8 with node[0]
        self.mocktime += 1
        set_node_times(self.nodes, self.mocktime)
        assert_equal("success", self.set_spork(0, sporkName, self.mocktime))
        sleep(1)
        # check SPORK 8 value from nodes[1] (must be equal to mocktime)
        assert_equal(self.mocktime, self.get_spork(1, sporkName))

        # 4 - Set cold staking removal spork and check value
        newSporkName = "SPORK_21_COLDSTAKING_REMOVAL"
        assert_equal("success", self.activate_spork(0, newSporkName,
                                                    1644796801))
        sleep(1)
        assert_equal(1644796801, self.get_spork(1, newSporkName))

        # 5 - Stop nodes and check value again after restart
        self.log.info("Stopping nodes...")
        self.stop_nodes()
        self.log.info("Restarting node 1...")
        self.start_node(1, [])
        # spork value must have been persisted to disk across the restart
        assert_equal(self.mocktime, self.get_spork(1, sporkName))
        self.log.info("%s: TEST PASSED" % self.__class__.__name__)
    def run_test(self):
        """Exercise budget finalization and superblock payments across a
        chain split.

        Mines the initial PoW/PoS chain, sets up and activates two
        masternodes controlled by minerA, creates and votes a budget
        proposal, finalizes the budget, then splits the network and checks
        that both sides of the split produce the expected superblock
        before reconnecting and verifying the reorg converges on the
        longer chain.
        """
        minerA = self.nodes[self.minerAPos]  # also controller of mn1 and mn2
        minerB = self.nodes[self.minerBPos]
        mn1 = self.nodes[self.remoteOnePos]
        mn2 = self.nodes[self.remoteTwoPos]

        # First mine 250 PoW blocks (50 with minerB, 200 with minerA)
        self.log.info("Generating 259 blocks...")
        for _ in range(2):
            for _ in range(25):
                self.mocktime = self.generate_pow(self.minerBPos,
                                                  self.mocktime)
            self.sync_blocks()
            for _ in range(100):
                self.mocktime = self.generate_pow(self.minerAPos,
                                                  self.mocktime)
            self.sync_blocks()
        # Then stake 9 blocks with minerA (250 PoW + 9 PoS = 259 total)
        self.stake_and_ping(self.minerAPos, 9, [])
        for n in self.nodes:
            assert_equal(n.getblockcount(), 259)

        # Setup Masternodes
        self.log.info("Masternodes setup...")
        ownerdir = os.path.join(self.options.tmpdir, "node%d" % self.minerAPos,
                                "regtest")
        self.mnOneCollateral = self.setupMasternode(minerA, minerA,
                                                    self.masternodeOneAlias,
                                                    ownerdir,
                                                    self.remoteOnePos,
                                                    self.mnOnePrivkey)
        self.mnTwoCollateral = self.setupMasternode(minerA, minerA,
                                                    self.masternodeTwoAlias,
                                                    ownerdir,
                                                    self.remoteTwoPos,
                                                    self.mnTwoPrivkey)

        # Activate masternodes
        self.log.info("Masternodes activation...")
        self.stake_and_ping(self.minerAPos, 1, [])
        time.sleep(3)
        self.advance_mocktime(10)
        remoteOnePort = p2p_port(self.remoteOnePos)
        remoteTwoPort = p2p_port(self.remoteTwoPos)
        mn1.initmasternode(self.mnOnePrivkey,
                           "127.0.0.1:" + str(remoteOnePort))
        mn2.initmasternode(self.mnTwoPrivkey,
                           "127.0.0.1:" + str(remoteTwoPort))
        self.stake_and_ping(self.minerAPos, 1, [])
        self.wait_until_mnsync_finished()
        self.controller_start_masternode(minerA, self.masternodeOneAlias)
        self.controller_start_masternode(minerA, self.masternodeTwoAlias)
        self.wait_until_mn_preenabled(self.mnOneCollateral.hash, 40)
        # BUG FIX: the second wait previously re-checked mnOneCollateral,
        # leaving mn2's pre-enabled state unverified.
        self.wait_until_mn_preenabled(self.mnTwoCollateral.hash, 40)
        self.send_3_pings([mn1, mn2])
        self.wait_until_mn_enabled(self.mnOneCollateral.hash, 120, [mn1, mn2])
        # BUG FIX: likewise wait for mn2 (not mn1 twice) to become enabled.
        self.wait_until_mn_enabled(self.mnTwoCollateral.hash, 120, [mn1, mn2])

        # activate sporks
        self.log.info("Masternodes enabled. Activating sporks.")
        self.activate_spork(self.minerAPos,
                            "SPORK_8_MASTERNODE_PAYMENT_ENFORCEMENT")
        self.activate_spork(self.minerAPos,
                            "SPORK_9_MASTERNODE_BUDGET_ENFORCEMENT")
        self.activate_spork(self.minerAPos, "SPORK_13_ENABLE_SUPERBLOCKS")

        # Create a proposal and vote on it
        next_superblock = minerA.getnextsuperblock()
        payee = minerA.getnewaddress()
        self.log.info("Creating a proposal to be paid at block %d" %
                      next_superblock)
        proposalFeeTxId = minerA.preparebudget("test1", "https://test1.org", 2,
                                               next_superblock, payee, 300)
        self.stake_and_ping(self.minerAPos, 3, [mn1, mn2])
        proposalHash = minerA.submitbudget("test1", "https://test1.org", 2,
                                           next_superblock, payee, 300,
                                           proposalFeeTxId)
        time.sleep(1)
        self.stake_and_ping(self.minerAPos, 7, [mn1, mn2])
        self.log.info("Vote for the proposal and check projection...")
        minerA.mnbudgetvote("alias", proposalHash, "yes",
                            self.masternodeOneAlias)
        minerA.mnbudgetvote("alias", proposalHash, "yes",
                            self.masternodeTwoAlias)
        time.sleep(1)
        self.stake_and_ping(self.minerAPos, 1, [mn1, mn2])
        projection = minerB.getbudgetprojection()[0]
        assert_equal(projection["Name"], "test1")
        assert_equal(projection["Hash"], proposalHash)
        assert_equal(projection["Yeas"], 2)

        # Create the finalized budget and vote on it
        self.log.info("Finalizing the budget...")
        self.stake_and_ping(self.minerAPos, 5, [mn1, mn2])
        # first suggest call primes the finalization; the hash is fetched
        # on the second call a few blocks later
        assert (minerA.mnfinalbudgetsuggest() is not None)
        time.sleep(1)
        self.stake_and_ping(self.minerAPos, 4, [mn1, mn2])
        budgetFinHash = minerA.mnfinalbudgetsuggest()
        assert (budgetFinHash != "")
        time.sleep(1)
        minerA.mnfinalbudget("vote-many", budgetFinHash)
        self.stake_and_ping(self.minerAPos, 2, [mn1, mn2])
        budFin = minerB.mnfinalbudget("show")
        budget = budFin[next(iter(budFin))]
        assert_equal(budget["VoteCount"], 2)

        # Stake up until the block before the superblock.
        skip_blocks = next_superblock - minerA.getblockcount() - 1
        self.stake_and_ping(self.minerAPos, skip_blocks, [mn1, mn2])

        # Split the network.
        self.log.info("Splitting the chain at block %d" %
                      minerA.getblockcount())
        self.split_network()

        # --- Chain A ---
        self.nodes.pop(self.minerBPos)
        # mine the superblock and check payment
        self.log.info("Checking superblock on chain A...")
        self.create_and_check_superblock(minerA, next_superblock, payee)
        # Add 10 blocks on top
        self.log.info("Staking 10 blocks...")
        self.stake_and_ping(self.nodes.index(minerA), 10, [mn1, mn2])

        # --- Chain B ---
        other_nodes = self.nodes.copy()
        self.nodes = [minerB]
        # mine the superblock and check payment
        self.log.info("Checking superblock on chain B...")
        self.create_and_check_superblock(minerB, next_superblock, payee)
        # Add 1 single block on top
        self.log.info("Staking 1 block...")
        self.stake_and_ping(self.nodes.index(minerB), 1, [])

        # --- Reconnect nodes --
        self.log.info("Reconnecting and re-organizing blocks...")
        self.nodes = other_nodes
        self.nodes.insert(self.minerBPos, minerB)
        set_node_times(self.nodes, self.mocktime)
        self.reconnect_nodes()
        self.sync_all()
        # chain A is longer (superblock + 10) so minerB must reorg to it
        assert_equal(minerB.getblockcount(), next_superblock + 10)
        assert_equal(minerB.getbestblockhash(), minerA.getbestblockhash())

        self.log.info("All good.")
    def run_test(self):
        """Test block finalization (finalizeblock / getfinalizedblockhash).

        Drives node 0 ("node"), node 1 ("alt_node") and node 2
        ("delay_node") through manual finalization, rejection of competing
        chains past a finalized block, finalization rollback via
        invalidateblock/reconsiderblock, auto-finalization as the tip
        advances, and the finalization delay (self.finalization_delay)
        both in steady state and after a node restart.
        """
        node = self.nodes[0]

        self.mocktime = int(time.time())

        self.log.info("Test block finalization...")
        node.generatetoaddress(10, node.get_deterministic_priv_key().address)
        tip = node.getbestblockhash()
        node.finalizeblock(tip)
        assert_equal(node.getbestblockhash(), tip)
        assert_equal(node.getfinalizedblockhash(), tip)

        def wait_for_tip(node, tip):
            """Block until *node* reports *tip* as its best block hash."""
            def check_tip():
                return node.getbestblockhash() == tip
            self.wait_until(check_tip)

        alt_node = self.nodes[1]
        wait_for_tip(alt_node, tip)

        alt_node.invalidateblock(tip)
        # We will use this later
        fork_block = alt_node.getbestblockhash()

        # Node 0 should not accept the whole alt_node's chain due to tip being finalized,
        # even though it is longer.
        # Headers would not be accepted if previousblock is invalid:
        #    - First block from alt node has same height than node tip, but is on a minority chain. Its
        #    status is "valid-headers"
        #    - Second block from alt node has height > node tip height, will be marked as invalid because
        #    node tip is finalized
        #    - Later blocks from alt node will be rejected because their previous block are invalid
        #
        # Expected state:
        #
        # On alt_node:
        #                           >(210)->(211)-> // ->(218 tip)
        #                          /
        # (200)->(201)-> // ->(209)->(210 invalid)
        #
        # On node:
        #                           >(210 valid-headers)->(211 invalid)->(212 to 218 dropped)
        #                          /
        # (200)->(201)-> // ->(209)->(210 finalized, tip)

        def wait_for_block(node, block, status="invalid"):
            """Block until *node* lists *block* among its chain tips with
            the given *status*; asserts the block never becomes active."""
            def check_block():
                for tip in node.getchaintips():
                    if tip["hash"] == block:
                        # a finalized-rejected block must never be the active tip
                        assert tip["status"] != "active"
                        return tip["status"] == status
                return False
            self.wait_until(check_block)

        # First block header is accepted as valid-header
        alt_node.generatetoaddress(
            1, alt_node.get_deterministic_priv_key().address)
        wait_for_block(node, alt_node.getbestblockhash(), "valid-headers")

        # Second block header is accepted but set invalid
        alt_node.generatetoaddress(
            1, alt_node.get_deterministic_priv_key().address)
        invalid_block = alt_node.getbestblockhash()
        wait_for_block(node, invalid_block)

        # Later block headers are rejected
        for _ in range(2, 9):
            alt_node.generatetoaddress(
                1, alt_node.get_deterministic_priv_key().address)
            assert_raises_rpc_error(-5, RPC_BLOCK_NOT_FOUND_ERROR,
                                    node.getblockheader, alt_node.getbestblockhash())

        assert_equal(node.getbestblockhash(), tip)
        assert_equal(node.getfinalizedblockhash(), tip)

        self.log.info("Test that an invalid block cannot be finalized...")
        assert_raises_rpc_error(-20, RPC_FINALIZE_INVALID_BLOCK_ERROR,
                                node.finalizeblock, invalid_block)

        self.log.info(
            "Test that invalidating a finalized block moves the finalization backward...")

        # Node's finalized block will be invalidated, which causes the finalized block to
        # move to the previous block.
        #
        # Expected state:
        #
        # On alt_node:
        #                                                 >(210)->(211)-> // ->(218 tip)
        #                                                /
        # (200)->(201)-> // ->(208 auto-finalized)->(209)->(210 invalid)
        #
        # On node:
        #                                     >(210 valid-headers)->(211 invalid)->(212 to 218 dropped)
        #                                    /
        # (200)->(201)-> // ->(209 finalized)->(210 tip)
        node.invalidateblock(tip)
        node.reconsiderblock(tip)

        assert_equal(node.getbestblockhash(), tip)
        assert_equal(node.getfinalizedblockhash(), fork_block)

        assert_equal(alt_node.getfinalizedblockhash(), node.getblockheader(
            node.getfinalizedblockhash())['previousblockhash'])

        # The node will now accept that chain as the finalized block moved back.
        # Generate a new block on alt_node to trigger getheader from node
        # Previous 212-218 height blocks have been dropped because their previous was invalid
        #
        # Expected state:
        #
        # On alt_node:
        #                                          >(210)->(211)-> // ->(218)->(219 tip)
        #                                         /
        # (200)->(201)-> // ->(209 auto-finalized)->(210 invalid)
        #
        # On node:
        #                                     >(210)->(211)->(212)-> // ->(218)->(219 tip)
        #                                    /
        # (200)->(201)-> // ->(209 finalized)->(210)
        node.reconsiderblock(invalid_block)

        alt_node_tip = alt_node.generatetoaddress(
            1, alt_node.get_deterministic_priv_key().address)[-1]
        wait_for_tip(node, alt_node_tip)

        assert_equal(node.getbestblockhash(), alt_node.getbestblockhash())
        assert_equal(node.getfinalizedblockhash(), fork_block)
        assert_equal(alt_node.getfinalizedblockhash(), fork_block)

        self.log.info("Trigger reorg via block finalization...")
        # Finalize node tip to reorg
        #
        # Expected state:
        #
        # On alt_node:
        #                                          >(210)->(211)-> // ->(218)->(219 tip)
        #                                         /
        # (200)->(201)-> // ->(209 auto-finalized)->(210 invalid)
        #
        # On node:
        #                           >(210 invalid)-> // ->(219 invalid)
        #                          /
        # (200)->(201)-> // ->(209)->(210 finalized, tip)
        node.finalizeblock(tip)
        assert_equal(node.getfinalizedblockhash(), tip)

        self.log.info("Try to finalize a block on a competiting fork...")
        assert_raises_rpc_error(-20, RPC_FINALIZE_INVALID_BLOCK_ERROR,
                                node.finalizeblock, alt_node.getbestblockhash())
        assert_equal(node.getfinalizedblockhash(), tip)

        self.log.info(
            "Check auto-finalization occurs as the tip move forward...")
        # Reconsider alt_node tip then generate some more blocks on alt_node.
        # Auto-finalization will occur on both chains.
        #
        # Expected state:
        #
        # On alt_node:
        #                           >(210)->(211)-> // ->(219 auto-finalized)-> // ->(229 tip)
        #                          /
        # (200)->(201)-> // ->(209)->(210 invalid)
        #
        # On node:
        #                           >(210)->(211)-> // ->(219 auto-finalized)-> // ->(229 tip)
        #                          /
        # (200)->(201)-> // ->(209)->(210 invalid)
        node.reconsiderblock(alt_node.getbestblockhash())
        block_to_autofinalize = alt_node.generatetoaddress(
            1, alt_node.get_deterministic_priv_key().address)[-1]
        alt_node_new_tip = alt_node.generatetoaddress(
            9, alt_node.get_deterministic_priv_key().address)[-1]
        wait_for_tip(node, alt_node_new_tip)

        assert_equal(node.getbestblockhash(), alt_node.getbestblockhash())
        assert_equal(node.getfinalizedblockhash(), alt_node_tip)
        assert_equal(alt_node.getfinalizedblockhash(), alt_node_tip)

        self.log.info(
            "Try to finalize a block on an already finalized chain...")
        # Finalizing a block of an already finalized chain should have no
        # effect
        block_218 = node.getblockheader(alt_node_tip)['previousblockhash']
        node.finalizeblock(block_218)
        assert_equal(node.getfinalizedblockhash(), alt_node_tip)

        self.log.info(
            "Make sure reconsidering block move the finalization point...")
        # Reconsidering the tip will move back the finalized block on node
        #
        # Expected state:
        #
        # On alt_node:
        #                           >(210)->(211)-> // ->(219 auto-finalized)-> // ->(229 tip)
        #                          /
        # (200)->(201)-> // ->(209)->(210 invalid)
        #
        # On node:
        #                                     >(210)->(211)-> // ->(219)-> // ->(229 tip)
        #                                    /
        # (200)->(201)-> // ->(209 finalized)->(210)
        node.reconsiderblock(tip)

        assert_equal(node.getbestblockhash(), alt_node_new_tip)
        assert_equal(node.getfinalizedblockhash(), fork_block)

        # TEST FINALIZATION DELAY

        self.log.info("Check that finalization delay prevents eclipse attacks")
        # Because there has been no delay since the beginning of this test,
        # there should have been no auto-finalization on delay_node.
        #
        # Expected state:
        #
        # On alt_node:
        #                           >(210)->(211)-> // ->(219 auto-finalized)-> // ->(229 tip)
        #                          /
        # (200)->(201)-> // ->(209)->(210 invalid)
        #
        # On delay_node:
        #                           >(210)->(211)-> // ->(219)-> // ->(229 tip)
        #                          /
        # (200)->(201)-> // ->(209)->(210)
        delay_node = self.nodes[2]
        wait_for_tip(delay_node, alt_node_new_tip)
        # empty string means "no finalized block yet"
        assert_equal(delay_node.getfinalizedblockhash(), str())

        self.log.info(
            "Check that finalization delay does not prevent auto-finalization")
        # Expire the delay, then generate 1 new block with alt_node to
        # update the tip on all chains.
        # Because the finalization delay is expired, auto-finalization
        # should occur.
        #
        # Expected state:
        #
        # On alt_node:
        #                           >(220 auto-finalized)-> // ->(230 tip)
        #                          /
        # (200)->(201)-> // ->(209)->(210 invalid)
        #
        # On delay_node:
        #                           >(220 auto-finalized)-> // ->(230 tip)
        #                          /
        # (200)->(201)-> // ->(209)->(210)
        self.mocktime += self.finalization_delay
        set_node_times([delay_node], self.mocktime)
        new_tip = alt_node.generatetoaddress(
            1, alt_node.get_deterministic_priv_key().address)[-1]

        assert_equal(alt_node.getbestblockhash(), new_tip)
        assert_equal(alt_node.getfinalizedblockhash(), block_to_autofinalize)

        wait_for_tip(node, new_tip)
        assert_equal(node.getfinalizedblockhash(), block_to_autofinalize)

        wait_for_tip(delay_node, new_tip)
        self.log.info(
            "Check that finalization delay is effective on node boot")
        # Restart the new node, so the blocks have no header received time.
        self.restart_node(2)

        # There should be no finalized block (getfinalizedblockhash returns an
        # empty string)
        assert_equal(delay_node.getfinalizedblockhash(), str())

        # Generate 20 blocks with no delay. This should not trigger auto-finalization.
        #
        # Expected state:
        #
        # On delay_node:
        #                           >(220)-> // ->(250 tip)
        #                          /
        # (200)->(201)-> // ->(209)->(210)
        blocks = delay_node.generatetoaddress(
            20, alt_node.get_deterministic_priv_key().address)
        reboot_autofinalized_block = blocks[10]
        new_tip = blocks[-1]
        wait_for_tip(delay_node, new_tip)

        assert_equal(delay_node.getfinalizedblockhash(), str())

        # Now let the finalization delay to expire, then generate one more block.
        # This should resume auto-finalization.
        #
        # Expected state:
        #
        # On delay_node:
        #                           >(220)-> // ->(241 auto-finalized)-> // ->(251 tip)
        #                          /
        # (200)->(201)-> // ->(209)->(210)
        self.mocktime += self.finalization_delay
        set_node_times([delay_node], self.mocktime)
        new_tip = delay_node.generatetoaddress(
            1, delay_node.get_deterministic_priv_key().address)[-1]
        wait_for_tip(delay_node, new_tip)

        assert_equal(delay_node.getfinalizedblockhash(),
                     reboot_autofinalized_block)
Example #15
0
    def run_test(self):
        """Test zerocoin public spends across a chain split and reorg.

        Stakes up to public-spend activation, creates two pairs of raw
        zerocoin spends of the same mints (tx_A* and tx_B* paying different
        recipients), disconnects the nodes, relays tx_A* on node 0's chain
        and tx_B* on node 1's chain, then reconnects so node 0 and node 2
        reorg onto node 1's longer chain. Also checks that a coinstake
        input from the abandoned chain becomes spendable again and that
        the money/zerocoin supplies stay consistent.
        """

        def findUtxoInList(txid, vout, utxo_list):
            """Return (True, utxo) if (txid, vout) is in utxo_list,
            otherwise (False, None)."""
            for x in utxo_list:
                if x["txid"] == txid and x["vout"] == vout:
                    return True, x
            return False, None

        # Check DASHD and zDASHD supply at the beginning
        # ------------------------------------------
        # zDASHD supply: 2 coins for each denomination
        expected_zpiv_supply = {
            "1": 2,
            "5": 10,
            "10": 20,
            "50": 100,
            "100": 200,
            "500": 1000,
            "1000": 2000,
            "5000": 10000,
            "total": 13332,
        }
        # DASHD supply: block rewards minus burned fees for minting
        expected_money_supply = 250.0 * 330 - 16 * 0.01
        self.check_money_supply(expected_money_supply, expected_zpiv_supply)

        # Stake with node 0 and node 1 up to public spend activation (400)
        # 70 blocks: 5 blocks each (x7)
        self.log.info("Staking 70 blocks to reach public spends activation...")
        set_node_times(self.nodes, self.mocktime)
        for i in range(7):
            for peer in range(2):
                for nblock in range(5):
                    self.mocktime = self.generate_pos(peer, self.mocktime)
                sync_blocks(self.nodes)
        # both forks start their block times from the same point
        block_time_0 = block_time_1 = self.mocktime
        self.log.info("Blocks staked.")

        # Check balances
        self.log.info("Checking balances...")
        initial_balance = [self.get_tot_balance(i) for i in range(self.num_nodes)]
        # --nodes 0, 1: 62 pow blocks + 55 pos blocks
        assert_equal(initial_balance[0], DecimalAmt(250.0 * (62 + 55)))
        assert_equal(initial_balance[1], DecimalAmt(250.0 * (62 + 55)))
        # --node 2: 62 pow blocks + 20 pos blocks - zc minted - zcfee
        assert_equal(initial_balance[2], DecimalAmt(250.0 * (62 + 20) - 6666 - 0.08))
        assert_equal(self.nodes[2].getzerocoinbalance()['Total'], DecimalAmt(6666))
        self.log.info("Balances ok.")

        # create the raw zerocoin spend txes
        addy = self.nodes[2].getnewaddress()
        self.log.info("Creating the raw zerocoin public spends...")
        mints = self.nodes[2].listmintedzerocoins(True, True)
        tx_A0 = self.nodes[2].createrawzerocoinspend(mints[0]["serial hash"], addy)
        tx_A1 = self.nodes[2].createrawzerocoinspend(mints[1]["serial hash"], addy)
        # Spending same coins to different recipients to get different txids
        new_addy = "yAVWM5urwaTyhiuFQHP2aP47rdZsLUG5PH"
        tx_B0 = self.nodes[2].createrawzerocoinspend(mints[0]["serial hash"], new_addy)
        tx_B1 = self.nodes[2].createrawzerocoinspend(mints[1]["serial hash"], new_addy)

        # Disconnect nodes
        minted_amount = mints[0]["denomination"] + mints[1]["denomination"]
        self.disconnect_all()

        # Stake one block with node-0 and save the stake input
        self.log.info("Staking 1 block with node 0...")
        initial_unspent_0 = self.nodes[0].listunspent()
        self.nodes[0].generate(1)
        block_time_0 += 60
        set_node_times(self.nodes, block_time_0)
        last_block = self.nodes[0].getblock(self.nodes[0].getbestblockhash())
        assert(len(last_block["tx"]) > 1)                                       # a PoS block has at least two txes
        coinstake_txid = last_block["tx"][1]
        coinstake_tx = self.nodes[0].getrawtransaction(coinstake_txid, True)
        assert (coinstake_tx["vout"][0]["scriptPubKey"]["hex"] == "")  # first output of coinstake is empty
        stakeinput = coinstake_tx["vin"][0]

        # The stake input was unspent 1 block ago, now it's not
        res, utxo = findUtxoInList(stakeinput["txid"], stakeinput["vout"], initial_unspent_0)
        assert (res and utxo["spendable"])
        res, utxo = findUtxoInList(stakeinput["txid"], stakeinput["vout"], self.nodes[0].listunspent())
        assert (not res or not utxo["spendable"])
        self.log.info("Coinstake input %s...%s-%d is no longer spendable." % (
            stakeinput["txid"][:9], stakeinput["txid"][-4:], stakeinput["vout"]))

        # Relay zerocoin spends
        self.nodes[0].sendrawtransaction(tx_A0)
        self.nodes[0].sendrawtransaction(tx_A1)

        # Stake 10 more blocks with node-0 and check balances
        self.log.info("Staking 10 more blocks with node 0...")
        for i in range(10):
            block_time_0 = self.generate_pos(0, block_time_0)
        # 11 = 1 earlier stake + these 10
        expected_balance_0 = initial_balance[0] + DecimalAmt(11 * 250.0)
        assert_equal(self.get_tot_balance(0), expected_balance_0)
        self.log.info("Balance for node 0 checks out.")

        # Connect with node 2, sync and check zerocoin balance
        self.log.info("Reconnecting node 0 and node 2")
        connect_nodes(self.nodes[0], 2)
        sync_blocks([self.nodes[i] for i in [0, 2]])
        self.log.info("Resetting zerocoin mints on node 2")
        self.nodes[2].resetmintzerocoin(True)
        assert_equal(self.get_tot_balance(2), initial_balance[2] + DecimalAmt(minted_amount))
        assert_equal(self.nodes[2].getzerocoinbalance()['Total'], DecimalAmt(6666-minted_amount))
        self.log.info("Balance for node 2 checks out.")

        # Double spending txes not possible
        assert_raises_rpc_error(-26, "bad-zc-spend-contextcheck",
                                self.nodes[0].sendrawtransaction, tx_B0)
        assert_raises_rpc_error(-26, "bad-zc-spend-contextcheck",
                                self.nodes[0].sendrawtransaction, tx_B1)

        # verify that the stakeinput can't be spent
        stakeinput_tx_json = self.nodes[0].getrawtransaction(stakeinput["txid"], True)
        stakeinput_amount = float(stakeinput_tx_json["vout"][int(stakeinput["vout"])]["value"])
        rawtx_unsigned = self.nodes[0].createrawtransaction(
            [{"txid": stakeinput["txid"], "vout": int(stakeinput["vout"])}],
            {"xxncEuJK27ygNh7imNfaX8JV6ZQUnoBqzN": (stakeinput_amount-0.01)})
        rawtx = self.nodes[0].signrawtransaction(rawtx_unsigned)
        assert(rawtx["complete"])
        try:
            self.nodes[0].sendrawtransaction(rawtx["hex"])
        except JSONRPCException as e:
            # JSONRPCException was thrown as expected. Check the code and message values are correct.
            if e.error["code"] not in [-26, -25]:
                raise AssertionError("Unexpected JSONRPC error code %i" % e.error["code"])
            if ([x for x in ["bad-txns-inputs-spent", "Missing inputs"] if x in e.error['message']] == []):
                raise e
        except Exception as e:
            raise AssertionError("Unexpected exception raised: " + type(e).__name__)
        self.log.info("GOOD: v2 spend was not possible.")

        # Spend tx_B0 and tx_B1 on the other chain
        self.nodes[1].sendrawtransaction(tx_B0)
        self.nodes[1].sendrawtransaction(tx_B1)

        # Stake 12 blocks with node-1
        set_node_times(self.nodes, block_time_1)
        self.log.info("Staking 12 blocks with node 1...")
        for i in range(12):
            block_time_1 = self.generate_pos(1, block_time_1)
        expected_balance_1 = initial_balance[1] + DecimalAmt(12 * 250.0)
        assert_equal(self.get_tot_balance(1), expected_balance_1)
        self.log.info("Balance for node 1 checks out.")

        # re-connect and sync nodes and check that node-0 and node-2 get on the other chain
        new_best_hash = self.nodes[1].getbestblockhash()
        self.log.info("Connecting and syncing nodes...")
        set_node_times(self.nodes, block_time_1)
        connect_nodes_clique(self.nodes)
        sync_blocks(self.nodes)
        for i in [0, 2]:
            assert_equal(self.nodes[i].getbestblockhash(), new_best_hash)

        # check balance of node-0
        assert_equal(self.get_tot_balance(0), initial_balance[0])
        self.log.info("Balance for node 0 checks out.")

        # check that NOW the original stakeinput is present and spendable
        res, utxo = findUtxoInList(stakeinput["txid"], stakeinput["vout"], self.nodes[0].listunspent())
        assert (res and utxo["spendable"])
        self.log.info("Coinstake input %s...%s-%d is spendable again." % (
            stakeinput["txid"][:9], stakeinput["txid"][-4:], stakeinput["vout"]))
        self.nodes[0].sendrawtransaction(rawtx["hex"])
        self.nodes[1].generate(1)
        sync_blocks(self.nodes)
        res, utxo = findUtxoInList(stakeinput["txid"], stakeinput["vout"], self.nodes[0].listunspent())
        assert (not res or not utxo["spendable"])

        # Verify that DASHD and zDASHD supplies were properly updated after the spends and reorgs
        self.log.info("Check DASHD and zDASHD supply...")
        expected_money_supply += 250.0 * (self.nodes[1].getblockcount() - 330)
        spent_coin_0 = mints[0]["denomination"]
        spent_coin_1 = mints[1]["denomination"]
        expected_zpiv_supply[str(spent_coin_0)] -= spent_coin_0
        expected_zpiv_supply[str(spent_coin_1)] -= spent_coin_1
        expected_zpiv_supply["total"] -= (spent_coin_0 + spent_coin_1)
        self.check_money_supply(expected_money_supply, expected_zpiv_supply)
        self.log.info("Supply checks out.")
    def run_test(self):

        def get_zerocoin_data(coin):
            return coin["s"], coin["r"], coin["k"], coin["id"], coin["d"], coin["t"]

        def check_balances(denom, zafo_bal, afo_bal):
            zafo_bal -= denom
            assert_equal(self.nodes[2].getzerocoinbalance()['Total'], zafo_bal)
            afo_bal += denom
            wi = self.nodes[2].getwalletinfo()
            assert_equal(wi['balance'] + wi['immature_balance'], afo_bal)
            return zafo_bal, afo_bal

        def stake_4_blocks(block_time):
            sync_mempools(self.nodes)
            for peer in range(2):
                for i in range(2):
                    block_time = self.generate_pos(peer, block_time)
                sync_blocks(self.nodes)
            return block_time

        self.log_title()
        block_time = self.mocktime
        set_node_times(self.nodes, block_time)

        # Start with cache balances
        # NOTE(review): the expected figures (13833.92 transparent, 6666 zerocoin)
        # come from a cached chain set up elsewhere — confirm against the setup code.
        wi = self.nodes[2].getwalletinfo()
        balance = wi['balance'] + wi['immature_balance']
        zafo_balance = self.nodes[2].getzerocoinbalance()['Total']
        assert_equal(balance, DecimalAmt(13833.92))
        assert_equal(zafo_balance, 6666)

        # Export zerocoin data
        # Keep only the exported coins whose id matches a minted serial hash,
        # sorted by denomination ascending (key "d").
        listmints = self.nodes[2].listmintedzerocoins(True, True)
        serial_ids = [mint["serial hash"] for mint in listmints]
        exported_zerocoins = [x for x in self.nodes[2].exportzerocoins(False) if x["id"] in serial_ids]
        exported_zerocoins.sort(key=lambda x: x["d"], reverse=False)
        assert_equal(8, len(exported_zerocoins))

        # 1) stake more blocks - save a v3 spend for later (serial_1)
        serial_1, randomness_1, privkey_1, id_1, denom_1, tx_1 = get_zerocoin_data(exported_zerocoins[1])
        self.log.info("Staking 70 blocks to get to public spend activation")
        # 5 rounds x 2 peers x 7 blocks = 70 blocks total.
        for j in range(5):
            for peer in range(2):
                for i in range(7):
                    block_time = self.generate_pos(peer, block_time)
                sync_blocks(self.nodes)
        # Build (but do not broadcast) a v3 public spend to replay later in step 7/9.
        old_spend_v3 = self.nodes[2].createrawzerocoinspend(id_1)

        # 2) Spend one minted coin - spend v3 (serial_2)
        serial_2, randomness_2, privkey_2, id_2, denom_2, tx_2 = get_zerocoin_data(exported_zerocoins[2])
        self.log.info("Spending the minted coin with serial %s..." % serial_2[:16])
        txid = self.nodes[2].spendzerocoinmints([id_2])['txid']
        # stake 4 blocks - check it gets included on chain and check balances
        block_time = stake_4_blocks(block_time)
        self.check_tx_in_chain(0, txid)
        zafo_balance, balance = check_balances(denom_2, zafo_balance, balance)
        self.log.info("--> VALID PUBLIC COIN SPEND (v3) PASSED")

        # 3) Check double spends - spend v3
        self.log.info("Trying to spend the serial twice now...")
        assert_raises_rpc_error(-4, "Trying to spend an already spent serial",
                                self.nodes[2].spendrawzerocoin, serial_2, randomness_2, denom_2, privkey_2, "", tx_2)


        # 4) Activate v4 spends with SPORK_18
        self.setV4SpendEnforcement()

        # 5) Spend one minted coin - spend v4 (serial_3)
        serial_3, randomness_3, privkey_3, id_3, denom_3, tx_3 = get_zerocoin_data(exported_zerocoins[3])
        self.log.info("Spending the minted coin with serial %s..." % serial_3[:16])
        txid = self.nodes[2].spendzerocoinmints([id_3])['txid']
        # stake 4 blocks - check it gets included on chain and check balances
        block_time = stake_4_blocks(block_time)
        self.check_tx_in_chain(0, txid)
        zafo_balance, balance = check_balances(denom_3, zafo_balance, balance)
        self.log.info("--> VALID PUBLIC COIN SPEND (v4) PASSED")

        # 6) Check double spends - spend v4
        self.log.info("Trying to spend the serial twice now...")
        assert_raises_rpc_error(-4, "Trying to spend an already spent serial",
                                self.nodes[2].spendrawzerocoin, serial_3, randomness_3, denom_3, privkey_3, "", tx_3)

        # 7) Try to relay old v3 spend now (serial_1)
        # With v4 enforced, the saved v3 transaction must be rejected by the mempool.
        self.log.info("Trying to send old v3 spend now...")
        assert_raises_rpc_error(-26, "bad-zc-spend-version",
                                self.nodes[2].sendrawtransaction, old_spend_v3)
        self.log.info("GOOD: Old transaction not sent.")

        # 8) Try to double spend with v4 a mint already spent with v3 (serial_2)
        self.log.info("Trying to double spend v4 against v3...")
        assert_raises_rpc_error(-4, "Trying to spend an already spent serial",
                                self.nodes[2].spendrawzerocoin, serial_2, randomness_2, denom_2, privkey_2, "", tx_2)
        self.log.info("GOOD: Double-spending transaction did not verify.")

        # 9) Reactivate v3 spends and try to spend the old saved one (serial_1) again
        self.setV4SpendEnforcement(False)
        self.log.info("Trying to send old v3 spend now (serial: %s...)" % serial_1[:16])
        txid = self.nodes[2].sendrawtransaction(old_spend_v3)
        # stake 4 blocks - check it gets included on chain and check balances
        _ = stake_4_blocks(block_time)
        self.check_tx_in_chain(0, txid)
        # need to reset spent mints since this was a raw broadcast
        self.nodes[2].resetmintzerocoin()
        _, _ = check_balances(denom_1, zafo_balance, balance)
        self.log.info("--> VALID PUBLIC COIN SPEND (v3) PASSED")
Example #17
0
    def run_test(self):
        """Exercise the Cold Staking (P2CS) implementation end to end.

        Steps: mine initial blocks, empty the staker's own balance, create
        owner/staking addresses, check SPORK-17 enforcement, delegate inputs,
        verify owner-only spending, whitelist the delegator, produce and submit
        cold-staked blocks (via internal miner and raw transactions), reject
        malicious coinstakes, void the delegation contracts, and finally check
        balances after maturity.
        """
        self.description = "Performs tests on the Cold Staking P2CS implementation"
        self.init_test()
        # Number of P2CS delegations created in step 5, and the value of each.
        NUM_OF_INPUTS = 20
        INPUT_VALUE = 249

        # nodes[0] - coin-owner
        # nodes[1] - cold-staker

        # 1) nodes[0] and nodes[2] mine 25 blocks each
        # --------------------------------------------
        print("*** 1 ***")
        self.log.info("Mining 50 Blocks...")
        for peer in [0, 2]:
            for j in range(25):
                self.mocktime = self.generate_pow(peer, self.mocktime)
            sync_blocks(self.nodes)

        # 2) node[1] sends his entire balance (50 mature rewards) to node[2]
        #  - node[2] stakes a block - node[1] locks the change
        # NOTE(review): the 50 * 250 figure implies node 1 already holds 50
        # mature 250-coin rewards from earlier setup — confirm in init_test.
        print("*** 2 ***")
        self.log.info("Emptying node1 balance")
        assert_equal(self.nodes[1].getbalance(), 50 * 250)
        txid = self.nodes[1].sendtoaddress(self.nodes[2].getnewaddress(), (50 * 250 - 0.01))
        assert (txid is not None)
        sync_mempools(self.nodes)
        self.mocktime = self.generate_pos(2, self.mocktime)
        sync_blocks(self.nodes)
        # lock the change output (so it's not used as stake input in generate_pos)
        for x in self.nodes[1].listunspent():
            assert (self.nodes[1].lockunspent(False, [{"txid": x['txid'], "vout": x['vout']}]))
        # check that it cannot stake
        sleep(1)
        assert_equal(self.nodes[1].getstakingstatus()["stakeablecoins"], False)

        # 3) nodes[0] generates a owner address
        #    nodes[1] generates a cold-staking address.
        # ---------------------------------------------
        print("*** 3 ***")
        owner_address = self.nodes[0].getnewaddress()
        self.log.info("Owner Address: %s" % owner_address)
        staker_address = self.nodes[1].getnewstakingaddress()
        staker_privkey = self.nodes[1].dumpprivkey(staker_address)
        self.log.info("Staking Address: %s" % staker_address)

        # 4) Check enforcement.
        # ---------------------
        print("*** 4 ***")
        # Check that SPORK 17 is disabled
        assert (not self.isColdStakingEnforced())
        self.log.info("Creating a stake-delegation tx before cold staking enforcement...")
        assert_raises_rpc_error(-4, "The transaction was rejected!",
                                self.nodes[0].delegatestake, staker_address, INPUT_VALUE, owner_address, False, False, True)
        self.log.info("Good. Cold Staking NOT ACTIVE yet.")

        # Enable SPORK
        self.setColdStakingEnforcement()
        # double check
        assert (self.isColdStakingEnforced())

        # 5) nodes[0] delegates a number of inputs for nodes[1] to stake em.
        # ------------------------------------------------------------------
        print("*** 5 ***")
        self.log.info("First check warning when using external addresses...")
        assert_raises_rpc_error(-5, "Only the owner of the key to owneraddress will be allowed to spend these coins",
                                self.nodes[0].delegatestake, staker_address, INPUT_VALUE, "n3ZYUKZCULd6XNK7X3SRxrHRJNwKePiCVd")
        self.log.info("Good. Warning triggered.")

        self.log.info("Now force the use of external address creating (but not sending) the delegation...")
        # Fourth argument True forces acceptance of an external owner address.
        res = self.nodes[0].rawdelegatestake(staker_address, INPUT_VALUE, "n3ZYUKZCULd6XNK7X3SRxrHRJNwKePiCVd", True)
        assert(res is not None and res != "")
        self.log.info("Good. Warning NOT triggered.")

        self.log.info("Now delegate with internal owner address..")
        self.log.info("Try first with a value (0.99) below the threshold")
        assert_raises_rpc_error(-8, "Invalid amount",
                                self.nodes[0].delegatestake, staker_address, 0.99, owner_address)
        self.log.info("Nice. it was not possible.")
        self.log.info("Then try (creating but not sending) with the threshold value (1.00)")
        res = self.nodes[0].rawdelegatestake(staker_address, 1.00, owner_address)
        assert(res is not None and res != "")
        self.log.info("Good. Warning NOT triggered.")

        self.log.info("Now creating %d real stake-delegation txes..." % NUM_OF_INPUTS)
        for i in range(NUM_OF_INPUTS):
            res = self.nodes[0].delegatestake(staker_address, INPUT_VALUE, owner_address)
            assert(res != None and res["txid"] != None and res["txid"] != "")
            assert_equal(res["owner_address"], owner_address)
            assert_equal(res["staker_address"], staker_address)
        sync_mempools(self.nodes)
        self.mocktime = self.generate_pos(2, self.mocktime)
        sync_blocks(self.nodes)
        self.log.info("%d Txes created." % NUM_OF_INPUTS)
        # check balances:
        self.expected_balance = NUM_OF_INPUTS * INPUT_VALUE
        self.expected_immature_balance = 0
        self.checkBalances()

        # 6) check that the owner (nodes[0]) can spend the coins.
        # -------------------------------------------------------
        print("*** 6 ***")
        self.log.info("Spending back one of the delegated UTXOs...")
        delegated_utxos = getDelegatedUtxos(self.nodes[0].listunspent())
        assert_equal(NUM_OF_INPUTS, len(delegated_utxos))
        assert_equal(len(delegated_utxos), len(self.nodes[0].listcoldutxos()))
        u = delegated_utxos[0]
        txhash = self.spendUTXOwithNode(u, 0)
        assert(txhash != None)
        self.log.info("Good. Owner was able to spend - tx: %s" % str(txhash))

        self.mocktime = self.generate_pos(2, self.mocktime)
        sync_blocks(self.nodes)
        # check balances after spend.
        self.expected_balance -= float(u["amount"])
        self.checkBalances()
        self.log.info("Balances check out after spend")
        assert_equal(NUM_OF_INPUTS-1, len(self.nodes[0].listcoldutxos()))

        # 7) check that the staker CANNOT use the coins to stake yet.
        # He needs to whitelist the owner first.
        # -----------------------------------------------------------
        print("*** 7 ***")
        self.log.info("Trying to generate a cold-stake block before whitelisting the owner...")
        assert_equal(self.nodes[1].getstakingstatus()["stakeablecoins"], False)
        self.log.info("Nice. Cold staker was NOT able to create the block yet.")

        self.log.info("Whitelisting the owner...")
        ret = self.nodes[1].delegatoradd(owner_address)
        assert(ret)
        self.log.info("Delegator address %s whitelisted" % owner_address)

        # 8) check that the staker CANNOT spend the coins.
        # ------------------------------------------------
        print("*** 8 ***")
        self.log.info("Trying to spend one of the delegated UTXOs with the cold-staking key...")
        delegated_utxos = getDelegatedUtxos(self.nodes[0].listunspent())
        assert_greater_than(len(delegated_utxos), 0)
        u = delegated_utxos[0]
        assert_raises_rpc_error(-26, "mandatory-script-verify-flag-failed (Script failed an OP_CHECKCOLDSTAKEVERIFY operation",
                                self.spendUTXOwithNode, u, 1)
        self.log.info("Good. Cold staker was NOT able to spend (failed OP_CHECKCOLDSTAKEVERIFY)")
        self.mocktime = self.generate_pos(2, self.mocktime)
        sync_blocks(self.nodes)

        # 9) check that the staker can use the coins to stake a block with internal miner.
        # --------------------------------------------------------------------------------
        print("*** 9 ***")
        assert_equal(self.nodes[1].getstakingstatus()["stakeablecoins"], True)
        self.log.info("Generating one valid cold-stake block...")
        self.mocktime = self.generate_pos(1, self.mocktime)
        self.log.info("New block created by cold-staking. Trying to submit...")
        newblockhash = self.nodes[1].getbestblockhash()
        self.log.info("Block %s submitted" % newblockhash)

        # Verify that nodes[0] accepts it
        sync_blocks(self.nodes)
        assert_equal(self.nodes[0].getblockcount(), self.nodes[1].getblockcount())
        assert_equal(newblockhash, self.nodes[0].getbestblockhash())
        self.log.info("Great. Cold-staked block was accepted!")

        # check balances after staked block.
        # The staked input moves to immature, together with the 250 block reward.
        self.expected_balance -= INPUT_VALUE
        self.expected_immature_balance += (INPUT_VALUE + 250)
        self.checkBalances()
        self.log.info("Balances check out after staked block")

        # 10) check that the staker can use the coins to stake a block with a rawtransaction.
        # ----------------------------------------------------------------------------------
        print("*** 10 ***")
        self.log.info("Generating another valid cold-stake block...")
        stakeable_coins = getDelegatedUtxos(self.nodes[0].listunspent())
        stakeInputs = self.get_prevouts(1, stakeable_coins)
        assert_greater_than(len(stakeInputs), 0)
        # Create the block
        new_block = self.stake_next_block(1, stakeInputs, self.mocktime, staker_privkey)
        self.log.info("New block created (rawtx) by cold-staking. Trying to submit...")
        # Try to submit the block
        ret = self.nodes[1].submitblock(bytes_to_hex_str(new_block.serialize()))
        self.log.info("Block %s submitted." % new_block.hash)
        # submitblock returns None on success.
        assert(ret is None)

        # Verify that nodes[0] accepts it
        sync_blocks(self.nodes)
        assert_equal(self.nodes[0].getblockcount(), self.nodes[1].getblockcount())
        assert_equal(new_block.hash, self.nodes[0].getbestblockhash())
        self.log.info("Great. Cold-staked block was accepted!")
        self.mocktime += 60
        set_node_times(self.nodes, self.mocktime)

        # check balances after staked block.
        self.expected_balance -= INPUT_VALUE
        self.expected_immature_balance += (INPUT_VALUE + 250)
        self.checkBalances()
        self.log.info("Balances check out after staked block")

        # 11) check that the staker cannot stake a block changing the coinstake scriptPubkey.
        # ----------------------------------------------------------------------------------
        print("*** 11 ***")
        self.log.info("Generating one invalid cold-stake block (changing first coinstake output)...")
        stakeable_coins = getDelegatedUtxos(self.nodes[0].listunspent())
        stakeInputs = self.get_prevouts(1, stakeable_coins)
        assert_greater_than(len(stakeInputs), 0)
        # Create the block (with dummy key)
        new_block = self.stake_next_block(1, stakeInputs, self.mocktime, "")
        self.log.info("New block created (rawtx) by cold-staking. Trying to submit...")
        # Try to submit the block
        ret = self.nodes[1].submitblock(bytes_to_hex_str(new_block.serialize()))
        self.log.info("Block %s submitted." % new_block.hash)
        # A non-None string return from submitblock means rejection.
        assert("rejected" in ret)

        # Verify that nodes[0] rejects it
        sync_blocks(self.nodes)
        assert_raises_rpc_error(-5, "Block not found", self.nodes[0].getblock, new_block.hash)
        self.log.info("Great. Malicious cold-staked block was NOT accepted!")
        self.checkBalances()
        self.log.info("Balances check out after (non) staked block")

        # 12) neither adding different outputs to the coinstake.
        # ------------------------------------------------------
        print("*** 12 ***")
        self.log.info("Generating another invalid cold-stake block (adding coinstake output)...")
        stakeable_coins = getDelegatedUtxos(self.nodes[0].listunspent())
        stakeInputs = self.get_prevouts(1, stakeable_coins)
        assert_greater_than(len(stakeInputs), 0)
        # Create the block
        new_block = self.stake_next_block(1, stakeInputs, self.mocktime, staker_privkey)
        # Add output (dummy key address) to coinstake (taking 100 BTCU from the pot)
        self.add_output_to_coinstake(new_block, 100)
        self.log.info("New block created (rawtx) by cold-staking. Trying to submit...")
        # Try to submit the block
        ret = self.nodes[1].submitblock(bytes_to_hex_str(new_block.serialize()))
        self.log.info("Block %s submitted." % new_block.hash)
        assert_equal(ret, "bad-p2cs-outs")

        # Verify that nodes[0] rejects it
        sync_blocks(self.nodes)
        assert_raises_rpc_error(-5, "Block not found", self.nodes[0].getblock, new_block.hash)
        self.log.info("Great. Malicious cold-staked block was NOT accepted!")
        self.checkBalances()
        self.log.info("Balances check out after (non) staked block")

        # 13) Now node[0] gets mad and spends all the delegated coins, voiding the P2CS contracts.
        # ----------------------------------------------------------------------------------------
        self.log.info("Let's void the contracts.")
        self.mocktime = self.generate_pos(2, self.mocktime)
        sync_blocks(self.nodes)
        print("*** 13 ***")
        self.log.info("Cancel the stake delegation spending the delegated utxos...")
        delegated_utxos = getDelegatedUtxos(self.nodes[0].listunspent())
        # remove one utxo to spend later
        final_spend = delegated_utxos.pop()
        txhash = self.spendUTXOsWithNode(delegated_utxos, 0)
        assert(txhash != None)
        self.log.info("Good. Owner was able to void the stake delegations - tx: %s" % str(txhash))
        self.mocktime = self.generate_pos(2, self.mocktime)
        sync_blocks(self.nodes)

        # deactivate SPORK 17 and check that the owner can still spend the last utxo
        self.setColdStakingEnforcement(False)
        assert (not self.isColdStakingEnforced())
        txhash = self.spendUTXOsWithNode([final_spend], 0)
        assert(txhash != None)
        self.log.info("Good. Owner was able to void a stake delegation (with SPORK 17 disabled) - tx: %s" % str(txhash))
        self.mocktime = self.generate_pos(2, self.mocktime)
        sync_blocks(self.nodes)

        # check balances after big spend.
        self.expected_balance = 0
        self.checkBalances()
        self.log.info("Balances check out after the delegations have been voided.")
        # re-activate SPORK17
        self.setColdStakingEnforcement()
        assert (self.isColdStakingEnforced())

        # 14) check that coinstaker is empty and can no longer stake.
        # -----------------------------------------------------------
        print("*** 14 ***")
        self.log.info("Trying to generate one cold-stake block again...")
        assert_equal(self.nodes[1].getstakingstatus()["stakeablecoins"], False)
        self.log.info("Cigar. Cold staker was NOT able to create any more blocks.")

        # 15) check balances when mature.
        # -----------------------------------------------------------
        print("*** 15 ***")
        self.log.info("Staking 100 blocks to mature the cold stakes...")
        # 2 rounds x 2 peers x 25 blocks = 100 blocks.
        for i in range(2):
            for peer in [0, 2]:
                for j in range(25):
                    self.mocktime = self.generate_pos(peer, self.mocktime)
                sync_blocks(self.nodes)
        self.expected_balance = self.expected_immature_balance
        self.expected_immature_balance = 0
        self.checkBalances()
        delegated_utxos = getDelegatedUtxos(self.nodes[0].listunspent())
        txhash = self.spendUTXOsWithNode(delegated_utxos, 0)
        assert (txhash != None)
        self.log.info("Good. Owner was able to spend the cold staked coins - tx: %s" % str(txhash))
        self.mocktime = self.generate_pos(2, self.mocktime)
        sync_blocks(self.nodes)
        self.expected_balance = 0
        self.checkBalances()
Example #18
0
    def run_test(self):
        """LLMQ InstantSend signing tests.

        Activates DIP0008 and the relevant sporks, mines quorums, then checks:
        a normal IS lock; signing of a TX only partially known to the quorum;
        retroactive signing of a fully unknown TX; retroactive signing of a
        partially known TX; and session-timeout variants (all-node and
        single-node, with and without cycled LLMQs).
        """
        # Mine until the dip0008 softfork reports active.
        while self.nodes[0].getblockchaininfo()["bip9_softforks"]["dip0008"]["status"] != "active":
            self.nodes[0].generate(10)
        sync_blocks(self.nodes, timeout=60*5)

        self.nodes[0].spork("SPORK_17_QUORUM_DKG_ENABLED", 0)
        self.nodes[0].spork("SPORK_19_CHAINLOCKS_ENABLED", 0)
        self.nodes[0].spork("SPORK_2_INSTANTSEND_ENABLED", 0)
        self.nodes[0].spork("SPORK_3_INSTANTSEND_BLOCK_FILTERING", 0)
        self.wait_for_sporks_same()

        self.mine_quorum()
        self.mine_quorum()

        # Make sure that all nodes are chainlocked at the same height before starting actual tests
        self.wait_for_chainlocked_block_all_nodes(self.nodes[0].getbestblockhash(), timeout=30)

        self.log.info("trying normal IS lock")
        txid = self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), 1)
        # 3 nodes should be enough to create an IS lock even if nodes 4 and 5 (which have no tx itself)
        # are the only "neighbours" in intra-quorum connections for one of them.
        self.wait_for_instantlock(txid, self.nodes[0])
        self.bump_mocktime(1)
        set_node_times(self.nodes, self.mocktime)
        block = self.nodes[0].generate(1)[0]
        self.wait_for_chainlocked_block_all_nodes(block)

        self.log.info("testing normal signing with partially known TX")
        isolate_node(self.nodes[3])
        txid = self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), 1)
        # Make sure nodes 1 and 2 received the TX before we continue,
        # otherwise it might announce the TX to node 3 when reconnecting
        self.wait_for_tx(txid, self.nodes[1])
        self.wait_for_tx(txid, self.nodes[2])
        reconnect_isolated_node(self.nodes[3], 0)
        self.wait_for_mnauth(self.nodes[3], 2)
        # node 3 fully reconnected but the TX wasn't relayed to it, so there should be no IS lock
        self.wait_for_instantlock(txid, self.nodes[0], False, 5)
        # push the tx directly via rpc
        self.nodes[3].sendrawtransaction(self.nodes[0].getrawtransaction(txid))
        # node 3 should vote on a tx now since it became aware of it via sendrawtransaction
        # and this should be enough to complete an IS lock
        self.wait_for_instantlock(txid, self.nodes[0])

        self.log.info("testing retroactive signing with unknown TX")
        isolate_node(self.nodes[3])
        rawtx = self.nodes[0].createrawtransaction([], {self.nodes[0].getnewaddress(): 1})
        rawtx = self.nodes[0].fundrawtransaction(rawtx)['hex']
        rawtx = self.nodes[0].signrawtransaction(rawtx)['hex']
        txid = self.nodes[3].sendrawtransaction(rawtx)
        # Make node 3 consider the TX as safe
        self.bump_mocktime(10 * 60 + 1)
        set_node_times(self.nodes, self.mocktime)
        block = self.nodes[3].generatetoaddress(1, self.nodes[0].getnewaddress())[0]
        reconnect_isolated_node(self.nodes[3], 0)
        self.wait_for_chainlocked_block_all_nodes(block)
        # NOTE(review): node 0 was already set by set_node_times above; this extra
        # setmocktime looks redundant — confirm it is intentional.
        self.nodes[0].setmocktime(self.mocktime)

        self.log.info("testing retroactive signing with partially known TX")
        isolate_node(self.nodes[3])
        txid = self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), 1)
        # Make sure nodes 1 and 2 received the TX before we continue,
        # otherwise it might announce the TX to node 3 when reconnecting
        self.wait_for_tx(txid, self.nodes[1])
        self.wait_for_tx(txid, self.nodes[2])
        reconnect_isolated_node(self.nodes[3], 0)
        self.wait_for_mnauth(self.nodes[3], 2)
        # node 3 fully reconnected but the TX wasn't relayed to it, so there should be no IS lock
        self.wait_for_instantlock(txid, self.nodes[0], False, 5)
        # Make node0 consider the TX as safe
        self.bump_mocktime(10 * 60 + 1)
        set_node_times(self.nodes, self.mocktime)
        block = self.nodes[0].generate(1)[0]
        self.wait_for_chainlocked_block_all_nodes(block)

        self.log.info("testing retroactive signing with partially known TX and all nodes session timeout")
        self.test_all_nodes_session_timeout(False)
        self.log.info("repeating test, but with cycled LLMQs")
        self.test_all_nodes_session_timeout(True)

        self.log.info("testing retroactive signing with partially known TX and single node session timeout")
        self.test_single_node_session_timeout(False)
        self.log.info("repeating test, but with cycled LLMQs")
        self.test_single_node_session_timeout(True)
Example #19
0
    def run_test(self):
        """Zerocoin public-spend / reorg test.

        Stakes to public-spend activation, creates two pairs of raw zerocoin
        spends for the same coins (A* and B*), broadcasts the A pair on node
        0's chain and the B pair on node 1's chain while the nodes are
        disconnected, then reorgs node 0 onto node 1's longer chain and checks
        that the disconnected coinstake input becomes spendable again.
        """
        def findUtxoInList(txid, vout, utxo_list):
            # Return (found, utxo) for the (txid, vout) outpoint in utxo_list.
            for x in utxo_list:
                if x["txid"] == txid and x["vout"] == vout:
                    return True, x
            return False, None

        # Stake with node 0 and node 1 up to public spend activation (400)
        # 70 blocks: 5 blocks each (x7)
        self.log.info("Staking 70 blocks to reach public spends activation...")
        set_node_times(self.nodes, self.mocktime)
        for i in range(7):
            for peer in range(2):
                for nblock in range(5):
                    self.mocktime = self.generate_pos(peer, self.mocktime)
                sync_blocks(self.nodes)
        block_time_0 = block_time_1 = self.mocktime
        self.log.info("Blocks staked.")

        # Check balances
        self.log.info("Checking balances...")
        initial_balance = [
            self.get_tot_balance(i) for i in range(self.num_nodes)
        ]
        # --nodes 0, 1: 62 pow blocks + 55 pos blocks
        assert_equal(initial_balance[0], DecimalAmt(250.0 * (62 + 55)))
        assert_equal(initial_balance[1], DecimalAmt(250.0 * (62 + 55)))
        # --node 2: 62 pow blocks + 20 pos blocks - zc minted - zcfee
        assert_equal(initial_balance[2],
                     DecimalAmt(250.0 * (62 + 20) - 6666 - 0.08))
        assert_equal(self.nodes[2].getzerocoinbalance()['Total'],
                     DecimalAmt(6666))
        self.log.info("Balances ok.")

        # create the raw zerocoin spend txes
        addy = self.nodes[2].getnewaddress()
        self.log.info("Creating the raw zerocoin public spends...")
        mints = self.nodes[2].listmintedzerocoins(True, True)
        tx_A0 = self.nodes[2].createrawzerocoinspend(mints[0]["serial hash"],
                                                     addy)
        tx_A1 = self.nodes[2].createrawzerocoinspend(mints[1]["serial hash"],
                                                     addy)
        # Spending same coins to different recipients to get different txids
        new_addy = "mnGkNvsGnaL5jyTmtGw88fm2JA6CehcGMS"
        tx_B0 = self.nodes[2].createrawzerocoinspend(mints[0]["serial hash"],
                                                     new_addy)
        tx_B1 = self.nodes[2].createrawzerocoinspend(mints[1]["serial hash"],
                                                     new_addy)

        # Disconnect nodes
        minted_amount = mints[0]["denomination"] + mints[1]["denomination"]
        self.disconnect_all()

        # Stake one block with node-0 and save the stake input
        self.log.info("Staking 1 block with node 0...")
        initial_unspent_0 = self.nodes[0].listunspent()
        self.nodes[0].generate(1)
        block_time_0 += 60
        set_node_times(self.nodes, block_time_0)
        last_block = self.nodes[0].getblock(self.nodes[0].getbestblockhash())
        assert (len(last_block["tx"]) > 1)  # a PoS block has at least two txes
        coinstake_txid = last_block["tx"][1]
        coinstake_tx = self.nodes[0].getrawtransaction(coinstake_txid, True)
        assert (coinstake_tx["vout"][0]["scriptPubKey"]["hex"] == ""
                )  # first output of coinstake is empty
        stakeinput = coinstake_tx["vin"][0]

        # The stake input was unspent 1 block ago, now it's not
        res, utxo = findUtxoInList(stakeinput["txid"], stakeinput["vout"],
                                   initial_unspent_0)
        assert (res and utxo["spendable"])
        res, utxo = findUtxoInList(stakeinput["txid"], stakeinput["vout"],
                                   self.nodes[0].listunspent())
        assert (not res or not utxo["spendable"])
        self.log.info("Coinstake input %s...%s-%d is no longer spendable." %
                      (stakeinput["txid"][:9], stakeinput["txid"][-4:],
                       stakeinput["vout"]))

        # Relay zerocoin spends
        self.nodes[0].sendrawtransaction(tx_A0)
        self.nodes[0].sendrawtransaction(tx_A1)

        # Stake 10 more blocks with node-0 and check balances
        self.log.info("Staking 10 more blocks with node 0...")
        for i in range(10):
            block_time_0 = self.generate_pos(0, block_time_0)
        # 11 new rewards: 1 block above plus these 10.
        expected_balance_0 = initial_balance[0] + DecimalAmt(11 * 250.0)
        assert_equal(self.get_tot_balance(0), expected_balance_0)
        self.log.info("Balance for node 0 checks out.")

        # Connect with node 2, sync and check zerocoin balance
        self.log.info("Reconnecting node 0 and node 2")
        connect_nodes_bi(self.nodes, 0, 2)
        sync_blocks([self.nodes[i] for i in [0, 2]])
        self.log.info("Resetting zerocoin mints on node 2")
        self.nodes[2].resetmintzerocoin(True)
        assert_equal(self.get_tot_balance(2),
                     initial_balance[2] + DecimalAmt(minted_amount))
        assert_equal(self.nodes[2].getzerocoinbalance()['Total'],
                     DecimalAmt(6666 - minted_amount))
        self.log.info("Balance for node 2 checks out.")

        # Double spending txes not possible
        assert_raises_rpc_error(-26, "bad-txns-invalid-zbtcu",
                                self.nodes[0].sendrawtransaction, tx_B0)
        assert_raises_rpc_error(-26, "bad-txns-invalid-zbtcu",
                                self.nodes[0].sendrawtransaction, tx_B1)

        # verify that the stakeinput can't be spent
        stakeinput_tx_json = self.nodes[0].getrawtransaction(
            stakeinput["txid"], True)
        stakeinput_amount = float(stakeinput_tx_json["vout"][int(
            stakeinput["vout"])]["value"])
        rawtx_unsigned = self.nodes[0].createrawtransaction(
            [{
                "txid": stakeinput["txid"],
                "vout": int(stakeinput["vout"])
            }],
            {"mzVmgJQWw9wLCKcqT1nmMWRNL7oe4uLmH1": (stakeinput_amount - 0.01)})
        rawtx = self.nodes[0].signrawtransaction(rawtx_unsigned)
        assert (rawtx["complete"])
        try:
            self.nodes[0].sendrawtransaction(rawtx["hex"])
        except JSONRPCException as e:
            # JSONRPCException was thrown as expected. Check the code and message values are correct.
            if e.error["code"] not in [-26, -25]:
                raise AssertionError("Unexpected JSONRPC error code %i" %
                                     e.error["code"])
            if ([
                    x for x in ["bad-txns-inputs-spent", "Missing inputs"]
                    if x in e.error['message']
            ] == []):
                raise e
        except Exception as e:
            raise AssertionError("Unexpected exception raised: " +
                                 type(e).__name__)
        self.log.info("GOOD: v2 spend was not possible.")

        # Spend tx_B0 and tx_B1 on the other chain
        self.nodes[1].sendrawtransaction(tx_B0)
        self.nodes[1].sendrawtransaction(tx_B1)

        # Stake 12 blocks with node-1
        # 12 > 11, so node 1's chain will be longer and win the reorg below.
        set_node_times(self.nodes, block_time_1)
        self.log.info("Staking 12 blocks with node 1...")
        for i in range(12):
            block_time_1 = self.generate_pos(1, block_time_1)
        expected_balance_1 = initial_balance[1] + DecimalAmt(12 * 250.0)
        assert_equal(self.get_tot_balance(1), expected_balance_1)
        self.log.info("Balance for node 1 checks out.")

        # re-connect and sync nodes and check that node-0 and node-2 get on the other chain
        new_best_hash = self.nodes[1].getbestblockhash()
        self.log.info("Connecting and syncing nodes...")
        set_node_times(self.nodes, block_time_1)
        connect_nodes_clique(self.nodes)
        sync_blocks(self.nodes)
        for i in [0, 2]:
            assert_equal(self.nodes[i].getbestblockhash(), new_best_hash)

        # check balance of node-0
        # Node 0's 11 stakes were reorged away, so it is back to its initial balance.
        assert_equal(self.get_tot_balance(0), initial_balance[0])
        self.log.info("Balance for node 0 checks out.")

        # check that NOW the original stakeinput is present and spendable
        res, utxo = findUtxoInList(stakeinput["txid"], stakeinput["vout"],
                                   self.nodes[0].listunspent())
        assert (res and utxo["spendable"])
        self.log.info("Coinstake input %s...%s-%d is spendable again." %
                      (stakeinput["txid"][:9], stakeinput["txid"][-4:],
                       stakeinput["vout"]))
        self.nodes[0].sendrawtransaction(rawtx["hex"])
        self.nodes[1].generate(1)
        sync_blocks(self.nodes)
        res, utxo = findUtxoInList(stakeinput["txid"], stakeinput["vout"],
                                   self.nodes[0].listunspent())
        assert (not res or not utxo["spendable"])
Example #20
0
    def run_test(self):
        """Exercise zerocoin spend versions (v2/v3/v4) across activations.

        Sequence: reject a premature v3 (public) spend, perform a valid v2
        spend, verify v2 is disabled after activation, perform valid v3 and
        v4 spends, reject double-spends of already-spent serials, reject
        relay of a stale v3 spend while v4 is enforced, and finally
        re-enable v3 and broadcast the saved v3 spend.
        """
        def get_zerocoin_data(coin):
            # Unpack an exported zerocoin dict into
            # (serial, randomness, privkey, serial-hash id, denomination, raw tx).
            return coin["s"], coin["r"], coin["k"], coin["id"], coin[
                "d"], coin["t"]

        def check_balances(denom, zecos_bal, ecos_bal):
            # After spending `denom`, the zerocoin balance must drop by the
            # denomination and the transparent balance must grow by it.
            # Returns the updated (zerocoin, transparent) expected balances.
            zecos_bal -= denom
            assert_equal(self.nodes[2].getzerocoinbalance()['Total'],
                         zecos_bal)
            ecos_bal += denom
            wi = self.nodes[2].getwalletinfo()
            assert_equal(wi['balance'] + wi['immature_balance'], ecos_bal)
            return zecos_bal, ecos_bal

        def stake_4_blocks(block_time):
            # Stake two blocks with each of nodes 0 and 1 (4 total), syncing
            # after each node's pair; returns the advanced mock block time.
            for peer in range(2):
                for i in range(2):
                    block_time = self.generate_pos(peer, block_time)
                sync_blocks(self.nodes)
            return block_time

        self.log_title()
        block_time = self.mocktime
        set_node_times(self.nodes, block_time)

        # Start with cache balances
        wi = self.nodes[2].getwalletinfo()
        balance = wi['balance'] + wi['immature_balance']
        zecos_balance = self.nodes[2].getzerocoinbalance()['Total']
        assert_equal(balance, DecimalAmt(13833.92))
        assert_equal(zecos_balance, 6666)

        # Export zerocoin data
        listmints = self.nodes[2].listmintedzerocoins(True, True)
        serial_ids = [mint["serial hash"] for mint in listmints]
        exported_zerocoins = [
            x for x in self.nodes[2].exportzerocoins(False)
            if x["id"] in serial_ids
        ]
        # Sort ascending by denomination so indices 0..3 pick deterministic coins.
        exported_zerocoins.sort(key=lambda x: x["d"], reverse=False)
        assert_equal(8, len(exported_zerocoins))

        # 1) Try to do a v3 spend before activation
        self.log.info("Trying to make a public spend...")
        serial_0, randomness_0, privkey_0, id_0, denom_0, tx_0 = get_zerocoin_data(
            exported_zerocoins[0])
        assert_raises_rpc_error(-4, "The transaction was rejected!",
                                self.nodes[2].spendrawzerocoin, serial_0,
                                randomness_0, denom_0, privkey_0, "", tx_0)
        self.log.info("GOOD: v3 spend is not possible yet.")

        # 2) Spend one minted coin - spend v2 (serial_0)
        self.log.info("Spending the minted coin with serial %s..." %
                      serial_0[:16])
        txid = self.nodes[2].spendzerocoin(denom_0, False, False, "",
                                           False)['txid']
        # stake 4 blocks - check it gets included on chain and check balances
        block_time = stake_4_blocks(block_time)
        self.check_tx_in_chain(0, txid)
        zecos_balance, balance = check_balances(denom_0, zecos_balance,
                                                balance)
        self.log.info("--> VALID COIN SPEND (v2) PASSED")

        # 3) stake more blocks - save a v3 spend for later (serial_1)
        serial_1, randomness_1, privkey_1, id_1, denom_1, tx_1 = get_zerocoin_data(
            exported_zerocoins[1])
        self.log.info("Staking 70 blocks to get to public spend activation")
        # 5 rounds x 2 peers x 7 blocks = 70 blocks total.
        for j in range(5):
            for peer in range(2):
                for i in range(7):
                    block_time = self.generate_pos(peer, block_time)
                sync_blocks(self.nodes)
        # Keep an unbroadcast raw v3 spend to replay in steps 10 and 12.
        old_spend_v3 = self.nodes[2].createrawzerocoinspend(id_1)

        # 4) Check spend v2 disabled
        serial_2, randomness_2, privkey_2, id_2, denom_2, tx_2 = get_zerocoin_data(
            exported_zerocoins[2])
        self.log.info("Trying to spend using the old coin spend method..")
        try:
            self.nodes[2].spendzerocoin(denom_2, False, False, "", False)
        except JSONRPCException as e:
            # JSONRPCException was thrown as expected. Check the code and message values are correct.
            if e.error["code"] != -4:
                raise AssertionError("Unexpected JSONRPC error code %i" %
                                     e.error["code"])
            # Re-raise unless the message matches one of the two accepted
            # failure reasons for a disabled v2 spend.
            if ([
                    x for x in [
                        "Couldn't generate the accumulator witness",
                        "The transaction was rejected!"
                    ] if x in e.error['message']
            ] == []):
                raise e
        except Exception as e:
            raise AssertionError("Unexpected exception raised: " +
                                 type(e).__name__)
        self.log.info("GOOD: v2 spend was not possible.")

        # 5) Spend one minted coin - spend v3 (serial_2)
        self.log.info("Spending the minted coin with serial %s..." %
                      serial_2[:16])
        txid = self.nodes[2].spendzerocoinmints([id_2])['txid']
        # stake 4 blocks - check it gets included on chain and check balances
        block_time = stake_4_blocks(block_time)
        self.check_tx_in_chain(0, txid)
        zecos_balance, balance = check_balances(denom_2, zecos_balance,
                                                balance)
        self.log.info("--> VALID PUBLIC COIN SPEND (v3) PASSED")

        # 6) Check double spends - spend v3
        self.log.info("Trying to spend the serial twice now...")
        assert_raises_rpc_error(-4, "Trying to spend an already spent serial",
                                self.nodes[2].spendrawzerocoin, serial_2,
                                randomness_2, denom_2, privkey_2, "", tx_2)

        # 7) Activate v4 spends with SPORK_18
        self.setV4SpendEnforcement()

        # 8) Spend one minted coin - spend v4 (serial_3)
        serial_3, randomness_3, privkey_3, id_3, denom_3, tx_3 = get_zerocoin_data(
            exported_zerocoins[3])
        self.log.info("Spending the minted coin with serial %s..." %
                      serial_3[:16])
        txid = self.nodes[2].spendzerocoinmints([id_3])['txid']
        # stake 4 blocks - check it gets included on chain and check balances
        block_time = stake_4_blocks(block_time)
        self.check_tx_in_chain(0, txid)
        zecos_balance, balance = check_balances(denom_3, zecos_balance,
                                                balance)
        self.log.info("--> VALID PUBLIC COIN SPEND (v4) PASSED")

        # 9) Check double spends - spend v4
        self.log.info("Trying to spend the serial twice now...")
        assert_raises_rpc_error(-4, "Trying to spend an already spent serial",
                                self.nodes[2].spendrawzerocoin, serial_3,
                                randomness_3, denom_3, privkey_3, "", tx_3)

        # 10) Try to relay old v3 spend now (serial_1)
        self.log.info("Trying to send old v3 spend now...")
        assert_raises_rpc_error(-26, "bad-txns-invalid-zecos",
                                self.nodes[2].sendrawtransaction, old_spend_v3)
        self.log.info("GOOD: Old transaction not sent.")

        # 11) Try to double spend with v4 a mint already spent with v3 (serial_2)
        self.log.info("Trying to double spend v4 against v3...")
        assert_raises_rpc_error(-4, "Trying to spend an already spent serial",
                                self.nodes[2].spendrawzerocoin, serial_2,
                                randomness_2, denom_2, privkey_2, "", tx_2)
        self.log.info("GOOD: Double-spending transaction did not verify.")

        # 12) Reactivate v3 spends and try to spend the old saved one (serial_1) again
        self.setV4SpendEnforcement(False)
        self.log.info("Trying to send old v3 spend now (serial: %s...)" %
                      serial_1[:16])
        txid = self.nodes[2].sendrawtransaction(old_spend_v3)
        # stake 4 blocks - check it gets included on chain and check balances
        _ = stake_4_blocks(block_time)
        self.check_tx_in_chain(0, txid)
        # need to reset spent mints since this was a raw broadcast
        self.nodes[2].resetmintzerocoin()
        _, _ = check_balances(denom_1, zecos_balance, balance)
        self.log.info("--> VALID PUBLIC COIN SPEND (v3) PASSED")
Example #21
0
    def run_test(self):
        """Verify NODE_NETWORK_LIMITED signalling and block-serving limits.

        Checks that the pruned node advertises NODE_WITNESS |
        NODE_NETWORK_LIMITED, serves only blocks inside the 288(+2 slack)
        window, relays its local address with the limited service bits,
        refuses to feed a peer in IBD, and still syncs normally with a
        caught-up peer.
        """
        node = self.nodes[0].add_p2p_connection(P2PIgnoreInv())

        expected_services = NODE_WITNESS | NODE_NETWORK_LIMITED

        self.log.info("Check that node has signalled expected services.")
        assert_equal(node.nServices, expected_services)

        self.log.info("Check that the localservices is as expected.")
        # 'localservices' is a hex string; compare as an int bitmask.
        assert_equal(int(self.nodes[0].getnetworkinfo()['localservices'], 16), expected_services)

        self.log.info("Mine enough blocks to reach the NODE_NETWORK_LIMITED range.")
        connect_nodes_bi(self.nodes, 0, 1)
        blocks = self.nodes[1].generate(292)
        set_node_times(self.nodes, TestNode.Mocktime)
        self.sync_blocks([self.nodes[0], self.nodes[1]])

        self.log.info("Make sure we can max retrieve block at tip-288.")
        node.send_getdata_for_block(blocks[1])  # last block in valid range
        node.wait_for_block(int(blocks[1], 16), timeout=3)

        # blocks[0] is the first mined block (height 1), outside the window.
        self.log.info("Requesting block at height 1 (outside the 288+2 limit) must fail (ignored).")
        node.send_getdata_for_block(blocks[0])  # first block outside of the 288+2 limit
        node.wait_for_disconnect(5)

        self.log.info("Check local address relay, do a fresh connection.")
        self.nodes[0].disconnect_p2ps()
        node1 = self.nodes[0].add_p2p_connection(P2PIgnoreInv())
        node1.send_message(msg_verack())

        node1.wait_for_addr()
        # must relay address with NODE_NETWORK_LIMITED
        assert_equal(node1.firstAddrnServices, expected_services)

        self.nodes[0].disconnect_p2ps()
        node1.wait_for_disconnect()

        # connect unsynced node 2 with pruned NODE_NETWORK_LIMITED peer
        # because node 2 is in IBD and node 0 is a NODE_NETWORK_LIMITED peer, sync must not be possible
        connect_nodes_bi(self.nodes, 0, 2)
        try:
            self.sync_blocks([self.nodes[0], self.nodes[2]], timeout=5)
        except Exception:
            # The sync timeout is expected here; a bare `except:` would also
            # have swallowed SystemExit/KeyboardInterrupt.
            pass
        # node2 must remain at height 0
        assert_equal(self.nodes[2].getblockheader(self.nodes[2].getbestblockhash())['height'], 0)

        # now connect also to node 1 (non pruned)
        connect_nodes_bi(self.nodes, 1, 2)

        # sync must be possible
        self.sync_blocks()

        # disconnect all peers
        self.disconnect_all()

        # mine 10 blocks on node 0 (pruned node)
        self.nodes[0].generate(10)

        # connect node1 (non pruned) with node0 (pruned) and check if they can sync
        connect_nodes_bi(self.nodes, 0, 1)

        # sync must be possible, node 1 is no longer in IBD and should therefore connect to node 0 (NODE_NETWORK_LIMITED)
        self.sync_blocks([self.nodes[0], self.nodes[1]])
Example #22
0
    def run_test(self):
        """Test -assumevalid behavior around a block with an invalid signature.

        Builds a chain containing block 102, which spends a coinbase with a
        null scriptSig, then checks:
        - node0 (no assumevalid): rejects block 102 on full validation;
        - node1 (assumevalid=block102, receives all headers): accepts it,
          since signature validation is skipped;
        - node2 (assumevalid=block102, receives too few headers): rejects it,
          since the assumevalid shortcut is not taken.
        """

        # Connect to node0
        p2p0 = self.nodes[0].add_p2p_connection(BaseNode())

        network_thread_start()
        self.nodes[0].p2p.wait_for_verack()

        # Build the blockchain
        self.tip = int(self.nodes[0].getbestblockhash(), 16)
        self.block_time = self.nodes[0].getblock(
            self.nodes[0].getbestblockhash())['time'] + 1

        self.blocks = []

        # Get a pubkey for the coinbase TXO
        coinbase_key = CECKey()
        coinbase_key.set_secretbytes(b"horsebattery")
        coinbase_pubkey = coinbase_key.get_pubkey()

        # Create the first block with a coinbase output to our key
        height = 1
        block = create_block(self.tip, create_coinbase(height,
                                                       coinbase_pubkey),
                             self.block_time)
        self.blocks.append(block)
        self.block_time += 1
        block.solve()
        # Save the coinbase for later
        self.block1 = block
        self.tip = block.sha256
        height += 1

        # Bury the block 100 deep so the coinbase output is spendable
        for i in range(100):
            block = create_block(self.tip, create_coinbase(height),
                                 self.block_time)
            block.solve()
            self.blocks.append(block)
            self.tip = block.sha256
            self.block_time += 1
            height += 1

        # Create a transaction spending the coinbase output with an invalid (null) signature
        tx = CTransaction()
        tx.vin.append(
            CTxIn(COutPoint(self.block1.vtx[0].sha256, 0), scriptSig=b""))
        tx.vout.append(CTxOut(49 * 100000000, CScript([OP_TRUE])))
        tx.calc_sha256()

        # Block 102 contains the invalid spend; this is the block that
        # assumevalid nodes are told to trust.
        block102 = create_block(self.tip, create_coinbase(height),
                                self.block_time)
        self.block_time += 1
        block102.vtx.extend([tx])
        block102.hashMerkleRoot = block102.calc_merkle_root()
        block102.rehash()
        block102.solve()
        self.blocks.append(block102)
        self.tip = block102.sha256
        self.block_time += 1
        height += 1

        # Bury the assumed valid block 8400 deep (DFTz needs 4x as many blocks to allow -assumevalid to work)
        for i in range(8400):
            block = create_block(self.tip, create_coinbase(height),
                                 self.block_time)
            block.nVersion = 4
            block.solve()
            self.blocks.append(block)
            self.tip = block.sha256
            self.block_time += 1
            height += 1

        # We're adding new connections so terminate the network thread
        self.nodes[0].disconnect_p2ps()
        network_thread_join()

        # Start node1 and node2 with assumevalid so they accept a block with a bad signature.
        self.start_node(1,
                        extra_args=self.extra_args +
                        ["-assumevalid=" + hex(block102.sha256)])
        self.start_node(2,
                        extra_args=self.extra_args +
                        ["-assumevalid=" + hex(block102.sha256)])

        p2p0 = self.nodes[0].add_p2p_connection(BaseNode())
        p2p1 = self.nodes[1].add_p2p_connection(BaseNode())
        p2p2 = self.nodes[2].add_p2p_connection(BaseNode())

        network_thread_start()

        p2p0.wait_for_verack()
        p2p1.wait_for_verack()
        p2p2.wait_for_verack()

        # Make sure nodes actually accept the many headers
        self.mocktime = self.block_time
        set_node_times(self.nodes, self.mocktime)

        # send header lists to all three nodes.
        # node0 does not need to receive all headers
        # node1 must receive all headers as otherwise assumevalid is ignored in ConnectBlock
        # node2 should NOT receive all headers to force skipping of the assumevalid check in ConnectBlock
        p2p0.send_header_for_blocks(self.blocks[0:2000])
        p2p1.send_header_for_blocks(self.blocks[0:2000])
        p2p1.send_header_for_blocks(self.blocks[2000:4000])
        p2p1.send_header_for_blocks(self.blocks[4000:6000])
        p2p1.send_header_for_blocks(self.blocks[6000:8000])
        p2p1.send_header_for_blocks(self.blocks[8000:])
        p2p2.send_header_for_blocks(self.blocks[0:200])

        # Send blocks to node0. Block 102 will be rejected.
        self.send_blocks_until_disconnected(p2p0)
        self.assert_blockchain_height(self.nodes[0], 101)

        # Send 200 blocks to node1. All blocks, including block 102, will be accepted.
        for i in range(200):
            p2p1.send_message(msg_block(self.blocks[i]))
        # Syncing so many blocks can take a while on slow systems. Give it plenty of time to sync.
        p2p1.sync_with_ping(300)
        assert_equal(
            self.nodes[1].getblock(self.nodes[1].getbestblockhash())['height'],
            200)

        # Send blocks to node2. Block 102 will be rejected.
        self.send_blocks_until_disconnected(p2p2)
        self.assert_blockchain_height(self.nodes[2], 101)
    def run_test(self):
        """Exercise block finalization RPCs and the auto-finalization delay.

        Covers: manual finalizeblock; rejection of chains forking below the
        finalized block; finalization moving backward on invalidateblock;
        reorg triggered by finalizing a shorter chain; auto-finalization as
        the tip advances; the finalization delay after node boot
        (eclipse-attack protection); and a delay edge case where a private
        attacker chain ends up finalized.
        """
        node = self.nodes[0]

        # Base mock time; advanced later to expire finalization delays.
        self.mocktime = int(time.time())

        self.log.info("Test block finalization...")
        node.generatetoaddress(10, node.get_deterministic_priv_key()[0])
        tip = node.getbestblockhash()
        node.finalizeblock(tip)
        assert_equal(node.getbestblockhash(), tip)
        assert_equal(node.getfinalizedblockhash(), tip)

        def wait_for_tip(node, tip):
            # Block until `node`'s best block hash equals `tip`.
            def check_tip():
                return node.getbestblockhash() == tip

            wait_until(check_tip)

        alt_node = self.nodes[1]
        wait_for_tip(alt_node, tip)

        alt_node.invalidateblock(tip)
        # We will use this later
        fork_block = alt_node.getbestblockhash()

        # Node 0 should not accept the whole alt_node's chain due to tip being finalized,
        # even though it is longer.
        # Headers would not be accepted if previousblock is invalid:
        #    - First block from alt node has the same height as the node tip, but is on a minority chain. Its
        #    status is "valid-headers"
        #    - Second block from alt node has height > node tip height, will be marked as invalid because
        #    node tip is finalized
        #    - Later blocks from alt node will be rejected because their previous block are invalid
        #
        # Expected state:
        #
        # On alt_node:
        #                           >(210)->(211)-> // ->(218 tip)
        #                          /
        # (200)->(201)-> // ->(209)->(210 invalid)
        #
        # On node:
        #                           >(210 valid-headers)->(211 invalid)->(212 to 218 dropped)
        #                          /
        # (200)->(201)-> // ->(209)->(210 finalized, tip)

        def wait_for_block(node, block, status="invalid"):
            # Block until `block` appears in `node`'s chain tips with the
            # given status (it must never be the active tip).
            def check_block():
                for tip in node.getchaintips():
                    if tip["hash"] == block:
                        assert tip["status"] != "active"
                        return tip["status"] == status
                return False

            wait_until(check_block)

        # First block header is accepted as valid-header
        alt_node.generatetoaddress(1, alt_node.get_deterministic_priv_key()[0])
        wait_for_block(node, alt_node.getbestblockhash(), "valid-headers")

        # Second block header is accepted but set invalid
        alt_node.generatetoaddress(1, alt_node.get_deterministic_priv_key()[0])
        invalid_block = alt_node.getbestblockhash()
        wait_for_block(node, invalid_block)

        # Later block headers are rejected
        for i in range(2, 9):
            alt_node.generatetoaddress(
                1,
                alt_node.get_deterministic_priv_key()[0])
            assert_raises_rpc_error(-5, RPC_BLOCK_NOT_FOUND_ERROR,
                                    node.getblockheader,
                                    alt_node.getbestblockhash())

        assert_equal(node.getbestblockhash(), tip)
        assert_equal(node.getfinalizedblockhash(), tip)

        self.log.info("Test that an invalid block cannot be finalized...")
        assert_raises_rpc_error(-20, RPC_FINALIZE_INVALID_BLOCK_ERROR,
                                node.finalizeblock, invalid_block)

        self.log.info(
            "Test that invalidating a finalized block moves the finalization backward..."
        )

        # Node's finalized block will be invalidated, which causes the finalized block to
        # move to the previous block.
        #
        # Expected state:
        #
        # On alt_node:
        #                                                 >(210)->(211)-> // ->(218 tip)
        #                                                /
        # (200)->(201)-> // ->(208 auto-finalized)->(209)->(210 invalid)
        #
        # On node:
        #                                     >(210 valid-headers)->(211 invalid)->(212 to 218 dropped)
        #                                    /
        # (200)->(201)-> // ->(209 finalized)->(210 tip)
        node.invalidateblock(tip)
        node.reconsiderblock(tip)

        assert_equal(node.getbestblockhash(), tip)
        assert_equal(node.getfinalizedblockhash(), fork_block)

        assert_equal(
            alt_node.getfinalizedblockhash(),
            node.getblockheader(
                node.getfinalizedblockhash())['previousblockhash'])

        # The node will now accept that chain as the finalized block moved back.
        # Generate a new block on alt_node to trigger getheader from node
        # Previous 212-218 height blocks have been dropped because their previous was invalid
        #
        # Expected state:
        #
        # On alt_node:
        #                                          >(210)->(211)-> // ->(218)->(219 tip)
        #                                         /
        # (200)->(201)-> // ->(209 auto-finalized)->(210 invalid)
        #
        # On node:
        #                                     >(210)->(211)->(212)-> // ->(218)->(219 tip)
        #                                    /
        # (200)->(201)-> // ->(209 finalized)->(210)
        node.reconsiderblock(invalid_block)

        alt_node_tip = alt_node.generatetoaddress(
            1,
            alt_node.get_deterministic_priv_key()[0])[-1]
        wait_for_tip(node, alt_node_tip)

        assert_equal(node.getbestblockhash(), alt_node.getbestblockhash())
        assert_equal(node.getfinalizedblockhash(), fork_block)
        assert_equal(alt_node.getfinalizedblockhash(), fork_block)

        self.log.info("Trigger reorg via block finalization...")
        # Finalize node tip to reorg
        #
        # Expected state:
        #
        # On alt_node:
        #                                          >(210)->(211)-> // ->(218)->(219 tip)
        #                                         /
        # (200)->(201)-> // ->(209 auto-finalized)->(210 invalid)
        #
        # On node:
        #                           >(210 invalid)-> // ->(219 invalid)
        #                          /
        # (200)->(201)-> // ->(209)->(210 finalized, tip)
        node.finalizeblock(tip)
        assert_equal(node.getfinalizedblockhash(), tip)

        self.log.info("Try to finalize a block on a competiting fork...")
        assert_raises_rpc_error(-20, RPC_FINALIZE_INVALID_BLOCK_ERROR,
                                node.finalizeblock,
                                alt_node.getbestblockhash())
        assert_equal(node.getfinalizedblockhash(), tip)

        self.log.info(
            "Check auto-finalization occurs as the tip move forward...")
        # Reconsider alt_node tip then generate some more blocks on alt_node.
        # Auto-finalization will occur on both chains.
        #
        # Expected state:
        #
        # On alt_node:
        #                           >(210)->(211)-> // ->(219 auto-finalized)-> // ->(229 tip)
        #                          /
        # (200)->(201)-> // ->(209)->(210 invalid)
        #
        # On node:
        #                           >(210)->(211)-> // ->(219 auto-finalized)-> // ->(229 tip)
        #                          /
        # (200)->(201)-> // ->(209)->(210 invalid)
        node.reconsiderblock(alt_node.getbestblockhash())
        block_to_autofinalize = alt_node.generatetoaddress(
            1,
            alt_node.get_deterministic_priv_key()[0])[-1]
        alt_node_new_tip = alt_node.generatetoaddress(
            9,
            alt_node.get_deterministic_priv_key()[0])[-1]
        wait_for_tip(node, alt_node_new_tip)

        assert_equal(node.getbestblockhash(), alt_node.getbestblockhash())
        assert_equal(node.getfinalizedblockhash(), alt_node_tip)
        assert_equal(alt_node.getfinalizedblockhash(), alt_node_tip)

        self.log.info(
            "Try to finalize a block on an already finalized chain...")
        # Finalizing a block of an already finalized chain should have no
        # effect
        block_218 = node.getblockheader(alt_node_tip)['previousblockhash']
        node.finalizeblock(block_218)
        assert_equal(node.getfinalizedblockhash(), alt_node_tip)

        self.log.info(
            "Make sure reconsidering block move the finalization point...")
        # Reconsidering the tip will move back the finalized block on node
        #
        # Expected state:
        #
        # On alt_node:
        #                           >(210)->(211)-> // ->(219 auto-finalized)-> // ->(229 tip)
        #                          /
        # (200)->(201)-> // ->(209)->(210 invalid)
        #
        # On node:
        #                                     >(210)->(211)-> // ->(219)-> // ->(229 tip)
        #                                    /
        # (200)->(201)-> // ->(209 finalized)->(210)
        node.reconsiderblock(tip)

        assert_equal(node.getbestblockhash(), alt_node_new_tip)
        assert_equal(node.getfinalizedblockhash(), fork_block)

        # TEST FINALIZATION DELAY

        self.log.info("Check that finalization delay prevents eclipse attacks")
        # Because there has been no delay since the beginning of this test,
        # there should have been no auto-finalization on delay_node.
        #
        # Expected state:
        #
        # On alt_node:
        #                           >(210)->(211)-> // ->(219 auto-finalized)-> // ->(229 tip)
        #                          /
        # (200)->(201)-> // ->(209)->(210 invalid)
        #
        # On delay_node:
        #                           >(210)->(211)-> // ->(219)-> // ->(229 tip)
        #                          /
        # (200)->(201)-> // ->(209)->(210)
        delay_node = self.nodes[2]
        alt_delay_node = self.nodes[3]

        wait_for_tip(delay_node, alt_node_new_tip)
        # No finalized block yet: getfinalizedblockhash returns an empty string.
        assert_equal(delay_node.getfinalizedblockhash(), str())

        self.log.info(
            "Check that finalization delay does not prevent auto-finalization")
        # Expire the delay, then generate 1 new block with alt_node to
        # update the tip on all chains.
        # Because the finalization delay is expired, auto-finalization
        # should occur.
        #
        # Expected state:
        #
        # On alt_node:
        #                           >(220 auto-finalized)-> // ->(230 tip)
        #                          /
        # (200)->(201)-> // ->(209)->(210 invalid)
        #
        # On delay_node:
        #                           >(220 auto-finalized)-> // ->(230 tip)
        #                          /
        # (200)->(201)-> // ->(209)->(210)
        self.mocktime += self.finalization_delay
        set_node_times([delay_node, alt_delay_node], self.mocktime)
        new_tip = alt_node.generatetoaddress(
            1,
            alt_node.get_deterministic_priv_key()[0])[-1]

        assert_equal(alt_node.getbestblockhash(), new_tip)
        assert_equal(alt_node.getfinalizedblockhash(), block_to_autofinalize)

        wait_for_tip(node, new_tip)
        assert_equal(node.getfinalizedblockhash(), block_to_autofinalize)

        wait_for_tip(delay_node, new_tip)
        self.log.info(
            "Check that finalization delay is effective on node boot")
        # Restart the new node, so the blocks have no header received time.
        self.restart_node(2)
        self.restart_node(3)
        # Connect the two delayed nodes
        connect_nodes_bi(self.nodes, 2, 3)

        # There should be no finalized block (getfinalizedblockhash returns an
        # empty string)
        assert_equal(delay_node.getfinalizedblockhash(), str())
        assert_equal(alt_delay_node.getfinalizedblockhash(), str())

        # Generate 20 blocks with no delay. This should not trigger auto-finalization.
        #
        # Expected state:
        #
        # On delay_node:
        #                           >(220)-> // ->(250 tip)
        #                          /
        # (200)->(201)-> // ->(209)->(210)
        blocks = delay_node.generatetoaddress(
            20,
            alt_node.get_deterministic_priv_key()[0])
        reboot_autofinalized_block = blocks[10]
        new_tip = blocks[-1]
        wait_for_tip(delay_node, new_tip)

        assert_equal(delay_node.getfinalizedblockhash(), str())

        # Now let the finalization delay to expire, then generate one more block.
        # This should resume auto-finalization.
        #
        # Expected state:
        #
        # On delay_node:
        #                           >(220)-> // ->(241 auto-finalized)-> // ->(251 tip)
        #                          /
        # (200)->(201)-> // ->(209)->(210)
        self.mocktime += self.finalization_delay
        set_node_times([delay_node, alt_delay_node], self.mocktime)
        new_tip = delay_node.generatetoaddress(
            1,
            delay_node.get_deterministic_priv_key()[0])[-1]
        wait_for_tip(delay_node, new_tip)

        assert_equal(delay_node.getfinalizedblockhash(),
                     reboot_autofinalized_block)

        self.log.info("Check block delay edge cases")
        wait_for_tip(alt_delay_node, new_tip)
        '''
        Test plan:
            n2 mine 1 block
                n2: 0 1
                n3: 0 1
            within 2 hours, n3 invalidate 1, mine 1 public block, and followed by 11 more private blocks (more work).
            the same time n2 mine 10 blocks
                n2: 0 1   2  .. 11
                n3: 0 1' (2' .. 11' 12')
            now the 2 hour delay has passed. imagine now the honest chain hasn't mine a new block to activate the the checkpoint at 1.
            n3 can broadcast the blockchain, create an reorg, and trigger n2 to mark 1' as the checkpoint, though 1 was mined even
            before 1', though 1 is already eligible to be checkpointed.
                      1 .. 11 (less work fork)
                     /
                n2: 0 [1'] .. 12'
                n3: 0 [1'] .. 12'
        '''

        # finalize the current tip
        delay_node.finalizeblock(new_tip)
        alt_delay_node.finalizeblock(new_tip)
        finalized_block0 = new_tip

        # n2 mine 1 block
        #     n2: 0 1
        #     n3: 0 1
        honest1 = delay_node.generatetoaddress(
            1,
            node.get_deterministic_priv_key()[0])[-1]
        wait_for_tip(alt_delay_node, honest1)

        # 1 min passed
        self.mocktime += 1 * 60
        set_node_times([delay_node, alt_delay_node], self.mocktime)

        # within 2 hours, n3 invalidate 1, mine 1 public block, and followed by 11 more private blocks (more work).
        # the same time n2 mine 10 blocks
        #     n2: 0 1   2  .. 10
        #     n3: 0 1' (2' .. 10' 11')
        alt_delay_node.invalidateblock(honest1)
        attack_block1 = alt_delay_node.generatetoaddress(
            1,
            node.get_deterministic_priv_key()[0])[-1]
        wait_for_block(delay_node, attack_block1, status="valid-headers")

        # n3 mines the private chain
        alt_delay_node.setnetworkactive(False)
        wait_until(lambda: alt_delay_node.getnetworkinfo()['connections'] == 0,
                   timeout=3)
        attack_blocks = alt_delay_node.generatetoaddress(
            11,
            node.get_deterministic_priv_key()[0])
        # n2 mines the public chain
        honest_blocks = delay_node.generatetoaddress(
            10,
            node.get_deterministic_priv_key()[0])

        wait_for_tip(delay_node, honest_blocks[-1])
        wait_for_tip(alt_delay_node, attack_blocks[-1])

        # now both nodes still don't have any new block finalized
        assert_equal(finalized_block0, delay_node.getfinalizedblockhash())
        assert_equal(finalized_block0, alt_delay_node.getfinalizedblockhash())

        # now the 2 hour delay has passed. imagine now the honest chain hasn't mined a new block to activate the checkpoint at 1.
        # n3 broadcast the blockchain leading to a reorg, and trigger n2 to mark 1' as the checkpoint, though 1 was mined even
        # before 1', though 1 is already eligible to be checkpointed.
        self.mocktime += self.finalization_delay
        set_node_times([delay_node, alt_delay_node], self.mocktime)
        alt_delay_node.setnetworkactive(True)
        connect_nodes_bi(self.nodes, 2, 3)

        # we should see the attacking blocks take over the honest chain
        wait_for_tip(delay_node, attack_blocks[-1])
        # the attacking chain got finalized!
        self.log.info(f'attacking block1: {attack_block1}')
        print_blocks_to_finalized(self.log, delay_node)
        print_blocks_to_finalized(self.log, alt_delay_node)
        assert_equal(attack_block1, delay_node.getfinalizedblockhash())
Example #24
0
    def run_test(self):
        """Verify coinstake-input spendability across a PoS chain reorg.

        While the network is split, node 0 stakes 11 blocks and we check
        that the input consumed by its first coinstake cannot be re-spent
        on node-0's own chain.  Node 1 then stakes a longer (12-block)
        chain; after reconnecting, nodes 0 and 2 must reorg onto it, at
        which point the original stake input becomes spendable again.
        Finally the total money supply is re-validated.
        """

        def findUtxoInList(txid, vout, utxo_list):
            # Linear scan for the (txid, vout) outpoint; returns
            # (found, utxo_dict) so callers can also inspect fields
            # such as "spendable" on the matched entry.
            for x in utxo_list:
                if x["txid"] == txid and x["vout"] == vout:
                    return True, x
            return False, None

        # FRAG supply: block rewards
        # (200 PoW blocks premined, presumably 250 FRAG each -- TODO confirm
        # reward schedule against chain params)
        expected_money_supply = 250.0 * 200
        self.check_money_supply(expected_money_supply)
        block_time_0 = block_time_1 = self.mocktime

        # Check balances
        self.log.info("Checking balances...")
        initial_balance = [self.get_tot_balance(i) for i in range(self.num_nodes)]
        # -- 50 pow blocks each
        assert_equal(initial_balance, [DecimalAmt(250.0 * 50)] * self.num_nodes)
        self.log.info("Balances ok.")

        # Disconnect nodes so each one stakes on its own private chain.
        self.disconnect_all()

        # Stake one block with node-0 and save the stake input
        self.log.info("Staking 1 block with node 0...")
        initial_unspent_0 = self.nodes[0].listunspent()
        self.nodes[0].generate(1)
        block_time_0 += 60
        set_node_times(self.nodes, block_time_0)
        last_block = self.nodes[0].getblock(self.nodes[0].getbestblockhash())
        assert(len(last_block["tx"]) > 1)   # a PoS block has at least two txes
        coinstake_txid = last_block["tx"][1]
        coinstake_tx = self.nodes[0].getrawtransaction(coinstake_txid, True)
        assert (coinstake_tx["vout"][0]["scriptPubKey"]["hex"] == "")  # first output of coinstake is empty
        stakeinput = coinstake_tx["vin"][0]

        # The stake input was unspent 1 block ago, now it's not
        res, utxo = findUtxoInList(stakeinput["txid"], stakeinput["vout"], initial_unspent_0)
        assert (res)
        res, utxo = findUtxoInList(stakeinput["txid"], stakeinput["vout"], self.nodes[0].listunspent())
        assert (not res)
        self.log.info("Coinstake input %s...%s-%d is no longer spendable." % (
            stakeinput["txid"][:9], stakeinput["txid"][-4:], stakeinput["vout"]))

        # Stake 10 more blocks with node-0 and check balances
        self.log.info("Staking 10 more blocks with node 0...")
        for i in range(10):
            block_time_0 = self.generate_pos(0, block_time_0)
        # 11 staked blocks total so far (1 + 10) at 250 per block.
        expected_balance_0 = initial_balance[0] + DecimalAmt(11 * 250.0)
        assert_equal(self.get_tot_balance(0), expected_balance_0)
        self.log.info("Balance for node 0 checks out.")

        # Connect with node 2 and sync
        self.log.info("Reconnecting node 0 and node 2")
        connect_nodes(self.nodes[0], 2)
        sync_blocks([self.nodes[i] for i in [0, 2]])

        # verify that the stakeinput can't be spent
        stakeinput_tx_json = self.nodes[0].getrawtransaction(stakeinput["txid"], True)
        stakeinput_amount = float(stakeinput_tx_json["vout"][int(stakeinput["vout"])]["value"])
        rawtx_unsigned = self.nodes[0].createrawtransaction(
            [{"txid": stakeinput["txid"], "vout": int(stakeinput["vout"])}],
            {"xxncEuJK27ygNh7imNfaX8JV6ZQUnoBqzN": (stakeinput_amount-0.01)})
        rawtx = self.nodes[0].signrawtransaction(rawtx_unsigned)
        assert(rawtx["complete"])
        # The outpoint was already consumed by the coinstake on this chain,
        # so broadcasting must fail and the tx must not appear anywhere.
        assert_raises_rpc_error(-25, "Missing inputs", self.nodes[0].sendrawtransaction, rawtx["hex"])
        txid = self.nodes[0].decoderawtransaction(rawtx["hex"])["txid"]
        assert_raises_rpc_error(-5, "No such mempool or blockchain transaction",
                                self.nodes[0].getrawtransaction, txid)
        self.log.info("GOOD: spending the stake input was not possible.")

        # Stake 12 blocks with node-1
        set_node_times(self.nodes, block_time_1)
        self.log.info("Staking 12 blocks with node 1...")
        for i in range(12):
            block_time_1 = self.generate_pos(1, block_time_1)
        expected_balance_1 = initial_balance[1] + DecimalAmt(12 * 250.0)
        assert_equal(self.get_tot_balance(1), expected_balance_1)
        self.log.info("Balance for node 1 checks out.")

        # re-connect and sync nodes and check that node-0 and node-2 get on the other chain
        # (node-1's 12-block chain outweighs node-0's 11-block chain).
        new_best_hash = self.nodes[1].getbestblockhash()
        self.log.info("Connecting and syncing nodes...")
        set_node_times(self.nodes, block_time_1)
        connect_nodes_clique(self.nodes)
        sync_blocks(self.nodes)
        for i in [0, 2]:
            assert_equal(self.nodes[i].getbestblockhash(), new_best_hash)

        # check balance of node-0 (its staked rewards were reorged away)
        assert_equal(self.get_tot_balance(0), initial_balance[0])
        self.log.info("Balance for node 0 checks out.")

        # check that NOW the original stakeinput is present and spendable
        res, utxo = findUtxoInList(stakeinput["txid"], stakeinput["vout"], self.nodes[0].listunspent())
        assert (res and utxo["spendable"])
        self.log.info("Coinstake input %s...%s-%d is spendable again." % (
            stakeinput["txid"][:9], stakeinput["txid"][-4:], stakeinput["vout"]))
        self.nodes[0].sendrawtransaction(rawtx["hex"])
        self.nodes[1].generate(1)
        sync_blocks(self.nodes)
        res, utxo = findUtxoInList(stakeinput["txid"], stakeinput["vout"], self.nodes[0].listunspent())
        assert (not res or not utxo["spendable"])

        # Verify that FRAG supply was properly updated after the reorgs
        self.log.info("Check FRAG supply...")
        expected_money_supply += 250.0 * (self.nodes[1].getblockcount() - 200)
        self.check_money_supply(expected_money_supply)
        self.log.info("Supply checks out.")
Example #25
0
    def run_test(self):
        """Exercise the setban/listbanned/clearbanned and disconnectnode RPCs.

        Covers banning a single IP and a subnet, rejection of duplicate and
        invalid bans, unbanning, persistence of the ban list across a node
        restart (including expiry of a 1-second ban), and disconnecting a
        peer both by address and by node id.
        """
        self.log.info("Test setban and listbanned RPCs")

        self.log.info("setban: successfully ban single IP address")
        assert_equal(
            len(self.nodes[1].getpeerinfo()),
            2)  # node1 should have 2 connections to node0 at this point
        self.nodes[1].setban("127.0.0.1", "add")
        wait_until(lambda: len(self.nodes[1].getpeerinfo()) == 0, timeout=10)
        assert_equal(len(self.nodes[1].getpeerinfo()),
                     0)  # all nodes must be disconnected at this point
        assert_equal(len(self.nodes[1].listbanned()), 1)

        self.log.info("clearbanned: successfully clear ban list")
        self.nodes[1].clearbanned()
        assert_equal(len(self.nodes[1].listbanned()), 0)
        self.nodes[1].setban("127.0.0.0/24", "add")

        self.log.info("setban: fail to ban an already banned subnet")
        # 127.0.0.1 falls inside the already-banned 127.0.0.0/24 range.
        assert_equal(len(self.nodes[1].listbanned()), 1)
        assert_raises_rpc_error(-23, "IP/Subnet already banned",
                                self.nodes[1].setban, "127.0.0.1", "add")

        self.log.info("setban: fail to ban an invalid subnet")
        assert_raises_rpc_error(-30, "Error: Invalid IP/Subnet",
                                self.nodes[1].setban, "127.0.0.1/42", "add")
        assert_equal(
            len(self.nodes[1].listbanned()), 1
        )  # still only one banned ip because 127.0.0.1 is within the range of 127.0.0.0/24

        self.log.info("setban remove: fail to unban a non-banned subnet")
        assert_raises_rpc_error(-30, "Error: Unban failed",
                                self.nodes[1].setban, "127.0.0.1", "remove")
        assert_equal(len(self.nodes[1].listbanned()), 1)

        self.log.info("setban remove: successfully unban subnet")
        self.nodes[1].setban("127.0.0.0/24", "remove")
        assert_equal(len(self.nodes[1].listbanned()), 0)
        self.nodes[1].clearbanned()
        assert_equal(len(self.nodes[1].listbanned()), 0)

        self.log.info("setban: test persistence across node restart")
        self.nodes[1].setban("127.0.0.0/32", "add")
        self.nodes[1].setban("127.0.0.0/24", "add")
        self.nodes[1].setban("192.168.0.1", "add", 1)  # ban for 1 seconds
        self.nodes[1].setban("2001:4d48:ac57:400:cacf:e9ff:fe1d:9c63/19",
                             "add", 1000)  # ban for 1000 seconds
        listBeforeShutdown = self.nodes[1].listbanned()
        assert_equal("192.168.0.1/32", listBeforeShutdown[2]['address'])
        # Advance mock time by 2 s so the 1-second ban expires; the list
        # should shrink from 4 entries to 3.
        self.bump_mocktime(2)
        set_node_times(self.nodes, self.mocktime)
        wait_until(lambda: len(self.nodes[1].listbanned()) == 3, timeout=10)

        self.stop_node(1)
        self.start_node(1)

        # The three unexpired bans must survive the restart; the expired
        # 192.168.0.1 entry must not.
        listAfterShutdown = self.nodes[1].listbanned()
        assert_equal("127.0.0.0/24", listAfterShutdown[0]['address'])
        assert_equal("127.0.0.0/32", listAfterShutdown[1]['address'])
        assert_equal("/19" in listAfterShutdown[2]['address'], True)

        # Clear ban lists
        self.nodes[1].clearbanned()
        connect_nodes_bi(self.nodes, 0, 1)

        self.log.info("Test disconnectnode RPCs")

        self.log.info(
            "disconnectnode: fail to disconnect when calling with address and nodeid"
        )
        address1 = self.nodes[0].getpeerinfo()[0]['addr']
        node1 = self.nodes[0].getpeerinfo()[0]['addr']
        assert_raises_rpc_error(
            -32602,
            "Only one of address and nodeid should be provided.",
            self.nodes[0].disconnectnode,
            address=address1,
            nodeid=node1)

        self.log.info(
            "disconnectnode: fail to disconnect when calling with junk address"
        )
        assert_raises_rpc_error(-29,
                                "Node not found in connected nodes",
                                self.nodes[0].disconnectnode,
                                address="221B Baker Street")

        self.log.info(
            "disconnectnode: successfully disconnect node by address")
        address1 = self.nodes[0].getpeerinfo()[0]['addr']
        self.nodes[0].disconnectnode(address=address1)
        wait_until(lambda: len(self.nodes[0].getpeerinfo()) == 1, timeout=10)
        assert not [
            node
            for node in self.nodes[0].getpeerinfo() if node['addr'] == address1
        ]

        self.log.info("disconnectnode: successfully reconnect node")
        connect_nodes_bi(self.nodes, 0, 1)  # reconnect the node
        assert_equal(len(self.nodes[0].getpeerinfo()), 2)
        assert [
            node for node in self.nodes[0].getpeerinfo()
            if node['addr'] == address1
        ]

        self.log.info(
            "disconnectnode: successfully disconnect node by node id")
        id1 = self.nodes[0].getpeerinfo()[0]['id']
        self.nodes[0].disconnectnode(nodeid=id1)
        wait_until(lambda: len(self.nodes[0].getpeerinfo()) == 1, timeout=10)
        assert not [
            node for node in self.nodes[0].getpeerinfo() if node['id'] == id1
        ]
    def run_test(self):
        """Check node-0's nTimeOffset as peers with skewed clocks connect.

        Node-0 keeps the base mock time while the other nodes get fixed
        offsets (nodes 1-5: +10..+30 s in 5 s steps; nodes 6-7: -5/-10 s).
        After each connection the test asserts node-0's connection count
        and the median 'timeoffset' reported by getnetworkinfo, checks the
        connected peers via check_connected_nodes(), and verifies that a
        peer too far in the future (+30 s) is refused.
        """
        # Nodes synced but not connected
        self.mocktime = int(time.time())
        set_node_times(self.nodes, self.mocktime)
        ni = [node.getnetworkinfo() for node in self.nodes]
        assert_equal([x['connections'] for x in ni], [0] * self.num_nodes)
        self.log.info("Nodes disconnected from each other. Time: %d" %
                      self.mocktime)
        assert_equal([x['timeoffset'] for x in ni], [0] * self.num_nodes)
        self.log.info("Nodes have nTimeOffset 0")

        # Set node times.
        # nodes [1, 5]: set times to +10, +15, ..., +30 secs
        for i in range(1, 6):
            self.nodes[i].setmocktime(self.mocktime + 5 * (i + 1))
        # nodes [6, 7]: set time to -5, -10 secs
        for i in range(6, 8):
            self.nodes[i].setmocktime(self.mocktime - 5 * (i - 5))

        # connect nodes 1 and 2
        self.log.info("Connecting with node-1 (+10 s) and node-2 (+15 s)...")
        connect_nodes_bi(self.nodes, 0, 1)
        connect_nodes_bi(self.nodes, 0, 2)
        self.log.info("--> samples = [+0, +10, (+10), +15, +15]")
        ni = self.nodes[0].getnetworkinfo()
        assert_equal(ni['connections'], 4)
        assert_equal(ni['timeoffset'], 10)
        self.connected_nodes = [self.nodes[1], self.nodes[2]]
        self.check_connected_nodes()
        self.log.info("Node-0 nTimeOffset: +%d seconds" % ni['timeoffset'])

        # connect node 3
        self.log.info(
            "Connecting with node-3 (+20 s). This will print the warning...")
        connect_nodes_bi(self.nodes, 0, 3)
        self.log.info("--> samples = [+0, +10, +10, (+15), +15, +20, +20]")
        ni = self.nodes[0].getnetworkinfo()
        assert_equal(ni['connections'], 6)
        assert_equal(ni['timeoffset'], 15)
        self.connected_nodes.append(self.nodes[3])
        self.check_connected_nodes()
        self.log.info("Node-0 nTimeOffset: +%d seconds" % ni['timeoffset'])

        # connect node 6
        self.log.info("Connecting with node-6 (-5 s)...")
        connect_nodes_bi(self.nodes, 0, 6)
        self.log.info(
            "--> samples = [-5, -5, +0, +10, (+10), +15, +15, +20, +20]")
        ni = self.nodes[0].getnetworkinfo()
        assert_equal(ni['connections'], 8)
        assert_equal(ni['timeoffset'], 10)
        self.connected_nodes.append(self.nodes[6])
        self.check_connected_nodes()
        self.log.info("Node-0 nTimeOffset: +%d seconds" % ni['timeoffset'])

        # connect node 4
        self.log.info(
            "Connecting with node-4 (+25 s). This will print the warning...")
        connect_nodes_bi(self.nodes, 0, 4)
        self.log.info(
            "--> samples = [-5, -5, +0, +10, +10, (+15), +15, +20, +20, +25, +25]"
        )
        ni = self.nodes[0].getnetworkinfo()
        assert_equal(ni['connections'], 10)
        assert_equal(ni['timeoffset'], 15)
        self.connected_nodes.append(self.nodes[4])
        self.check_connected_nodes()
        self.log.info("Node-0 nTimeOffset: +%d seconds" % ni['timeoffset'])

        # try to connect node 5 and check that it can't
        self.log.info("Trying to connect with node-5 (+30 s)...")
        connect_nodes_bi(self.nodes, 0, 5)
        ni = self.nodes[0].getnetworkinfo()
        assert_equal(ni['connections'], 10)
        assert_equal(ni['timeoffset'], 15)
        self.log.info("Not connected.")
        self.log.info("Node-0 nTimeOffset: +%d seconds" % ni['timeoffset'])

        # connect node 7
        self.log.info("Connecting with node-7 (-10 s)...")
        connect_nodes_bi(self.nodes, 0, 7)
        self.log.info(
            "--> samples = [-10, -10, -5, -5, +0, +10, (+10), +15, +15, +20, +20, +25, +25]"
        )
        ni = self.nodes[0].getnetworkinfo()
        assert_equal(ni['connections'], 12)
        assert_equal(ni['timeoffset'], 10)
        # FIX: track the peer that was just connected (node-7); previously
        # this appended self.nodes[6] a second time, so node-7 was never
        # covered by check_connected_nodes() and node-6 was checked twice.
        self.connected_nodes.append(self.nodes[7])
        self.check_connected_nodes()
        self.log.info("Node-0 nTimeOffset: +%d seconds" % ni['timeoffset'])
Example #27
0
    def run_test(self):
        """Check wallet transaction-time determination and rescan behavior.

        Mines coins on node 0, sends to three watch-only addresses in a
        'wo' wallet on node 1 at ten-day intervals (via mock time), then
        restores the same watch-only wallet on node 2 without rescan and
        verifies that balances/transactions only appear after an explicit
        rescanblockchain, with correct 'time'/'blocktime' values.  Also
        checks rescanblockchain's parameter validation errors.
        """
        self.log.info('Prepare nodes and wallet')

        minernode = self.nodes[0]  # node used to mine BTC and create transactions
        usernode = self.nodes[1]  # user node with correct time
        restorenode = self.nodes[2]  # node used to restore user wallet and check time determination in ComputeSmartTime (wallet.cpp)

        # time constant
        cur_time = int(time.time())
        ten_days = 10 * 24 * 60 * 60

        # synchronize nodes and time
        self.sync_all()
        set_node_times(self.nodes, cur_time)

        # prepare miner wallet
        minernode.createwallet(wallet_name='default')
        miner_wallet = minernode.get_wallet_rpc('default')
        m1 = miner_wallet.getnewaddress()

        # prepare the user wallet with 3 watch only addresses
        wo1 = usernode.getnewaddress()
        wo2 = usernode.getnewaddress()
        wo3 = usernode.getnewaddress()

        usernode.createwallet(wallet_name='wo', disable_private_keys=True)
        wo_wallet = usernode.get_wallet_rpc('wo')

        wo_wallet.importaddress(wo1)
        wo_wallet.importaddress(wo2)
        wo_wallet.importaddress(wo3)

        self.log.info('Start transactions')

        # check blockcount
        assert_equal(minernode.getblockcount(), 200)

        # generate some btc to create transactions and check blockcount
        # (COINBASE_MATURITY + 1 so the first coinbase is spendable)
        initial_mine = COINBASE_MATURITY + 1
        self.generatetoaddress(minernode, initial_mine, m1)
        assert_equal(minernode.getblockcount(), initial_mine + 200)

        # synchronize nodes and time
        self.sync_all()
        set_node_times(self.nodes, cur_time + ten_days)
        # send 10 btc to user's first watch-only address
        self.log.info('Send 10 btc to user')
        miner_wallet.sendtoaddress(wo1, 10)

        # generate blocks and check blockcount
        self.generatetoaddress(minernode, COINBASE_MATURITY, m1)
        assert_equal(minernode.getblockcount(), initial_mine + 300)

        # synchronize nodes and time
        self.sync_all()
        set_node_times(self.nodes, cur_time + ten_days + ten_days)
        # send 5 btc to our second watch-only address
        self.log.info('Send 5 btc to user')
        miner_wallet.sendtoaddress(wo2, 5)

        # generate blocks and check blockcount
        self.generatetoaddress(minernode, COINBASE_MATURITY, m1)
        assert_equal(minernode.getblockcount(), initial_mine + 400)

        # synchronize nodes and time
        self.sync_all()
        set_node_times(self.nodes, cur_time + ten_days + ten_days + ten_days)
        # send 1 btc to our third watch-only address
        self.log.info('Send 1 btc to user')
        miner_wallet.sendtoaddress(wo3, 1)

        # generate more blocks and check blockcount
        self.generatetoaddress(minernode, COINBASE_MATURITY, m1)
        assert_equal(minernode.getblockcount(), initial_mine + 500)

        self.log.info('Check user\'s final balance and transaction count')
        assert_equal(wo_wallet.getbalance(), 16)
        assert_equal(len(wo_wallet.listtransactions()), 3)

        self.log.info('Check transaction times')
        # Each tx's wallet time must equal its block time (set via mocktime).
        for tx in wo_wallet.listtransactions():
            if tx['address'] == wo1:
                assert_equal(tx['blocktime'], cur_time + ten_days)
                assert_equal(tx['time'], cur_time + ten_days)
            elif tx['address'] == wo2:
                assert_equal(tx['blocktime'], cur_time + ten_days + ten_days)
                assert_equal(tx['time'], cur_time + ten_days + ten_days)
            elif tx['address'] == wo3:
                assert_equal(tx['blocktime'], cur_time + ten_days + ten_days + ten_days)
                assert_equal(tx['time'], cur_time + ten_days + ten_days + ten_days)

        # restore user wallet without rescan
        self.log.info('Restore user wallet on another node without rescan')
        restorenode.createwallet(wallet_name='wo', disable_private_keys=True)
        restorewo_wallet = restorenode.get_wallet_rpc('wo')

        # for descriptor wallets, the test framework maps the importaddress RPC to the
        # importdescriptors RPC (with argument 'timestamp'='now'), which always rescans
        # blocks of the past 2 hours, based on the current MTP timestamp; in order to avoid
        # importing the last address (wo3), we advance the time further and generate 10 blocks
        if self.options.descriptors:
            set_node_times(self.nodes, cur_time + ten_days + ten_days + ten_days + ten_days)
            self.generatetoaddress(minernode, 10, m1)

        restorewo_wallet.importaddress(wo1, rescan=False)
        restorewo_wallet.importaddress(wo2, rescan=False)
        restorewo_wallet.importaddress(wo3, rescan=False)

        # check user has 0 balance and no transactions
        assert_equal(restorewo_wallet.getbalance(), 0)
        assert_equal(len(restorewo_wallet.listtransactions()), 0)

        # proceed to rescan, first with an incomplete one, then with a full rescan
        self.log.info('Rescan last history part')
        restorewo_wallet.rescanblockchain(initial_mine + 350)
        self.log.info('Rescan all history')
        restorewo_wallet.rescanblockchain()

        self.log.info('Check user\'s final balance and transaction count after restoration')
        assert_equal(restorewo_wallet.getbalance(), 16)
        assert_equal(len(restorewo_wallet.listtransactions()), 3)

        self.log.info('Check transaction times after restoration')
        for tx in restorewo_wallet.listtransactions():
            if tx['address'] == wo1:
                assert_equal(tx['blocktime'], cur_time + ten_days)
                assert_equal(tx['time'], cur_time + ten_days)
            elif tx['address'] == wo2:
                assert_equal(tx['blocktime'], cur_time + ten_days + ten_days)
                assert_equal(tx['time'], cur_time + ten_days + ten_days)
            elif tx['address'] == wo3:
                assert_equal(tx['blocktime'], cur_time + ten_days + ten_days + ten_days)
                assert_equal(tx['time'], cur_time + ten_days + ten_days + ten_days)


        self.log.info('Test handling of invalid parameters for rescanblockchain')
        assert_raises_rpc_error(-8, "Invalid start_height", restorewo_wallet.rescanblockchain, -1, 10)
        assert_raises_rpc_error(-8, "Invalid stop_height", restorewo_wallet.rescanblockchain, 1, -1)
        assert_raises_rpc_error(-8, "stop_height must be greater than start_height", restorewo_wallet.rescanblockchain, 20, 10)
Example #28
0
    def reorg_test(self):
        """Run a parameterized deep-reorg test around asset issuance.

        An adversary node is isolated, then mines a chain twice as long as
        the honest chain (spread over ~tip_age seconds of mock time) while
        both chains issue the same asset.  On reconnection the test checks
        whether the network reorgs onto the adversary chain, as predicted
        by the --should_reorg option, and which node ends up owning the
        asset.  Driven by the --height, --tip_age and --should_reorg
        command-line options.
        """
        height = int(self.options.height)
        peers = self.num_nodes
        tip_age = int(self.options.tip_age)
        should_reorg = int(self.options.should_reorg)

        self.log.info(f"Doing a reorg test with height: {height}, peers: {peers}, tip_age: {tip_age}.  " + \
                      f"Should reorg? *{should_reorg}*")

        asset_name = "MOON_STONES"
        adversary = self.nodes[0]
        subject = self.nodes[-1]

        # enough to activate assets
        start = 432

        self.log.info(f"Setting all node times to {tip_age} seconds ago...")
        now = int(round(time.time()))
        set_node_times(self.nodes, now - tip_age)

        # Split the starter blocks between subject and adversary so both
        # have mature coin to work with.
        self.log.info(
            f"Mining {start} starter blocks on all nodes and syncing...")
        subject.generate(round(start / 2))
        self.sync_all()
        adversary.generate(round(start / 2))
        self.sync_all()

        self.log.info("Stopping adversary node...")
        self.stop_node(0)

        self.log.info(f"Subject is issuing asset: {asset_name}...")
        subject.issue(asset_name)

        self.log.info(f"Miners are mining {height} blocks...")
        subject.generate(height)
        wait_until(lambda: [n.getblockcount()
                            for n in self.nodes[1:]] == [height + start] *
                   (peers - 1),
                   err_msg="Wait for BlockCount")
        self.log.info("BlockCount: " +
                      str([start] +
                          [n.getblockcount() for n in self.nodes[1:]]))

        self.log.info("Restarting adversary node...")
        self.start_node(0)

        # The adversary (still disconnected) issues the same asset name on
        # its own fork.
        self.log.info(f"Adversary is issuing asset: {asset_name}...")
        adversary.issue(asset_name)

        self.log.info(
            f"Adversary is mining {height*2} (2 x {height}) blocks over the next ~{tip_age} seconds..."
        )
        # Step mock time forward per block so the adversary tip ends up at
        # least tip_age seconds ahead of the subject tip.
        interval = round(tip_age / (height * 2)) + 1
        for i in range(0, height * 2):
            set_node_times(self.nodes, (now - tip_age) + ((i + 1) * interval))
            adversary.generate(1)
        assert (adversary.getblockcount() -
                start == (subject.getblockcount() - start) * 2)
        besttimes = [
            n.getblock(n.getbestblockhash())['time'] for n in self.nodes
        ]
        self.log.info("BestTimes: " + str(besttimes))
        self.log.info(
            f"Adversary: {besttimes[0]}; subject: {besttimes[-1]}; difference: {besttimes[0] - besttimes[-1]}; expected gte: {tip_age}"
        )
        assert (besttimes[0] - besttimes[-1] >= tip_age)

        self.log.info("BlockCount: " +
                      str([n.getblockcount() for n in self.nodes]))

        self.log.info("Reconnecting the network and syncing the chain...")
        for i in range(1, peers):
            connect_nodes_bi(self.nodes, 0, i, should_reorg)

        expected_height = start + height
        subject_owns_asset = True
        if should_reorg > 0:
            self.log.info(
                f"Expected a reorg -- blockcount should be {expected_height} and subject should own {asset_name} (waiting 5 seconds)..."
            )
            expected_height += height
            subject_owns_asset = False
        else:
            self.log.info(
                f"Didn't expect a reorg -- blockcount should remain {expected_height} and both subject and adversary should own {asset_name} (waiting 5 seconds)..."
            )

        # Best-effort wait: in the no-reorg case the counts never converge,
        # so a timeout here is expected and ignored.
        # noinspection PyBroadException
        try:
            wait_until(
                lambda: [n.getblockcount()
                         for n in self.nodes] == [expected_height] * peers,
                timeout=5,
                err_msg="getblockcount")
        except:
            pass
        self.log.info("BlockCount: " +
                      str([n.getblockcount() for n in self.nodes]))
        assert_equal(subject.getblockcount(), expected_height)
        # The adversary always owns its own issuance; the subject's
        # issuance survives only if no reorg happened.
        assert_contains_pair(asset_name + '!', 1, adversary.listmyassets())
        if subject_owns_asset:
            assert_contains_pair(asset_name + '!', 1, subject.listmyassets())
        else:
            assert_does_not_contain_key(asset_name + '!',
                                        subject.listmyassets())
Example #29
0
    def run_test(self):
        """Test wallet import RPC variants with and without rescan.

        Funds one address per import variant (each in its own block with a
        recorded confirmation height and timestamp), advances time past the
        rescan window, then runs each variant's import RPC on its assigned
        node and verifies getbalance/listtransactions: rescanning variants
        see the pre-import funds, non-rescanning ones do not.  Finally
        sends a second payment to every address and checks that all
        variants pick up post-import transactions.
        """
        # Create one transaction on node 0 with a unique amount for
        # each possible type of wallet import RPC.
        for i, variant in enumerate(IMPORT_VARIANTS):
            variant.label = "label {} {}".format(i, variant)
            variant.address = self.nodes[1].getaddressinfo(
                self.nodes[1].getnewaddress(
                    label=variant.label,
                    address_type=variant.address_type.value,
                ))
            variant.key = self.nodes[1].dumpprivkey(variant.address["address"])
            variant.initial_amount = get_rand_amount()
            variant.initial_txid = self.nodes[0].sendtoaddress(
                variant.address["address"], variant.initial_amount)
            self.nodes[0].generate(1)  # Generate one block for each send
            variant.confirmation_height = self.nodes[0].getblockcount()
            variant.timestamp = self.nodes[0].getblockheader(
                self.nodes[0].getbestblockhash())["time"]

        # Generate a block further in the future (past the rescan window).
        assert_equal(self.nodes[0].getrawmempool(), [])
        set_node_times(
            self.nodes,
            self.nodes[0].getblockheader(
                self.nodes[0].getbestblockhash())["time"] + TIMESTAMP_WINDOW +
            1,
        )
        self.nodes[0].generate(1)
        self.sync_all()

        # For each variation of wallet key import, invoke the import RPC and
        # check the results from getbalance and listtransactions.
        for variant in IMPORT_VARIANTS:
            self.log.info('Run import for variant {}'.format(variant))
            expect_rescan = variant.rescan == Rescan.yes
            # Each (prune, rescan) combination maps to a dedicated node.
            variant.node = self.nodes[
                2 +
                IMPORT_NODES.index(ImportNode(variant.prune, expect_rescan))]
            variant.do_import(variant.timestamp)
            if expect_rescan:
                variant.expected_balance = variant.initial_amount
                variant.expected_txs = 1
                variant.check(variant.initial_txid, variant.initial_amount,
                              variant.confirmation_height)
            else:
                variant.expected_balance = 0
                variant.expected_txs = 0
                variant.check()

        # Create new transactions sending to each address.
        for i, variant in enumerate(IMPORT_VARIANTS):
            variant.sent_amount = get_rand_amount()
            variant.sent_txid = self.nodes[0].sendtoaddress(
                variant.address["address"], variant.sent_amount)
            self.nodes[0].generate(1)  # Generate one block for each send
            variant.confirmation_height = self.nodes[0].getblockcount()

        assert_equal(self.nodes[0].getrawmempool(), [])
        self.sync_all()

        # Check the latest results from getbalance and listtransactions.
        # Post-import sends must be seen by every variant, rescan or not.
        for variant in IMPORT_VARIANTS:
            self.log.info('Run check for variant {}'.format(variant))
            variant.expected_balance += variant.sent_amount
            variant.expected_txs += 1
            variant.check(variant.sent_txid, variant.sent_amount,
                          variant.confirmation_height)
Example #30
0
    def run_test(self):
        def get_zerocoin_data(coin):
            return coin["s"], coin["r"], coin["k"], coin["id"], coin[
                "d"], coin["t"]

        def check_balances(denom, zoho_bal, oho_bal):
            zoho_bal -= denom
            assert_equal(self.nodes[2].getzerocoinbalance()['Total'], zoho_bal)
            oho_bal += denom
            wi = self.nodes[2].getwalletinfo()
            assert_equal(wi['balance'] + wi['immature_balance'], oho_bal)
            return zoho_bal, oho_bal

        def stake_4_blocks(block_time):
            for peer in range(2):
                for i in range(2):
                    block_time = self.generate_pos(peer, block_time)
                sync_blocks(self.nodes)
            return block_time

        q = 73829871667027927151400291810255409637272593023945445234219354687881008052707
        pow2 = 2**256
        K_BITSIZE = 128  # bitsize of the range for random K
        self.log_title()
        block_time = self.mocktime
        set_node_times(self.nodes, block_time)

        # Start with cache balances
        wi = self.nodes[2].getwalletinfo()
        balance = wi['balance'] + wi['immature_balance']
        zoho_balance = self.nodes[2].getzerocoinbalance()['Total']
        assert_equal(balance, DecimalAmt(13833.92))
        assert_equal(zoho_balance, 6666)

        # Export zerocoin data
        listmints = self.nodes[2].listmintedzerocoins(True, True)
        serial_ids = [mint["serial hash"] for mint in listmints]
        exported_zerocoins = [
            x for x in self.nodes[2].exportzerocoins(False)
            if x["id"] in serial_ids
        ]
        exported_zerocoins.sort(key=lambda x: x["d"], reverse=False)
        assert_equal(8, len(exported_zerocoins))

        # 1) Spend 1 coin and mine two more blocks
        serial_0, randomness_0, privkey_0, id_0, denom_0, tx_0 = get_zerocoin_data(
            exported_zerocoins[0])
        self.log.info("Spending the minted coin with serial %s..." %
                      serial_0[:16])
        txid = self.nodes[2].spendzerocoin(denom_0, False, False, "",
                                           False)['txid']
        # stake 4 blocks - check it gets included on chain and check balances
        block_time = stake_4_blocks(block_time)
        self.check_tx_in_chain(0, txid)
        zoho_balance, balance = check_balances(denom_0, zoho_balance, balance)
        self.log.info("Coin spent.")

        # 2) create 5  new coins
        new_coins = []
        for i in range(5):
            K = random.getrandbits(K_BITSIZE)
            new_coins.append({
                "s": hex(int(serial_0, 16) + K * q * pow2)[2:],
                "r": randomness_0,
                "d": denom_0,
                "p": privkey_0,
                "t": tx_0
            })

        # 3) Spend the new zerocoins (V2)
        for c in new_coins:
            self.log.info("V2 - Spending the wrapping serial %s" % c["s"])
            assert_raises_rpc_error(-4, "CoinSpend: failed check",
                                    self.nodes[2].spendrawzerocoin, c["s"],
                                    c["r"], c["d"], c["p"], "", c["t"], False)
        self.log.info("GOOD: It was not possible")