Example #1
    def run_test(self):

        # When bitcoind is just started it's in IBD
        assert_raises_rpc_error(-10, "Bitcoin is downloading blocks...",
                                self.nodes[0].getblocktemplate)

        # Mature some coins for easy spending, have a tx in the mempool
        self.generate(self.nodes[0], 101)
        txid = self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), "1")
        self.sync_all()

        # The nodes are connected, happy case
        gbt = self.nodes[0].getblocktemplate()
        assert self.template_contains_tx(gbt, txid)

        # Disconnect the nodes and verify that getblocktemplate fails
        # This is fail-fast behaviour: miners don't want to waste cycles
        # when they don't know if they're on the latest tip
        # and/or can't propagate blocks
        disconnect_nodes(self.nodes[0], self.nodes[1])
        assert_raises_rpc_error(-9, "Bitcoin is not connected!",
                                self.nodes[0].getblocktemplate)
        assert_raises_rpc_error(-9, "Bitcoin is not connected!",
                                self.nodes[1].getblocktemplate)

        # Reconnect the nodes and check that getblocktemplate works again
        # and that they're in sync
        connect_nodes_bi(self.nodes[0], self.nodes[1])
        gbt0 = self.nodes[0].getblocktemplate()
        gbt1 = self.nodes[1].getblocktemplate()
        assert_blocktemplate_equal(gbt0, gbt1)

        # Test that getblocktemplate will return a cached template
        # for the next 5 seconds
        mock_time = int(time.time())
        self.nodes[0].setmocktime(mock_time)
        template = self.nodes[0].getblocktemplate()

        # Add a new tx to the mempool
        newtxid = self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(),
                                              "1")

        # Fast-forward 4 seconds; the template has not changed
        self.nodes[0].setmocktime(mock_time + 4)
        new_template = self.nodes[0].getblocktemplate()
        assert new_template == template
        assert not self.template_contains_tx(new_template, newtxid)

        # 5 seconds is when the cache expires, so it is a boundary condition that would
        # introduce non-determinism to the test; we test at 6 seconds instead.
        # Fast-forward 6 seconds; the new tx has now been included in the template
        self.nodes[0].setmocktime(mock_time + 6)
        new_template = self.nodes[0].getblocktemplate()
        assert new_template != template
        assert self.template_contains_tx(new_template, newtxid)
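
Example #1's assertions call a template_contains_tx helper that the listing does not show. A minimal sketch of what it might look like, assuming the standard getblocktemplate result shape (a "transactions" list whose entries carry a "txid" field):

    def template_contains_tx(self, template, txid):
        # Hypothetical helper (not part of the original listing): scan the
        # template's transaction list for the given txid.
        return any(tx["txid"] == txid for tx in template["transactions"])

Example #2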
    def run_test(self):
        # generate just 1 block to leave IBD state (no wallet is required for this test so we use hard-coded key)
        self.generatetoaddress(
            self.nodes[0], 1,
            self.nodes[0].get_deterministic_priv_key().address)

        self.sync_all()

        gbtl0 = self.nodes[0].getblocktemplatelight()
        gbtl1 = self.nodes[1].getblocktemplatelight()

        assert_blocktemplate_equal(gbtl0, gbtl1)

        # some random txs from mainnet and testnet. They don't have to be valid on this chain for this test.
        txs = [
            "01000000016af14fe2b65b3fabe6a8f125de5ded1180aff9c3892138eefc16242f2dadfe2f00000000fd8a0100483045022100d80"
            "fa2758e4c1bc2b5b687b59d853c3a97e2b343b9ae1cb2bea0dce0e2cb1ca602200ac71e79dcde5d065ac99160be3376c8a373c016"
            "b5b6cef584b9a8eeb901b0a841483045022100d6a1a7393fa728404790bc85c26b60cf4d6d2baecfefca8b01608bb02441dc7c022"
            "056922cc8fa4d14eed39a69287a89c9d630164c23f4f810fa774e3feb6cdfea584147304402203f6a7ab7a5b91b0495ff6be292a5"
            "eee74bbf5c7b1cc6de586002ccf4142059a302200cf80778d4f4c078073d840b027a927a11d227bb87cbd043c37989f5cb01861c4"
            "14cad532102962feabd55f69c0e8eaceb7df43969dda4aeb575c7a501a4d08be392b2c48f2a2102a0e6e0def65cdb686a85c9a5cc"
            "03fc4c524831806f03cc7a18463b5267a5961421030b61fc10e70ca4fcedf95ca8284b545a5a80f255205c1c19e5eebcadbc17365"
            "921036d623ebfc46b97eb99a43d3c45df09319c8a6c9ba2b292c1a6a42e460034ed7a2103f54a07c2b5e82cf1e6465d7e37ee5a4b"
            "0701b2ccda866430190a8ebbd00f07db55aefeffffff022c1172000000000017a914e78564d75c446f8c00c757a2bd783d30c4f08"
            "19a8740e88e02000000001976a91471faafd5016aa8255d61e95cfe3c4f180504051e88ac48a80900",
            "0100000002ae54229545be8d2738e245e7ea41d089fa3def0a48e9410b49f39ec43826971d010000006a4730440220204169229eb1"
            "7dc49ad83675d693e4012453db9a8d1af6f118278152c709f6be022077081ab76df0356e53c1ba26145a3fb98ca58553a98b1c130a"
            "2f6cff4d39767f412103cfbc58232f0761a828ced4ee93e87ce27f26d005dd9c87150aad5e5f07073dcaffffffff4eca0e441d0a27"
            "f874f41739382cb80fdf3aac0f7b8316e197dd42e7155590c1010000006a47304402203832a75ccfc2f12474c1d3d2fc22cd72cc92"
            "4c1b73995a27a0d07b9c5a745f3a022035d98e1017a4cb02ff1509d17c752047dca2b270b927793f2eb9e30af1ac02d6412103cfbc"
            "58232f0761a828ced4ee93e87ce27f26d005dd9c87150aad5e5f07073dcaffffffff0260ea00000000000017a9149eefc3ae114359"
            "8a830d66cbc32aa583fa3d987687fb030100000000001976a914bddb57be877bd32264fc40670b87b6fb271813f688ac00000000",
            "0100000001993b9740d3e289876cbe6920008a35c4a08b7dc4bd48ff61b198f163af3f354900000000644102a8588b2e1a808ade29"
            "4aa76a1e63137099fa087841603a361171f0c1473396f482d8d1a61e2d3ff94280b1125114868647bff822d2a74461c6bbe6ffc06f"
            "9d412102abaad90841057ddb1ed929608b536535b0cd8a18ba0a90dba66ba7b1c1f7b4eafeffffff0176942200000000001976a91"
            "40a373caf0ab3c2b46cd05625b8d545c295b93d7a88acf3fa1400",
        ]

        node = self.nodes[0]
        gbt_dir = os.path.join(node.datadir, self.chain, 'gbt')
        trash_dir = os.path.join(gbt_dir, 'trash')

        def path_for_job(jid):
            return os.path.join(gbt_dir, jid)

        def trash_path_for_job(jid):
            return os.path.join(trash_dir, jid)

        self.log.info("gbt_dir: {}".format(gbt_dir))
        self.log.info("trash_dir: {}".format(trash_dir))

        gbtl = []
        job_ids = set()
        trashed_ids = set()
        removed_ids = set()
        stop_flag = threading.Event()

        def check_jobs():
            for j in set(job_ids):
                if not os.path.exists(path_for_job(j)):
                    if os.path.exists(trash_path_for_job(j)):
                        if j not in trashed_ids:
                            self.log.info(f'found trashed job {j}')
                            trashed_ids.add(j)
                    elif j not in removed_ids:
                        self.log.info(f'found removed job {j}')
                        removed_ids.add(j)

        def poll_thread():
            """This thread is necessary to scan the gbt_dir and trash_dir and not miss any files.
            It is a workaround to very slow gitlab CI (especially on aarch64)."""
            while not stop_flag.wait(0.100):  # poll every 100ms
                check_jobs()

        pthr = threading.Thread(target=poll_thread, daemon=True)
        pthr.start()

        try:
            # generate a bunch of unique job_ids
            txs_tmp = txs
            n_iters = self._cache_size * 3  # intentionally overfill past cache size
            assert n_iters
            for _ in range(n_iters):
                tstart = time.time()
                gbtl_res = node.getblocktemplatelight({}, txs_tmp)
                telapsed = time.time() - tstart
                jid = gbtl_res['job_id']
                self.log.info(
                    f'getblocktemplatelight returned job {jid} in {telapsed:.2f} seconds'
                )
                job_ids.add(jid)
                gbtl.append(gbtl_res)
                txs_tmp += txs
        finally:
            # Ensure subordinate poller thread is stopped, joined
            stop_flag.set()
            pthr.join()

        assert os.path.isdir(gbt_dir)
        assert os.path.isdir(trash_dir)
        assert len(job_ids) == n_iters

        def predicate():
            check_jobs()
            return job_ids == removed_ids

        wait_until(predicate, timeout=self._store_time * 2)

        assert_equal(job_ids, removed_ids)
        assert_equal(0, len(os.listdir(trash_dir)))

        # Grab the ids for jobs that are no longer in the in-memory LRU cache -- they should
        # all raise now that their job data has been deleted from disk.
        job_ids = [x['job_id'] for x in gbtl[:-self._cache_size]]

        assert job_ids and len(job_ids) == (n_iters - self._cache_size)

        # now, test that all the deleted ones are truly gone and raise the proper RPC error
        for i, job_id in enumerate(job_ids):
            tmpl = gbtl[i]
            block = messages.CBlock()
            block.nVersion = tmpl["version"]
            block.hashPrevBlock = int(tmpl["previousblockhash"], 16)
            block.nTime = tmpl["curtime"]
            block.nBits = int(tmpl["bits"], 16)
            block.nNonce = 0
            coinbase_tx = blocktools.create_coinbase(
                height=int(tmpl["height"]) + 1)
            coinbase_tx.vin[0].nSequence = 2**32 - 2
            coinbase_tx.rehash()
            block.vtx = [coinbase_tx]
            assert_raises_rpc_error(-8, "job_id data not available",
                                    node.submitblocklight,
                                    block.serialize().hex(), job_id)
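
Both examples compare templates across nodes via an assert_blocktemplate_equal helper that the listing omits. A plausible sketch, assuming the helper drops per-node fields before comparing (the exact set of ignored fields is an assumption):

def assert_blocktemplate_equal(tmpl_a, tmpl_b):
    # Hypothetical reconstruction: ignore fields that legitimately differ
    # between nodes (e.g. "longpollid") and require everything else to match.
    ignored = {"longpollid"}  # assumed; the real helper may ignore more fields
    assert_equal({k: v for k, v in tmpl_a.items() if k not in ignored},
                 {k: v for k, v in tmpl_b.items() if k not in ignored})
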
Example #3
    def __run_test(self,
                   *,
                   nblocks_to_gen=150,
                   ntx_to_gen=19,
                   test_additional_txs=True):
        assert ntx_to_gen > 0
        # we will need this value for random_transaction below, and for self.gen_valid_tx
        self.min_relay_fee = self.nodes[0].getnetworkinfo()["relayfee"]

        if nblocks_to_gen > 0:
            if self.is_wallet_compiled():
                # generate some blocks to wallet to have spendable coins
                self.nodes[0].generate(nblocks_to_gen)
            else:
                # generate just 1 block to leave IBD state (no wallet so no spending in this mode)
                self.nodes[0].generatetoaddress(
                    1, self.nodes[0].get_deterministic_priv_key().address)

        self.sync_all()

        gbtl0 = self.nodes[0].getblocktemplatelight()
        gbtl1 = self.nodes[1].getblocktemplatelight()

        assert_blocktemplate_equal(gbtl0, gbtl1)

        def check_gbt_store_dir(gbtdir, job_id):
            expected_data_file = os.path.join(gbtdir, job_id)
            assert os.path.exists(gbtdir), "The -gbtstoredir must exist"
            assert os.path.exists(
                expected_data_file
            ), "The -gbtstoredir must contain the expected job_id file"

        # check that node[1] is using the custom -gbtstoredir argument we gave it.
        check_gbt_store_dir(self._custom_gbt_dir, gbtl1['job_id'])

        self.check_job_id(gbtl0)
        self.check_merkle(gbtl0, [])  # empty merkle should be ok

        # generate a bunch of transactions
        txids = []
        ntx = ntx_to_gen if self.is_wallet_compiled() else 0
        for i in range(ntx):
            txid, txhex, fee = util.random_transaction((self.nodes[0], ),
                                                       Decimal("0.000123"),
                                                       self.min_relay_fee,
                                                       Decimal("0.000001"), 0)
            txids.append(txid)

        # Since we have two nodes, sync the mempools
        self.sync_all()

        # Wait for getblocktemplate to see the txids (it uses a 5s caching strategy before it calculates a new
        # template). 'setmocktime' would work here too, but we prefer to let the clock advance normally rather
        # than use a pegged mocktime for this test case. (Real execution time for this whole test case is about
        # the same whether using mocktime or this polling strategy, so better to keep time advancing normally.)
        self.wait_for_txs(txids, 0)
        self.wait_for_txs(txids, 1)

        # Check that, once the nodes are synced, they give the same template
        gbtl0 = self.nodes[0].getblocktemplatelight()
        gbtl1 = self.nodes[1].getblocktemplatelight()
        assert_blocktemplate_equal(gbtl0, gbtl1)

        # check job_id is ok
        self.check_job_id(gbtl0)
        # check merkle is ok
        self.check_merkle(gbtl0, txids)

        if self.is_wallet_compiled() and test_additional_txs:
            # add the signed tx to a job; we will submit this later (only if the wallet is enabled)
            signedtx = self.gen_valid_tx()
            signedtxid = bytes2hex(hash256(bytes.fromhex(signedtx)))
            self.log.info("Signed txid: {}  hex: {}".format(
                signedtxid, signedtx))
            gbtl0 = self.nodes[0].getblocktemplatelight({}, [signedtx])
            submit_job_id = gbtl0['job_id']
            submit_tmpl = gbtl0
            self.check_job_id(gbtl0)
            self.check_merkle(gbtl0, txids + [signedtxid])
        else:
            # No wallet (or caller wants to not test additional_tx).
            # Just use the last job with no additional_txs as the submit job
            submit_job_id, submit_tmpl = gbtl0['job_id'], gbtl0

        # These txs are invalid on this chain, but they do at least deserialize correctly, so we can use them
        # to make a bunch of jobs
        extratxs = [
            "0100000002ae54229545be8d2738e245e7ea41d089fa3def0a48e9410b49f39ec43826971d010000006a4730440220204169229eb1"
            "7dc49ad83675d693e4012453db9a8d1af6f118278152c709f6be022077081ab76df0356e53c1ba26145a3fb98ca58553a98b1c130a"
            "2f6cff4d39767f412103cfbc58232f0761a828ced4ee93e87ce27f26d005dd9c87150aad5e5f07073dcaffffffff4eca0e441d0a27"
            "f874f41739382cb80fdf3aac0f7b8316e197dd42e7155590c1010000006a47304402203832a75ccfc2f12474c1d3d2fc22cd72cc92"
            "4c1b73995a27a0d07b9c5a745f3a022035d98e1017a4cb02ff1509d17c752047dca2b270b927793f2eb9e30af1ac02d6412103cfbc"
            "58232f0761a828ced4ee93e87ce27f26d005dd9c87150aad5e5f07073dcaffffffff0260ea00000000000017a9149eefc3ae114359"
            "8a830d66cbc32aa583fa3d987687fb030100000000001976a914bddb57be877bd32264fc40670b87b6fb271813f688ac00000000",
            "0100000001993b9740d3e289876cbe6920008a35c4a08b7dc4bd48ff61b198f163af3f354900000000644102a8588b2e1a808ade29"
            "4aa76a1e63137099fa087841603a361171f0c1473396f482d8d1a61e2d3ff94280b1125114868647bff822d2a74461c6bbe6ffc06f"
            "9d412102abaad90841057ddb1ed929608b536535b0cd8a18ba0a90dba66ba7b1c1f7b4eafeffffff0176942200000000001976a91"
            "40a373caf0ab3c2b46cd05625b8d545c295b93d7a88acf3fa1400",
        ]
        extratxids = bytes2hex(
            [hash256(x) for x in hex2bytes(extratxs, rev=False)])

        # test "additional_txs"
        gbtl0 = self.nodes[0].getblocktemplatelight({}, extratxs)
        self.check_job_id(gbtl0)
        self.check_merkle(gbtl0, txids + extratxids)

        # test that the "additional_txs" didn't stick around in the cached pblocktemplate in getblocktemplatecommon
        gbtl0 = self.nodes[0].getblocktemplatelight({}, extratxs)
        self.check_merkle(gbtl0, txids + extratxids)
        gbt0 = self.nodes[0].getblocktemplate()
        assert_equal(sorted(txids), [x['txid'] for x in gbt0['transactions']])
        # try extratxs twice; they should both be present (known behavior)
        gbtl0 = self.nodes[0].getblocktemplatelight({}, extratxs + extratxs)
        self.check_merkle(gbtl0, txids + extratxids + extratxids)

        # try regular getblocktemplatelight again, without extratxs, test that extratxs didn't stick around
        gbtl0 = self.nodes[0].getblocktemplatelight()
        gbtl1 = self.nodes[1].getblocktemplatelight()
        assert_blocktemplate_equal(gbtl0, gbtl1)
        self.check_merkle(gbtl0, txids)

        # Test RPC errors

        # bad txn hex (decode failure) at index 1
        assert_raises_rpc_error(-22,
                                "additional_txs transaction 1 decode failure",
                                self.nodes[0].getblocktemplatelight, {},
                                [extratxs[1], extratxs[0][:-15]])

        tmpl = submit_tmpl
        job_id = submit_job_id
        coinbase_tx = blocktools.create_coinbase(height=int(tmpl["height"]) +
                                                 1)
        coinbase_tx.vin[0].nSequence = 2**32 - 2
        coinbase_tx.rehash()

        block = messages.CBlock()
        block.nVersion = tmpl["version"]
        block.hashPrevBlock = int(tmpl["previousblockhash"], 16)
        block.nTime = tmpl["curtime"]
        block.nBits = int(tmpl["bits"], 16)
        block.nNonce = 0
        block.vtx = [coinbase_tx]
        block.hashMerkleRoot = merkle_root_from_cb_and_branch(
            hash256(coinbase_tx.serialize()), hex2bytes(tmpl['merkle']))
        block.solve()

        # Be evil and attempt to submit a block containing 2 txs. Note that this code assumes the nTx check
        # on the C++ side happens before the merkle root check (which is the case currently).
        saved_vtx = block.vtx
        block.vtx = [coinbase_tx] * 2
        assert_raises_rpc_error(
            -22, "Block must contain a single coinbase tx (light version)",
            self.nodes[0].submitblocklight,
            block.serialize().hex(), job_id)
        # swap it back to the correct value
        block.vtx = saved_vtx
        del saved_vtx

        # Test decode failure on bad block (this particular test is also done in the regular submitblock RPC tests
        # but we do it here too to be thorough)
        assert_raises_rpc_error(-22, "Block decode failed",
                                self.nodes[0].submitblocklight,
                                block.serialize()[:-15].hex(), job_id)

        # Check bad job_id (not uint160 hex)
        bad_job_id = "abcdefxx123"
        assert_raises_rpc_error(
            -1,
            "job_id must be a 40 character hexadecimal string (not '{bad}')".
            format(bad=bad_job_id), self.nodes[0].submitblocklight,
            block.serialize().hex(), bad_job_id)

        # Check unknown job_id error
        assert_raises_rpc_error(-8, "job_id data not available",
                                self.nodes[0].submitblocklight,
                                block.serialize().hex(), "ab" * 20)

        def expire_in_memory_cache(num):
            """Keep creating new job_ids so as to evict old jobs from the in-memory cache (default cache size: 10)."""
            txs = extratxs * 3
            txi = extratxids * 3
            for i in range(num):
                tmpl = self.nodes[0].getblocktemplatelight({}, txs)
                self.check_job_id(tmpl)
                self.check_merkle(tmpl, txids + txi)
                txs += extratxs
                txi += extratxids

        expire_in_memory_cache(self._cache_size + 1)
        # at this point our submit_job_id's data will come from a file in the gbt/ dir, rather than the in-memory cache

        # And finally, actually submit the block using submitblocklight
        self.log.info("submitting for job_id: {} merkles: {}   HEX: {}".format(
            job_id, tmpl['merkle'],
            block.serialize().hex()))
        res = self.nodes[0].submitblocklight(block.serialize().hex(), job_id)
        self.log.info("submit result: {}".format(repr(res)))
        assert_equal(res, None)

        self.sync_all()

        # Test that block was in fact added to the blockchain and that both nodes see it
        blockHashHex = self.nodes[0].getbestblockhash()
        self.log.info("Accepted block hash: {}".format(blockHashHex))
        block.rehash()
        assert_equal(blockHashHex, block.hash)
        assert_equal(blockHashHex, self.nodes[1].getbestblockhash())

        # Final check -- verify the proper RPC error when there is a problem writing out the job data file.
        # However, we can only do this simulated check on non-Windows, POSIX OSes, when we are not root.
        # Note that most CIs run as root, so this check will be skipped in CI.
        if not platform.system().lower().startswith(
                'win') and os.name == 'posix' and os.getuid() != 0:
            orig_mode = None
            try:
                self.log.info("'simulated save failure' test will execute")
                orig_mode = os.stat(self._custom_gbt_dir).st_mode
                new_mode = orig_mode & ~(stat.S_IWUSR | stat.S_IWGRP
                                         | stat.S_IWOTH)
                # Set chmod of data directory to read-only to simulate an error writing to the job data file.
                # This should cause the anticipated error on the C++ side.
                os.chmod(self._custom_gbt_dir, new_mode)
                assert_raises_rpc_error(
                    -32603,  # RPC_INTERNAL_ERROR
                    "failed to save job tx data to disk",
                    self.nodes[1].getblocktemplatelight,
                    {},
                    extratxs)
            finally:
                if orig_mode is not None:
                    # undo the damage to the directory's mode from above
                    os.chmod(self._custom_gbt_dir, orig_mode)
        else:
            self.log.info(
                "'simulated save failure' test skipped because either we are uid 0 or we are on Windows"
            )
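
Example #3 derives the block's merkle root from the coinbase hash and the template's 'merkle' branch via merkle_root_from_cb_and_branch, another helper the listing does not show. A minimal sketch, assuming stratum-style folding (the coinbase is the leftmost leaf and each branch step is appended on the right) and the test framework's little-endian integer convention for hashes:

def merkle_root_from_cb_and_branch(cb_hash, branch):
    # Hypothetical reconstruction of the helper used above: double-SHA256 the
    # 64-byte concatenation of the running hash with each merkle-branch step.
    h = cb_hash
    for step in branch:
        h = hash256(h + step)
    # messages.CBlock stores hashMerkleRoot as a little-endian integer
    return int.from_bytes(h, 'little')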