Example #1
    def run_test(self):
        """Test getblocktemplate long polling.

        The longpollid must stay stable while the chain tip is unchanged, and
        a pending long poll must return when a new block is found (by any
        node) or when a new transaction enters the mempool.
        """
        self.log.info(
            "Warning: this test will take about 70 seconds in the best case. Be patient."
        )
        self.nodes[0].generate(10)
        template = self.nodes[0].getblocktemplate()
        longpollid = template['longpollid']
        # longpollid should not change between successive invocations if nothing else happens
        template_2 = self.nodes[0].getblocktemplate()
        assert (template_2['longpollid'] == longpollid)

        # Test 1: test that the long poll waits if we do nothing
        thr = LongpollThread(self.nodes[0])
        thr.start()
        # check that thread still lives
        thr.join(5)  # wait 5 seconds or until thread exits
        assert (thr.is_alive())

        # Test 2: test that longpoll will terminate if another node generates a block
        self.nodes[1].generate(1)  # generate a block on another node
        # check that thread will exit now that the other node's block is known
        thr.join(5)  # wait 5 seconds or until thread exits
        assert (not thr.is_alive())

        # Test 3: test that longpoll will terminate if we generate a block ourselves
        thr = LongpollThread(self.nodes[0])
        thr.start()
        self.nodes[0].generate(1)  # generate a block on the node being polled
        thr.join(5)  # wait 5 seconds or until thread exits
        assert (not thr.is_alive())

        # Test 4: test that introducing a new transaction into the mempool will terminate the longpoll
        thr = LongpollThread(self.nodes[0])
        thr.start()
        # generate a random transaction and submit it (return value not needed here)
        min_relay_fee = self.nodes[0].getnetworkinfo()["relayfee"]
        # min_relay_fee is fee per 1000 bytes, which should be more than enough.
        random_transaction(self.nodes, Decimal("1.1"), min_relay_fee,
                           Decimal("0.001"), 20)
        # after one minute, every 10 seconds the mempool is probed, so in 80 seconds it should have returned
        thr.join(60 + 20)
        assert (not thr.is_alive())
Example #2
    def run_test(self):
        """Check getblocktemplate long-polling semantics.

        While the tip is unchanged the longpollid stays the same; a pending
        long poll returns once any node mines a block or a transaction enters
        the mempool.
        """
        self.log.info(
            "Warning: this test will take about 70 seconds in the best case. Be patient.")
        self.nodes[0].generate(10)
        first_template = self.nodes[0].getblocktemplate()
        longpollid = first_template['longpollid']
        # Repeating the call with an unchanged tip must report the same id.
        second_template = self.nodes[0].getblocktemplate()
        assert second_template['longpollid'] == longpollid

        # Test 1: with no activity at all, the long poll keeps waiting.
        poller = LongpollThread(self.nodes[0])
        poller.start()
        poller.join(5)  # give it 5 seconds; it should still be blocked
        assert poller.is_alive()

        # Test 2: a block mined on the other node terminates the long poll.
        self.nodes[1].generate(1)
        poller.join(5)  # wait up to 5 seconds for the thread to exit
        assert not poller.is_alive()

        # Test 3: a block mined on the polled node itself terminates it too.
        poller = LongpollThread(self.nodes[0])
        poller.start()
        self.nodes[0].generate(1)
        poller.join(5)  # wait up to 5 seconds for the thread to exit
        assert not poller.is_alive()

        # Test 4: a new transaction entering the mempool terminates the
        # long poll.
        poller = LongpollThread(self.nodes[0])
        poller.start()
        # Create and broadcast a random transaction.
        (txid, txhex, fee) = random_transaction(self.nodes,
                                                Decimal("1.1"), Decimal("0.0"), Decimal("0.001"), 20)
        # After the first minute the mempool is probed every 10 seconds, so
        # within 80 seconds the poll should have returned.
        poller.join(60 + 20)
        assert not poller.is_alive()
    def run_test(self):
        """Exercise getblocktemplate long polling: a stable id while idle,
        and prompt wake-up on new blocks or new mempool transactions."""
        self.log.info("Warning: this test will take about 70 seconds in the best case. Be patient.")
        self.nodes[0].generate(10)
        longpollid = self.nodes[0].getblocktemplate()['longpollid']
        # A second call with an unchanged chain tip must yield the same id.
        assert self.nodes[0].getblocktemplate()['longpollid'] == longpollid

        # Test 1: the long poll blocks while nothing happens.
        watcher = LongpollThread(self.nodes[0])
        watcher.start()
        watcher.join(5)  # up to 5 seconds
        assert watcher.is_alive()  # still waiting

        # Test 2: a block found by the other node ends the poll.
        self.nodes[1].generate(1)
        watcher.join(5)  # up to 5 seconds
        assert not watcher.is_alive()

        # Test 3: a block found by the polled node itself ends the poll.
        watcher = LongpollThread(self.nodes[0])
        watcher.start()
        self.nodes[0].generate(1)
        watcher.join(5)  # up to 5 seconds
        assert not watcher.is_alive()

        # Test 4: a transaction entering the mempool ends the poll.
        watcher = LongpollThread(self.nodes[0])
        watcher.start()
        # Build and submit a random transaction.
        min_relay_fee = self.nodes[0].getnetworkinfo()["relayfee"]
        # min_relay_fee is a fee per 1000 bytes, which is more than enough.
        (txid, txhex, fee) = random_transaction(self.nodes, Decimal("1.1"),
                                                min_relay_fee, Decimal("0.001"), 20)
        # After one minute the mempool is probed every 10 seconds, so the
        # poll should return within 80 seconds.
        watcher.join(60 + 20)
        assert not watcher.is_alive()
Example #4
    def __run_test(self,
                   *,
                   nblocks_to_gen=150,
                   ntx_to_gen=19,
                   test_additional_txs=True):
        """Drive the getblocktemplatelight / submitblocklight RPC checks.

        Keyword-only parameters:
            nblocks_to_gen: blocks to mine up-front so the wallet has
                spendable coins (only 1 block is mined when the wallet is
                not compiled in, just to leave IBD).
            ntx_to_gen: number of random wallet transactions to create;
                must be positive.
            test_additional_txs: when True (and the wallet is compiled in),
                also exercise the 'additional_txs' RPC argument with a
                freshly signed transaction.
        """
        assert ntx_to_gen > 0
        # we will need this value for random_transaction below, and for self.gen_valid_tx
        self.min_relay_fee = self.nodes[0].getnetworkinfo()["relayfee"]

        if nblocks_to_gen > 0:
            if self.is_wallet_compiled():
                # generate some blocks to wallet to have spendable coins
                self.nodes[0].generate(nblocks_to_gen)
            else:
                # generate just 1 block to leave IBD state (no wallet so no spending in this mode)
                self.nodes[0].generatetoaddress(
                    1, self.nodes[0].get_deterministic_priv_key().address)

        self.sync_all()

        gbtl0 = self.nodes[0].getblocktemplatelight()
        gbtl1 = self.nodes[1].getblocktemplatelight()

        # Both nodes should produce identical templates once synced.
        assert_blocktemplate_equal(gbtl0, gbtl1)

        def check_gbt_store_dir(gbtdir, job_id):
            # Assert that the job data file for job_id was written to gbtdir.
            expected_data_file = os.path.join(gbtdir, job_id)
            assert os.path.exists(gbtdir), "The -gbtstoredir must exist"
            assert os.path.exists(
                expected_data_file
            ), "The -gbtstoredir must contain the expected job_id file"

        # check that node[1] is using the custom -gbtstoredir argument we gave it.
        check_gbt_store_dir(self._custom_gbt_dir, gbtl1['job_id'])

        self.check_job_id(gbtl0)
        self.check_merkle(gbtl0, [])  # empty merkle should be ok

        # generate a bunch of transactions
        txids = []
        ntx = ntx_to_gen if self.is_wallet_compiled() else 0
        for i in range(ntx):
            txid, txhex, fee = util.random_transaction((self.nodes[0], ),
                                                       Decimal("0.000123"),
                                                       self.min_relay_fee,
                                                       Decimal("0.000001"), 0)
            txids.append(txid)

        # Since we have two nodes, sync the mempools
        self.sync_all()

        # Wait for getblocktemplate to see the txids (it uses a 5s caching strategy before it calculates a new template)
        # 'setmocktime' here worked too but we prefer to let the clock advance normally, rather than use a pegged
        # mocktime for this test case.  (Real execution time for this whole test case is about the same whether using
        # mocktime or this polling strategy, so better to keep time advancing normally).
        self.wait_for_txs(txids, 0)
        self.wait_for_txs(txids, 1)

        # Check that, once the nodes are synced, they give the same template
        gbtl0 = self.nodes[0].getblocktemplatelight()
        gbtl1 = self.nodes[1].getblocktemplatelight()
        assert_blocktemplate_equal(gbtl0, gbtl1)

        # check job_id is ok
        self.check_job_id(gbtl0)
        # check merkle is ok
        self.check_merkle(gbtl0, txids)

        if self.is_wallet_compiled() and test_additional_txs:
            # add the signed tx to a job.. we will submit this later (only iff wallet enabled)
            signedtx = self.gen_valid_tx()
            signedtxid = bytes2hex(hash256(bytes.fromhex(signedtx)))
            self.log.info("Signed txid: {}  hex: {}".format(
                signedtxid, signedtx))
            gbtl0 = self.nodes[0].getblocktemplatelight({}, [signedtx])
            submit_job_id = gbtl0['job_id']
            submit_tmpl = gbtl0
            self.check_job_id(gbtl0)
            self.check_merkle(gbtl0, txids + [signedtxid])
        else:
            # No wallet (or caller wants to not test additional_tx).
            # Just use the last job with no additional_txs as the submit job
            submit_job_id, submit_tmpl = gbtl0['job_id'], gbtl0

        # These tx's are invalid on this chain, but they do at least deserialize correctly, so we can use them
        # to make a bunch of jobs
        extratxs = [
            "0100000002ae54229545be8d2738e245e7ea41d089fa3def0a48e9410b49f39ec43826971d010000006a4730440220204169229eb1"
            "7dc49ad83675d693e4012453db9a8d1af6f118278152c709f6be022077081ab76df0356e53c1ba26145a3fb98ca58553a98b1c130a"
            "2f6cff4d39767f412103cfbc58232f0761a828ced4ee93e87ce27f26d005dd9c87150aad5e5f07073dcaffffffff4eca0e441d0a27"
            "f874f41739382cb80fdf3aac0f7b8316e197dd42e7155590c1010000006a47304402203832a75ccfc2f12474c1d3d2fc22cd72cc92"
            "4c1b73995a27a0d07b9c5a745f3a022035d98e1017a4cb02ff1509d17c752047dca2b270b927793f2eb9e30af1ac02d6412103cfbc"
            "58232f0761a828ced4ee93e87ce27f26d005dd9c87150aad5e5f07073dcaffffffff0260ea00000000000017a9149eefc3ae114359"
            "8a830d66cbc32aa583fa3d987687fb030100000000001976a914bddb57be877bd32264fc40670b87b6fb271813f688ac00000000",
            "0100000001993b9740d3e289876cbe6920008a35c4a08b7dc4bd48ff61b198f163af3f354900000000644102a8588b2e1a808ade29"
            "4aa76a1e63137099fa087841603a361171f0c1473396f482d8d1a61e2d3ff94280b1125114868647bff822d2a74461c6bbe6ffc06f"
            "9d412102abaad90841057ddb1ed929608b536535b0cd8a18ba0a90dba66ba7b1c1f7b4eafeffffff0176942200000000001976a91"
            "40a373caf0ab3c2b46cd05625b8d545c295b93d7a88acf3fa1400",
        ]
        extratxids = bytes2hex(
            [hash256(x) for x in hex2bytes(extratxs, rev=False)])

        # test "additional_txs"
        gbtl0 = self.nodes[0].getblocktemplatelight({}, extratxs)
        self.check_job_id(gbtl0)
        self.check_merkle(gbtl0, txids + extratxids)

        # test that the "additional_txs" didn't stick around in the cached pblocktemplate in getblocktemplatecommon
        gbtl0 = self.nodes[0].getblocktemplatelight({}, extratxs)
        self.check_merkle(gbtl0, txids + extratxids)
        gbt0 = self.nodes[0].getblocktemplate()
        assert_equal(sorted(txids), [x['txid'] for x in gbt0['transactions']])
        # try extratxs twice; they should both be present (known behavior)
        gbtl0 = self.nodes[0].getblocktemplatelight({}, extratxs + extratxs)
        self.check_merkle(gbtl0, txids + extratxids + extratxids)

        # try regular getblocktemplatelight again, without extratxs, test that extratxs didn't stick around
        gbtl0 = self.nodes[0].getblocktemplatelight()
        gbtl1 = self.nodes[1].getblocktemplatelight()
        assert_blocktemplate_equal(gbtl0, gbtl1)
        self.check_merkle(gbtl0, txids)

        # Test RPC errors

        # bad txn hex (decode failure) at index 1
        assert_raises_rpc_error(-22,
                                "additional_txs transaction 1 decode failure",
                                self.nodes[0].getblocktemplatelight, {},
                                [extratxs[1], extratxs[0][:-15]])

        tmpl = submit_tmpl
        job_id = submit_job_id
        # Build a block for the saved job: coinbase only, solved locally.
        coinbase_tx = blocktools.create_coinbase(height=int(tmpl["height"]) +
                                                 1)
        coinbase_tx.vin[0].nSequence = 2**32 - 2
        coinbase_tx.rehash()

        block = messages.CBlock()
        block.nVersion = tmpl["version"]
        block.hashPrevBlock = int(tmpl["previousblockhash"], 16)
        block.nTime = tmpl["curtime"]
        block.nBits = int(tmpl["bits"], 16)
        block.nNonce = 0
        block.vtx = [coinbase_tx]
        block.hashMerkleRoot = merkle_root_from_cb_and_branch(
            hash256(coinbase_tx.serialize()), hex2bytes(tmpl['merkle']))
        block.solve()

        # Be evil and attempt to send 2 tx's. Note that this code assumes the nTx check on the C++ side happens
        # before the merkle root check (which is the case currently).
        saved_vtx = block.vtx
        block.vtx = [coinbase_tx] * 2
        assert_raises_rpc_error(
            -22, "Block must contain a single coinbase tx (light version)",
            self.nodes[0].submitblocklight,
            block.serialize().hex(), job_id)
        # swap it back to the correct value
        block.vtx = saved_vtx
        del saved_vtx

        # Test decode failure on bad block (this particular test is also done in the regular submitblock RPC tests
        # but we do it here too to be thorough)
        assert_raises_rpc_error(-22, "Block decode failed",
                                self.nodes[0].submitblocklight,
                                block.serialize()[:-15].hex(), job_id)

        # Check bad job_id (not uint160 hex)
        bad_job_id = "abcdefxx123"
        assert_raises_rpc_error(
            -1,
            "job_id must be a 40 character hexadecimal string (not '{bad}')".
            format(bad=bad_job_id), self.nodes[0].submitblocklight,
            block.serialize().hex(), bad_job_id)

        # Check unknown job_id error
        assert_raises_rpc_error(-8, "job_id data not available",
                                self.nodes[0].submitblocklight,
                                block.serialize().hex(), "ab" * 20)

        def expire_in_memory_cache(num):
            """ Keeps creating new job_Ids so as to expire the in-memory cache for old jobs (default cache size: 10) """
            txs = extratxs * 3
            txi = extratxids * 3
            for i in range(num):
                tmpl = self.nodes[0].getblocktemplatelight({}, txs)
                self.check_job_id(tmpl)
                self.check_merkle(tmpl, txids + txi)
                txs += extratxs
                txi += extratxids

        expire_in_memory_cache(self._cache_size + 1)
        # at this point our submit_job_id's data will come from a file in the gbt/ dir, rather than the in-memory cache

        # And finally, actually submit the block using submitblocklight
        self.log.info("submitting for job_id: {} merkles: {}   HEX: {}".format(
            job_id, tmpl['merkle'],
            block.serialize().hex()))
        res = self.nodes[0].submitblocklight(block.serialize().hex(), job_id)
        self.log.info("submit result: {}".format(repr(res)))
        assert_equal(res, None)

        self.sync_all()

        # Test that block was in fact added to the blockchain and that both nodes see it
        blockHashHex = self.nodes[0].getbestblockhash()
        self.log.info("Accepted block hash: {}".format(blockHashHex))
        block.rehash()
        assert_equal(blockHashHex, block.hash)
        assert_equal(blockHashHex, self.nodes[1].getbestblockhash())

        # Final check -- check for proper RPC error when there is a problem writing out the job data file.
        # However, we can only do this simulated check on non-Windows, posix OS's, when we are not root.
        # Note that in most CI's we run as root, so this check will be skipped in CI.
        if not platform.system().lower().startswith(
                'win') and os.name == 'posix' and os.getuid() != 0:
            orig_mode = None
            try:
                self.log.info("'simulated save failure' test will execute")
                orig_mode = os.stat(self._custom_gbt_dir).st_mode
                new_mode = orig_mode & ~(stat.S_IWUSR | stat.S_IWGRP
                                         | stat.S_IWOTH)
                # Set chmod of data directory to read-only to simulate an error writing to the job data file.
                # This should cause the anticipated error on the C++ side.
                os.chmod(self._custom_gbt_dir, new_mode)
                assert_raises_rpc_error(
                    -32603,  # RPC_INTERNAL_ERROR
                    "failed to save job tx data to disk",
                    self.nodes[1].getblocktemplatelight,
                    {},
                    extratxs)
            finally:
                if orig_mode is not None:
                    # undo the damage to the directory's mode from above
                    os.chmod(self._custom_gbt_dir, orig_mode)
        else:
            self.log.info(
                "'simulated save failure' test skipped because either we are uid 0 or we are on Windows"
            )
Example #5
        # check that thread still lives
        thr.join(5)  # wait 5 seconds or until thread exits
        assert thr.is_alive()

        # Test 2: test that longpoll will terminate if another node generates a block
        self.nodes[1].generate(1)  # generate a block on another node
        # check that thread will exit now that the other node's block is known
        thr.join(5)  # wait 5 seconds or until thread exits
        assert not thr.is_alive()

        # Test 3: test that longpoll will terminate if we generate a block ourselves
        thr = LongpollThread(self.nodes[0])
        thr.start()
        self.nodes[0].generate(1)  # generate a block on the node being polled
        thr.join(5)  # wait 5 seconds or until thread exits
        assert not thr.is_alive()

        # Test 4: test that introducing a new transaction into the mempool will terminate the longpoll
        thr = LongpollThread(self.nodes[0])
        thr.start()
        # generate a random transaction and submit it
        min_relay_fee = self.nodes[0].getnetworkinfo()["relayfee"]
        # min_relay_fee is fee per 1000 bytes, which should be more than enough.
        (txid, txhex, fee) = random_transaction(self.nodes, Decimal("1.1"), min_relay_fee, Decimal("0.001"), 20)
        # after one minute, every 10 seconds the mempool is probed, so in 80 seconds it should have returned
        thr.join(60 + 20)
        assert not thr.is_alive()

# Script entry point: run the test framework when executed directly.
if __name__ == '__main__':
    GetBlockTemplateLPTest().main()