async def test_subscribe_scripthash(self, n, cli):
        logging.info("Testing scripthash subscription")
        addr = n.getnewaddress()
        scripthash = address_to_scripthash(addr)
        statushash, queue = await cli.subscribe(
            'blockchain.scripthash.subscribe', scripthash)

        logging.info("Unused address should not have a statushash")
        assert_equal(None, statushash)

        logging.info("Check notification on receiving coins")
        n.sendtoaddress(addr, 10)
        sh, new_statushash1 = await asyncio.wait_for(queue.get(), timeout=10)
        assert_equal(scripthash, sh)
        assert (new_statushash1 != None and len(new_statushash1) == 64)

        logging.info("Check notification on block confirmation")
        assert (len(n.getrawmempool()) == 1)
        n.generate(1)
        assert (len(n.getrawmempool()) == 0)
        sh, new_statushash2 = await asyncio.wait_for(queue.get(), timeout=10)
        assert_equal(scripthash, sh)
        assert (new_statushash2 != new_statushash1)
        assert (new_statushash2 != None)

        logging.info(
            "Check that we get notification when spending funds from address")
        n.sendtoaddress(n.getnewaddress(), n.getbalance(), "", "", True)
        sh, new_statushash3 = await asyncio.wait_for(queue.get(), timeout=10)
        assert_equal(scripthash, sh)
        assert (new_statushash3 != new_statushash2)
        assert (new_statushash3 != None)
    def run_test(self):
        """Check that the index follows the node through mining,
        mempool activity, and block invalidation."""
        node = self.nodes[0]

        def wait_synced(expected_mempool):
            # waitFor throws on timeout, failing the test
            waitFor(10, lambda: compare(node, "index_height",
                                        node.getblockcount()))
            waitFor(10, lambda: compare(node, "mempool_count",
                                        expected_mempool, True))

        node.generate(200)
        wait_synced(0)

        node.sendtoaddress(node.getnewaddress(), 1)
        assert_equal(1, len(node.getrawmempool()))
        waitFor(10, lambda: compare(node, "mempool_count", 1, True))

        mined = node.generate(50)
        wait_synced(0)

        logging.info("invalidating %d blocks", len(mined))
        node.invalidateblock(mined[0])
        # electrum server should trim its chain as well and see our
        # transaction go back into mempool
        wait_synced(1)

        node.generate(50)
        wait_synced(0)
    def run_test(self):
        """Check block indexing, tx counting, and mempool propagation."""
        node = self.nodes[0]

        logging.info("Checking that blocks are indexed")
        node.generate(200)

        # waitFor throws on timeout, failing the test
        waitFor(10, lambda: compare(node, "index_height",
                                    node.getblockcount()))
        # +1 is the genesis tx
        waitFor(10, lambda: compare(node, "index_txns",
                                    node.getblockcount() + 1, True))
        waitFor(10, lambda: compare(node, "mempool_count", 0, True))

        logging.info("Check that mempool is communicated")
        node.sendtoaddress(node.getnewaddress(), 1)
        assert_equal(1, len(node.getrawmempool()))
        waitFor(10, lambda: compare(node, "mempool_count", 1, True))

        node.generate(1)
        assert_equal(0, len(node.getrawmempool()))
        waitFor(10, lambda: compare(node, "index_height",
                                    node.getblockcount()))
        waitFor(10, lambda: compare(node, "mempool_count", 0, True))
        # +2: the genesis tx plus the wallet tx we just confirmed
        waitFor(10, lambda: compare(node, "index_txns",
                                    node.getblockcount() + 2, True))
    def run_test(self):
        """Run the mempool-sync check, then the address-balance check
        over a fresh electrum connection."""
        node = self.nodes[0]

        logging.info("Checking that blocks are indexed")
        node.generate(200)

        self.test_mempoolsync(node)
        client = create_electrum_connection()
        self.test_address_balance(node, client)
# Example #5
    def run_test(self):
        """Run the synchronous checks, then the async electrum tests."""
        n = self.nodes[0]

        logging.info("Checking that blocks are indexed")
        n.generate(200)

        self.test_mempoolsync(n)

        async def async_tests():
            # One connection is shared by all the async sub-tests.
            electrum_client = ElectrumConnection()
            await electrum_client.connect()
            await self.test_unknown_method(electrum_client)
            await self.test_invalid_args(electrum_client)
            await self.test_address_balance(n, electrum_client)

        # asyncio.run() replaces the deprecated
        # get_event_loop()/run_until_complete() pattern and guarantees
        # the loop is closed afterwards.
        asyncio.run(async_tests())
    def _test_gettxoutsetinfo(self):
        """Sanity-check gettxoutsetinfo() at height 200, at genesis after
        invalidating block 1, and again after reconsidering it."""
        node = self.nodes[0]
        info = node.gettxoutsetinfo()

        assert_equal(info['total_amount'], Decimal('8725.00000000'))
        assert_equal(info['transactions'], 200)
        assert_equal(info['height'], 200)
        assert_equal(info['txouts'], 200)
        disk_size = info["disk_size"]
        assert 6400 < disk_size < 64000
        assert_equal(info['bestblock'], node.getblockhash(200))
        assert_equal(len(info['bestblock']), 64)
        assert_equal(len(info['hash_serialized_2']), 64)

        logging.info(
            "Test that gettxoutsetinfo() works for blockchain with just the genesis block"
        )
        b1hash = node.getblockhash(1)
        node.invalidateblock(b1hash)

        at_genesis = node.gettxoutsetinfo()
        assert_equal(at_genesis['transactions'], 0)
        assert_equal(at_genesis['total_amount'], Decimal('0'))
        assert_equal(at_genesis['height'], 0)
        assert_equal(at_genesis['txouts'], 0)
        assert_equal(at_genesis['bestblock'], node.getblockhash(0))
        assert_equal(len(at_genesis['hash_serialized_2']), 64)

        logging.info(
            "Test that gettxoutsetinfo() returns the same result after invalidate/reconsider block"
        )
        node.reconsiderblock(b1hash)

        restored = node.gettxoutsetinfo()
        # Every field compared at height 200 must be back to its old value.
        for key in ('total_amount', 'transactions', 'height', 'txouts',
                    'bestblock', 'hash_serialized_2'):
            assert_equal(info[key], restored[key])
    def _test_getblockchaininfo(self):
        """Verify that getblockchaininfo() returns exactly the expected
        set of keys."""
        logging.info("Test getblockchaininfo")

        expected_keys = [
            'bestblockhash',
            'bip135_forks',
            'bip9_softforks',
            'blocks',
            'chain',
            'chainwork',
            'difficulty',
            'headers',
            'initialblockdownload',
            'mediantime',
            'pruned',
            'softforks',
            'verificationprogress',
        ]

        info = self.nodes[0].getblockchaininfo()
        assert_equal(sorted(info.keys()), expected_keys)
    async def test_subscribe(self, n, subscribe, unsubscribe, addr_converter):
        """Check subscription notifications for a generic subscription type.

        `subscribe` is the electrum method name and `addr_converter` maps a
        node address to the subscription name. (`unsubscribe` is currently
        unused here; kept for interface parity with callers.)
        """
        cli = ElectrumConnection()
        await cli.connect()

        logging.info("Testing scripthash subscription")
        addr = n.getnewaddress()
        statushash, queue = await cli.subscribe(subscribe,
                                                addr_converter(addr))

        logging.info("Unused address should not have a statushash")
        assert_equal(None, statushash)

        async def next_statushash():
            # Wait for the next notification, check it is for our
            # subscription, and return the new status hash.
            subscription_name, new_hash = await asyncio.wait_for(
                queue.get(), timeout=10)
            assert_equal(addr_converter(addr), subscription_name)
            assert new_hash is not None
            return new_hash

        logging.info("Check notification on receiving coins")
        n.sendtoaddress(addr, 10)
        new_statushash1 = await next_statushash()
        # A status hash is a 64-character hex string.
        assert len(new_statushash1) == 64

        logging.info("Check notification on block confirmation")
        assert len(n.getrawmempool()) == 1
        n.generate(1)
        assert len(n.getrawmempool()) == 0
        new_statushash2 = await next_statushash()
        assert new_statushash2 != new_statushash1

        logging.info(
            "Check that we get notification when spending funds from address")
        n.sendtoaddress(n.getnewaddress(), n.getbalance(), "", "", True)
        new_statushash3 = await next_statushash()
        assert new_statushash3 != new_statushash2

        # Clear mempool
        n.generate(1)
    async def test_subscribe_headers(self, n):
        """Check blockchain.headers.subscribe: the initial result holds the
        current best header, and newly mined headers are pushed to us."""
        cli = ElectrumConnection()
        await cli.connect()

        logging.info(
            "Calling subscribe should return the current best block header")
        result, queue = await cli.subscribe('blockchain.headers.subscribe')
        # getblockheader(hash, False) returns the raw header as hex.
        assert_equal(n.getblockheader(n.getbestblockhash(), False),
                     result['hex'])

        logging.info(
            "Now generate 10 blocks, check that these are pushed to us.")

        async def test():
            for _ in range(10):
                blockhashes = n.generate(1)
                header_hex = n.getblockheader(blockhashes.pop(), False)
                notified = await asyncio.wait_for(queue.get(), timeout=10)
                assert_equal(header_hex, notified.pop()['hex'])

        start = time.time()
        await test()
        logging.info("Getting 10 block notifications took {} seconds".format(
            time.time() - start))
    async def test_subscribe_limit(self, n):
        """Check that the scripthash subscription limit is enforced, that
        duplicates don't count against it, and that unsubscribing frees a
        slot."""
        cli = ElectrumConnection()
        await cli.connect()
        logging.info("Testing scripthash subscription limit.")

        # Subscribe up to limit
        scripthashes = []
        for _ in range(MAX_SCRIPTHASH_SUBSCRIPTIONS):
            s = address_to_scripthash(n.getnewaddress())
            await cli.subscribe('blockchain.scripthash.subscribe', s)
            scripthashes.append(s)

        # Next subscription should fail
        s = address_to_scripthash(n.getnewaddress())

        await assert_raises_async(ElectrumErrorResponse, cli.call,
                                  "blockchain.scripthash.subscribe", s)

        try:
            await cli.call("blockchain.scripthash.subscribe", s)
        except ElectrumErrorResponse as e:
            error_code = "-32600"
            assert error_code in str(e)
            assert "subscriptions limit reached" in str(e)
        else:
            # Previously this passed silently when no error was raised;
            # the call must fail while we are at the subscription limit.
            raise AssertionError(
                "blockchain.scripthash.subscribe unexpectedly succeeded")

        # Subscribing to an existing subscription should not affect the limit.
        await cli.subscribe('blockchain.scripthash.subscribe', scripthashes[0])

        # Unsubscribing should allow for a new subscription
        ok = await cli.call('blockchain.scripthash.unsubscribe',
                            scripthashes[0])
        assert ok
        await cli.subscribe('blockchain.scripthash.subscribe', s)

        # ... and also enforce the limit again
        await assert_raises_async(ElectrumErrorResponse, cli.call,
                                  'blockchain.scripthash.subscribe',
                                  address_to_scripthash(n.getnewaddress()))

        cli.disconnect()
# Example #11
    def _test_getblockchaininfo(self):
        """Verify getblockchaininfo() keys on a pruned node (node 2) and
        then on an unpruned node (node 0)."""
        logging.info("Test getblockchaininfo")

        base_keys = [
            'bestblockhash',
            'bip135_forks',
            'bip9_softforks',
            'blocks',
            'chain',
            'chainwork',
            'difficulty',
            'headers',
            'initialblockdownload',
            'mediantime',
            'pruned',
            'softforks',
            'size_on_disk',
            'verificationprogress',
        ]

        info = self.nodes[2].getblockchaininfo()
        # With pruning enabled the result carries two extra keys.
        pruning_keys = base_keys + ['pruneheight', 'prune_target_size']
        assert_equal(sorted(info.keys()), sorted(pruning_keys))

        # pruneheight should be greater or equal to 0
        assert info['pruneheight'] >= 0

        # size_on_disk should be > 0
        assert info['size_on_disk'] > 0

        # check other pruning fields given that prune=1
        assert info['pruned']
        assert_equal(info['prune_target_size'], 1625292800)

        stop_node(self.nodes[2], 2)
        del self.nodes[-1]

        # The unpruned node reports only the default key set.
        info = self.nodes[0].getblockchaininfo()
        assert_equal(sorted(info.keys()), sorted(base_keys))
 def setup_chain(self):
     logging.info("Initializing test directory " + self.options.tmpdir)
     initialize_chain(self.options.tmpdir)
    def _test_rollbackchain_and_reconsidermostworkchain(self):
        """Exercise the rollbackchain() and reconsidermostworkchain() RPCs.

        Covers: a simple 5-block rollback; the 100-block rollback limit,
        with and without the override flag; rolling back past a fork point
        and reconnecting blocks on either fork; and finally
        reconsidermostworkchain() picking the most-work of three forks.
        """
        # Save the current block count and then mine 10 blocks
        blockcount = self.nodes[0].getblockcount()

        self.nodes[0].generate(10)
        self.sync_all()
        assert_equal(blockcount + 10, self.nodes[0].getblockcount())
        assert_equal(blockcount + 10, self.nodes[1].getblockcount())

        # Now Rollback the chain on Node 0 by 5 blocks
        logging.info("Test that rollbackchain() works")
        blockcount = self.nodes[0].getblockcount()
        self.nodes[0].rollbackchain(self.nodes[0].getblockcount() - 5)
        assert_equal(blockcount - 5, self.nodes[0].getblockcount())
        assert_equal(blockcount, self.nodes[1].getblockcount())

        # Invalidate the chaintip on Node 0 and then mine more blocks on Node 1
        # - Node1 should advance in chain length but Node 0 should not follow.
        self.nodes[1].generate(5)
        time.sleep(2)  # give node0 a chance to sync (it shouldn't)

        assert_equal(self.nodes[0].getblockcount() + 10,
                     self.nodes[1].getblockcount())
        assert_not_equal(self.nodes[0].getbestblockhash(),
                         self.nodes[1].getbestblockhash())

        # Now mine blocks on node0 which will extend the chain beyond node1.
        self.nodes[0].generate(12)

        # Reconnect nodes since they will have been disconnected when node0's chain was previously invalidated.
        # -  Node1 should re-org and follow node0's chain.
        connect_nodes_bi(self.nodes, 0, 1)
        self.sync_all()
        assert_equal(self.nodes[0].getblockcount(),
                     self.nodes[1].getblockcount())
        assert_equal(self.nodes[0].getbestblockhash(),
                     self.nodes[1].getbestblockhash())

        # Test that we can only rollback the chain by max 100 blocks
        self.nodes[0].generate(100)
        self.sync_all()

        # Roll back by 101 blocks, this should fail
        # NOTE(review): if no exception is raised this try passes silently;
        # the assert_equal calls below still verify the tip did not move.
        blockcount = self.nodes[0].getblockcount()
        try:
            self.nodes[0].rollbackchain(self.nodes[0].getblockcount() - 101)
        except JSONRPCException as e:
            logging.info(e.error['message'])
            assert (
                "You are attempting to rollback the chain by 101 blocks, however the limit is 100 blocks."
                in e.error['message'])
        assert_equal(blockcount, self.nodes[0].getblockcount())
        assert_equal(blockcount, self.nodes[1].getblockcount())

        # Now rollback by 100 blocks
        bestblockhash = self.nodes[0].getbestblockhash()  #save for later
        blockcount = self.nodes[0].getblockcount()
        self.nodes[0].rollbackchain(self.nodes[0].getblockcount() - 100)
        assert_equal(blockcount - 100, self.nodes[0].getblockcount())
        assert_equal(blockcount, self.nodes[1].getblockcount())

        # Now reconsider the now invalid chaintip on node0 which will reconnect the blocks
        self.nodes[0].reconsiderblock(bestblockhash)
        self.sync_all()

        # Now rollback by 101 blocks by using the override
        bestblockhash = self.nodes[0].getbestblockhash()  #save for later
        blockcount = self.nodes[0].getblockcount()
        self.nodes[0].rollbackchain(self.nodes[0].getblockcount() - 101, True)
        assert_equal(blockcount - 101, self.nodes[0].getblockcount())
        assert_equal(blockcount, self.nodes[1].getblockcount())

        # Now reconsider the now invalid chaintip on node0 which will reconnect the blocks
        self.nodes[0].reconsiderblock(bestblockhash)
        self.sync_all()

        ### Test that we can rollback the chain beyond a forkpoint and then reconnect
        #   the blocks on either chain

        # Mine a few blocks
        self.nodes[0].generate(50)

        # Invalidate the chaintip and then mine another chain
        bestblockhash1 = self.nodes[0].getbestblockhash()  #save for later
        self.nodes[0].invalidateblock(bestblockhash1)
        self.nodes[0].generate(5)

        # Reconsider the previous chain so both chains are either valid or fork-active.
        self.nodes[0].reconsiderblock(bestblockhash1)

        # Invalidate the current longer fork2 and mine 10 blocks on fork1
        # which now makes it the longer fork
        bestblockhashfork2 = self.nodes[0].getbestblockhash()  #save for later
        self.nodes[0].invalidateblock(bestblockhashfork2)
        self.nodes[0].generate(10)

        # Reconsider fork2 so both chains are active.
        # fork1 should be 10 blocks long and fork 2 should be 5 blocks long with fork1 being active
        # and fork2 being fork-valid.
        self.nodes[0].reconsiderblock(bestblockhashfork2)

        # Now we're ready to test the rollback. Rollback beyond the fork point (more than 10 blocks).
        self.nodes[0].rollbackchain(self.nodes[0].getblockcount() - 20)

        # Reconsider the fork1. Blocks should now be fully reconnected on fork1.
        self.nodes[0].reconsiderblock(bestblockhash1)
        assert_equal(self.nodes[0].getbestblockhash(), bestblockhash1)

        # Rollback again beyond the fork point (more than 10 blocks).
        self.nodes[0].rollbackchain(self.nodes[0].getblockcount() - 20)

        # Reconsider the fork2. Blocks should now be fully reconnected on fork2.
        self.nodes[0].reconsiderblock(bestblockhashfork2)
        assert_equal(self.nodes[0].getbestblockhash(), bestblockhashfork2)

        #### Start testing reconsidermostworkchain
        # Create an additional fork 3 which is the longest fork. Then make the shortest
        # fork2 the active chain.  Then do a reconsidermostworkchain which should make
        # fork3 the active chain, and disregarding fork1 which is longer than fork2 but
        # shorter than fork3.
        logging.info("Test that reconsidermostworkchain() works")

        # rollback to before fork 1 and 2, and then mine another longer fork 3
        self.nodes[0].rollbackchain(self.nodes[0].getblockcount() - 120, True)
        fork3blocks = 140
        self.nodes[0].generate(fork3blocks)
        bestblockhashfork3 = self.nodes[0].getbestblockhash()  #save for later
        # now rollback again and make the shortest fork2 the active chain
        self.nodes[0].rollbackchain(
            self.nodes[0].getblockcount() - fork3blocks, True)
        self.nodes[0].reconsiderblock(bestblockhashfork2)
        assert_equal(self.nodes[0].getbestblockhash(), bestblockhashfork2)

        # do a reconsidermostworkchain but without the override flag
        # NOTE(review): silently passes if no exception is raised; the
        # chaintip check below still guards against an unexpected re-org.
        try:
            self.nodes[0].reconsidermostworkchain()
        except JSONRPCException as e:
            logging.info(e.error['message'])
            assert (
                "You are attempting to rollback the chain by 120 blocks, however the limit is 100 blocks."
                in e.error['message'])
        # check that nothing happened and we're still on the same chaintip
        assert_equal(self.nodes[0].getbestblockhash(), bestblockhashfork2)

        # now do a reconsidermostworkchain with the override. We should now be on fork3 best block hash
        self.nodes[0].reconsidermostworkchain(True)
        assert_equal(self.nodes[0].getbestblockhash(), bestblockhashfork3)

        # check that we can run reconsidermostworkchain when we're already on the correct chain
        try:
            self.nodes[0].reconsidermostworkchain()
        except JSONRPCException as e:
            logging.info(e.error['message'])
            assert ("Nothing to do. Already on the correct chain."
                    in e.error['message'])
        # check that nothing happened and we're still on the same chaintip
        assert_equal(self.nodes[0].getbestblockhash(), bestblockhashfork3)