Example #1
  def __init__(self, node):
    threading.Thread.__init__(self)

    template = node.getblocktemplate({'rules': ['segwit']})
    self.longpollid = template['longpollid']

    self.node = get_rpc_proxy(node.url, 1, timeout=600, coveragedir=node.coverage_dir)
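All of these examples revolve around the test framework's get_rpc_proxy helper. The recurring pattern, one fresh proxy per thread with a generous timeout and an optional coverage directory, can be wrapped as in the sketch below. This assumes Bitcoin Core's import path; in that codebase the second positional argument is a node index used only for coverage bookkeeping.

from test_framework.util import get_rpc_proxy  # assumed import path (Bitcoin Core functional tests)

def fresh_proxy(node, timeout=600):
    """Build a new JSON-RPC proxy for `node`.

    Proxies are not safe to share between threads, which is why every
    threaded example on this page constructs its own connection instead
    of reusing the TestNode's.
    """
    return get_rpc_proxy(node.url, 1, timeout=timeout,
                         coveragedir=getattr(node, 'coverage_dir', None))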
Example #2
 def __init__(self, node):
     threading.Thread.__init__(self)
     # query current longpollid
     template = node.getblocktemplate()
     self.longpollid = template['longpollid']
     # create a new connection to the node, we can't use the same
     # connection from two threads
     self.node = get_rpc_proxy(node.url, 1, timeout=600, coveragedir=node.coverage_dir)
Example #3
 def __init__(self, node):
     threading.Thread.__init__(self)
     # query current longpollid
     template = node.getblocktemplate()
     self.longpollid = template['longpollid']
     # create a new connection to the node, we can't use the same
     # connection from two threads
     self.node = get_rpc_proxy(node.url, 1, timeout=600)
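Examples 1 to 3 are variants of the same long-poll helper thread's __init__. A minimal sketch of how such a thread is typically driven, assuming LongpollThread as a placeholder name for the enclosing threading.Thread subclass (not shown in these excerpts):

thr = LongpollThread(node)   # records the current longpollid
thr.start()                  # run() would block on the long-poll getblocktemplate
node.generate(1)             # mining a block releases the long poll
thr.join(60)
assert not thr.is_alive()    # the thread should have returned shortly afterwards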
Example #4
 def run_test(self):
     node = get_rpc_proxy(self.nodes[0].url,
                          1,
                          timeout=600,
                          coveragedir=self.nodes[0].coverage_dir)
     Thread(target=test_long_call, args=(node, )).start()
     # wait 1 second to ensure event loop waits for current connections to close
     self.stop_node(0, wait=1000)
Example #5
 def __init__(self, node):
     threading.Thread.__init__(self)
     # query current longpollid
     template = node.getblocktemplate({'rules': ['segwit']})
     self.longpollid = template['longpollid']
     # create a new connection to the node, we can't use the same
     # connection from two threads
     self.node = get_rpc_proxy(node.url, 1, timeout=600, coveragedir=node.coverage_dir)
Example #6
    def get_tests(self):

        # shorthand for functions
        block = self.chain.next_block
        node = get_rpc_proxy(self.nodes[0].url,
                             1,
                             timeout=6000,
                             coveragedir=self.nodes[0].coverage_dir)

        self.chain.set_genesis_hash(int(node.getbestblockhash(), 16))

        block(0)
        yield self.accepted()

        test, out, _ = prepare_init_chain(self.chain, 200, 200)

        yield test

        txHashes = []
        for i in range(18):
            txLarge = create_transaction(
                out[i].tx, out[i].n, b"", ONE_MEGABYTE * 256,
                CScript([
                    OP_FALSE, OP_RETURN,
                    bytearray([42] * (ONE_MEGABYTE * 256))
                ]))
            self.test.connections[0].send_message(msg_tx(txLarge))
            self.check_mempool(node, [txLarge], timeout=6000)
            txHashes.append([txLarge.hash, txLarge.sha256])

        txOverflow = create_transaction(
            out[18].tx, out[18].n, b"", ONE_MEGABYTE * 305,
            CScript(
                [OP_FALSE, OP_RETURN,
                 bytearray([42] * (ONE_MEGABYTE * 305))]))
        self.test.connections[0].send_message(msg_tx(txOverflow))
        self.check_mempool(node, [txOverflow], timeout=6000)
        txHashes.append([txOverflow.hash, txOverflow.sha256])

        txOverflow = create_transaction(
            out[19].tx, out[19].n, b"", ONE_MEGABYTE,
            CScript([OP_FALSE, OP_RETURN,
                     bytearray([42] * ONE_MEGABYTE)]))
        self.test.connections[0].send_message(msg_tx(txOverflow))
        self.check_mempool(node, [txOverflow], timeout=6000)
        txHashes.append([txOverflow.hash, txOverflow.sha256])

        # Mine block with new transactions.
        self.log.info("BLOCK 2 - mining")
        minedBlock2 = node.generate(1)
        self.log.info("BLOCK 2 - mined")

        for txHash in txHashes:
            tx = FromHex(CTransaction(),
                         self.nodes[0].getrawtransaction(txHash[0]))
            tx.rehash()
            assert_equal(tx.sha256, txHash[1])
Example #7
    def get_tests(self):
        # shorthand for functions
        block = self.chain.next_block
        node = get_rpc_proxy(self.nodes[0].url,
                             1,
                             timeout=6000,
                             coveragedir=self.nodes[0].coverage_dir)

        # Create a new block & setup initial chain with spendable outputs
        self.chain.set_genesis_hash(int(node.getbestblockhash(), 16))
        block(0)
        yield self.accepted()
        test, out, _ = prepare_init_chain(self.chain, self.num_blocks,
                                          self.num_blocks + 1)
        yield test

        # Create 1GB block
        block(1, spend=out[0], block_size=1 * ONE_GIGABYTE)
        yield self.accepted(420)  # larger timeout is needed to prevent timeouts on busy machines and debug builds

        # Create long chain of smaller blocks
        test = TestInstance(sync_every_block=False)
        for i in range(self.num_blocks):
            block(6000 + i, spend=out[i + 1], block_size=64 * ONE_KILOBYTE)
            test.blocks_and_transactions.append([self.chain.tip, True])
        yield test

        # Launch another node with config that should avoid a stall during IBD
        self.log.info("Launching extra nodes")
        self.add_node(
            2,
            extra_args=[
                '-whitelist=127.0.0.1',
                '-excessiveblocksize=%d' % (ONE_GIGABYTE * 6),
                '-blockmaxsize=%d' % (ONE_GIGABYTE * 6),
                '-maxtxsizepolicy=%d' % ONE_GIGABYTE, '-maxscriptsizepolicy=0',
                '-rpcservertimeout=1000',
                '-genesisactivationheight=%d' % self.genesisactivationheight,
                "-txindex", "-maxtipage=0", "-blockdownloadwindow=64",
                "-blockstallingtimeout=6"
            ],
            init_data_dir=True)
        self.start_node(2)

        # Connect the new nodes up so they do IBD
        self.log.info("Starting IBD")
        connect_nodes(self.nodes[0], 2)
        connect_nodes(self.nodes[1], 2)
        self.sync_all(timeout=240)  # larger timeout is needed to prevent timeouts on busy machines and debug builds

        # Check we didn't hit a stall for node2
        assert not check_for_log_msg(self, "stalling block download", "/node2")
Example #8
 def run_test(self):
     node = get_rpc_proxy(self.nodes[0].url, 1, timeout=600, coveragedir=self.nodes[0].coverage_dir)
     # Force connection establishment by executing a dummy command.
     node.getblockcount()
     Thread(target=test_long_call, args=(node,)).start()
     # Wait until the server is executing the above `waitfornewblock`.
     self.wait_until(lambda: len(self.nodes[0].getrpcinfo()['active_commands']) == 2)
     # Wait 1 second after requesting shutdown but not before the `stop` call
     # finishes. This is to ensure event loop waits for current connections
     # to close.
     self.stop_node(0, wait=1000)
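The shutdown examples (4, 8 and 23) spawn a test_long_call helper whose definition is not part of these excerpts. The comment above says the server is executing a waitfornewblock, so a minimal stand-in consistent with that is simply a long-blocking RPC call:

def test_long_call(node):
    # Blocks until a new block arrives, or until the connection is torn
    # down by the shutdown that the main thread triggers.
    node.waitfornewblock()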
Example #9
    def get_tests(self):

        # shorthand for functions
        block = self.chain.next_block
        node = get_rpc_proxy(self.nodes[0].url, 1, timeout=6000, coveragedir=self.nodes[0].coverage_dir)

        self.chain.set_genesis_hash( int(node.getbestblockhash(), 16) )
        # Create a new block
        block(0)

        self.chain.save_spendable_output()
        yield self.accepted()

        # Now we need that block to mature so we can spend the coinbase.
        test = TestInstance(sync_every_block=False)
        for i in range(200):
            block(5000 + i)
            test.blocks_and_transactions.append([self.chain.tip, True])
            self.chain.save_spendable_output()
        yield test

        # Collect spendable outputs now to avoid cluttering the code later on
        out = []
        for i in range(200):
            out.append(self.chain.get_spendable_output())

        txHashes = []
        for i in range(18):
            txLarge = create_transaction(out[i].tx, out[i].n, b"", ONE_MEGABYTE * 256, CScript([OP_FALSE, OP_RETURN, bytearray([42] * (ONE_MEGABYTE * 256))]))
            self.test.connections[0].send_message(msg_tx(txLarge))
            self.check_mempool(node, [txLarge])
            txHashes.append([txLarge.hash, txLarge.sha256])

        txOverflow = create_transaction(out[18].tx, out[18].n, b"", ONE_MEGABYTE * 305, CScript([OP_FALSE, OP_RETURN, bytearray([42] * (ONE_MEGABYTE * 305))]))
        self.test.connections[0].send_message(msg_tx(txOverflow))
        self.check_mempool(node, [txOverflow])
        txHashes.append([txOverflow.hash, txOverflow.sha256])

        txOverflow = create_transaction(out[19].tx, out[19].n, b"", ONE_MEGABYTE, CScript([OP_FALSE, OP_RETURN, bytearray([42] * ONE_MEGABYTE)]))
        self.test.connections[0].send_message(msg_tx(txOverflow))
        self.check_mempool(node, [txOverflow])
        txHashes.append([txOverflow.hash, txOverflow.sha256])

        # Mine block with new transactions.
        self.log.info("BLOCK 2 - mining")
        minedBlock2 = node.generate(1)
        self.log.info("BLOCK 2 - mined")

        for txHash in txHashes:
            tx = FromHex(CTransaction(), self.nodes[0].getrawtransaction(txHash[0]))
            tx.rehash()
            assert_equal(tx.sha256, txHash[1])
Example #10
 def run_allowip_test(self, allow_ips, rpchost, rpcport):
     '''
     Start a node with rpcallow IP, and request getnetworkinfo
     at a non-localhost IP.
     '''
     self.log.info("Allow IP test for %s:%d" % (rpchost, rpcport))
     base_args = ['-disablewallet', '-nolisten'] + ['-rpcallowip='+x for x in allow_ips]
     self.nodes[0].rpchost = None
     self.start_nodes([base_args])
     # connect to node through non-loopback interface
     node = get_rpc_proxy(rpc_url(self.nodes[0].datadir, 0, "%s:%d" % (rpchost, rpcport)), 0, coveragedir=self.options.coveragedir)
     node.getnetworkinfo()
     self.stop_nodes()
Example #12
 def run_allowip_test(self, allow_ips, rpchost, rpcport):
     '''
     Start a node with rpcallow IP, and request getinfo
     at a non-localhost IP.
     '''
     base_args = ['-disablewallet', '-nolisten'] + ['-rpcallowip='+x for x in allow_ips]
     self.nodes = start_nodes(self.num_nodes, self.options.tmpdir, [base_args])
     try:
         # connect to node through non-loopback interface
         node = get_rpc_proxy(rpc_url(0, "%s:%d" % (rpchost, rpcport)), 0)
         node.getinfo()
     finally:
         node = None # make sure connection will be garbage collected and closed
         stop_nodes(self.nodes)
         wait_bitcoinds()
Example #13
def run_allowip_test(tmpdir, allow_ips, rpchost, rpcport):
    '''
    Start a node with rpcallow IP, and request getinfo
    at a non-localhost IP.
    '''
    base_args = ['-disablewallet', '-nolisten'] + ['-rpcallowip='+x for x in allow_ips]
    nodes = start_nodes(1, tmpdir, [base_args])
    try:
        # connect to node through non-loopback interface
        url = "http://*****:*****@%s:%d" % (rpchost, rpcport,)
        node = get_rpc_proxy(url, 1)
        node.getinfo()
    finally:
        node = None # make sure connection will be garbage collected and closed
        stop_nodes(nodes)
        wait_bitcoinds()
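Examples 12 and 13 show the shape of the endpoint that rpc_url builds (the credentials in Example 13 are masked in the source): scheme, RPC user and password, host and port. A hypothetical helper illustrating only that shape; the real rpc_url reads the credentials or auth cookie from the node's datadir:

def make_rpc_url(user, password, rpchost, rpcport):
    # hypothetical; shown only to document the URL layout
    return "http://%s:%s@%s:%d" % (user, password, rpchost, rpcport)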
Example #14
 def run_allowip_test(self, allow_ips, rpchost, rpcport):
     '''
     Start a node with rpcallow IP, and request getnetworkinfo
     at a non-localhost IP.
     '''
     self.log.info("Allow IP test for %s:%d" % (rpchost, rpcport))
     node_args = \
         ['-disablewallet', '-nolisten'] + \
         ['-rpcallowip='+x for x in allow_ips] + \
         ['-rpcbind='+addr for addr in ['127.0.0.1', "%s:%d" % (rpchost, rpcport)]] # Bind to localhost as well so start_nodes doesn't hang
     self.nodes[0].rpchost = None
     self.start_nodes([node_args])
     # connect to node through non-loopback interface
     node = get_rpc_proxy(rpc_url(self.nodes[0].datadir, 0, "%s:%d" % (rpchost, rpcport)), 0, coveragedir=self.options.coveragedir)
     node.getnetworkinfo()
     self.stop_nodes()
Example #16
    def run_test(self):
        # Generate for BTC
        assert_equal(self.nodes[0].getbalance(), 0)
        assert_equal(self.nodes[1].getbalance(), 0)
        self.nodes[0].generate(300)
        assert_equal(self.nodes[1].getbalance(), 0)
        # Make blocks with spam to cause rescan delay
        for i in range(5):
            for j in range(5):
                self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), 0.1)
            self.nodes[0].generate(10)
        addr = self.nodes[0].getnewaddress()
        privkey = self.nodes[0].dumpprivkey(addr)
        self.nodes[0].sendtoaddress(addr, 0.123)
        self.nodes[0].generate(10)  # mature tx
        self.sync_all()

        # Import this address in the background ...
        node1ref = get_rpc_proxy(self.nodes[1].url, 1, timeout=600)
        importthread = threading.Thread(target=node1ref.importprivkey,
                                        args=[privkey])
        importthread.start()
        # ... then abort rescan; try a bunch until abortres becomes true,
        # because we will start checking before above thread starts processing
        for i in range(2000):
            time.sleep(0.001)
            abortres = self.nodes[1].abortrescan()
            if abortres: break
        assert abortres  # if false, we failed to abort
        # import should die soon
        for i in range(10):
            time.sleep(0.1)
            deadres = not importthread.is_alive()  # is_alive(): the isAlive alias was removed in Python 3.9
            if deadres: break

        assert deadres  # if false, importthread did not die soon enough
        assert_equal(self.nodes[1].getbalance(), 0.0)

        # Import a different address and let it run
        self.nodes[1].importprivkey(self.nodes[0].dumpprivkey(
            self.nodes[0].getnewaddress()))
        # Expect original privkey to now also be discovered and added to balance
        assert_equal(self.nodes[1].getbalance(), Decimal("0.123"))
Example #17
    def run_test(self):
        # Generate for BTC
        assert_equal(self.nodes[0].getbalance(), 0)
        assert_equal(self.nodes[1].getbalance(), 0)
        self.nodes[0].generate(300)
        assert_equal(self.nodes[1].getbalance(), 0)
        # Make blocks with spam to cause rescan delay
        for i in range(5):
            for j in range(5):
                self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), 0.1)
            self.nodes[0].generate(10)
        addr = self.nodes[0].getnewaddress()
        privkey = self.nodes[0].dumpprivkey(addr)
        self.nodes[0].sendtoaddress(addr, 0.123)
        self.nodes[0].generate(10) # mature tx
        self.sync_all()

        # Import this address in the background ...
        node1ref = get_rpc_proxy(self.nodes[1].url, 1, timeout=600)
        importthread = threading.Thread(target=node1ref.importprivkey, args=[privkey])
        importthread.start()
        # ... then abort rescan; try a bunch until abortres becomes true,
        # because we will start checking before above thread starts processing
        for i in range(2000):
            time.sleep(0.001)
            abortres = self.nodes[1].abortrescan()
            if abortres: break
        assert abortres # if false, we failed to abort
        # import should die soon
        for i in range(10):
            time.sleep(0.1)
            deadres = not importthread.is_alive()  # is_alive(): the isAlive alias was removed in Python 3.9
            if deadres: break

        assert deadres # if false, importthread did not die soon enough
        assert_equal(self.nodes[1].getbalance(), 0.0)

        # Import a different address and let it run
        self.nodes[1].importprivkey(self.nodes[0].dumpprivkey(self.nodes[0].getnewaddress()))
        # Expect original privkey to now also be discovered and added to balance
        assert_equal(self.nodes[1].getbalance(), Decimal("0.123"))
Example #18
    def run_test(self):
        node = self.nodes[0]

        data_dir = lambda *p: os.path.join(node.datadir, self.chain, *p)
        wallet_dir = lambda *p: data_dir('wallets', *p)
        wallet = lambda name: node.get_wallet_rpc(name)

        def wallet_file(name):
            if os.path.isdir(wallet_dir(name)):
                return wallet_dir(name, "wallet.dat")
            return wallet_dir(name)

        assert_equal(self.nodes[0].listwalletdir(), { 'wallets': [{ 'name': '' }] })

        # check wallet.dat is created
        self.stop_nodes()
        assert_equal(os.path.isfile(wallet_dir('wallet.dat')), True)

        # create symlink to verify wallet directory path can be referenced
        # through symlink
        os.mkdir(wallet_dir('w7'))
        os.symlink('w7', wallet_dir('w7_symlink'))

        # rename wallet.dat to make sure plain wallet file paths (as opposed to
        # directory paths) can be loaded
        os.rename(wallet_dir("wallet.dat"), wallet_dir("w8"))

        # create another dummy wallet for use in testing backups later
        self.start_node(0, [])
        self.stop_nodes()
        empty_wallet = os.path.join(self.options.tmpdir, 'empty.dat')
        os.rename(wallet_dir("wallet.dat"), empty_wallet)

        # restart node with a mix of wallet names:
        #   w1, w2, w3 - to verify new wallets created when non-existing paths specified
        #   w          - to verify wallet name matching works when one wallet path is prefix of another
        #   sub/w5     - to verify relative wallet path is created correctly
        #   extern/w6  - to verify absolute wallet path is created correctly
        #   w7_symlink - to verify symlinked wallet path is initialized correctly
        #   w8         - to verify existing wallet file is loaded correctly
        #   ''         - to verify default wallet file is created correctly
        wallet_names = ['w1', 'w2', 'w3', 'w', 'sub/w5', os.path.join(self.options.tmpdir, 'extern/w6'), 'w7_symlink', 'w8', '']
        extra_args = ['-wallet={}'.format(n) for n in wallet_names]
        self.start_node(0, extra_args)
        assert_equal(sorted(map(lambda w: w['name'], self.nodes[0].listwalletdir()['wallets'])), ['', os.path.join('sub', 'w5'), 'w', 'w1', 'w2', 'w3', 'w7', 'w7_symlink', 'w8'])

        assert_equal(set(node.listwallets()), set(wallet_names))

        # check that all requested wallets were created
        self.stop_node(0)
        for wallet_name in wallet_names:
            assert_equal(os.path.isfile(wallet_file(wallet_name)), True)

        # should not initialize if wallet path can't be created
        exp_stderr = "boost::filesystem::create_directory:"
        self.nodes[0].assert_start_raises_init_error(['-wallet=wallet.dat/bad'], exp_stderr, match=ErrorMatch.PARTIAL_REGEX)

        self.nodes[0].assert_start_raises_init_error(['-walletdir=wallets'], 'Error: Specified -walletdir "wallets" does not exist')
        self.nodes[0].assert_start_raises_init_error(['-walletdir=wallets'], 'Error: Specified -walletdir "wallets" is a relative path', cwd=data_dir())
        self.nodes[0].assert_start_raises_init_error(['-walletdir=debug.log'], 'Error: Specified -walletdir "debug.log" is not a directory', cwd=data_dir())

        # should not initialize if there are duplicate wallets
        self.nodes[0].assert_start_raises_init_error(['-wallet=w1', '-wallet=w1'], 'Error: Error loading wallet w1. Duplicate -wallet filename specified.')

        # should not initialize if one wallet is a copy of another
        shutil.copyfile(wallet_dir('w8'), wallet_dir('w8_copy'))
        exp_stderr = r"BerkeleyDatabase: Can't open database w8_copy \(duplicates fileid \w+ from w8\)"
        self.nodes[0].assert_start_raises_init_error(['-wallet=w8', '-wallet=w8_copy'], exp_stderr, match=ErrorMatch.PARTIAL_REGEX)

        # should not initialize if wallet file is a symlink
        os.symlink('w8', wallet_dir('w8_symlink'))
        self.nodes[0].assert_start_raises_init_error(['-wallet=w8_symlink'], r'Error: Invalid -wallet path \'w8_symlink\'\. .*', match=ErrorMatch.FULL_REGEX)

        # should not initialize if the specified walletdir does not exist
        self.nodes[0].assert_start_raises_init_error(['-walletdir=bad'], 'Error: Specified -walletdir "bad" does not exist')
        # should not initialize if the specified walletdir is not a directory
        not_a_dir = wallet_dir('notadir')
        open(not_a_dir, 'a', encoding="utf8").close()
        self.nodes[0].assert_start_raises_init_error(['-walletdir=' + not_a_dir], 'Error: Specified -walletdir "' + not_a_dir + '" is not a directory')

        # if wallets/ doesn't exist, datadir should be the default wallet dir
        wallet_dir2 = data_dir('walletdir')
        os.rename(wallet_dir(), wallet_dir2)
        self.start_node(0, ['-wallet=w4', '-wallet=w5'])
        assert_equal(set(node.listwallets()), {"w4", "w5"})
        w5 = wallet("w5")
        node.generatetoaddress(nblocks=1, address=w5.getnewaddress())

        # now if wallets/ exists again, but the rootdir is specified as the walletdir, w4 and w5 should still be loaded
        os.rename(wallet_dir2, wallet_dir())
        self.restart_node(0, ['-wallet=w4', '-wallet=w5', '-walletdir=' + data_dir()])
        assert_equal(set(node.listwallets()), {"w4", "w5"})
        w5 = wallet("w5")
        w5_info = w5.getwalletinfo()
        assert_equal(w5_info['immature_balance'], 50)

        competing_wallet_dir = os.path.join(self.options.tmpdir, 'competing_walletdir')
        os.mkdir(competing_wallet_dir)
        self.restart_node(0, ['-walletdir=' + competing_wallet_dir])
        exp_stderr = r"Error: Error initializing wallet database environment \"\S+competing_walletdir\"!"
        self.nodes[1].assert_start_raises_init_error(['-walletdir=' + competing_wallet_dir], exp_stderr, match=ErrorMatch.PARTIAL_REGEX)

        self.restart_node(0, extra_args)

        assert_equal(sorted(map(lambda w: w['name'], self.nodes[0].listwalletdir()['wallets'])), ['', os.path.join('sub', 'w5'), 'w', 'w1', 'w2', 'w3', 'w7', 'w7_symlink', 'w8', 'w8_copy'])

        wallets = [wallet(w) for w in wallet_names]
        wallet_bad = wallet("bad")

        # check wallet names and balances
        node.generatetoaddress(nblocks=1, address=wallets[0].getnewaddress())
        for wallet_name, wallet in zip(wallet_names, wallets):
            info = wallet.getwalletinfo()
            assert_equal(info['immature_balance'], 50 if wallet is wallets[0] else 0)
            assert_equal(info['walletname'], wallet_name)

        # accessing invalid wallet fails
        assert_raises_rpc_error(-18, "Requested wallet does not exist or is not loaded", wallet_bad.getwalletinfo)

        # accessing wallet RPC without using wallet endpoint fails
        assert_raises_rpc_error(-19, "Wallet file not specified", node.getwalletinfo)

        w1, w2, w3, w4, *_ = wallets
        node.generatetoaddress(nblocks=101, address=w1.getnewaddress())
        assert_equal(w1.getbalance(), 100)
        assert_equal(w2.getbalance(), 0)
        assert_equal(w3.getbalance(), 0)
        assert_equal(w4.getbalance(), 0)

        w1.sendtoaddress(w2.getnewaddress(), 1)
        w1.sendtoaddress(w3.getnewaddress(), 2)
        w1.sendtoaddress(w4.getnewaddress(), 3)
        node.generatetoaddress(nblocks=1, address=w1.getnewaddress())
        assert_equal(w2.getbalance(), 1)
        assert_equal(w3.getbalance(), 2)
        assert_equal(w4.getbalance(), 3)

        batch = w1.batch([w1.getblockchaininfo.get_request(), w1.getwalletinfo.get_request()])
        assert_equal(batch[0]["result"]["chain"], self.chain)
        assert_equal(batch[1]["result"]["walletname"], "w1")

        self.log.info('Check for per-wallet settxfee call')
        assert_equal(w1.getwalletinfo()['paytxfee'], 0)
        assert_equal(w2.getwalletinfo()['paytxfee'], 0)
        w2.settxfee(0.001)
        assert_equal(w1.getwalletinfo()['paytxfee'], 0)
        assert_equal(w2.getwalletinfo()['paytxfee'], Decimal('0.00100000'))

        self.log.info("Test dynamic wallet loading")

        self.restart_node(0, ['-nowallet'])
        assert_equal(node.listwallets(), [])
        assert_raises_rpc_error(-32601, "Method not found", node.getwalletinfo)

        self.log.info("Load first wallet")
        loadwallet_name = node.loadwallet(wallet_names[0])
        assert_equal(loadwallet_name['name'], wallet_names[0])
        assert_equal(node.listwallets(), wallet_names[0:1])
        node.getwalletinfo()
        w1 = node.get_wallet_rpc(wallet_names[0])
        w1.getwalletinfo()

        self.log.info("Load second wallet")
        loadwallet_name = node.loadwallet(wallet_names[1])
        assert_equal(loadwallet_name['name'], wallet_names[1])
        assert_equal(node.listwallets(), wallet_names[0:2])
        assert_raises_rpc_error(-19, "Wallet file not specified", node.getwalletinfo)
        w2 = node.get_wallet_rpc(wallet_names[1])
        w2.getwalletinfo()

        self.log.info("Concurrent wallet loading")
        threads = []
        for _ in range(3):
            n = node.cli if self.options.usecli else get_rpc_proxy(node.url, 1, timeout=600, coveragedir=node.coverage_dir)
            t = Thread(target=test_load_unload, args=(n, wallet_names[2], ))
            t.start()
            threads.append(t)
        for t in threads:
            t.join()
        global got_loading_error
        assert_equal(got_loading_error, True)

        self.log.info("Load remaining wallets")
        for wallet_name in wallet_names[2:]:
            loadwallet_name = self.nodes[0].loadwallet(wallet_name)
            assert_equal(loadwallet_name['name'], wallet_name)

        assert_equal(set(self.nodes[0].listwallets()), set(wallet_names))

        # Fail to load if wallet doesn't exist
        assert_raises_rpc_error(-18, 'Wallet wallets not found.', self.nodes[0].loadwallet, 'wallets')

        # Fail to load duplicate wallets
        assert_raises_rpc_error(-4, 'Wallet file verification failed. Error loading wallet w1. Duplicate -wallet filename specified.', self.nodes[0].loadwallet, wallet_names[0])

        # Fail to load duplicate wallets by different ways (directory and filepath)
        assert_raises_rpc_error(-4, "Wallet file verification failed. Error loading wallet wallet.dat. Duplicate -wallet filename specified.", self.nodes[0].loadwallet, 'wallet.dat')

        # Fail to load if one wallet is a copy of another
        assert_raises_rpc_error(-4, "BerkeleyDatabase: Can't open database w8_copy (duplicates fileid", self.nodes[0].loadwallet, 'w8_copy')

        # Fail to load if one wallet is a copy of another, test this twice to make sure that we don't re-introduce #14304
        assert_raises_rpc_error(-4, "BerkeleyDatabase: Can't open database w8_copy (duplicates fileid", self.nodes[0].loadwallet, 'w8_copy')


        # Fail to load if wallet file is a symlink
        assert_raises_rpc_error(-4, "Wallet file verification failed. Invalid -wallet path 'w8_symlink'", self.nodes[0].loadwallet, 'w8_symlink')

        # Fail to load if a directory is specified that doesn't contain a wallet
        os.mkdir(wallet_dir('empty_wallet_dir'))
        assert_raises_rpc_error(-18, "Directory empty_wallet_dir does not contain a wallet.dat file", self.nodes[0].loadwallet, 'empty_wallet_dir')

        self.log.info("Test dynamic wallet creation.")

        # Fail to create a wallet if it already exists.
        assert_raises_rpc_error(-4, "Wallet w2 already exists.", self.nodes[0].createwallet, 'w2')

        # Successfully create a wallet with a new name
        loadwallet_name = self.nodes[0].createwallet('w9')
        assert_equal(loadwallet_name['name'], 'w9')
        w9 = node.get_wallet_rpc('w9')
        assert_equal(w9.getwalletinfo()['walletname'], 'w9')

        assert 'w9' in self.nodes[0].listwallets()

        # Successfully create a wallet using a full path
        new_wallet_dir = os.path.join(self.options.tmpdir, 'new_walletdir')
        new_wallet_name = os.path.join(new_wallet_dir, 'w10')
        loadwallet_name = self.nodes[0].createwallet(new_wallet_name)
        assert_equal(loadwallet_name['name'], new_wallet_name)
        w10 = node.get_wallet_rpc(new_wallet_name)
        assert_equal(w10.getwalletinfo()['walletname'], new_wallet_name)

        assert new_wallet_name in self.nodes[0].listwallets()

        self.log.info("Test dynamic wallet unloading")

        # Test `unloadwallet` errors
        assert_raises_rpc_error(-1, "JSON value is not a string as expected", self.nodes[0].unloadwallet)
        assert_raises_rpc_error(-18, "Requested wallet does not exist or is not loaded", self.nodes[0].unloadwallet, "dummy")
        assert_raises_rpc_error(-18, "Requested wallet does not exist or is not loaded", node.get_wallet_rpc("dummy").unloadwallet)
        assert_raises_rpc_error(-8, "Cannot unload the requested wallet", w1.unloadwallet, "w2"),

        # Successfully unload the specified wallet name
        self.nodes[0].unloadwallet("w1")
        assert 'w1' not in self.nodes[0].listwallets()

        # Successfully unload the wallet referenced by the request endpoint
        # Also ensure unload works during walletpassphrase timeout
        w2.encryptwallet('test')
        w2.walletpassphrase('test', 1)
        w2.unloadwallet()
        time.sleep(1.1)
        assert 'w2' not in self.nodes[0].listwallets()

        # Successfully unload all wallets
        for wallet_name in self.nodes[0].listwallets():
            self.nodes[0].unloadwallet(wallet_name)
        assert_equal(self.nodes[0].listwallets(), [])
        assert_raises_rpc_error(-32601, "Method not found (wallet method is disabled because no wallet is loaded)", self.nodes[0].getwalletinfo)

        # Successfully load a previously unloaded wallet
        self.nodes[0].loadwallet('w1')
        assert_equal(self.nodes[0].listwallets(), ['w1'])
        assert_equal(w1.getwalletinfo()['walletname'], 'w1')

        assert_equal(sorted(map(lambda w: w['name'], self.nodes[0].listwalletdir()['wallets'])), ['', os.path.join('sub', 'w5'), 'w', 'w1', 'w2', 'w3', 'w7', 'w7_symlink', 'w8', 'w8_copy', 'w9'])

        # Test backing up and restoring wallets
        self.log.info("Test wallet backup")
        self.restart_node(0, ['-nowallet'])
        for wallet_name in wallet_names:
            self.nodes[0].loadwallet(wallet_name)
        for wallet_name in wallet_names:
            rpc = self.nodes[0].get_wallet_rpc(wallet_name)
            addr = rpc.getnewaddress()
            backup = os.path.join(self.options.tmpdir, 'backup.dat')
            rpc.backupwallet(backup)
            self.nodes[0].unloadwallet(wallet_name)
            shutil.copyfile(empty_wallet, wallet_file(wallet_name))
            self.nodes[0].loadwallet(wallet_name)
            assert_equal(rpc.getaddressinfo(addr)['ismine'], False)
            self.nodes[0].unloadwallet(wallet_name)
            shutil.copyfile(backup, wallet_file(wallet_name))
            self.nodes[0].loadwallet(wallet_name)
            assert_equal(rpc.getaddressinfo(addr)['ismine'], True)

        # Test .walletlock file is closed
        self.start_node(1)
        wallet = os.path.join(self.options.tmpdir, 'my_wallet')
        self.nodes[0].createwallet(wallet)
        assert_raises_rpc_error(-4, "Error initializing wallet database environment", self.nodes[1].loadwallet, wallet)
        self.nodes[0].unloadwallet(wallet)
        self.nodes[1].loadwallet(wallet)
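The "Concurrent wallet loading" section above (and its counterpart in Example 22) references a test_load_unload helper and a got_loading_error global that are not shown. A hedged reconstruction consistent with the assertions: several threads hammer loadwallet/unloadwallet on the same wallet until one of them observes the loading race.

from test_framework.authproxy import JSONRPCException  # assumed import path

got_loading_error = False

def test_load_unload(node, name):
    global got_loading_error
    for _ in range(10):
        if got_loading_error:
            return
        try:
            node.loadwallet(name)
            node.unloadwallet(name)
        except JSONRPCException as e:
            # error code/text assumed from upstream Bitcoin Core; -4 signals
            # the "wallet already being loaded" race we want to provoke
            if e.error['code'] == -4 and 'already being load' in e.error['message']:
                got_loading_error = True
                return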
Example #19
    def get_tests(self):

        # shorthand for functions
        block = self.chain.next_block
        node = get_rpc_proxy(self.nodes[0].url, 1, timeout=6000, coveragedir=self.nodes[0].coverage_dir)

        self.chain.set_genesis_hash( int(node.getbestblockhash(), 16) )

        block(0)
        yield self.accepted()

        test, out, _ = prepare_init_chain(self.chain, 200, 200)

        yield test

        # Create a transaction that will almost fill the block file when the next block is generated (~130 MB)
        tx1 = create_transaction(out[0].tx, out[0].n, b"", ONE_MEGABYTE * 120, CScript([OP_TRUE, OP_RETURN, bytearray([42] * (ONE_MEGABYTE * 120))]))
        self.test.connections[0].send_message(msg_tx(tx1))
        # Wait for transaction processing
        self.check_mempool(node, [tx1], timeout=6000)

        # Mine block with new transaction.
        minedBlock1 = node.generate(1)
        
        # Send 4 large (~1GB) transactions that will go into the next block
        for i in range(4):
            txLarge = create_transaction(out[1 + i].tx, out[1 + i].n, b"", ONE_GIGABYTE, CScript([OP_TRUE, OP_RETURN, bytearray([42] * (ONE_GIGABYTE - ONE_MEGABYTE))]))
            self.test.connections[0].send_message(msg_tx(txLarge))  
            self.check_mempool(node, [txLarge], timeout=6000)

        # Set the overflow size so that the resulting block size is exactly the 32-bit maximum (0xFFFFFFFF)
        txOverflowSize = ((ONE_MEGABYTE * 298) + 966751)
        txOverflow = create_transaction(out[5].tx, out[5].n, b"", ONE_MEGABYTE * 305, CScript([OP_TRUE, OP_RETURN, bytearray([42] * txOverflowSize)]))
        self.test.connections[0].send_message(msg_tx(txOverflow))  
        self.check_mempool(node, [txOverflow], timeout=6000)
        
        # Mine block with new transactions.        
        minedBlock2 = node.generate(1) 

        txLast = create_transaction(out[6].tx, out[6].n, b"", ONE_MEGABYTE, CScript([OP_TRUE, OP_RETURN, bytearray([42] * (ONE_MEGABYTE))]))
        self.test.connections[0].send_message(msg_tx(txLast))  
        self.check_mempool(node, [txLast], timeout=6000)

        # Mine block with new transaction.
        minedBlock3 = node.generate(1)                

        # Restart node to make sure that the index is written to disk
        self.stop_nodes()
        self.nodes[0].rpc_timeout = 6000
        self.start_nodes(self.extra_args)
     
        # Get proxy with bigger timeout
        node = get_rpc_proxy(self.nodes[0].url, 1, timeout=6000, coveragedir=self.nodes[0].coverage_dir)     

        # Verify that blocks were correctly written / read
        blockDetails1 = node.getblock(minedBlock1[0])
        blockDetails2 = node.getblock(minedBlock2[0])
        blockDetails3 = node.getblock(minedBlock3[0])
        
        assert_equal(minedBlock1[0], blockDetails1['hash'])
        assert_equal(minedBlock2[0], blockDetails2['hash'])
        assert_equal(minedBlock3[0], blockDetails3['hash'])
                
        for txId in blockDetails1['tx']:
            txCopy = node.getrawtransaction(txId, 1)
            assert_equal(txId, txCopy['txid'])

        for txId in blockDetails2['tx']:
            txCopy = node.getrawtransaction(txId, 1)
            assert_equal(txId, txCopy['txid'])
        
        for txId in blockDetails3['tx']:
            txCopy = node.getrawtransaction(txId, 1)
            assert_equal(txId, txCopy['txid'])                
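Several examples call self.check_mempool(node, [...], timeout=...), a helper whose definition is not included in these excerpts. A hedged stand-in that matches how it is used: poll getrawmempool until every expected txid appears, failing after the timeout.

import time

def check_mempool(rpc, txs, timeout=20):
    # tx.hash on the framework's CTransaction is the hex txid (compare the
    # getrawtransaction(txHash[0]) lookups above)
    expected = {tx.hash for tx in txs}
    deadline = time.time() + timeout
    while time.time() < deadline:
        if expected.issubset(set(rpc.getrawmempool())):
            return
        time.sleep(0.25)
    raise AssertionError("txs missing from mempool: %s" % expected)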
Example #20
 def run(self):
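   # NOTE: as excerpted this snippet is not self-contained; 'node' must come
   # from the enclosing scope (in the sibling examples it is the node passed
   # to __init__)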
   self.node = get_rpc_proxy(node.url, 1, timeout=600, coveragedir=node.coverage_dir)
Example #21
    def get_tests(self):

        # Shorthand for functions
        block = self.chain.next_block

        # Get proxy with bigger timeout
        node = get_rpc_proxy(self.nodes[0].url,
                             1,
                             timeout=6000,
                             coveragedir=self.nodes[0].coverage_dir)

        self.chain.set_genesis_hash(int(node.getbestblockhash(), 16))

        block(0)
        yield self.accepted()

        test, out, _ = prepare_init_chain(self.chain, 200, 200)

        yield test

        # Create a transaction that will almost fill the block file when the next block is generated (~130 MB)
        tx1 = create_transaction(
            out[0].tx, out[0].n, b"", ONE_MEGABYTE * 120,
            CScript(
                [OP_FALSE, OP_RETURN,
                 bytearray([42] * (ONE_MEGABYTE * 120))]))
        self.test.connections[0].send_message(msg_tx(tx1))
        # Wait for transaction processing
        self.check_mempool(node, [tx1], timeout=6000)
        # Mine block with new transaction.
        minedBlock1 = node.generate(1)

        # Send 4 large (~1GB) transactions that will go into next block
        for i in range(4):
            txLarge = create_transaction(
                out[1 + i].tx, out[1 + i].n, b"", ONE_GIGABYTE,
                CScript([
                    OP_FALSE, OP_RETURN,
                    bytearray([42] * (ONE_GIGABYTE - ONE_MEGABYTE))
                ]))
            self.test.connections[0].send_message(msg_tx(txLarge))
            self.check_mempool(node, [txLarge], timeout=6000)
        # Send a transaction whose size overflows a 32-bit size field
        txOverflow = create_transaction(
            out[5].tx, out[5].n, b"", ONE_MEGABYTE * 305,
            CScript(
                [OP_FALSE, OP_RETURN,
                 bytearray([42] * (ONE_MEGABYTE * 305))]))
        self.test.connections[0].send_message(msg_tx(txOverflow))
        self.check_mempool(node, [txOverflow], timeout=6000)
        # Mine a block with the new transactions; at > 4GB it is written to a new block file on disk.
        minedBlock2 = node.generate(1)
        # Make sure the block is larger than the 32-bit max
        assert_greater_than(node.getblock(minedBlock2[0], True)['size'], 2**32)

        # Generate another transaction for next block
        tx2 = create_transaction(
            out[10].tx, out[10].n, b"", ONE_MEGABYTE,
            CScript([OP_FALSE, OP_RETURN,
                     bytearray([42] * (ONE_MEGABYTE))]))
        self.test.connections[0].send_message(msg_tx(tx2))
        self.check_mempool(node, [tx2], timeout=6000)
        # Mine block with new transactions. This will write to new block file on disk.
        minedBlock3 = node.generate(1)

        # Get block count
        blockcount = node.getblockcount()

        # Restart node with reindex option
        self.stop_nodes()
        self.extra_args[0].append("-reindex")
        self.start_nodes(self.extra_args)

        # Get proxy with bigger timeout
        node = get_rpc_proxy(self.nodes[0].url,
                             1,
                             timeout=6000,
                             coveragedir=self.nodes[0].coverage_dir)

        # Wait for the reindex to catch up, then check the block count matches the pre-shutdown count
        while node.getblockcount() < blockcount:
            sleep(0.1)
        assert_equal(node.getblockcount(), blockcount)
Example #22
    def run_test(self):
        node = self.nodes[0]

        data_dir = lambda *p: os.path.join(node.datadir, self.chain, *p)
        wallet_dir = lambda *p: data_dir('wallets', *p)
        wallet = lambda name: node.get_wallet_rpc(name)

        def wallet_file(name):
            if name == self.default_wallet_name:
                return wallet_dir(self.default_wallet_name, self.wallet_data_filename)
            if os.path.isdir(wallet_dir(name)):
                return wallet_dir(name, "wallet.dat")
            return wallet_dir(name)

        assert_equal(self.nodes[0].listwalletdir(), {'wallets': [{'name': self.default_wallet_name}]})

        # check wallet.dat is created
        self.stop_nodes()
        assert_equal(os.path.isfile(wallet_dir(self.default_wallet_name, self.wallet_data_filename)), True)

        # create symlink to verify wallet directory path can be referenced
        # through symlink
        os.mkdir(wallet_dir('w7'))
        os.symlink('w7', wallet_dir('w7_symlink'))

        os.symlink('..', wallet_dir('recursive_dir_symlink'))

        os.mkdir(wallet_dir('self_walletdat_symlink'))
        os.symlink('wallet.dat', wallet_dir('self_walletdat_symlink/wallet.dat'))

        # rename wallet.dat to make sure plain wallet file paths (as opposed to
        # directory paths) can be loaded
        # create another dummy wallet for use in testing backups later
        self.start_node(0)
        node.createwallet("empty")
        node.createwallet("plain")
        node.createwallet("created")
        self.stop_nodes()
        empty_wallet = os.path.join(self.options.tmpdir, 'empty.dat')
        os.rename(wallet_file("empty"), empty_wallet)
        shutil.rmtree(wallet_dir("empty"))
        empty_created_wallet = os.path.join(self.options.tmpdir, 'empty.created.dat')
        os.rename(wallet_dir("created", self.wallet_data_filename), empty_created_wallet)
        shutil.rmtree(wallet_dir("created"))
        os.rename(wallet_file("plain"), wallet_dir("w8"))
        shutil.rmtree(wallet_dir("plain"))

        # restart node with a mix of wallet names:
        #   w1, w2, w3 - to verify new wallets created when non-existing paths specified
        #   w          - to verify wallet name matching works when one wallet path is prefix of another
        #   sub/w5     - to verify relative wallet path is created correctly
        #   extern/w6  - to verify absolute wallet path is created correctly
        #   w7_symlink - to verify symlinked wallet path is initialized correctly
        #   w8         - to verify existing wallet file is loaded correctly. Not tested for SQLite wallets as this is a deprecated BDB behavior.
        #   ''         - to verify default wallet file is created correctly
        to_create = ['w1', 'w2', 'w3', 'w', 'sub/w5', 'w7_symlink']
        in_wallet_dir = [w.replace('/', os.path.sep) for w in to_create]  # Wallets in the wallet dir
        in_wallet_dir.append('w7')  # w7 is not loaded or created, but will be listed by listwalletdir because w7_symlink
        to_create.append(os.path.join(self.options.tmpdir, 'extern/w6'))  # External, not in the wallet dir, so we need to avoid adding it to in_wallet_dir
        to_load = [self.default_wallet_name]
        if not self.options.descriptors:
            to_load.append('w8')
        wallet_names = to_create + to_load  # Wallet names loaded in the wallet
        in_wallet_dir += to_load  # The loaded wallets are also in the wallet dir
        self.start_node(0)
        for wallet_name in to_create:
            self.nodes[0].createwallet(wallet_name)
        for wallet_name in to_load:
            self.nodes[0].loadwallet(wallet_name)

        os.mkdir(wallet_dir('no_access'))
        os.chmod(wallet_dir('no_access'), 0)
        try:
            with self.nodes[0].assert_debug_log(expected_msgs=['Error scanning']):
                walletlist = self.nodes[0].listwalletdir()['wallets']
        finally:
            # Need to ensure access is restored for cleanup
            os.chmod(wallet_dir('no_access'), stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR)
        assert_equal(sorted(map(lambda w: w['name'], walletlist)), sorted(in_wallet_dir))

        assert_equal(set(node.listwallets()), set(wallet_names))

        # should raise rpc error if wallet path can't be created
        err_code = -4 if self.options.descriptors else -1
        assert_raises_rpc_error(err_code, "boost::filesystem::create_directory:", self.nodes[0].createwallet, "w8/bad")

        # check that all requested wallets were created
        self.stop_node(0)
        for wallet_name in wallet_names:
            assert_equal(os.path.isfile(wallet_file(wallet_name)), True)

        self.nodes[0].assert_start_raises_init_error(['-walletdir=wallets'], 'Error: Specified -walletdir "wallets" does not exist')
        self.nodes[0].assert_start_raises_init_error(['-walletdir=wallets'], 'Error: Specified -walletdir "wallets" is a relative path', cwd=data_dir())
        self.nodes[0].assert_start_raises_init_error(['-walletdir=debug.log'], 'Error: Specified -walletdir "debug.log" is not a directory', cwd=data_dir())

        self.start_node(0, ['-wallet=w1', '-wallet=w1'])
        self.stop_node(0, 'Warning: Ignoring duplicate -wallet w1.')

        if not self.options.descriptors:
            # Only BDB doesn't open duplicate wallet files. SQLite does not have this limitation. While this may be desired in the future, it is not necessary
            # should not initialize if one wallet is a copy of another
            shutil.copyfile(wallet_dir('w8'), wallet_dir('w8_copy'))
            in_wallet_dir.append('w8_copy')
            exp_stderr = r"BerkeleyDatabase: Can't open database w8_copy \(duplicates fileid \w+ from w8\)"
            self.nodes[0].assert_start_raises_init_error(['-wallet=w8', '-wallet=w8_copy'], exp_stderr, match=ErrorMatch.PARTIAL_REGEX)

        # should not initialize if wallet file is a symlink
        os.symlink('w8', wallet_dir('w8_symlink'))
        self.nodes[0].assert_start_raises_init_error(['-wallet=w8_symlink'], r'Error: Invalid -wallet path \'w8_symlink\'\. .*', match=ErrorMatch.FULL_REGEX)

        # should not initialize if the specified walletdir does not exist
        self.nodes[0].assert_start_raises_init_error(['-walletdir=bad'], 'Error: Specified -walletdir "bad" does not exist')
        # should not initialize if the specified walletdir is not a directory
        not_a_dir = wallet_dir('notadir')
        open(not_a_dir, 'a', encoding="utf8").close()
        self.nodes[0].assert_start_raises_init_error(['-walletdir=' + not_a_dir], 'Error: Specified -walletdir "' + not_a_dir + '" is not a directory')

        self.log.info("Do not allow -upgradewallet with multiwallet")
        self.nodes[0].assert_start_raises_init_error(['-upgradewallet'], "Error: Error parsing command line arguments: Invalid parameter -upgradewallet")

        # if wallets/ doesn't exist, datadir should be the default wallet dir
        wallet_dir2 = data_dir('walletdir')
        os.rename(wallet_dir(), wallet_dir2)
        self.start_node(0)
        self.nodes[0].createwallet("w4")
        self.nodes[0].createwallet("w5")
        assert_equal(set(node.listwallets()), {"w4", "w5"})
        w5 = wallet("w5")
        node.generatetoaddress(nblocks=1, address=w5.getnewaddress())

        # now if wallets/ exists again, but the rootdir is specified as the walletdir, w4 and w5 should still be loaded
        os.rename(wallet_dir2, wallet_dir())
        self.restart_node(0, ['-nowallet', '-walletdir=' + data_dir()])
        self.nodes[0].loadwallet("w4")
        self.nodes[0].loadwallet("w5")
        assert_equal(set(node.listwallets()), {"w4", "w5"})
        w5 = wallet("w5")
        w5_info = w5.getwalletinfo()
        assert_equal(w5_info['immature_balance'], 50)

        competing_wallet_dir = os.path.join(self.options.tmpdir, 'competing_walletdir')
        os.mkdir(competing_wallet_dir)
        self.restart_node(0, ['-nowallet', '-walletdir=' + competing_wallet_dir])
        self.nodes[0].createwallet(self.default_wallet_name)
        if self.options.descriptors:
            exp_stderr = r"Error: SQLiteDatabase: Unable to obtain an exclusive lock on the database, is it being used by another shitecoind?"
        else:
            exp_stderr = r"Error: Error initializing wallet database environment \"\S+competing_walletdir\S*\"!"
        self.nodes[1].assert_start_raises_init_error(['-walletdir=' + competing_wallet_dir], exp_stderr, match=ErrorMatch.PARTIAL_REGEX)

        self.restart_node(0)
        for wallet_name in wallet_names:
            self.nodes[0].loadwallet(wallet_name)

        assert_equal(sorted(map(lambda w: w['name'], self.nodes[0].listwalletdir()['wallets'])), sorted(in_wallet_dir))

        wallets = [wallet(w) for w in wallet_names]
        wallet_bad = wallet("bad")

        # check wallet names and balances
        node.generatetoaddress(nblocks=1, address=wallets[0].getnewaddress())
        for wallet_name, wallet in zip(wallet_names, wallets):
            info = wallet.getwalletinfo()
            assert_equal(info['immature_balance'], 50 if wallet is wallets[0] else 0)
            assert_equal(info['walletname'], wallet_name)

        # accessing invalid wallet fails
        assert_raises_rpc_error(-18, "Requested wallet does not exist or is not loaded", wallet_bad.getwalletinfo)

        # accessing wallet RPC without using wallet endpoint fails
        assert_raises_rpc_error(-19, "Wallet file not specified", node.getwalletinfo)

        w1, w2, w3, w4, *_ = wallets
        node.generatetoaddress(nblocks=101, address=w1.getnewaddress())
        assert_equal(w1.getbalance(), 100)
        assert_equal(w2.getbalance(), 0)
        assert_equal(w3.getbalance(), 0)
        assert_equal(w4.getbalance(), 0)

        w1.sendtoaddress(w2.getnewaddress(), 1)
        w1.sendtoaddress(w3.getnewaddress(), 2)
        w1.sendtoaddress(w4.getnewaddress(), 3)
        node.generatetoaddress(nblocks=1, address=w1.getnewaddress())
        assert_equal(w2.getbalance(), 1)
        assert_equal(w3.getbalance(), 2)
        assert_equal(w4.getbalance(), 3)

        batch = w1.batch([w1.getblockchaininfo.get_request(), w1.getwalletinfo.get_request()])
        assert_equal(batch[0]["result"]["chain"], self.chain)
        assert_equal(batch[1]["result"]["walletname"], "w1")

        self.log.info('Check for per-wallet settxfee call')
        assert_equal(w1.getwalletinfo()['paytxfee'], 0)
        assert_equal(w2.getwalletinfo()['paytxfee'], 0)
        w2.settxfee(0.001)
        assert_equal(w1.getwalletinfo()['paytxfee'], 0)
        assert_equal(w2.getwalletinfo()['paytxfee'], Decimal('0.00100000'))

        self.log.info("Test dynamic wallet loading")

        self.restart_node(0, ['-nowallet'])
        assert_equal(node.listwallets(), [])
        assert_raises_rpc_error(-18, "No wallet is loaded. Load a wallet using loadwallet or create a new one with createwallet. (Note: A default wallet is no longer automatically created)", node.getwalletinfo)

        self.log.info("Load first wallet")
        loadwallet_name = node.loadwallet(wallet_names[0])
        assert_equal(loadwallet_name['name'], wallet_names[0])
        assert_equal(node.listwallets(), wallet_names[0:1])
        node.getwalletinfo()
        w1 = node.get_wallet_rpc(wallet_names[0])
        w1.getwalletinfo()

        self.log.info("Load second wallet")
        loadwallet_name = node.loadwallet(wallet_names[1])
        assert_equal(loadwallet_name['name'], wallet_names[1])
        assert_equal(node.listwallets(), wallet_names[0:2])
        assert_raises_rpc_error(-19, "Wallet file not specified", node.getwalletinfo)
        w2 = node.get_wallet_rpc(wallet_names[1])
        w2.getwalletinfo()

        self.log.info("Concurrent wallet loading")
        threads = []
        for _ in range(3):
            n = node.cli if self.options.usecli else get_rpc_proxy(node.url, 1, timeout=600, coveragedir=node.coverage_dir)
            t = Thread(target=test_load_unload, args=(n, wallet_names[2]))
            t.start()
            threads.append(t)
        for t in threads:
            t.join()
        global got_loading_error
        assert_equal(got_loading_error, True)

        self.log.info("Load remaining wallets")
        for wallet_name in wallet_names[2:]:
            loadwallet_name = self.nodes[0].loadwallet(wallet_name)
            assert_equal(loadwallet_name['name'], wallet_name)

        assert_equal(set(self.nodes[0].listwallets()), set(wallet_names))

        # Fail to load if wallet doesn't exist
        path = os.path.join(self.options.tmpdir, "node0", "regtest", "wallets", "wallets")
        assert_raises_rpc_error(-18, "Wallet file verification failed. Failed to load database path '{}'. Path does not exist.".format(path), self.nodes[0].loadwallet, 'wallets')

        # Fail to load duplicate wallets
        path = os.path.join(self.options.tmpdir, "node0", "regtest", "wallets", "w1", "wallet.dat")
        if self.options.descriptors:
            assert_raises_rpc_error(-4, "Wallet file verification failed. SQLiteDatabase: Unable to obtain an exclusive lock on the database, is it being used by another shitecoind?", self.nodes[0].loadwallet, wallet_names[0])
        else:
            assert_raises_rpc_error(-4, "Wallet file verification failed. Refusing to load database. Data file '{}' is already loaded.".format(path), self.nodes[0].loadwallet, wallet_names[0])

            # This tests the default wallet that BDB makes, so SQLite wallet doesn't need to test this
            # Fail to load duplicate wallets by different ways (directory and filepath)
            path = os.path.join(self.options.tmpdir, "node0", "regtest", "wallets", "wallet.dat")
            assert_raises_rpc_error(-4, "Wallet file verification failed. Refusing to load database. Data file '{}' is already loaded.".format(path), self.nodes[0].loadwallet, 'wallet.dat')

            # Only BDB doesn't open duplicate wallet files. SQLite does not have this limitation. While this may be desired in the future, it is not necessary
            # Fail to load if one wallet is a copy of another
            assert_raises_rpc_error(-4, "BerkeleyDatabase: Can't open database w8_copy (duplicates fileid", self.nodes[0].loadwallet, 'w8_copy')

            # Fail to load if one wallet is a copy of another, test this twice to make sure that we don't re-introduce #14304
            assert_raises_rpc_error(-4, "BerkeleyDatabase: Can't open database w8_copy (duplicates fileid", self.nodes[0].loadwallet, 'w8_copy')

        # Fail to load if wallet file is a symlink
        assert_raises_rpc_error(-4, "Wallet file verification failed. Invalid -wallet path 'w8_symlink'", self.nodes[0].loadwallet, 'w8_symlink')

        # Fail to load if a directory is specified that doesn't contain a wallet
        os.mkdir(wallet_dir('empty_wallet_dir'))
        path = os.path.join(self.options.tmpdir, "node0", "regtest", "wallets", "empty_wallet_dir")
        assert_raises_rpc_error(-18, "Wallet file verification failed. Failed to load database path '{}'. Data is not in recognized format.".format(path), self.nodes[0].loadwallet, 'empty_wallet_dir')

        self.log.info("Test dynamic wallet creation.")

        # Fail to create a wallet if it already exists.
        path = os.path.join(self.options.tmpdir, "node0", "regtest", "wallets", "w2")
        assert_raises_rpc_error(-4, "Failed to create database path '{}'. Database already exists.".format(path), self.nodes[0].createwallet, 'w2')

        # Successfully create a wallet with a new name
        loadwallet_name = self.nodes[0].createwallet('w9')
        in_wallet_dir.append('w9')
        assert_equal(loadwallet_name['name'], 'w9')
        w9 = node.get_wallet_rpc('w9')
        assert_equal(w9.getwalletinfo()['walletname'], 'w9')

        assert 'w9' in self.nodes[0].listwallets()

        # Successfully create a wallet using a full path
        new_wallet_dir = os.path.join(self.options.tmpdir, 'new_walletdir')
        new_wallet_name = os.path.join(new_wallet_dir, 'w10')
        loadwallet_name = self.nodes[0].createwallet(new_wallet_name)
        assert_equal(loadwallet_name['name'], new_wallet_name)
        w10 = node.get_wallet_rpc(new_wallet_name)
        assert_equal(w10.getwalletinfo()['walletname'], new_wallet_name)

        assert new_wallet_name in self.nodes[0].listwallets()

        self.log.info("Test dynamic wallet unloading")

        # Test `unloadwallet` errors
        assert_raises_rpc_error(-1, "JSON value is not a string as expected", self.nodes[0].unloadwallet)
        assert_raises_rpc_error(-18, "Requested wallet does not exist or is not loaded", self.nodes[0].unloadwallet, "dummy")
        assert_raises_rpc_error(-18, "Requested wallet does not exist or is not loaded", node.get_wallet_rpc("dummy").unloadwallet)
        assert_raises_rpc_error(-8, "RPC endpoint wallet and wallet_name parameter specify different wallets", w1.unloadwallet, "w2")

        # Successfully unload the specified wallet name
        self.nodes[0].unloadwallet("w1")
        assert 'w1' not in self.nodes[0].listwallets()

        # Unload w1 again, this time specifying the wallet both via the RPC endpoint and the parameter
        self.nodes[0].loadwallet("w1")
        assert 'w1' in self.nodes[0].listwallets()
        w1.unloadwallet("w1")
        assert 'w1' not in self.nodes[0].listwallets()

        # Successfully unload the wallet referenced by the request endpoint
        # Also ensure unload works during walletpassphrase timeout
        w2.encryptwallet('test')
        w2.walletpassphrase('test', 1)
        w2.unloadwallet()
        time.sleep(1.1)
        assert 'w2' not in self.nodes[0].listwallets()

        # Successfully unload all wallets
        for wallet_name in self.nodes[0].listwallets():
            self.nodes[0].unloadwallet(wallet_name)
        assert_equal(self.nodes[0].listwallets(), [])
        assert_raises_rpc_error(-18, "No wallet is loaded. Load a wallet using loadwallet or create a new one with createwallet. (Note: A default wallet is no longer automatically created)", self.nodes[0].getwalletinfo)

        # Successfully load a previously unloaded wallet
        self.nodes[0].loadwallet('w1')
        assert_equal(self.nodes[0].listwallets(), ['w1'])
        assert_equal(w1.getwalletinfo()['walletname'], 'w1')

        assert_equal(sorted(map(lambda w: w['name'], self.nodes[0].listwalletdir()['wallets'])), sorted(in_wallet_dir))

        # Test backing up and restoring wallets
        self.log.info("Test wallet backup")
        self.restart_node(0, ['-nowallet'])
        for wallet_name in wallet_names:
            self.nodes[0].loadwallet(wallet_name)
        for wallet_name in wallet_names:
            rpc = self.nodes[0].get_wallet_rpc(wallet_name)
            addr = rpc.getnewaddress()
            backup = os.path.join(self.options.tmpdir, 'backup.dat')
            if os.path.exists(backup):
                os.unlink(backup)
            rpc.backupwallet(backup)
            self.nodes[0].unloadwallet(wallet_name)
            shutil.copyfile(empty_created_wallet if wallet_name == self.default_wallet_name else empty_wallet, wallet_file(wallet_name))
            self.nodes[0].loadwallet(wallet_name)
            assert_equal(rpc.getaddressinfo(addr)['ismine'], False)
            self.nodes[0].unloadwallet(wallet_name)
            shutil.copyfile(backup, wallet_file(wallet_name))
            self.nodes[0].loadwallet(wallet_name)
            assert_equal(rpc.getaddressinfo(addr)['ismine'], True)

        # Test .walletlock file is closed
        self.start_node(1)
        wallet = os.path.join(self.options.tmpdir, 'my_wallet')
        self.nodes[0].createwallet(wallet)
        if self.options.descriptors:
            assert_raises_rpc_error(-4, "Unable to obtain an exclusive lock", self.nodes[1].loadwallet, wallet)
        else:
            assert_raises_rpc_error(-4, "Error initializing wallet database environment", self.nodes[1].loadwallet, wallet)
        self.nodes[0].unloadwallet(wallet)
        self.nodes[1].loadwallet(wallet)
Example no. 23
    def run_test(self):
        node = get_rpc_proxy(self.nodes[0].url, 1, timeout=600, coveragedir=self.nodes[0].coverage_dir)
        Thread(target=test_long_call, args=(node,)).start()
        # wait 1 second to ensure event loop waits for current connections to close
        self.stop_node(0, wait=1000)
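
The snippet references test_long_call without defining it. As a hedged sketch, such a helper can be any blocking RPC call, so the shutdown has an in-flight request to wait on; the waitfornewblock call below is an assumption, not taken from this listing:

# Hedged sketch: a long-running RPC call for the shutdown test above.
# Assumes `node` is an RPC proxy and that waitfornewblock blocks until
# a block arrives (or the node shuts down) - which is the point of the test.
def test_long_call(node):
    block = node.waitfornewblock()
    assert_equal(block['height'], 0)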
Example no. 24
import json
import sys

sys.path.append("..")
from test_framework.util import get_rpc_proxy
""" This tool must be used at current directory.

Usage: python3 rpc_client_http.py [url] method_name [method_args]*

for example:
    - Get epoch number:     python3 rpc_client_http.py cfx_epochNumber
    - Get last mined epoch: python3 rpc_client_http.py cfx_epochNumber latest_mined
    - Specify RPC URL:      python3 rpc_client_http.py http://localhost:8888 cfx_epochNumber

Note: when a URL is specified, it must be of the form http://ip:port."""

assert len(sys.argv) > 1, "Parameter required: [<url: http://ip:port>] <method_name> [<method_args>*]"

rpc_url = "http://localhost:12537"
method_name = sys.argv[1]
method_args = sys.argv[2:]

if sys.argv[1].lower().startswith("http://"):
    rpc_url = sys.argv[1]
    assert len(sys.argv) > 2, "method_name not specified"
    method_name = sys.argv[2]
    method_args = sys.argv[3:]

node = get_rpc_proxy(rpc_url, 3)
method_args = ["\"" + arg + "\"" for arg in method_args]
rpc = "node.{}({})".format(method_name, ", ".join(method_args))
print(json.dumps(eval(rpc), indent=4))
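
The eval-based dispatch above executes a constructed string. A minimal hedged alternative resolves the method with getattr instead, assuming attribute access on the proxy returned by get_rpc_proxy yields a callable RPC method, as the other examples here rely on:

# Hedged sketch: attribute-based dispatch instead of eval.
# `node`, `method_name` and `method_args` are the variables defined above;
# arguments are passed as plain strings rather than re-quoted for eval.
method = getattr(node, method_name)
print(json.dumps(method(*method_args), indent=4))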
Example no. 25
    def run_test(self):
        block_count = 0

        # Create P2P connections
        node0 = NodeConnCB()
        connection0 = NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], node0)
        node0.add_connection(connection0)

        node1 = NodeConnCB()
        connection1 = NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], node1)
        node1.add_connection(connection1)

        # *** Prepare node connection for early announcements testing
        node2 = NodeConnCB()
        node2.add_connection(
            NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], node2))

        NetworkThread().start()
        # wait_for_verack ensures that the P2P connection is fully up.
        node0.wait_for_verack()
        node1.wait_for_verack()

        # *** Activate early announcement functionality for this connection
        #     After this point the early announcements are not received yet -
        #     we still need to set the latest announced block (CNode::pindexBestKnownBlock),
        #     which is set, for example, by sending a getheaders message with a
        #     non-null locator
        node2.wait_for_verack()
        node2.send_message(msg_sendcmpct(announce=True))

        self.chain.set_genesis_hash(int(self.nodes[0].getbestblockhash(), 16))

        _, outs, block_count = prepare_init_chain(self.chain,
                                                  101,
                                                  1,
                                                  block_0=False,
                                                  start_block=0,
                                                  node=node0)
        out = outs[0]

        self.log.info("waiting for block height 101 via rpc")
        self.nodes[0].waitforblockheight(101)

        tip_block_num = block_count - 1

        # adding extra transactions to get different block hashes
        block2_hard = self.chain.next_block(block_count,
                                            spend=out,
                                            extra_txns=8)
        block_count += 1

        self.chain.set_tip(tip_block_num)

        block3_easier = self.chain.next_block(block_count,
                                              spend=out,
                                              extra_txns=2)
        block_count += 1

        self.chain.set_tip(tip_block_num)

        block4_hard = self.chain.next_block(block_count,
                                            spend=out,
                                            extra_txns=10)
        block_count += 1

        # delay the two "hard" blocks: with waitaftervalidatingblock we artificially
        # extend their validation time.
        self.log.info(f"hard block2 hash: {block2_hard.hash}")
        self.nodes[0].waitaftervalidatingblock(block2_hard.hash, "add")
        self.log.info(f"hard block4 hash: {block4_hard.hash}")
        self.nodes[0].waitaftervalidatingblock(block4_hard.hash, "add")

        # make sure block hashes are in waiting list
        wait_for_waiting_blocks({block2_hard.hash, block4_hard.hash},
                                self.nodes[0], self.log)

        # *** Complete early announcement setup by sending getheaders message
        #     with a non-null locator (pointing to the last block that we know
        #     of on python side - we claim that we know of all the blocks that
        #     bitcoind node knows of)
        #
        #     We also set on_cmpctblock handler as early announced blocks are
        #     announced via compact block messages instead of inv messages
        node2.send_and_ping(
            msg_getheaders(
                locator_have=[int(self.nodes[0].getbestblockhash(), 16)]))
        receivedAnnouncement = False
        waiting_for_announcement_block_hash = block2_hard.sha256

        def on_cmpctblock(conn, message):
            nonlocal receivedAnnouncement
            message.header_and_shortids.header.calc_sha256()
            if message.header_and_shortids.header.sha256 == waiting_for_announcement_block_hash:
                receivedAnnouncement = True

        node2.on_cmpctblock = on_cmpctblock

        # send one block via p2p and one via rpc
        node0.send_message(msg_block(block2_hard))

        # *** make sure that we receive announcement of the block before it has
        #     been validated
        wait_until(lambda: receivedAnnouncement)

        # making the submitblock RPC call in a separate thread because
        # waitaftervalidatingblock blocks the return of submitblock
        submitblock_thread = threading.Thread(target=self.nodes[0].submitblock,
                                              args=(ToHex(block4_hard), ))
        submitblock_thread.start()

        # because self.nodes[0] rpc is blocked we use another rpc client
        rpc_client = get_rpc_proxy(
            rpc_url(get_datadir_path(self.options.tmpdir, 0), 0),
            0,
            coveragedir=self.options.coveragedir)

        wait_for_validating_blocks({block2_hard.hash, block4_hard.hash},
                                   rpc_client, self.log)

        # *** prepare to intercept block3_easier announcement - it will not be
        #     announced before validation is complete as early announcement is
        #     limited to announcing one block per height (siblings are ignored)
        #     but after validation is complete we should still get the announcing
        #     compact block message
        receivedAnnouncement = False
        waiting_for_announcement_block_hash = block3_easier.sha256

        self.log.info(f"easy block3 hash: {block3_easier.hash}")
        node1.send_message(msg_block(block3_easier))

        # *** Make sure that we receive compact block announcement of the block
        #     after the validation is complete even though it was not the first
        #     block that was received by bitcoind node.
        #
        #     Also make sure that we receive inv announcement of the block after
        #     the validation is complete by the nodes that are not using early
        #     announcement functionality.
        wait_until(lambda: receivedAnnouncement)
        node0.wait_for_inv([CInv(CInv.BLOCK, block3_easier.sha256)])
        # node 1 was the sender but receives an inv for the block nonetheless
        # (with early announcement that's not the case - the sender does not receive the announcement)
        node1.wait_for_inv([CInv(CInv.BLOCK, block3_easier.sha256)])

        rpc_client.waitforblockheight(102)
        assert_equal(block3_easier.hash, rpc_client.getbestblockhash())

        # now we can remove waiting status from blocks and finish their validation
        rpc_client.waitaftervalidatingblock(block2_hard.hash, "remove")
        rpc_client.waitaftervalidatingblock(block4_hard.hash, "remove")
        submitblock_thread.join()

        # wait till validation of block or blocks finishes
        node0.sync_with_ping()

        # easier block should still be on tip
        assert_equal(block3_easier.hash, self.nodes[0].getbestblockhash())
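
The unbounded wait_until calls above hang indefinitely if the announcement never arrives. If the framework's wait_until accepts a timeout keyword (an assumption here, modelled on similar test frameworks), bounding them is a one-line change:

# Hedged sketch: bound the announcement wait so a missed compact block
# fails the test instead of hanging (timeout kwarg assumed to exist).
wait_until(lambda: receivedAnnouncement, timeout=60)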
Example no. 26
def new_client(rpc_url):
    return RpcClient(node=get_rpc_proxy(rpc_url, 3))
Example no. 27
    def get_tests(self):

        # Shorthand for functions
        block = self.chain.next_block

        # Get proxy with bigger timeout
        node = get_rpc_proxy(self.nodes[0].url,
                             1,
                             timeout=6000,
                             coveragedir=self.nodes[0].coverage_dir)

        self.chain.set_genesis_hash(int(node.getbestblockhash(), 16))
        # Create a new block
        block(0)

        self.chain.save_spendable_output()
        yield self.accepted()

        # Now we need that block to mature so we can spend the coinbase.
        test = TestInstance(sync_every_block=False)
        for i in range(200):
            block(5000 + i)
            test.blocks_and_transactions.append([self.chain.tip, True])
            self.chain.save_spendable_output()
        yield test

        # Collect spendable outputs now to avoid cluttering the code later on
        out = []
        for i in range(200):
            out.append(self.chain.get_spendable_output())

        # Create a transaction that will almost fill the block file when the next block is generated (~130 MB)
        tx1 = create_transaction(
            out[0].tx, out[0].n, b"", ONE_MEGABYTE * 120,
            CScript(
                [OP_FALSE, OP_RETURN,
                 bytearray([42] * (ONE_MEGABYTE * 120))]))
        self.test.connections[0].send_message(msg_tx(tx1))
        # Wait for transaction processing
        self.check_mempool(node, [tx1])
        # Mine block with new transaction.
        minedBlock1 = node.generate(1)

        # Send 4 large (~1GB) transactions that will go into next block
        for i in range(4):
            txLarge = create_transaction(
                out[1 + i].tx, out[1 + i].n, b"", ONE_GIGABYTE,
                CScript([
                    OP_FALSE, OP_RETURN,
                    bytearray([42] * (ONE_GIGABYTE - ONE_MEGABYTE))
                ]))
            self.test.connections[0].send_message(msg_tx(txLarge))
            self.check_mempool(node, [txLarge])
        # Send a transaction that will push the next block's size past the 32-bit limit
        txOverflow = create_transaction(
            out[5].tx, out[5].n, b"", ONE_MEGABYTE * 305,
            CScript(
                [OP_FALSE, OP_RETURN,
                 bytearray([42] * (ONE_MEGABYTE * 305))]))
        self.test.connections[0].send_message(msg_tx(txOverflow))
        self.check_mempool(node, [txOverflow])
        # Mine a block (> 4GB) with the new transactions. This will write to a new block file on disk.
        minedBlock2 = node.generate(1)
        # Make sure that block is larger than 32 bit max
        assert_greater_than(node.getblock(minedBlock2[0], True)['size'], 2**32)

        # Generate another transaction for next block
        tx2 = create_transaction(
            out[10].tx, out[10].n, b"", ONE_MEGABYTE,
            CScript([OP_FALSE, OP_RETURN,
                     bytearray([42] * (ONE_MEGABYTE))]))
        self.test.connections[0].send_message(msg_tx(tx2))
        self.check_mempool(node, [tx2])
        # Mine block with new transactions. This will write to new block file on disk.
        minedBlock3 = node.generate(1)

        # Get block count
        blockcount = node.getblockcount()

        # Restart node with reindex option
        self.stop_nodes()
        self.extra_args[0].append("-reindex")
        self.start_nodes(self.extra_args)

        # Get proxy with bigger timeout
        node = get_rpc_proxy(self.nodes[0].url,
                             1,
                             timeout=6000,
                             coveragedir=self.nodes[0].coverage_dir)

        # Get block count after reindex and compare it to block count before node shutdown - should be equal
        while node.getblockcount() < blockcount:
            sleep(0.1)
        assert_equal(node.getblockcount(), blockcount)
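
The wait loop above spins forever if the reindex stalls. A hedged variant bounds the wait; the 6000-second deadline mirrors the proxy timeout used above and is an illustrative choice, not framework API:

# Hedged sketch: poll getblockcount with a deadline instead of an unbounded loop.
import time

deadline = time.time() + 6000
while node.getblockcount() < blockcount:
    assert time.time() < deadline, "reindex did not catch up in time"
    time.sleep(0.1)
assert_equal(node.getblockcount(), blockcount)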
Example no. 28
    def run_test(self):
        block_count = 0

        # Create P2P connections
        node0 = NodeConnCB()
        connection = NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], node0)
        node0.add_connection(connection)

        node1 = NodeConnCB()
        connection = NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], node1)
        node1.add_connection(connection)

        NetworkThread().start()
        # wait_for_verack ensures that the P2P connection is fully up.
        node0.wait_for_verack()
        node1.wait_for_verack()

        self.chain.set_genesis_hash(int(self.nodes[0].getbestblockhash(), 16))

        _, outs, block_count = prepare_init_chain(self.chain, 101, 1, block_0=False, start_block=0, node=node0)
        out = outs[0]

        self.log.info("waiting for block height 101 via rpc")
        self.nodes[0].waitforblockheight(101)

        tip_block_num = block_count - 1

        # adding extra transactions to get different block hashes
        block2_hard = self.chain.next_block(block_count, spend=out, extra_txns=8)
        block_count += 1

        self.chain.set_tip(tip_block_num)

        block3_easier = self.chain.next_block(block_count, spend=out, extra_txns=2)
        block_count += 1

        mining_candidate = self.nodes[0].getminingcandidate()
        block4_hard = self.chain.next_block(block_count)
        block4_hard.hashPrevBlock = int(mining_candidate["prevhash"], 16)
        block4_hard.nTime = mining_candidate["time"]
        block4_hard.nVersion = mining_candidate["version"]
        block4_hard.solve()

        mining_solution = {"id": mining_candidate["id"],
                           "nonce": block4_hard.nNonce,
                           "coinbase": ToHex(block4_hard.vtx[0]),
                           "time": mining_candidate["time"],
                           "version": mining_candidate["version"]}

        # delay the two "hard" blocks: with waitaftervalidatingblock we artificially
        # extend their validation time.
        self.log.info(f"hard block2 hash: {block2_hard.hash}")
        self.nodes[0].waitaftervalidatingblock(block2_hard.hash, "add")
        self.log.info(f"hard block4 hash: {block4_hard.hash}")
        self.nodes[0].waitaftervalidatingblock(block4_hard.hash, "add")

        # make sure block hashes are in waiting list
        wait_for_waiting_blocks({block2_hard.hash, block4_hard.hash}, self.nodes[0], self.log)

        # send one block via p2p and one via rpc
        node0.send_message(msg_block(block2_hard))

        # making the submitminingsolution RPC call in a separate thread because
        # waitaftervalidatingblock blocks the return of submitminingsolution
        submitminingsolution_thread = threading.Thread(target=self.nodes[0].submitminingsolution, args=(mining_solution,))
        submitminingsolution_thread.start()

        # because self.nodes[0] rpc is blocked we use another rpc client
        rpc_client = get_rpc_proxy(rpc_url(get_datadir_path(self.options.tmpdir, 0), 0), 0,
                                   coveragedir=self.options.coveragedir)

        wait_for_validating_blocks({block2_hard.hash, block4_hard.hash}, rpc_client, self.log)

        self.log.info(f"easy block3 hash: {block3_easier.hash}")
        node1.send_message(msg_block(block3_easier))

        rpc_client.waitforblockheight(102)
        assert_equal(block3_easier.hash, rpc_client.getbestblockhash())

        # now we can remove waiting status from blocks and finish their validation
        rpc_client.waitaftervalidatingblock(block2_hard.hash, "remove")
        rpc_client.waitaftervalidatingblock(block4_hard.hash, "remove")
        submitminingsolution_thread.join()

        # wait till validation of block or blocks finishes
        node0.sync_with_ping()

        # easier block should still be on tip
        assert_equal(block3_easier.hash, self.nodes[0].getbestblockhash())
Example no. 29
    def get_tests(self):
        # shorthand for functions
        block = self.chain.next_block
        node = get_rpc_proxy(self.nodes[0].url, 1, timeout=6000, coveragedir=self.nodes[0].coverage_dir)

        # Create a new block
        self.chain.set_genesis_hash(int(node.getbestblockhash(), 16))
        block(0)
        self.chain.save_spendable_output()
        yield self.accepted()

        # Now we need that block to mature so we can spend the coinbase.
        test = TestInstance(sync_every_block=False)
        for i in range(self.num_blocks):
            block(5000 + i)
            test.blocks_and_transactions.append([self.chain.tip, True])
            self.chain.save_spendable_output()
        yield test

        # Collect spendable outputs now to avoid cluttering the code later on
        out = []
        for i in range(self.num_blocks + 1):
            out.append(self.chain.get_spendable_output())

        # Create 1GB block
        block(1, spend=out[0], block_size=1*ONE_GIGABYTE)
        yield self.accepted()

        # Create long chain of smaller blocks
        test = TestInstance(sync_every_block=False)
        for i in range(self.num_blocks):
            block(6000 + i, spend=out[i + 1], block_size=64*ONE_KILOBYTE)
            test.blocks_and_transactions.append([self.chain.tip, True])
        yield test

        # Launch another node with config that should avoid a stall during IBD
        self.log.info("Launching extra nodes")
        self.add_node(2, extra_args = [
                                    '-whitelist=127.0.0.1',
                                    '-excessiveblocksize=%d' % (ONE_GIGABYTE * 6),
                                    '-blockmaxsize=%d' % (ONE_GIGABYTE * 6),
                                    '-maxtxsizepolicy=%d' % (ONE_GIGABYTE * 2),
                                    '-maxscriptsizepolicy=0',
                                    '-rpcservertimeout=1000',
                                    '-genesisactivationheight=%d' % self.genesisactivationheight,
                                    "-txindex",
                                    "-maxtipage=0",
                                    "-blockdownloadwindow=64",
                                    "-blockstallingtimeout=6"
                                      ],
                      init_data_dir=True)
        self.start_node(2)

        # Connect the new nodes up so they do IBD
        self.log.info("Starting IBD")
        connect_nodes(self.nodes[0], 2)
        connect_nodes(self.nodes[1], 2)
        self.sync_all(timeout=120)

        # Check we didn't hit a stall for node2
        assert(not check_for_log_msg("stalling block download", self.options.tmpdir + "/node2"))
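
check_for_log_msg is used above without being shown. A hedged sketch of such a helper, assuming it simply scans the node's log file for the message (the bitcoind.log filename and the regtest subdirectory are assumptions about this framework's layout, not confirmed by this listing):

# Hedged sketch of a log-scan helper shaped like check_for_log_msg.
# The log file name and location are assumptions, not confirmed here.
import os

def check_for_log_msg(msg, node_dir):
    log_path = os.path.join(node_dir, "regtest", "bitcoind.log")
    with open(log_path) as f:
        return msg in f.read()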
Example no. 30
    def get_tests(self):

        # shorthand for functions
        block = self.chain.next_block
        node = get_rpc_proxy(self.nodes[0].url,
                             1,
                             timeout=6000,
                             coveragedir=self.nodes[0].coverage_dir)

        self.chain.set_genesis_hash(int(node.getbestblockhash(), 16))
        # Create a new block
        block(0)

        self.chain.save_spendable_output()
        yield self.accepted()

        # Now we need that block to mature so we can spend the coinbase.
        test = TestInstance(sync_every_block=False)
        for i in range(200):
            block(5000 + i)
            test.blocks_and_transactions.append([self.chain.tip, True])
            self.chain.save_spendable_output()
        yield test

        # Collect spendable outputs now to avoid cluttering the code later on
        out = []
        for i in range(200):
            out.append(self.chain.get_spendable_output())

        # Create a transaction that will almost fill the block file when the next block is generated (~130 MB)
        tx1 = create_transaction(
            out[0].tx, out[0].n, b"", ONE_MEGABYTE * 120,
            CScript(
                [OP_TRUE, OP_RETURN,
                 bytearray([42] * (ONE_MEGABYTE * 120))]))
        self.test.connections[0].send_message(msg_tx(tx1))
        # Wait for transaction processing
        self.check_mempool(node, [tx1])

        # Mine block with new transaction.
        minedBlock1 = node.generate(1)

        # Send 4 large (~1GB) transactions that will go into next block
        for i in range(4):
            txLarge = create_transaction(
                out[1 + i].tx, out[1 + i].n, b"", ONE_GIGABYTE,
                CScript([
                    OP_TRUE, OP_RETURN,
                    bytearray([42] * (ONE_GIGABYTE - ONE_MEGABYTE))
                ]))
            self.test.connections[0].send_message(msg_tx(txLarge))
            self.check_mempool(node, [txLarge])

        # Set the overflow size so that the resulting block size is exactly the 32-bit maximum (0xFFFFFFFF)
        txOverflowSize = ((ONE_MEGABYTE * 298) + 966751)
        txOverflow = create_transaction(
            out[5].tx, out[5].n, b"", ONE_MEGABYTE * 305,
            CScript([OP_TRUE, OP_RETURN,
                     bytearray([42] * txOverflowSize)]))
        self.test.connections[0].send_message(msg_tx(txOverflow))
        self.check_mempool(node, [txOverflow])

        # Mine block with new transactions.
        minedBlock2 = node.generate(1)

        txLast = create_transaction(
            out[6].tx, out[6].n, b"", ONE_MEGABYTE,
            CScript([OP_TRUE, OP_RETURN,
                     bytearray([42] * (ONE_MEGABYTE))]))
        self.test.connections[0].send_message(msg_tx(txLast))
        self.check_mempool(node, [txLast])

        # Mine block with new transaction.
        minedBlock3 = node.generate(1)

        # Restart node to make sure that the index is written to disk
        self.stop_nodes()
        self.nodes[0].rpc_timeout = 6000
        self.start_nodes(self.extra_args)

        # Get proxy with bigger timeout
        node = get_rpc_proxy(self.nodes[0].url,
                             1,
                             timeout=6000,
                             coveragedir=self.nodes[0].coverage_dir)

        # Verify that blocks were correctly written / read
        blockDetails1 = node.getblock(minedBlock1[0])
        blockDetails2 = node.getblock(minedBlock2[0])
        blockDetails3 = node.getblock(minedBlock3[0])

        assert_equal(minedBlock1[0], blockDetails1['hash'])
        assert_equal(minedBlock2[0], blockDetails2['hash'])
        assert_equal(minedBlock3[0], blockDetails3['hash'])

        for blockDetails in (blockDetails1, blockDetails2, blockDetails3):
            for txId in blockDetails['tx']:
                txCopy = node.getrawtransaction(txId, 1)
                assert_equal(txId, txCopy['txid'])