Esempio n. 1
0
    def setup_network(self):
        """
        Set up a 3-node network whose nodes mine with different parameters.
        Node 0 is started alone first and used to split a coinbase into many
        small low-priority outputs that feed the test's transactions; the
        other nodes are connected only afterwards so their fee estimates are
        not skewed by the splitting traffic.
        """
        self.nodes = []
        # Node 0 mines the blocks used for input splitting.
        self.nodes.append(start_node(0, self.options.tmpdir, ["-maxorphantx=1000",
                                                              "-relaypriority=0", "-whitelist=127.0.0.1"]))

        print("This test is time consuming, please be patient")
        print("Splitting inputs to small size so we can generate low priority tx's")
        self.txouts = []
        self.txouts2 = []
        # Split a coinbase into two transaction puzzle outputs.
        split_inputs(self.nodes[0], self.nodes[0].listunspent(0), self.txouts, True)

        # Mine until the mempool is empty so the splits confirm.
        while len(self.nodes[0].getrawmempool()) > 0:
            self.nodes[0].generate(1)

        # Repeatedly split those outputs, doubling twice per round.
        # txouts/txouts2 track the available utxos, which are not in the wallet.
        for _ in range(5):
            # Double txouts into txouts2, then confirm the splits.
            while len(self.txouts) > 0:
                split_inputs(self.nodes[0], self.txouts, self.txouts2)
            while len(self.nodes[0].getrawmempool()) > 0:
                self.nodes[0].generate(1)
            # Double txouts2 back into txouts, then confirm again.
            while len(self.txouts2) > 0:
                split_inputs(self.nodes[0], self.txouts2, self.txouts)
            while len(self.nodes[0].getrawmempool()) > 0:
                self.nodes[0].generate(1)
        print("Finished splitting")

        # Only now connect the other nodes, so the estimates are not
        # affected by the splitting transactions.
        # Node 1 mines small blocks that are still bigger than the expected
        # transaction rate, and allows free transactions.
        # NOTE: the CreateNewBlock code starts counting block size at 1,000
        # bytes, so 17k leaves room for roughly 110 transactions.
        self.nodes.append(start_node(1, self.options.tmpdir,
                                     ["-blockprioritysize=1500", "-blockmaxsize=18000",
                                      "-maxorphantx=1000", "-relaypriority=0", "-debug=estimatefee"]))
        connect_nodes(self.nodes[1], 0)

        # Node 2 is a stingy miner whose blocks fit only ~70 transactions.
        node2args = ["-blockprioritysize=0", "-blockmaxsize=12000", "-maxorphantx=1000", "-relaypriority=0"]

        self.nodes.append(start_node(2, self.options.tmpdir, node2args))
        connect_nodes(self.nodes[0], 2)
        connect_nodes(self.nodes[2], 1)

        self.is_network_split = False
        self.sync_all()
Esempio n. 2
0
    def run_test (self):
        """
        Verify HD wallet behavior: -usehd cannot be disabled on an existing
        HD wallet, derivation follows m/0'/0'/i', a backup restores both the
        HD master key and an imported non-HD key, and a -rescan recovers the
        full balance.
        """
        tmpdir = self.options.tmpdir

        # Make sure can't switch off usehd after wallet creation
        self.stop_node(1)
        assert_start_raises_init_error(1, self.options.tmpdir, ['-usehd=0'], 'already existing HD wallet')
        self.nodes[1] = start_node(1, self.options.tmpdir, self.node_args[1])
        connect_nodes_bi(self.nodes, 0, 1)

        # Make sure we use hd, keep masterkeyid (expected to be 40 characters)
        masterkeyid = self.nodes[1].getwalletinfo()['hdmasterkeyid']
        assert_equal(len(masterkeyid), 40)

        # Import a non-HD private key in the HD wallet
        non_hd_add = self.nodes[0].getnewaddress()
        self.nodes[1].importprivkey(self.nodes[0].dumpprivkey(non_hd_add))

        # This should be enough to keep the master key and the non-HD key
        self.nodes[1].backupwallet(tmpdir + "/hd.bak")
        #self.nodes[1].dumpwallet(tmpdir + "/hd.dump")

        # Derive some HD addresses and remember the last
        # Also send funds to each add
        self.nodes[0].generate(101)
        hd_add = None
        num_hd_adds = 300
        for i in range(num_hd_adds):
            hd_add = self.nodes[1].getnewaddress()
            hd_info = self.nodes[1].validateaddress(hd_add)
            # Keypaths are hardened and 1-based: m/0'/0'/1' ... m/0'/0'/300'
            assert_equal(hd_info["hdkeypath"], "m/0'/0'/"+str(i+1)+"'")
            assert_equal(hd_info["hdmasterkeyid"], masterkeyid)
            self.nodes[0].sendtoaddress(hd_add, 1)
            self.nodes[0].generate(1)
        self.nodes[0].sendtoaddress(non_hd_add, 1)
        self.nodes[0].generate(1)

        self.sync_all()
        assert_equal(self.nodes[1].getbalance(), num_hd_adds + 1)

        self.log.info("Restore backup ...")
        self.stop_node(1)
        os.remove(self.options.tmpdir + "/node1/regtest/wallet.dat")
        shutil.copyfile(tmpdir + "/hd.bak", tmpdir + "/node1/regtest/wallet.dat")
        self.nodes[1] = start_node(1, self.options.tmpdir, self.node_args[1])
        #connect_nodes_bi(self.nodes, 0, 1)

        # Assert that derivation is deterministic
        # FIX: the loop variable was named `_` yet referenced in the keypath
        # assertion; use an ordinary name, matching the first loop above.
        hd_add_2 = None
        for i in range(num_hd_adds):
            hd_add_2 = self.nodes[1].getnewaddress()
            hd_info_2 = self.nodes[1].validateaddress(hd_add_2)
            assert_equal(hd_info_2["hdkeypath"], "m/0'/0'/"+str(i+1)+"'")
            assert_equal(hd_info_2["hdmasterkeyid"], masterkeyid)
        assert_equal(hd_add, hd_add_2)

        # Needs rescan so the restored wallet sees its funds
        self.stop_node(1)
        self.nodes[1] = start_node(1, self.options.tmpdir, self.node_args[1] + ['-rescan'])
        #connect_nodes_bi(self.nodes, 0, 1)
        assert_equal(self.nodes[1].getbalance(), num_hd_adds + 1)
Esempio n. 3
0
 def setup_network(self):
     """
     Start two connected regtest nodes with mempool checking, mempool debug
     logging, a 2-input mempool tx limit, and the 5ba81b19 network upgrade
     activating at height 110.
     """
     args = ["-checkmempool", "-debug=mempool", "-mempooltxinputlimit=2", "-nuparams=5ba81b19:110"]
     self.nodes = []
     self.nodes.append(start_node(0, self.options.tmpdir, args))
     self.nodes.append(start_node(1, self.options.tmpdir, args))
     connect_nodes(self.nodes[1], 0)
     self.is_network_split = False
     # BUG FIX: `self.sync_all` was only referenced, never called — the
     # missing parentheses meant the nodes were not synced here.
     self.sync_all()
Esempio n. 4
0
 def setup_network(self, split=False):
     """Bring up two connected nodes mining tiny (11kb) blocks with a large
     priority area and priority-based relay enabled."""
     tiny_block_args = ["-blockprioritysize=7000", "-blockmaxsize=11000", "-maxorphantx=1000", "-relaypriority=true", "-printpriority=1"]
     self.nodes = []
     # Start both nodes with a tiny block size of 11kb.
     for node_id in (0, 1):
         self.nodes.append(start_node(node_id, self.options.tmpdir, list(tiny_block_args)))
     connect_nodes(self.nodes[1], 0)
     self.is_network_split = False
     self.sync_all()
Esempio n. 5
0
 def start_three(self):
     """Restart nodes 0-2 with default options and re-join each of them to
     node 3 (which is not restarted here), plus an extra 2-0 link."""
     for idx in range(3):
         self.nodes[idx] = start_node(idx, self.options.tmpdir)
     for idx in range(3):
         connect_nodes(self.nodes[idx], 3)
     connect_nodes(self.nodes[2], 0)
Esempio n. 6
0
 def setup_network(self):
     """Start two connected nodes with mempool consistency checking and
     mempool debug logging enabled."""
     args = ["-checkmempool", "-debug=mempool"]
     self.nodes = []
     self.nodes.append(start_node(0, self.options.tmpdir, args))
     self.nodes.append(start_node(1, self.options.tmpdir, args))
     connect_nodes(self.nodes[1], 0)
     self.is_network_split = False
     # BUG FIX: added the missing call parentheses; the bare `self.sync_all`
     # evaluated the bound method without ever running it.
     self.sync_all()
Esempio n. 7
0
 def setup_network(self):
     """
     Launch two nodes from the binary under test.  Node 0 exercises the
     handling of unrequested blocks from non-whitelisted peers; node 1
     covers the whitelisted case.
     """
     self.nodes = [
         start_node(0, self.options.tmpdir, ["-debug"],
                    binary=self.options.testbinary),
         start_node(1, self.options.tmpdir,
                    ["-debug", "-whitelist=127.0.0.1"],
                    binary=self.options.testbinary),
     ]
Esempio n. 8
0
 def setup_network(self):
     """Start two connected nodes with small (4kb) blocks, mempool checking,
     and Overwinter/Sapling activation heights set for the test."""
     args = ["-checkmempool", "-debug=mempool", "-blockmaxsize=4000",
         "-nuparams=5ba81b19:200", # Overwinter
         "-nuparams=76b809bb:210", # Sapling
     ]
     self.nodes = []
     self.nodes.append(start_node(0, self.options.tmpdir, args))
     self.nodes.append(start_node(1, self.options.tmpdir, args))
     connect_nodes(self.nodes[1], 0)
     self.is_network_split = False
     # BUG FIX: `self.sync_all` lacked the call parentheses and never ran.
     self.sync_all()
Esempio n. 9
0
 def setup_network(self, split=False):
     """Start three fully-meshed nodes with coinbase protection and unsafe
     z-RPC debug logging; node 2 additionally caps mempool tx inputs at 7."""
     base_args = ['-regtestprotectcoinbase', '-debug=zrpcunsafe']
     self.nodes = []
     self.nodes.append(start_node(0, self.options.tmpdir, base_args))
     self.nodes.append(start_node(1, self.options.tmpdir, base_args))
     self.nodes.append(start_node(2, self.options.tmpdir,
                                  base_args + ["-mempooltxinputlimit=7"]))
     for left, right in ((0, 1), (1, 2), (0, 2)):
         connect_nodes_bi(self.nodes, left, right)
     self.is_network_split = False
     self.sync_all()
Esempio n. 10
0
 def setup_network(self, split=False):
     """Start three fully-meshed nodes with z_mergetoaddress enabled;
     node 2 additionally caps mempool tx inputs at 7."""
     base_args = ['-debug=zrpcunsafe', '-experimentalfeatures', '-zmergetoaddress']
     self.nodes = []
     self.nodes.append(start_node(0, self.options.tmpdir, base_args))
     self.nodes.append(start_node(1, self.options.tmpdir, base_args))
     self.nodes.append(start_node(2, self.options.tmpdir,
                                  base_args + ['-mempooltxinputlimit=7']))
     for left, right in ((0, 1), (1, 2), (0, 2)):
         connect_nodes_bi(self.nodes, left, right)
     self.is_network_split = False
     self.sync_all()
Esempio n. 11
0
    def reorg_test(self):
        """Force a deep (288-block) reorg on node 2 and check that pruning
        still keeps disk usage under the target.  Returns the fork point as
        (invalidheight, badhash) for later use by the caller."""
        # Node 1 will mine a 300 block chain starting 287 blocks back from Node 0 and Node 2's tip
        # This will cause Node 2 to do a reorg requiring 288 blocks of undo data to the reorg_test chain
        # Reboot node 1 to clear its mempool (hopefully make the invalidate faster)
        # Lower the block max size so we don't keep mining all our big mempool transactions (from disconnected blocks)
        stop_node(self.nodes[1],1)
        self.nodes[1]=start_node(1, self.options.tmpdir, ["-debug","-maxreceivebuffer=20000","-blockmaxsize=5000", "-checkblocks=5", "-disablesafemode"], timewait=900)

        height = self.nodes[1].getblockcount()
        print "Current block height:", height

        invalidheight = height-287
        badhash = self.nodes[1].getblockhash(invalidheight)
        print "Invalidating block at height:",invalidheight,badhash
        self.nodes[1].invalidateblock(badhash)

        # We've now switched to our previously mined-24 block fork on node 1, but thats not what we want
        # So invalidate that fork as well, until we're on the same chain as node 0/2 (but at an ancestor 288 blocks ago)
        mainchainhash = self.nodes[0].getblockhash(invalidheight - 1)
        curhash = self.nodes[1].getblockhash(invalidheight - 1)
        # Keep invalidating node 1's tip until its block at (invalidheight-1)
        # matches node 0's main-chain block at that height.
        while curhash != mainchainhash:
            self.nodes[1].invalidateblock(curhash)
            curhash = self.nodes[1].getblockhash(invalidheight - 1)

        assert(self.nodes[1].getblockcount() == invalidheight - 1)
        print "New best height", self.nodes[1].getblockcount()

        # Reboot node1 to clear those giant tx's from mempool
        stop_node(self.nodes[1],1)
        self.nodes[1]=start_node(1, self.options.tmpdir, ["-debug","-maxreceivebuffer=20000","-blockmaxsize=5000", "-checkblocks=5", "-disablesafemode"], timewait=900)

        print "Generating new longer chain of 300 more blocks"
        self.nodes[1].generate(300)

        print "Reconnect nodes"
        connect_nodes(self.nodes[0], 1)
        connect_nodes(self.nodes[2], 1)
        sync_blocks(self.nodes[0:3])

        print "Verify height on node 2:",self.nodes[2].getblockcount()
        print "Usage possibly still high bc of stale blocks in block files:", calc_usage(self.prunedir)

        print "Mine 220 more blocks so we have requisite history (some blocks will be big and cause pruning of previous chain)"
        self.nodes[0].generate(220) #node 0 has many large tx's in its mempool from the disconnects
        sync_blocks(self.nodes[0:3])

        usage = calc_usage(self.prunedir)
        print "Usage should be below target:", usage
        # Pruning must keep calc_usage() at or below 550 (units per
        # calc_usage — presumably MB; confirm against its definition).
        if (usage > 550):
            raise AssertionError("Pruning target not being met")

        return invalidheight,badhash
Esempio n. 12
0
    def run_test (self):
        """
        Mine to just before the upgrade activation height, create diverging
        Sprout/Overwinter tips, and check that restarting node 0 with or
        without the upgrade parameters rewinds it onto the matching chain.
        """
        def switch_node0(nuparams_args=None):
            # Stop node 0, wait for the process to exit, restart it with
            # the given args, and reconnect it to the other nodes.
            self.nodes[0].stop()
            bitcoind_processes[0].wait()
            if nuparams_args is None:
                self.nodes[0] = start_node(0,self.options.tmpdir)
            else:
                self.nodes[0] = start_node(0,self.options.tmpdir, extra_args=nuparams_args)
            connect_nodes_bi(self.nodes,0,1)
            connect_nodes_bi(self.nodes,1,2)
            connect_nodes_bi(self.nodes,0,2)

        # Bring all nodes to just before the activation block
        print("Mining blocks...")
        self.nodes[0].generate(8)
        block9 = self.nodes[0].generate(1)[0]
        self.sync_all()

        assert_equal(self.nodes[0].getbestblockhash(), block9)
        assert_equal(self.nodes[1].getbestblockhash(), block9)

        print("Mining diverging blocks")
        block10s = self.nodes[1].generate(1)[0]
        block10o = self.nodes[2].generate(1)[0]
        self.sync_all()

        assert_equal(self.nodes[0].getbestblockhash(), block10o)
        assert_equal(self.nodes[1].getbestblockhash(), block10s)
        assert_equal(self.nodes[2].getbestblockhash(), block10o)

        # Restart node 0 using Sprout instead of Overwinter
        print("Switching node 0 from Overwinter to Sprout")
        switch_node0()

        # Assume node 1 will send block10s to node 0 quickly
        # (if we used self.sync_all() here and there was a bug, the test would hang)
        time.sleep(2)

        # Node 0 has rewound and is now on the Sprout chain
        assert_equal(self.nodes[0].getblockcount(), 10)
        assert_equal(self.nodes[0].getbestblockhash(), block10s)

        # Restart node 0 using Overwinter instead of Sprout
        print("Switching node 0 from Sprout to Overwinter")
        switch_node0(['-nuparams=5ba81b19:10'])

        # Assume node 2 will send block10o to node 0 quickly
        # (if we used self.sync_all() here and there was a bug, the test would hang)
        time.sleep(2)

        # Node 0 has rewound and is now on the Overwinter chain again
        assert_equal(self.nodes[0].getblockcount(), 10)
        assert_equal(self.nodes[0].getbestblockhash(), block10o)
Esempio n. 13
0
 def setup_network(self, test, additional_args=None):
     """
     Start three z_mergetoaddress-enabled nodes on `test` and connect them
     in a full mesh; node 2 additionally caps mempool tx inputs at 7.

     additional_args: optional list appended to every node's arguments.
     """
     # BUG FIX: the default was a mutable list ([]); use a None sentinel so
     # a shared list object can never leak between calls.
     if additional_args is None:
         additional_args = []
     args = ['-debug=zrpcunsafe', '-experimentalfeatures', '-zmergetoaddress']
     args += additional_args
     test.nodes = []
     test.nodes.append(start_node(0, test.options.tmpdir, args))
     test.nodes.append(start_node(1, test.options.tmpdir, args))
     args2 = ['-debug=zrpcunsafe', '-experimentalfeatures', '-zmergetoaddress', '-mempooltxinputlimit=7']
     args2 += additional_args
     test.nodes.append(start_node(2, test.options.tmpdir, args2))
     connect_nodes_bi(test.nodes, 0, 1)
     connect_nodes_bi(test.nodes, 1, 2)
     connect_nodes_bi(test.nodes, 0, 2)
     test.is_network_split = False
     test.sync_all()
Esempio n. 14
0
    def run_test (self):
        """Check wallet tx persistence across restarts and that restarting
        with -zapwallettxes removes unconfirmed transactions from the
        wallet (so gettransaction on them raises)."""
        print "Mining blocks..."
        self.nodes[0].generate(4)
        self.sync_all()
        self.nodes[1].generate(101)
        self.sync_all()

        # Node 0's 4 mined blocks are mature now; the expected balance of 40
        # assumes a 10-per-block subsidy — confirm against chain params.
        assert_equal(self.nodes[0].getbalance(), 40)

        # Two sends that the generate(1) below will confirm...
        txid0 = self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 11)
        txid1 = self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 10)
        self.sync_all()
        self.nodes[0].generate(1)
        self.sync_all()

        # ...and two that stay unconfirmed in the mempool.
        txid2 = self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 11)
        txid3 = self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 5)

        tx0 = self.nodes[0].gettransaction(txid0)
        assert_equal(tx0['txid'], txid0) # tx0 must be available (confirmed)

        tx1 = self.nodes[0].gettransaction(txid1)
        assert_equal(tx1['txid'], txid1) # tx1 must be available (confirmed)

        tx2 = self.nodes[0].gettransaction(txid2)
        assert_equal(tx2['txid'], txid2) # tx2 must be available (unconfirmed)

        tx3 = self.nodes[0].gettransaction(txid3)
        assert_equal(tx3['txid'], txid3) # tx3 must be available (unconfirmed)

        # restart zcashd
        self.nodes[0].stop()
        bitcoind_processes[0].wait()
        self.nodes[0] = start_node(0,self.options.tmpdir)

        # A plain restart keeps the unconfirmed wallet transaction.
        tx3 = self.nodes[0].gettransaction(txid3)
        assert_equal(tx3['txid'], txid3) # tx must be available (unconfirmed)

        self.nodes[0].stop()
        bitcoind_processes[0].wait()

        # restart zcashd with zapwallettxes
        self.nodes[0] = start_node(0,self.options.tmpdir, ["-zapwallettxes=1"])

        # After zapping, the unconfirmed tx should be gone from the wallet,
        # so gettransaction is expected to raise.
        aException = False
        try:
            tx3 = self.nodes[0].gettransaction(txid3)
        except JSONRPCException,e:
            print e
            aException = True
Esempio n. 15
0
    def setup_network(self):
        """Four-node star around node 0: nodes 0/1 hold wallets, nodes 2/3
        are used for testing, and node 3 also maintains a -txindex."""
        self.nodes = [
            # Nodes 0/1 are "wallet" nodes
            start_node(0, self.options.tmpdir, ["-debug"]),
            start_node(1, self.options.tmpdir, ["-debug"]),
            # Nodes 2/3 are used for testing
            start_node(2, self.options.tmpdir, ["-debug"]),
            start_node(3, self.options.tmpdir, ["-debug", "-txindex"]),
        ]
        for peer in (1, 2, 3):
            connect_nodes(self.nodes[0], peer)

        self.is_network_split = False
        self.sync_all()
Esempio n. 16
0
    def setup_network(self):
        """Two connected nodes with different block versions; node 0 writes
        -alertnotify output to a fresh alert.txt."""
        self.nodes = []
        self.alert_filename = os.path.join(self.options.tmpdir, "alert.txt")
        # Create a zero-length file for the -alertnotify echo to append to.
        open(self.alert_filename, 'w').close()
        notify_arg = "-alertnotify=echo %s >> \"" + self.alert_filename + "\""
        self.nodes.append(start_node(0, self.options.tmpdir,
                            ["-blockversion=2", notify_arg]))
        # Node1 mines block.version=211 blocks
        self.nodes.append(start_node(1, self.options.tmpdir,
                                ["-blockversion=211"]))
        connect_nodes(self.nodes[1], 0)

        self.is_network_split = False
        self.sync_all()
Esempio n. 17
0
    def create_chain_with_staleblocks(self):
        """Repeatedly mine a 24-block stale chain on node 1 followed by a
        25-block reorg chain on node 0 (12 rounds) to build up stale blocks
        for the pruning test."""
        # Create stale blocks in manageable sized chunks
        print "Mine 24 (stale) blocks on Node 1, followed by 25 (main chain) block reorg from Node 0, for 12 rounds"

        for j in xrange(12):
            # Disconnect node 0 so it can mine a longer reorg chain without knowing about node 1's soon-to-be-stale chain
            # Node 2 stays connected, so it hears about the stale blocks and then reorg's when node0 reconnects
            # Stopping node 0 also clears its mempool, so it doesn't have node1's transactions to accidentally mine
            stop_node(self.nodes[0],0)
            self.nodes[0]=start_node(0, self.options.tmpdir, ["-debug","-maxreceivebuffer=20000","-blockmaxsize=999000", "-checkblocks=5"], timewait=900)
            # Mine 24 blocks in node 1
            self.utxo = self.nodes[1].listunspent()
            # Only the first round needs explicitly-filled blocks; later
            # rounds reuse transactions already in node 1's mempool.
            for i in xrange(24):
                if j == 0:
                    self.mine_full_block(self.nodes[1],self.address[1])
                else:
                    self.nodes[1].generate(1) #tx's already in mempool from previous disconnects

            # Reorg back with 25 block chain from node 0
            self.utxo = self.nodes[0].listunspent()
            for i in xrange(25): 
                self.mine_full_block(self.nodes[0],self.address[0])

            # Create connections in the order so both nodes can see the reorg at the same time
            connect_nodes(self.nodes[1], 0)
            connect_nodes(self.nodes[2], 0)
            sync_blocks(self.nodes[0:3])

        print "Usage can be over target because of high stale rate:", calc_usage(self.prunedir)
Esempio n. 18
0
 def restart_second_node(self, extra_args=None):
     """Restart node 1 with the base coinbase-protection/z-RPC-debug args
     plus `extra_args`, restore its mock time, reconnect it to node 0 and
     sync."""
     # BUG FIX: avoid the mutable-default-argument pitfall; None is the
     # "no extra args" sentinel.
     extra_args = [] if extra_args is None else extra_args
     self.nodes[1].stop()
     bitcoind_processes[1].wait()
     self.nodes[1] = start_node(1, self.options.tmpdir, extra_args=['-regtestprotectcoinbase','-debug=zrpc'] + extra_args)
     # `starttime` is presumably a module-level mock-time baseline — confirm
     # against the rest of this test file.
     self.nodes[1].setmocktime(starttime + 9000)
     connect_nodes_bi(self.nodes, 0, 1)
     self.sync_all()
Esempio n. 19
0
 def setup_network(self):
     """Start a single node whose -alertnotify appends alerts to a fresh
     alert.txt in the test's tmpdir."""
     self.nodes = []
     self.alert_filename = os.path.join(self.options.tmpdir, "alert.txt")
     # Create a zero-length file for the -alertnotify echo to append to.
     open(self.alert_filename, 'w').close()
     notify_arg = "-alertnotify=echo %s >> \"" + self.alert_filename + "\""
     self.nodes.append(start_node(0, self.options.tmpdir,
                         ["-blockversion=2", notify_arg]))
Esempio n. 20
0
 def run_test(self):
     # Mine a short chain, then restart the node with -reindex so the block
     # index is rebuilt from disk, and verify the chain height survives.
     self.nodes[0].generate(3)
     stop_node(self.nodes[0], 0)
     wait_bitcoinds()
     self.nodes[0]=start_node(0, self.options.tmpdir, ["-debug", "-reindex", "-checkblockindex=1"])
     assert_equal(self.nodes[0].getblockcount(), 3)
     print "Success"
    def run_test(self):
        """Stop every default-started node, then bring them all back up so
        the framework's teardown finds running nodes to stop."""
        # Stop nodes which have been started by default
        stop_nodes(self.nodes)

        # Restart them: the test framework tries to stop nodes at the end of
        # the test and reports errors if none are running.
        for idx in range(NUM_NODES):
            self.nodes[idx] = start_node(idx, self.options.tmpdir)
Esempio n. 22
0
    def run_test(self):
        """Exercise the setban/listbanned/clearbanned RPCs, persistence of
        the banlist across a restart, and the disconnectnode RPC."""
        ###########################
        # setban/listbanned tests #
        ###########################
        assert_equal(len(self.nodes[1].getpeerinfo()), 2)  # node1 should have 2 connections to node0 at this point
        self.nodes[1].setban("127.0.0.1", "add")
        assert wait_until(lambda: len(self.nodes[1].getpeerinfo()) == 0, timeout=10)
        assert_equal(len(self.nodes[1].getpeerinfo()), 0)  # all nodes must be disconnected at this point
        assert_equal(len(self.nodes[1].listbanned()), 1)
        self.nodes[1].clearbanned()
        assert_equal(len(self.nodes[1].listbanned()), 0)
        self.nodes[1].setban("127.0.0.0/24", "add")
        assert_equal(len(self.nodes[1].listbanned()), 1)
        # This will throw an exception because 127.0.0.1 is within range 127.0.0.0/24
        assert_raises_jsonrpc(-23, "IP/Subnet already banned", self.nodes[1].setban, "127.0.0.1", "add")
        # This will throw an exception because 127.0.0.1/42 is not a real subnet
        assert_raises_jsonrpc(-30, "Error: Invalid IP/Subnet", self.nodes[1].setban, "127.0.0.1/42", "add")
        assert_equal(len(self.nodes[1].listbanned()), 1)  # still only one banned ip because 127.0.0.1 is within the range of 127.0.0.0/24
        # This will throw an exception because 127.0.0.1 was not added above
        assert_raises_jsonrpc(-30, "Error: Unban failed", self.nodes[1].setban, "127.0.0.1", "remove")
        assert_equal(len(self.nodes[1].listbanned()), 1)
        self.nodes[1].setban("127.0.0.0/24", "remove")
        assert_equal(len(self.nodes[1].listbanned()), 0)
        self.nodes[1].clearbanned()
        assert_equal(len(self.nodes[1].listbanned()), 0)

        # test persisted banlist
        self.nodes[1].setban("127.0.0.0/32", "add")
        self.nodes[1].setban("127.0.0.0/24", "add")
        self.nodes[1].setban("192.168.0.1", "add", 1)  # ban for 1 seconds
        self.nodes[1].setban("2001:4d48:ac57:400:cacf:e9ff:fe1d:9c63/19", "add", 1000)  # ban for 1000 seconds
        listBeforeShutdown = self.nodes[1].listbanned()
        assert_equal("192.168.0.1/32", listBeforeShutdown[2]['address'])
        # Wait for the 1-second ban on 192.168.0.1 to expire, leaving 3 bans.
        assert wait_until(lambda: len(self.nodes[1].listbanned()) == 3, timeout=10)

        stop_node(self.nodes[1], 1)

        # The restarted node must reload the remaining bans from disk.
        self.nodes[1] = start_node(1, self.options.tmpdir)
        listAfterShutdown = self.nodes[1].listbanned()
        assert_equal("127.0.0.0/24", listAfterShutdown[0]['address'])
        assert_equal("127.0.0.0/32", listAfterShutdown[1]['address'])
        assert_equal("/19" in listAfterShutdown[2]['address'], True)

        # Clear ban lists
        self.nodes[1].clearbanned()
        connect_nodes_bi(self.nodes, 0, 1)

        ###########################
        # RPC disconnectnode test #
        ###########################
        address1 = self.nodes[0].getpeerinfo()[0]['addr']
        self.nodes[0].disconnectnode(address=address1)
        assert wait_until(lambda: len(self.nodes[0].getpeerinfo()) == 1, timeout=10)
        assert not [node for node in self.nodes[0].getpeerinfo() if node['addr'] == address1]

        connect_nodes_bi(self.nodes, 0, 1)  # reconnect the node
        assert [node for node in self.nodes[0].getpeerinfo() if node['addr'] == address1]
Esempio n. 23
0
    def run_test (self):
        """
        Back up an HD wallet holding an imported non-HD key, restore it
        from the backup, check address derivation is deterministic, and
        confirm a -rescan recovers the full balance.
        """
        tmpdir = self.options.tmpdir

        # Import a non-HD private key in the HD wallet
        non_hd_add = self.nodes[0].getnewaddress()
        self.nodes[1].importprivkey(self.nodes[0].dumpprivkey(non_hd_add))

        # This should be enough to keep the master key and the non-HD key.
        # BUG FIX: the backup path was built as tmpdir + "hd.bak" with no
        # separator, dropping the file *beside* tmpdir (with tmpdir's name
        # as a prefix) rather than inside it; build the path properly, as
        # the restore step below already does for node1's wallet path.
        self.nodes[1].backupwallet(tmpdir + "/hd.bak")
        #self.nodes[1].dumpwallet(tmpdir + "/hd.dump")

        # Derive some HD addresses and remember the last
        # Also send funds to each add
        self.nodes[0].generate(101)
        hd_add = None
        num_hd_adds = 300
        for _ in range(num_hd_adds):
            hd_add = self.nodes[1].getnewaddress()
            self.nodes[0].sendtoaddress(hd_add, 1)
            self.nodes[0].generate(1)
        self.nodes[0].sendtoaddress(non_hd_add, 1)
        self.nodes[0].generate(1)

        self.sync_all()
        assert_equal(self.nodes[1].getbalance(), num_hd_adds + 1)

        print("Restore backup ...")
        self.stop_node(1)
        os.remove(self.options.tmpdir + "/node1/regtest/wallet.dat")
        shutil.copyfile(tmpdir + "/hd.bak", tmpdir + "/node1/regtest/wallet.dat")
        self.nodes[1] = start_node(1, self.options.tmpdir, self.node_args[1])
        #connect_nodes_bi(self.nodes, 0, 1)

        # Assert that derivation is deterministic
        hd_add_2 = None
        for _ in range(num_hd_adds):
            hd_add_2 = self.nodes[1].getnewaddress()
        assert_equal(hd_add, hd_add_2)

        # Needs rescan so the restored wallet sees its funds
        self.stop_node(1)
        self.nodes[1] = start_node(1, self.options.tmpdir, self.node_args[1] + ['-rescan'])
        #connect_nodes_bi(self.nodes, 0, 1)
        assert_equal(self.nodes[1].getbalance(), num_hd_adds + 1)
Esempio n. 24
0
 def reindex(self, justchainstate=False):
     """Mine a few blocks, restart node 0 with -reindex (or
     -reindex-chainstate when justchainstate is True), and wait until the
     reindex has restored the pre-restart chain height."""
     self.nodes[0].generate(3)
     tip_height = self.nodes[0].getblockcount()
     stop_node(self.nodes[0], 0)
     wait_bitcoinds()
     reindex_flag = "-reindex-chainstate" if justchainstate else "-reindex"
     self.nodes[0] = start_node(0, self.options.tmpdir, ["-debug", reindex_flag, "-checkblockindex=1"])
     # Poll until the reindex catches back up to the old tip.
     while self.nodes[0].getblockcount() < tip_height:
         time.sleep(0.1)
     assert_equal(self.nodes[0].getblockcount(), tip_height)
     print("Success")
Esempio n. 25
0
 def setup_network(self, split=False):
     """Start three fully-meshed nodes with coinbase protection and
     z_shieldcoinbase enabled; node 2 additionally caps mempool tx inputs
     at 7."""
     base_args = [
         '-regtestprotectcoinbase', '-debug=zrpcunsafe',
         '-experimentalfeatures', '-zshieldcoinbase'
     ]
     self.nodes = []
     self.nodes.append(start_node(0, self.options.tmpdir, base_args))
     self.nodes.append(start_node(1, self.options.tmpdir, base_args))
     self.nodes.append(start_node(2, self.options.tmpdir,
                                  base_args + ["-mempooltxinputlimit=7"]))
     for left, right in ((0, 1), (1, 2), (0, 2)):
         connect_nodes_bi(self.nodes, left, right)
     self.is_network_split = False
     self.sync_all()
Esempio n. 26
0
 def restart_second_node(self, extra_args=None):
     """Restart node 1 with the base coinbase-protection/z-RPC-debug args
     plus `extra_args`, restore its mock time, reconnect it to node 0 and
     sync."""
     # BUG FIX: avoid the mutable-default-argument pitfall; None is the
     # "no extra args" sentinel.
     if extra_args is None:
         extra_args = []
     self.nodes[1].stop()
     bitcoind_processes[1].wait()
     self.nodes[1] = start_node(
         1,
         self.options.tmpdir,
         extra_args=['-regtestprotectcoinbase', '-debug=zrpc'] + extra_args)
     # `starttime` is presumably a module-level mock-time baseline — confirm
     # against the rest of this test file.
     self.nodes[1].setmocktime(starttime + 9000)
     connect_nodes_bi(self.nodes, 0, 1)
     self.sync_all()
Esempio n. 27
0
 def setup_network(self):
     """Start a single version-2-block node whose -alertnotify appends
     alerts to a fresh alert.txt in the test's tmpdir."""
     self.nodes = []
     self.alert_filename = os.path.join(self.options.tmpdir, "alert.txt")
     # Create a zero-length file for the -alertnotify echo to append to.
     open(self.alert_filename, 'w', encoding='utf8').close()
     notify_arg = "-alertnotify=echo %s >> \"" + self.alert_filename + "\""
     self.nodes.append(
         start_node(0, self.options.tmpdir, [
             "-blockversion=2",
             notify_arg
         ]))
Esempio n. 28
0
 def setup_network(self, test, additional_args=None):
     """
     Start three z_mergetoaddress-enabled nodes on `test` and connect them
     in a full mesh; node 2 additionally caps mempool tx inputs at 7.

     additional_args: optional list appended to every node's arguments.
     """
     # BUG FIX: the default was a mutable list ([]); use a None sentinel so
     # a shared list object can never leak between calls.
     if additional_args is None:
         additional_args = []
     args = [
         '-debug=zrpcunsafe', '-experimentalfeatures', '-zmergetoaddress'
     ]
     args += additional_args
     test.nodes = []
     test.nodes.append(start_node(0, test.options.tmpdir, args))
     test.nodes.append(start_node(1, test.options.tmpdir, args))
     args2 = [
         '-debug=zrpcunsafe', '-experimentalfeatures', '-zmergetoaddress',
         '-mempooltxinputlimit=7'
     ]
     args2 += additional_args
     test.nodes.append(start_node(2, test.options.tmpdir, args2))
     connect_nodes_bi(test.nodes, 0, 1)
     connect_nodes_bi(test.nodes, 1, 2)
     connect_nodes_bi(test.nodes, 0, 2)
     test.is_network_split = False
     test.sync_all()
Esempio n. 29
0
 def setup_network(self, split=False):
     """Start three fully-meshed nodes with unsafe z-RPC debug logging.
     For non-sprout address types, every node also activates the 5ba81b19
     (Overwinter) and 76b809bb (Sapling) upgrades from the start."""
     args = ['-debug=zrpcunsafe']
     args2 = ['-debug=zrpcunsafe']
     if self.addr_type != 'sprout':
         upgrade_params = [
             '-nuparams=5ba81b19:0',  # Overwinter
             '-nuparams=76b809bb:1',  # Sapling
         ]
         args.extend(upgrade_params)
         # Node 2 gets the same (extended) argument list in this case.
         args2 = args
     self.nodes = []
     self.nodes.append(start_node(0, self.options.tmpdir, args))
     self.nodes.append(start_node(1, self.options.tmpdir, args))
     self.nodes.append(start_node(2, self.options.tmpdir, args2))
     for left, right in ((0, 1), (1, 2), (0, 2)):
         connect_nodes_bi(self.nodes, left, right)
     self.is_network_split = False
     self.sync_all()
Esempio n. 30
0
    def setup_network(self):
        """
        Start four nodes that assemble blocks differently: node 0 default,
        node 1 caps block complexity, node 2 disables the priority area,
        node 3 does both.  Nodes 1-3 all connect to node 0.
        """
        args0 = ["-printpriority"]
        args1 = ["-printpriority", "-blockmaxcomplexity=9"]
        args2 = ["-printpriority", "-blockprioritysize=0"]
        args3 = [
            "-printpriority", "-blockprioritysize=0", "-blockmaxcomplexity=9"
        ]

        self.nodes = []
        self.nodes.append(start_node(0, self.options.tmpdir, args0))
        self.nodes.append(start_node(1, self.options.tmpdir, args1))
        self.nodes.append(start_node(2, self.options.tmpdir, args2))
        self.nodes.append(start_node(3, self.options.tmpdir, args3))

        connect_nodes(self.nodes[1], 0)
        connect_nodes(self.nodes[2], 0)
        connect_nodes(self.nodes[3], 0)

        self.is_network_split = False
        # BUG FIX: `self.sync_all` was missing its call parentheses, so the
        # nodes were never synced here.
        self.sync_all()
    def setup_network(self, split=False):
        """Three nodes connected in a line (0-1-2): node 0 runs with full
        indexing options and ZMQ publishers, nodes 1 and 2 use defaults."""
        node0_args = [
            '-txindex=1', '-spentindex=1', '-addressindex=1',
            '-timestampindex=1',
            '-zmqpubrawtx=tcp://127.0.0.1:13000',
            '-zmqpubhashblock=tcp://127.0.0.1:13000'
        ]
        self.nodes = []
        self.nodes.append(start_node(0, self.options.tmpdir, extra_args=node0_args))
        self.nodes.append(start_node(1, self.options.tmpdir))
        self.nodes.append(start_node(2, self.options.tmpdir))

        connect_nodes_bi(self.nodes, 0, 1)
        connect_nodes_bi(self.nodes, 1, 2)

        self.is_network_split = False
        self.sync_all()
Esempio n. 32
0
    def run_test(self):
        """Check -wallet path handling: the default location, an alternative
        filename inside the datadir, an absolute path outside the datadir,
        and startup errors for nonexistent absolute/relative directories."""
        # test default wallet location
        assert os.path.isfile(
            os.path.join(self.options.tmpdir, "node0", "regtest",
                         "wallet.dat"))

        # test alternative wallet file name in datadir
        stop_node(self.nodes[0], 0)
        self.nodes[0] = start_node(0, self.options.tmpdir,
                                   ["-wallet=altwallet.dat"])
        assert os.path.isfile(
            os.path.join(self.options.tmpdir, "node0", "regtest",
                         "altwallet.dat"))

        # test wallet file outside datadir
        tempname = os.path.join(self.options.tmpdir, "outsidewallet.dat")
        stop_node(self.nodes[0], 0)
        self.nodes[0] = start_node(0, self.options.tmpdir,
                                   ["-wallet=%s" % tempname])
        assert os.path.isfile(tempname)

        # test the case where absolute path does not exist
        assert not os.path.isdir("/this_directory_must_not_exist")
        invalidpath = os.path.join("/this_directory_must_not_exist/",
                                   "foo.dat")
        stop_node(self.nodes[0], 0)
        # NOTE(review): here the -wallet flag is the second positional
        # argument, unlike other call sites in this codebase that pass
        # (node_index, tmpdir, args_list, message) — confirm this matches
        # assert_start_raises_init_error's signature in this framework.
        assert_start_raises_init_error(
            0, "-wallet=%s" % invalidpath,
            "Error: Absolute path %s does not exist")

        # relative path do not exist
        invalidpath = os.path.join("wallet", "foo.dat")
        assert_start_raises_init_error(
            0, "-wallet=%s" % invalidpath,
            "Error: Relative path %s does not exist")

        # create dir and retry
        os.mkdir(
            os.path.join(self.options.tmpdir, "node0", "regtest", "wallet"))
        self.nodes[0] = start_node(0, self.options.tmpdir,
                                   ["-wallet=%s" % invalidpath])
Esempio n. 33
0
    def run_test(self):
        """Dump the wallet before and after encryption, verify the expected
        key counts in each dump, and check that an existing dump file is
        never overwritten.
        """
        tmpdir = self.options.tmpdir
        backupsdir = tmpdir + "/node0/regtest/backups/"

        # generate 20 addresses to compare against the dump
        test_addr_count = 20
        addrs = []
        for i in range(0, test_addr_count):
            addr = self.nodes[0].getnewaddress()
            vaddr = self.nodes[0].validateaddress(
                addr)  #required to get hd keypath
            addrs.append(vaddr)
        # Should be a no-op:
        self.nodes[0].keypoolrefill()

        # dump unencrypted wallet
        # note that the RPC extracts only the filename
        # thus writing to the backups/ default directory
        self.nodes[0].dumpwallet("/node0/wallet.unencrypted.dump")

        found_addr, found_addr_chg, found_addr_rsv, hd_master_addr_unenc = \
            read_dump(backupsdir + "wallet.unencrypted.dump", addrs, None)
        assert_equal(found_addr,
                     test_addr_count)  # all keys must be in the dump
        assert_equal(found_addr_chg, 30)  # 30 blocks were mined
        assert_equal(found_addr_rsv,
                     90 + 1)  # keypool size (TODO: fix off-by-one)

        # encrypt wallet, restart, unlock and dump
        self.nodes[0].encryptwallet('test')
        # encryptwallet shuts the daemon down; wait for it to exit
        bitcoind_processes[0].wait()
        self.nodes[0] = start_node(0, self.options.tmpdir, self.extra_args[0])
        self.nodes[0].walletpassphrase('test', 10)
        # Should be a no-op:
        self.nodes[0].keypoolrefill()
        self.nodes[0].dumpwallet(tmpdir + "/node0/wallet.encrypted.dump")

        found_addr, found_addr_chg, found_addr_rsv, hd_master_addr_enc = \
            read_dump(backupsdir + "wallet.encrypted.dump", addrs, hd_master_addr_unenc)
        assert_equal(found_addr, test_addr_count)
        assert_equal(found_addr_chg,
                     90 + 1 + 30)  # old reserve keys are marked as change now
        assert_equal(found_addr_rsv,
                     90 + 1)  # keypool size (TODO: fix off-by-one)

        # dumping onto an existing file must raise, not overwrite
        try:
            self.nodes[0].dumpwallet(tmpdir + "/node0/wallet.encrypted.dump")
            raise AssertionError(
                "dumpwallet should throw exception instead of overwriting existing file"
            )
        except JSONRPCException as e:
            assert ("Wallet dump file already exists; not overwriting"
                    in e.error["message"])
Esempio n. 34
0
 def setup_network(self):
     """Start a single node with an upload-target cap for this test."""
     # Start a node with maxuploadtarget of 200 MB (/24h)
     # NOTE(review): the comment above says 200 MB but the flag below sets
     # -maxuploadtarget=2200 — confirm which value is intended.
     self.nodes = []
     self.nodes.append(
         start_node(
             0,
             self.options.tmpdir,
             [
                 "-debug",
                 '-nuparams=2bb40e60:1',  # Blossom
                 "-maxuploadtarget=2200"
             ]))
Esempio n. 35
0
    def run_test(self):
        """Verify keypool behavior on an encrypted, locked wallet: address
        requests fail once the pool is drained and succeed again only after
        unlocking and refilling.
        """
        nodes = self.nodes
        # Encrypt wallet and wait to terminate
        nodes[0].encryptwallet('test')
        pasteld_processes[0].wait()
        # Restart node 0
        nodes[0] = start_node(0, self.options.tmpdir)
        # Keep creating keys; the first call consumes the remaining pool key
        addr = nodes[0].getnewaddress()
        try:
            addr = nodes[0].getnewaddress()
            raise AssertionError(
                'Keypool should be exhausted after one address')
        except JSONRPCException as e:
            # -12 is the "keypool ran out" RPC error code
            assert (e.error['code'] == -12)

        # put three new keys in the keypool
        nodes[0].walletpassphrase('test', 12000)
        nodes[0].keypoolrefill(3)
        nodes[0].walletlock()

        # drain the keys
        # NOTE(review): refill(3) yields four usable change addresses below —
        # presumably one key was already reserved before the refill; confirm.
        addr = set()
        addr.add(nodes[0].getrawchangeaddress())
        addr.add(nodes[0].getrawchangeaddress())
        addr.add(nodes[0].getrawchangeaddress())
        addr.add(nodes[0].getrawchangeaddress())
        # assert that four unique addresses were returned
        assert (len(addr) == 4)
        # the next one should fail
        try:
            addr = nodes[0].getrawchangeaddress()
            raise AssertionError(
                'Keypool should be exhausted after three addresses')
        except JSONRPCException as e:
            assert (e.error['code'] == -12)

        # refill keypool with three new addresses
        nodes[0].walletpassphrase('test', 12000)
        nodes[0].keypoolrefill(3)
        nodes[0].walletlock()

        # drain them by mining (each block consumes a coinbase key)
        nodes[0].generate(1)
        nodes[0].generate(1)
        nodes[0].generate(1)
        nodes[0].generate(1)
        try:
            nodes[0].generate(1)
            raise AssertionError(
                'Keypool should be exhausted after three addresses')
        except JSONRPCException as e:
            assert_equal(e.error['code'], -12)
Esempio n. 36
0
    def setup_network(self):
        """Start three nodes chained 0 <- 1 <- 2: node 0 mines blocks with
        ILLEGAL_VERSION and appends -alertnotify output to alert.txt,
        node 1 mines UP_VERSION blocks, node 2 runs with defaults.
        """
        self.nodes = []
        self.alert_filename = os.path.join(self.options.tmpdir, "alert.txt")
        with open(self.alert_filename, 'w'):
            pass  # Just open then close to create zero-length file
        self.nodes.append(
            start_node(0, self.options.tmpdir, [
                "-blockversion=%d" % ILLEGAL_VERSION,
                "-alertnotify=echo %s >> \"" + self.alert_filename + "\""
            ]))
        # Node1 mines block.version=211 blocks
        self.nodes.append(
            start_node(1, self.options.tmpdir,
                       ["-blockversion=%d" % UP_VERSION]))
        connect_nodes(self.nodes[1], 0)

        self.nodes.append(start_node(2, self.options.tmpdir, []))
        connect_nodes(self.nodes[2], 1)

        self.is_network_split = False
        self.sync_all()
Esempio n. 37
0
    def setup_masternodes_network(
            self,
            private_keys_list,
            number_of_non_mn_to_start=0,
            debug_flags="masternode,mnpayments,governance"):
        """Start one masternode per key in *private_keys_list*, then
        *number_of_non_mn_to_start* regular nodes, and connect every pair.

        Bug fix: the original relied on the loop variables `index`/`index2`
        leaking out of their `for` loops, so it raised NameError when
        `private_keys_list` was empty or — with the default argument —
        when no non-MN nodes were requested.
        """
        mn_count = len(private_keys_list)
        for index, key in enumerate(private_keys_list):
            print(f"start MN {index}")
            self.nodes.append(
                start_node(index, self.options.tmpdir, [
                    f"-debug={debug_flags}", "-masternode", "-txindex=1",
                    "-reindex", f"-masternodeprivkey={key}"
                ]))

        # Non-MN nodes are numbered after the masternodes.
        for index2 in range(mn_count, mn_count + number_of_non_mn_to_start):
            print(f"start non-MN {index2}")
            self.nodes.append(
                start_node(index2, self.options.tmpdir,
                           [f"-debug={debug_flags}"]))

        # Fully connect every pair of nodes started above.
        total_nodes = mn_count + number_of_non_mn_to_start
        for pair in itertools.combinations(range(total_nodes), 2):
            connect_nodes_bi(self.nodes, pair[0], pair[1])
Esempio n. 38
0
    def setup_network(self):
        """Start two mining nodes and one pruning node (-prune=550),
        connected in a triangle so reorgs reach all of them."""
        self.nodes = []
        self.is_network_split = False

        # Create nodes 0 and 1 to mine
        self.nodes.append(start_node(0, self.options.tmpdir, ["-debug","-maxreceivebuffer=20000","-blockmaxsize=999000", "-checkblocks=5"], timewait=900))
        self.nodes.append(start_node(1, self.options.tmpdir, ["-debug","-maxreceivebuffer=20000","-blockmaxsize=999000", "-checkblocks=5"], timewait=900))

        # Create node 2 to test pruning
        self.nodes.append(start_node(2, self.options.tmpdir, ["-debug","-maxreceivebuffer=20000","-prune=550"], timewait=900))
        self.prunedir = self.options.tmpdir+"/node2/regtest/blocks/"

        # NOTE(review): self.address is indexed before being created here —
        # presumably initialized elsewhere (e.g. a class attribute); verify.
        self.address[0] = self.nodes[0].getnewaddress()
        self.address[1] = self.nodes[1].getnewaddress()

        # Determine default relay fee
        self.relayfee = self.nodes[0].getnetworkinfo()["relayfee"]

        # Triangle topology: 0-1, 1-2, 2-0
        connect_nodes(self.nodes[0], 1)
        connect_nodes(self.nodes[1], 2)
        connect_nodes(self.nodes[2], 0)
        sync_blocks(self.nodes[0:3])
Esempio n. 39
0
def run_test(nodes, tmpdir):
    """Encrypt node 0's wallet, restart it, and verify the keypool is
    exhausted after a single getnewaddress call (the locked wallet cannot
    top the pool back up).
    """
    # Encrypt wallet and wait for the daemon to terminate
    nodes[0].encryptwallet('test')
    bitcoind_processes[0].wait()
    # Restart node 0
    nodes[0] = start_node(0, tmpdir)
    # Keep creating keys; the first call consumes the remaining pool key
    addr = nodes[0].getnewaddress()
    try:
        addr = nodes[0].getnewaddress()
        raise AssertionError('Keypool should be exhausted after one address')
    except JSONRPCException as e:
        # Bug fix: `except JSONRPCException,e:` is Python 2-only syntax and
        # a SyntaxError under Python 3 (the rest of the file is Python 3).
        # -12 is the "keypool ran out" RPC error code.
        assert(e.error['code']==-12)
Esempio n. 40
0
    def setup_network(self):
        """Start two mining nodes and one pruning node (-prune=550),
        connected in a triangle so reorgs reach all of them."""
        self.nodes = []
        self.is_network_split = False

        # Create nodes 0 and 1 to mine
        self.nodes.append(start_node(0, self.options.tmpdir, ["-debug","-maxreceivebuffer=20000","-blockmaxsize=999000", "-checkblocks=5"], timewait=900))
        self.nodes.append(start_node(1, self.options.tmpdir, ["-debug","-maxreceivebuffer=20000","-blockmaxsize=999000", "-checkblocks=5"], timewait=900))

        # Create node 2 to test pruning
        self.nodes.append(start_node(2, self.options.tmpdir, ["-debug","-maxreceivebuffer=20000","-prune=550"], timewait=900))
        self.prunedir = self.options.tmpdir+"/node2/regtest/blocks/"

        # NOTE(review): self.address is indexed before being created here —
        # presumably initialized elsewhere (e.g. a class attribute); verify.
        self.address[0] = self.nodes[0].getnewaddress()
        self.address[1] = self.nodes[1].getnewaddress()

        # Determine default relay fee
        self.relayfee = self.nodes[0].getnetworkinfo()["relayfee"]

        # Triangle topology: 0-1, 1-2, 2-0
        connect_nodes(self.nodes[0], 1)
        connect_nodes(self.nodes[1], 2)
        connect_nodes(self.nodes[2], 0)
        sync_blocks(self.nodes[0:3])
Esempio n. 41
0
    def setup_network(self):
        """Start four nodes in a star around node 0: nodes 0/1 act as wallet
        nodes, nodes 2/3 run with -addressindex/-txindex for index tests.
        """
        self.nodes = []
        # Nodes 0/1 are "wallet" nodes
        self.nodes.append(
            start_node(0, self.options.tmpdir, ["-debug", "-relaypriority=0"]))
        self.nodes.append(
            start_node(1, self.options.tmpdir,
                       ["-debug", "-addressindex", "-txindex"]))
        # Nodes 2/3 are used for testing
        self.nodes.append(
            start_node(
                2, self.options.tmpdir,
                ["-debug", "-addressindex", "-txindex", "-relaypriority=0"]))
        self.nodes.append(
            start_node(3, self.options.tmpdir,
                       ["-debug", "-addressindex", "-txindex"]))
        # Star topology centered on node 0
        connect_nodes(self.nodes[0], 1)
        connect_nodes(self.nodes[0], 2)
        connect_nodes(self.nodes[0], 3)

        self.is_network_split = False
        self.sync_all()
Esempio n. 42
0
def run_test(nodes, tmpdir):
    """Encrypt node 0's wallet, restart it, and verify the keypool is
    exhausted after a single getnewaddress call (the locked wallet cannot
    top the pool back up).
    """
    # Encrypt wallet and wait for the daemon to terminate
    nodes[0].encryptwallet('test')
    litecoinzd_processes[0].wait()
    # Restart node 0
    nodes[0] = start_node(0, tmpdir)
    # Keep creating keys; the first call consumes the remaining pool key
    addr = nodes[0].getnewaddress()
    try:
        addr = nodes[0].getnewaddress()
        raise AssertionError('Keypool should be exhausted after one address')
    except JSONRPCException as e:
        # Bug fix: `except JSONRPCException, e:` is Python 2-only syntax and
        # a SyntaxError under Python 3 (the rest of the file is Python 3).
        # -12 is the "keypool ran out" RPC error code.
        assert (e.error['code'] == -12)
Esempio n. 43
0
    def check_mnemonic_works(self,
                             masterprivkey,
                             mnemonic,
                             language="english"):
        """Restart node 0 with a fresh wallet restored from *mnemonic* and
        check that it reproduces *masterprivkey*.
        """
        self.stop_node(0)
        # Delete the wallet so the node re-creates it from the mnemonic.
        os.remove(self.options.tmpdir + "/node0/devnet/wallet.dat")

        self.nodes[0] = start_node(0, self.options.tmpdir, [
            "-importmnemonic=" + mnemonic, "-mnemoniclanguage=" + language,
            "-keypool=5"
        ])
        self.nodes[0].staking(False)

        # Exactly one transaction expected in the restored wallet.
        # NOTE(review): presumably a premine/genesis payout — confirm.
        assert (len(self.nodes[0].listtransactions()) == 1)
        assert_equal(masterprivkey, self.nodes[0].dumpmasterprivkey())
Esempio n. 44
0
 def _test_stopatheight(self):
     """Verify that the node keeps running below the stop height and shuts
     itself down once block 207 is reached."""
     assert_equal(self.nodes[0].getblockcount(), 200)
     self.nodes[0].generate(6)
     assert_equal(self.nodes[0].getblockcount(), 206)
     self.log.debug('Node should not stop at this height')
     # One block short of the stop height: the process must still be alive,
     # so waiting on it times out.
     assert_raises(subprocess.TimeoutExpired,
                   lambda: bitcoind_processes[0].wait(timeout=3))
     try:
         self.nodes[0].generate(1)
     except (ConnectionError, http.client.BadStatusLine):
         pass  # The node already shut down before response
     self.log.debug('Node should stop at this height...')
     bitcoind_processes[0].wait(timeout=BITCOIND_PROC_WAIT_TIMEOUT)
     # Restart and confirm the chain tip sits exactly at the stop height.
     self.nodes[0] = start_node(0, self.options.tmpdir)
     assert_equal(self.nodes[0].getblockcount(), 207)
Esempio n. 45
0
    def run_test(self):
        """Check that an offline regtest node fails to sign a raw transaction
        unless the consensus branch id is passed to signrawtransaction
        explicitly.
        """
        print("Mining blocks...")
        self.nodes[0].generate(101)

        # Node 1 is started with no connections so it cannot learn the
        # current chain state from peers.
        offline_node = start_node(
            1, self.options.tmpdir,
            ["-maxconnections=0", "-nuparams=2bb40e60:10"])
        self.nodes.append(offline_node)

        assert_equal(0, len(
            offline_node.getpeerinfo()))  # make sure node 1 has no peers

        taddr = self.nodes[0].getnewaddress()

        tx = self.nodes[0].listunspent()[0]
        txid = tx['txid']
        scriptpubkey = tx['scriptPubKey']
        privkeys = [self.nodes[0].dumpprivkey(tx['address'])]

        create_inputs = [{'txid': txid, 'vout': 0}]
        sign_inputs = [{
            'txid': txid,
            'vout': 0,
            'scriptPubKey': scriptpubkey,
            'amount': 3920000 * 0.97
        }]

        create_hex = self.nodes[0].createrawtransaction(
            create_inputs, {taddr: 3802399.9999})

        # An offline regtest node does not rely on the approx release height of the software
        # to determine the consensus rules to be used for signing.
        try:
            signed_tx = offline_node.signrawtransaction(
                create_hex, sign_inputs, privkeys)
            self.nodes[0].sendrawtransaction(signed_tx['hex'])
            assert (False)
        except JSONRPCException:
            pass

        # Passing in the consensus branch id resolves the issue for offline regtest nodes.
        signed_tx = offline_node.signrawtransaction(create_hex, sign_inputs,
                                                    privkeys, "ALL",
                                                    "2bb40e60")

        # If we return the transaction hash, then we have not thrown an error (success)
        online_tx_hash = self.nodes[0].sendrawtransaction(signed_tx['hex'])
        assert_true(len(online_tx_hash) > 0)
Esempio n. 46
0
    def run_test(self):
        """Split the network across a network-upgrade boundary, let one branch
        fall more than the maximum rewind length behind, and verify the
        lagging node refuses to start without -reindex.
        """
        # Generate shared state up to the network split
        logging.info("Generating initial blocks.")
        self.nodes[0].generate(13)
        block14 = self.nodes[0].generate(1)[0]
        logging.info("Syncing network after initial generation...")
        self.sync_all()  # Everyone is still on overwinter

        logging.info("Checking overwinter block propagation.")
        assert_equal(self.nodes[0].getbestblockhash(), block14)
        assert_equal(self.nodes[1].getbestblockhash(), block14)
        assert_equal(self.nodes[2].getbestblockhash(), block14)
        logging.info("All nodes are on overwinter.")

        logging.info("Generating network split...")
        self.is_network_split = True

        # generate past the boundary into sapling; this will become the "canonical" branch
        self.nodes[0].generate(50)
        expected = self.nodes[0].getbestblockhash()

        # generate blocks into sapling beyond the maximum rewind length (99 blocks)
        self.nodes[2].generate(120)
        self.sync_all()

        assert_true(expected != self.nodes[2].getbestblockhash(),
                    "Split chains have not diverged!")

        # Stop the overwinter node to ensure state is flushed to disk.
        logging.info("Shutting down lagging node...")
        self.nodes[2].stop()
        bitcoind_processes[2].wait()

        # Restart the nodes, reconnect, and sync the network. This succeeds if "-reindex" is passed.
        logging.info("Reconnecting the network...")

        # expect an exception; the node will refuse to fully start because its last point of
        # agreement with the rest of the network was prior to the network upgrade activation
        assert_start_raises_init_error(2, self.options.tmpdir, HAS_SAPLING,
                                       "roll back 120")

        # restart the node with -reindex to allow the test to complete gracefully,
        # otherwise the node shutdown call in test cleanup will throw an error since
        # it can't connect
        self.nodes[2] = start_node(2,
                                   self.options.tmpdir,
                                   extra_args=NO_SAPLING + ["-reindex"])
Esempio n. 47
0
    def run_test(self):
        """Dump the wallet before and after encryption, verify the expected
        key counts in each dump, and check that overwriting an existing
        dump file raises an RPC error.
        """
        tmpdir = self.options.tmpdir

        # generate 20 addresses to compare against the dump
        test_addr_count = 20
        addrs = []
        for i in range(0, test_addr_count):
            addr = self.nodes[0].getnewaddress()
            vaddr = self.nodes[0].validateaddress(
                addr)  #required to get hd keypath
            addrs.append(vaddr)
        # Should be a no-op:
        self.nodes[0].keypoolrefill()

        # dump unencrypted wallet
        self.nodes[0].dumpwallet(tmpdir + "/node0/wallet.unencrypted.dump")

        found_addr, found_addr_chg, found_addr_rsv, hd_master_addr_unenc = \
            read_dump(tmpdir + "/node0/wallet.unencrypted.dump", addrs, None)
        assert_equal(found_addr,
                     test_addr_count)  # all keys must be in the dump
        assert_equal(found_addr_chg, 50)  # 50 blocks were mined
        assert_equal(found_addr_rsv,
                     90 + 1)  # keypool size (TODO: fix off-by-one)

        # encrypt wallet, restart, unlock and dump
        self.nodes[0].encryptwallet('test')
        # encryptwallet shuts the daemon down; wait for it to exit
        bitcoind_processes[0].wait()
        self.nodes[0] = start_node(0, self.options.tmpdir, self.extra_args[0])
        self.nodes[0].walletpassphrase('test', 10)
        # Should be a no-op:
        self.nodes[0].keypoolrefill()
        self.nodes[0].dumpwallet(tmpdir + "/node0/wallet.encrypted.dump")

        found_addr, found_addr_chg, found_addr_rsv, hd_master_addr_enc = \
            read_dump(tmpdir + "/node0/wallet.encrypted.dump", addrs, hd_master_addr_unenc)
        assert_equal(found_addr, test_addr_count)
        assert_equal(found_addr_chg,
                     90 + 1 + 50)  # old reserve keys are marked as change now
        assert_equal(found_addr_rsv,
                     90 + 1)  # keypool size (TODO: fix off-by-one)

        # Overwriting should fail
        assert_raises_rpc_error(-8, "already exists", self.nodes[0].dumpwallet,
                                tmpdir + "/node0/wallet.unencrypted.dump")
    def run_test(self):
        """Dump the wallet before and after encryption and verify the
        expected key counts in each dump.
        """
        tmpdir = self.options.tmpdir

        # generate 20 addresses to compare against the dump
        test_addr_count = 20
        addrs = []
        for i in range(0, test_addr_count):
            addr = self.nodes[0].getnewaddress()
            vaddr = self.nodes[0].validateaddress(
                addr)  # required to get hd keypath
            addrs.append(vaddr)
        # Should be a no-op:
        self.nodes[0].keypoolrefill()

        # dump unencrypted wallet
        self.nodes[0].dumpwallet(tmpdir + "/node0/wallet.unencrypted.dump")

        found_addr, found_addr_chg, found_addr_rsv, hd_master_addr_unenc = \
            read_dump(tmpdir + "/node0/wallet.unencrypted.dump", addrs, None)
        # all keys must be in the dump
        assert_equal(found_addr, test_addr_count)
        assert_equal(found_addr_chg, 50)  # 50 blocks were mined
        # keypool size (TODO: fix off-by-one)
        assert_equal(found_addr_rsv, 90 + 1)

        # encrypt wallet, restart, unlock and dump
        self.nodes[0].encryptwallet('test')
        # encryptwallet shuts the daemon down; wait for it to exit
        commerciumd_processes[0].wait()
        self.nodes[0] = start_node(0, self.options.tmpdir, self.extra_args[0])
        self.nodes[0].walletpassphrase('test', 10)
        # Should be a no-op:
        self.nodes[0].keypoolrefill()
        self.nodes[0].dumpwallet(tmpdir + "/node0/wallet.encrypted.dump")

        found_addr, found_addr_chg, found_addr_rsv, hd_master_addr_enc = \
            read_dump(
                tmpdir + "/node0/wallet.encrypted.dump", addrs, hd_master_addr_unenc)
        assert_equal(found_addr, test_addr_count)
        # old reserve keys are marked as change now
        assert_equal(found_addr_chg, 90 + 1 + 50)
        assert_equal(found_addr_rsv, 90 + 1)
Esempio n. 49
0
    def setup_network(self):
        """Start six nodes: 0/1 mine, 2 tests automatic pruning, 3/4 are for
        manual pruning (restarted with it later), and 5 tests wallet-in-prune
        mode and stays unconnected.
        """
        self.nodes = []
        self.is_network_split = False

        # Create nodes 0 and 1 to mine
        self.nodes.append(
            start_node(0,
                       self.options.tmpdir,
                       ["-debug", "-maxreceivebuffer=20000"],
                       timewait=1200))
        self.nodes.append(
            start_node(1,
                       self.options.tmpdir,
                       ["-debug", "-maxreceivebuffer=20000"],
                       timewait=1200))

        # Create node 2 to test pruning
        self.nodes.append(
            start_node(2,
                       self.options.tmpdir,
                       ["-debug", "-maxreceivebuffer=20000", "-prune=2200"],
                       timewait=1200))
        self.prunedir = self.options.tmpdir + "/node2/regtest/blocks/"

        # Create nodes 3 and 4 to test manual pruning (they will be re-started with manual pruning later)
        self.nodes.append(
            start_node(3,
                       self.options.tmpdir,
                       ["-debug=0", "-maxreceivebuffer=20000"],
                       timewait=1200))
        self.nodes.append(
            start_node(4,
                       self.options.tmpdir,
                       ["-debug=0", "-maxreceivebuffer=20000"],
                       timewait=1200))

        # Create nodes 5 to test wallet in prune mode, but do not connect
        self.nodes.append(
            start_node(5, self.options.tmpdir, ["-debug=0", "-prune=2200"]))

        # Determine default relay fee
        self.relayfee = self.nodes[0].getnetworkinfo()["relayfee"]

        # Triangle 0-1-2 plus an isolated 3-4 pair; node 5 stays alone.
        connect_nodes(self.nodes[0], 1)
        connect_nodes(self.nodes[1], 2)
        connect_nodes(self.nodes[2], 0)
        connect_nodes(self.nodes[3], 4)
        sync_blocks(self.nodes[0:5])
    def run_test(self):
        """Exercise elysium sigma minting against an encrypted wallet:
        minting fails while locked, succeeds inside the walletpassphrase
        unlock window, and fails again once the window expires.
        """
        super().run_test()

        sigma_start_block = 500

        # Fund and advance the chain to the sigma activation height.
        self.nodes[0].generatetoaddress(100, self.addrs[0])
        self.nodes[0].generate(sigma_start_block - self.nodes[0].getblockcount())

        self.nodes[0].elysium_sendissuancefixed(
            self.addrs[0], 1, 1, 0, '', '', 'Sigma', '', '', '1000000', 1
        )
        self.nodes[0].generate(1)
        # Property id of the token issued above.
        # NOTE(review): hard-coded 3 presumably matches the issuance order —
        # confirm against the framework's fixture properties.
        sigmaProperty = 3

        self.nodes[0].elysium_sendcreatedenomination(self.addrs[0], sigmaProperty, '1')
        self.nodes[0].generate(10)

        # Encrypting shuts the daemon down; wait, then restart with elysium.
        passphase = 'test'
        self.nodes[0].encryptwallet(passphase)
        bitcoind_processes[0].wait()
        self.nodes[0] = start_node(0, self.options.tmpdir, ['-elysium'])

        # try to mint using encrypted wallet
        assert_raises_message(
            JSONRPCException,
            'Wallet locked',
            self.nodes[0].elysium_sendmint, self.addrs[0], sigmaProperty, {"0":1}
        )

        # Unlock for 3 seconds and mint successfully.
        self.nodes[0].walletpassphrase(passphase, 3)

        self.nodes[0].elysium_sendmint(self.addrs[0], sigmaProperty, {"0":1})

        # Let the unlock window lapse so the wallet re-locks itself.
        sleep(3)

        assert_raises_message(
            JSONRPCException,
            'Wallet locked',
            self.nodes[0].elysium_sendmint, self.addrs[0], sigmaProperty, {"0":1}
        )
Esempio n. 51
0
    def run_test(self):
        """Dump the wallet before and after encryption and verify the
        expected key counts in each dump.
        """
        tmpdir = self.options.tmpdir

        # generate 20 addresses to compare against the dump
        test_addr_count = 20
        addrs = []
        for i in range(0, test_addr_count):
            addr = self.nodes[0].getnewaddress()
            vaddr = self.nodes[0].validateaddress(
                addr)  #required to get hd keypath
            addrs.append(vaddr)
        # Should be a no-op:
        self.nodes[0].keypoolrefill()

        # dump unencrypted wallet
        self.nodes[0].dumpwallet(tmpdir + "/node0/wallet.unencrypted.dump")

        found_addr, found_addr_chg, found_addr_rsv, hd_master_addr_unenc = \
            read_dump(tmpdir + "/node0/wallet.unencrypted.dump", addrs, None)
        assert_equal(found_addr,
                     test_addr_count)  # all keys must be in the dump
        assert_equal(found_addr_chg, 50)  # 50 blocks were mined
        assert_equal(found_addr_rsv, 90 * 2)  # 90 keys plus 100% internal keys

        # encrypt wallet, restart, unlock and dump
        self.nodes[0].encryptwallet('test')
        # encryptwallet shuts the daemon down; wait for it to exit
        oakcoind_processes[0].wait()
        self.nodes[0] = start_node(0, self.options.tmpdir, self.extra_args[0])
        self.nodes[0].walletpassphrase('test', 10)
        # Should be a no-op:
        self.nodes[0].keypoolrefill()
        self.nodes[0].dumpwallet(tmpdir + "/node0/wallet.encrypted.dump")

        found_addr, found_addr_chg, found_addr_rsv, hd_master_addr_enc = \
            read_dump(tmpdir + "/node0/wallet.encrypted.dump", addrs, hd_master_addr_unenc)
        assert_equal(found_addr, test_addr_count)
        assert_equal(found_addr_chg,
                     90 * 2 + 50)  # old reserve keys are marked as change now
        assert_equal(found_addr_rsv, 90 * 2)
Esempio n. 52
0
    def create_chain_with_staleblocks(self):
        """Repeatedly build stale branches: node 1 mines 24 blocks while
        node 0 is down, then node 0 mines a longer 25-block chain that
        reorgs everyone — 12 rounds in total.
        """
        # Create stale blocks in manageable sized chunks
        print(
            "Mine 24 (stale) blocks on Node 1, followed by 25 (main chain) block reorg from Node 0, for 12 rounds"
        )

        for j in range(12):
            # Disconnect node 0 so it can mine a longer reorg chain without knowing about node 1's soon-to-be-stale chain
            # Node 2 stays connected, so it hears about the stale blocks and then reorg's when node0 reconnects
            # Stopping node 0 also clears its mempool, so it doesn't have node1's transactions to accidentally mine
            stop_node(self.nodes[0], 0)
            self.nodes[0] = start_node(
                0,
                self.options.tmpdir, [
                    "-debug", "-maxreceivebuffer=20000",
                    "-blockmaxsize=999000", "-checkblocks=5"
                ],
                timewait=900)
            # Mine 24 blocks in node 1
            self.utxo = self.nodes[1].listunspent()
            for i in range(24):
                if j == 0:
                    self.mine_full_block(self.nodes[1], self.address[1])
                else:
                    self.nodes[1].generate(
                        1)  #tx's already in mempool from previous disconnects

            # Reorg back with 25 block chain from node 0
            self.utxo = self.nodes[0].listunspent()
            for i in range(25):
                self.mine_full_block(self.nodes[0], self.address[0])

            # Create connections in the order so both nodes can see the reorg at the same time
            connect_nodes(self.nodes[1], 0)
            connect_nodes(self.nodes[2], 0)
            sync_blocks(self.nodes[0:3])

        print("Usage can be over target because of high stale rate:",
              calc_usage(self.prunedir))
Esempio n. 53
0
    def run_test(self):
        """Check that an offline regtest node fails to sign a raw transaction
        unless the consensus branch id is passed to signrawtransaction
        explicitly.
        """
        # Bug fix: `print "..."` is Python 2-only syntax and a SyntaxError
        # under Python 3 (the rest of the file is Python 3).
        print("Mining blocks...")
        self.nodes[0].generate(101)

        # Node 1 has no connections, so it cannot learn chain state from peers.
        offline_node = start_node(1, self.options.tmpdir, ["-maxconnections=0", "-nuparams=5ba81b19:10"])
        self.nodes.append(offline_node)

        assert_equal(0, len(offline_node.getpeerinfo())) # make sure node 1 has no peers

        privkeys = [self.nodes[0].dumpprivkey(self.nodes[0].getnewaddress())]
        taddr = self.nodes[0].getnewaddress()

        tx = self.nodes[0].listunspent()[0]
        txid = tx['txid']
        scriptpubkey = tx['scriptPubKey']

        create_inputs = [{'txid': txid, 'vout': 0}]
        sign_inputs = [{'txid': txid, 'vout': 0, 'scriptPubKey': scriptpubkey, 'amount': 10}]

        create_hex = self.nodes[0].createrawtransaction(create_inputs, {taddr: 9.9999})

        # An offline regtest node does not rely on the approx release height of the software
        # to determine the consensus rules to be used for signing.
        try:
            signed_tx = offline_node.signrawtransaction(create_hex, sign_inputs, privkeys)
            self.nodes[0].sendrawtransaction(signed_tx['hex'])
            assert(False)
        except JSONRPCException:
            pass

        # Passing in the consensus branch id resolves the issue for offline regtest nodes.
        signed_tx = offline_node.signrawtransaction(create_hex, sign_inputs, privkeys, "ALL", "5ba81b19")

        # If we return the transaction hash, then we have not thrown an error (success)
        online_tx_hash = self.nodes[0].sendrawtransaction(signed_tx['hex'])
        assert_true(len(online_tx_hash) > 0)
Esempio n. 54
0
    def run_test(self):
        # add zaddr to node 0
        myzaddr0 = self.nodes[0].z_getnewaddress('sprout')

        # send node 0 taddr to zaddr to get out of coinbase
        # Tests using the default cached chain have one address per coinbase output
        mytaddr = get_coinbase_address(self.nodes[0])
        recipients = []
        recipients.append({
            "address": myzaddr0,
            "amount": Decimal('10.0') - Decimal('0.0001')
        })  # utxo amount less fee

        wait_and_assert_operationid_status(self.nodes[0],
                                           self.nodes[0].z_sendmany(
                                               mytaddr, recipients),
                                           timeout=120)

        self.sync_all()
        self.nodes[0].generate(1)
        self.sync_all()

        # add zaddr to node 2
        myzaddr = self.nodes[2].z_getnewaddress('sprout')

        # import node 2 zaddr into node 1
        myzkey = self.nodes[2].z_exportkey(myzaddr)
        self.nodes[1].z_importkey(myzkey)

        # encrypt node 1 wallet and wait to terminate
        self.nodes[1].encryptwallet("test")
        bitcoind_processes[1].wait()

        # restart node 1
        self.nodes[1] = start_node(1, self.options.tmpdir)
        connect_nodes_bi(self.nodes, 0, 1)
        connect_nodes_bi(self.nodes, 1, 2)
        self.sync_all()

        # send node 0 zaddr to note 2 zaddr
        recipients = []
        recipients.append({"address": myzaddr, "amount": 7.0})

        wait_and_assert_operationid_status(self.nodes[0],
                                           self.nodes[0].z_sendmany(
                                               myzaddr0, recipients),
                                           timeout=120)

        self.sync_all()
        self.nodes[0].generate(1)
        self.sync_all()

        # check zaddr balance
        zsendmanynotevalue = Decimal('7.0')
        assert_equal(self.nodes[2].z_getbalance(myzaddr), zsendmanynotevalue)
        assert_equal(self.nodes[1].z_getbalance(myzaddr), zsendmanynotevalue)

        # add zaddr to node 3
        myzaddr3 = self.nodes[3].z_getnewaddress('sprout')

        # send node 2 zaddr to note 3 zaddr
        recipients = []
        recipients.append({"address": myzaddr3, "amount": 2.0})

        wait_and_assert_operationid_status(self.nodes[2],
                                           self.nodes[2].z_sendmany(
                                               myzaddr, recipients),
                                           timeout=120)

        self.sync_all()
        self.nodes[2].generate(1)
        self.sync_all()

        # check zaddr balance
        zsendmany2notevalue = Decimal('2.0')
        zsendmanyfee = Decimal('0.0001')
        zaddrremaining = zsendmanynotevalue - zsendmany2notevalue - zsendmanyfee
        assert_equal(self.nodes[3].z_getbalance(myzaddr3), zsendmany2notevalue)
        assert_equal(self.nodes[2].z_getbalance(myzaddr), zaddrremaining)

        # Parallel encrypted wallet can't cache nullifiers for received notes,
        # and therefore can't detect spends. So it sees a balance corresponding
        # to the sum of both notes it received (one as change).
        # TODO: Devise a way to avoid this issue (#1528)
        assert_equal(self.nodes[1].z_getbalance(myzaddr),
                     zsendmanynotevalue + zaddrremaining)

        # send node 2 zaddr on node 1 to taddr
        # This requires that node 1 be unlocked, which triggers caching of
        # uncached nullifiers.
        self.nodes[1].walletpassphrase("test", 600)
        mytaddr1 = self.nodes[1].getnewaddress()
        recipients = []
        recipients.append({"address": mytaddr1, "amount": 1.0})

        wait_and_assert_operationid_status(self.nodes[1],
                                           self.nodes[1].z_sendmany(
                                               myzaddr, recipients),
                                           timeout=120)

        self.sync_all()
        self.nodes[1].generate(1)
        self.sync_all()

        # check zaddr balance
        # Now that the encrypted wallet has been unlocked, the note nullifiers
        # have been cached and spent notes can be detected. Thus the two wallets
        # are in agreement once more.
        zsendmany3notevalue = Decimal('1.0')
        zaddrremaining2 = zaddrremaining - zsendmany3notevalue - zsendmanyfee
        assert_equal(self.nodes[1].z_getbalance(myzaddr), zaddrremaining2)
        assert_equal(self.nodes[2].z_getbalance(myzaddr), zaddrremaining2)

        # Test viewing keys

        node3mined = Decimal('250.0')
        assert_equal(
            {
                k: Decimal(v)
                for k, v in self.nodes[3].z_gettotalbalance().items()
            }, {
                'transparent': node3mined,
                'private': zsendmany2notevalue,
                'total': node3mined + zsendmany2notevalue,
            })

        # add node 1 address and node 2 viewing key to node 3
        myzvkey = self.nodes[2].z_exportviewingkey(myzaddr)
        self.nodes[3].importaddress(mytaddr1)
        self.nodes[3].z_importviewingkey(myzvkey, 'whenkeyisnew', 1)

        # Check the address has been imported
        assert_equal(myzaddr in self.nodes[3].z_listaddresses(), False)
        assert_equal(myzaddr in self.nodes[3].z_listaddresses(True), True)

        # Node 3 should see the same received notes as node 2; however,
        # some of the notes were change for node 2 but not for node 3.
        # Aside from that the recieved notes should be the same. So,
        # group by txid and then check that all properties aside from
        # change are equal.
        node2Received = dict(
            [r['txid'], r]
            for r in self.nodes[2].z_listreceivedbyaddress(myzaddr))
        node3Received = dict(
            [r['txid'], r]
            for r in self.nodes[3].z_listreceivedbyaddress(myzaddr))
        assert_equal(len(node2Received), len(node2Received))
        for txid in node2Received:
            received2 = node2Received[txid]
            received3 = node3Received[txid]
            # the change field will be omitted for received3, but all other fields should be shared
            assert_true(len(received2) >= len(received3))
            for key in received2:
                # check all the properties except for change
                if key != 'change':
                    assert_equal(received2[key], received3[key])

        # Node 3's balances should be unchanged without explicitly requesting
        # to include watch-only balances
        assert_equal(
            {
                k: Decimal(v)
                for k, v in self.nodes[3].z_gettotalbalance().items()
            }, {
                'transparent': node3mined,
                'private': zsendmany2notevalue,
                'total': node3mined + zsendmany2notevalue,
            })

        # Wallet can't cache nullifiers for notes received by addresses it only has a
        # viewing key for, and therefore can't detect spends. So it sees a balance
        # corresponding to the sum of all notes the address received.
        # TODO: Fix this during the Sapling upgrade (via #2277)
        assert_equal(
            {
                k: Decimal(v)
                for k, v in self.nodes[3].z_gettotalbalance(1, True).items()
            }, {
                'transparent':
                node3mined + Decimal('1.0'),
                'private':
                zsendmany2notevalue + zsendmanynotevalue + zaddrremaining +
                zaddrremaining2,
                'total':
                node3mined + Decimal('1.0') + zsendmany2notevalue +
                zsendmanynotevalue + zaddrremaining + zaddrremaining2,
            })

        # Check individual balances reflect the above
        assert_equal(self.nodes[3].z_getbalance(mytaddr1), Decimal('1.0'))
        assert_equal(self.nodes[3].z_getbalance(myzaddr),
                     zsendmanynotevalue + zaddrremaining + zaddrremaining2)
Esempio n. 55
0
 def setup_network(self):
     """Start the single node this test needs.

     The node is launched with a -nuparams override scheduling the
     network upgrade with branch id 2bb40e60 at block height 10
     (presumably Overwinter — confirm against chainparams).
     """
     upgrade_args = ["-nuparams=2bb40e60:10"]
     node = start_node(0, self.options.tmpdir, upgrade_args)
     self.nodes = [node]
     self.is_network_split = False
     self.sync_all()
Esempio n. 56
0
        # Diagram of the expected fork topology at this point (m = main
        # chain blocks, h = the honest/alternate fork):
        #   |          \
        #   +-------Node(3): [0]->..->[104]->[105m]...->[117m]->[118m]->..->[181m]->[182m]  <<==ACTIVE
        #                                \
        #                                 +->[105h]...->[116h]->[117h]->..->[181h]

        # check node1 balance has been erased
        # NOTE(review): plain assert with a float compare against 0.0 —
        # presumably the reorg above invalidated node1's coins; left as-is.
        assert self.nodes[1].getbalance() == 0.0
        # Python 2 print statement; this file mixes `print x` and print(x)
        print "\nNode1 balance has been erased!:", self.nodes[1].getbalance()

        #        raw_input("press enter to connect a brand new node..")

        # Connect a fifth node from scratch and update
        s = "Connecting a new node"
        print(s)
        self.mark_logs(s)  # mirror the progress message into the node logs
        self.nodes.append(start_node(4, self.options.tmpdir))
        # connect the fresh node both ways to node 3 so it can sync the chain
        connect_nodes_bi(self.nodes, 4, 3)
        connect_nodes_bi(self.nodes, 3, 4)
        # NOTE(review): extra sync_blocks args are presumably
        # (wait, verbose, limit) — confirm against the helper's signature
        sync_blocks(self.nodes, 1, True, 5)

        # Dump every node's ordered chain tips for debugging the reorg state
        for i in range(0, 5):
            print "Node%d  ---" % i
            self.dump_ordered_tips(self.nodes[i].getchaintips())
            print "---"

        # Extend the active chain by one block and make sure all five nodes
        # (including the newly attached one) converge on it
        print("\nNode0 generating 1 new blocks")
        self.mark_logs("Node0 generating 1 new blocks")
        self.nodes[0].generate(1)
        print("New blocks generated")
        sync_blocks(self.nodes, 1, True, 5)
Esempio n. 57
0
 def setup_network(self):
     """Set up the test network: one node, started with the
     -checkmempool and -debug=mempool flags."""
     # A single node is sufficient for this test
     node_args = ["-checkmempool", "-debug=mempool"]
     self.nodes = [start_node(0, self.options.tmpdir, node_args)]
     self.is_network_split = False
Esempio n. 58
0
    def run_test(self):
        """Exercise HD wallet derivation, backup/restore and rescan.

        Verifies that:
        - -usehd=0 is rejected once an HD wallet already exists,
        - external keys derive along m/0'/0'/i' and internal (change)
          keys along m/0'/1'/i',
        - restoring a wallet backup re-derives the identical key chain,
        - a -rescan restart recovers the balance sent to derived
          addresses, and
        - change outputs use the internal keypath.
        """
        tmpdir = self.options.tmpdir

        # Make sure usehd can't be switched off after wallet creation
        self.stop_node(1)
        assert_start_raises_init_error(1, self.options.tmpdir, ['-usehd=0'], 'already existing HD wallet')
        self.nodes[1] = start_node(1, self.options.tmpdir, self.node_args[1])
        connect_nodes_bi(self.nodes, 0, 1)

        # Make sure we use hd, keep masterkeyid (hex-encoded, 40 chars)
        masterkeyid = self.nodes[1].getwalletinfo()['hdmasterkeyid']
        assert_equal(len(masterkeyid), 40)

        # create an internal key; first internal child key
        change_addr = self.nodes[1].getrawchangeaddress()
        change_addrV = self.nodes[1].validateaddress(change_addr)
        assert_equal(change_addrV["hdkeypath"], "m/0'/1'/0'")

        # Import a non-HD private key in the HD wallet
        non_hd_add = self.nodes[0].getnewaddress()
        self.nodes[1].importprivkey(self.nodes[0].dumpprivkey(non_hd_add))

        # This should be enough to keep the master key and the non-HD key
        self.nodes[1].backupwallet(tmpdir + "/hd.bak")
        #self.nodes[1].dumpwallet(tmpdir + "/hd.dump")

        # Derive some HD addresses and remember the last.
        # Also send funds to each address.
        self.nodes[0].generate(101)
        hd_add = None
        num_hd_adds = 300
        for i in range(num_hd_adds):
            hd_add = self.nodes[1].getnewaddress()
            hd_info = self.nodes[1].validateaddress(hd_add)
            # external chain keypaths here start at index 1, not 0
            assert_equal(hd_info["hdkeypath"], "m/0'/0'/" + str(i + 1) + "'")
            assert_equal(hd_info["hdmasterkeyid"], masterkeyid)
            self.nodes[0].sendtoaddress(hd_add, 1)
            self.nodes[0].generate(1)
        self.nodes[0].sendtoaddress(non_hd_add, 1)
        self.nodes[0].generate(1)

        # create an internal key (again); second internal child key
        change_addr = self.nodes[1].getrawchangeaddress()
        change_addrV = self.nodes[1].validateaddress(change_addr)
        assert_equal(change_addrV["hdkeypath"], "m/0'/1'/1'")

        self.sync_all()
        assert_equal(self.nodes[1].getbalance(), num_hd_adds + 1)

        self.log.info("Restore backup ...")
        self.stop_node(1)
        os.remove(self.options.tmpdir + "/node1/regtest/wallet.dat")
        shutil.copyfile(tmpdir + "/hd.bak", tmpdir + "/node1/regtest/wallet.dat")
        self.nodes[1] = start_node(1, self.options.tmpdir, self.node_args[1])
        #connect_nodes_bi(self.nodes, 0, 1)

        # Assert that derivation is deterministic: the restored wallet must
        # hand out exactly the same address sequence as before
        hd_add_2 = None
        for i in range(num_hd_adds):
            hd_add_2 = self.nodes[1].getnewaddress()
            hd_info_2 = self.nodes[1].validateaddress(hd_add_2)
            assert_equal(hd_info_2["hdkeypath"], "m/0'/0'/" + str(i + 1) + "'")
            assert_equal(hd_info_2["hdmasterkeyid"], masterkeyid)
        assert_equal(hd_add, hd_add_2)

        # The restored wallet needs a rescan to see the received funds
        self.stop_node(1)
        self.nodes[1] = start_node(1, self.options.tmpdir, self.node_args[1] + ['-rescan'])
        #connect_nodes_bi(self.nodes, 0, 1)
        assert_equal(self.nodes[1].getbalance(), num_hd_adds + 1)

        # send a tx and make sure its change output uses the internal chain
        txid = self.nodes[1].sendtoaddress(self.nodes[0].getnewaddress(), 1)
        outs = self.nodes[1].decoderawtransaction(self.nodes[1].gettransaction(txid)['hex'])['vout']
        keypath = ""
        for out in outs:
            # the value-1 output is the payment; any other output is change
            if out['value'] != 1:
                keypath = self.nodes[1].validateaddress(out['scriptPubKey']['addresses'][0])['hdkeypath']

        assert_equal(keypath[0:7], "m/0'/1'")
Esempio n. 59
0
    def _wait_for_async_success(self, node, myopid, timeout=120):
        """Poll ``node`` until async operation ``myopid`` completes.

        Checks z_getoperationresult once per second for up to ``timeout``
        seconds, asserts the operation finished with status "success", and
        returns the resulting txid.  Returns None if the operation does not
        finish in time (matching the silent fall-through of the original
        inline polling loops, which a later balance assertion then catches).
        """
        opids = [myopid]
        for _ in xrange(1, timeout):
            results = node.z_getoperationresult(opids)
            if len(results) == 0:
                time.sleep(1)
            else:
                assert_equal("success", results[0]["status"])
                return results[0]["result"]["txid"]
        return None

    def run_test(self):
        """Check shielded balance accounting across wallets and key types.

        Covers:
        - shielding a taddr balance into a zaddr,
        - a parallel encrypted wallet holding an imported spending key and
          its nullifier-caching caveat (#1528),
        - z->z and z->t sends with fee accounting, and
        - watch-only balances via an imported viewing key (#2277 caveat).
        """
        # add zaddr to node 0
        myzaddr0 = self.nodes[0].z_getnewaddress()

        # send node 0 taddr to zaddr to get out of coinbase
        mytaddr = self.nodes[0].getnewaddress()
        recipients = []
        recipients.append({"address": myzaddr0, "amount": Decimal('10.0') - Decimal('0.0001')})  # utxo amount less fee
        myopid = self.nodes[0].z_sendmany(mytaddr, recipients)
        self._wait_for_async_success(self.nodes[0], myopid)

        self.sync_all()
        self.nodes[0].generate(1)
        self.sync_all()

        # add zaddr to node 2
        myzaddr = self.nodes[2].z_getnewaddress()

        # import node 2 zaddr into node 1, so node 1 holds a parallel wallet
        # with the same spending key
        myzkey = self.nodes[2].z_exportkey(myzaddr)
        self.nodes[1].z_importkey(myzkey)

        # encrypt node 1 wallet and wait for the process to terminate
        self.nodes[1].encryptwallet("test")
        bitcoind_processes[1].wait()

        # restart node 1 and reconnect it
        self.nodes[1] = start_node(1, self.options.tmpdir)
        connect_nodes_bi(self.nodes, 0, 1)
        connect_nodes_bi(self.nodes, 1, 2)
        self.sync_all()

        # send node 0 zaddr to node 2 zaddr
        recipients = []
        recipients.append({"address": myzaddr, "amount": 7.0})
        myopid = self.nodes[0].z_sendmany(myzaddr0, recipients)
        self._wait_for_async_success(self.nodes[0], myopid)

        self.sync_all()
        self.nodes[0].generate(1)
        self.sync_all()

        # check zaddr balance on both wallets holding the spending key
        zsendmanynotevalue = Decimal('7.0')
        assert_equal(self.nodes[2].z_getbalance(myzaddr), zsendmanynotevalue)
        assert_equal(self.nodes[1].z_getbalance(myzaddr), zsendmanynotevalue)

        # add zaddr to node 3
        myzaddr3 = self.nodes[3].z_getnewaddress()

        # send node 2 zaddr to node 3 zaddr
        recipients = []
        recipients.append({"address": myzaddr3, "amount": 2.0})
        myopid = self.nodes[2].z_sendmany(myzaddr, recipients)
        self._wait_for_async_success(self.nodes[2], myopid)

        self.sync_all()
        self.nodes[2].generate(1)
        self.sync_all()

        # check zaddr balances after the z->z spend: change = input - sent - fee
        zsendmany2notevalue = Decimal('2.0')
        zsendmanyfee = Decimal('0.0001')
        zaddrremaining = zsendmanynotevalue - zsendmany2notevalue - zsendmanyfee
        assert_equal(self.nodes[3].z_getbalance(myzaddr3), zsendmany2notevalue)
        assert_equal(self.nodes[2].z_getbalance(myzaddr), zaddrremaining)

        # Parallel encrypted wallet can't cache nullifiers for received notes,
        # and therefore can't detect spends. So it sees a balance corresponding
        # to the sum of both notes it received (one as change).
        # TODO: Devise a way to avoid this issue (#1528)
        assert_equal(self.nodes[1].z_getbalance(myzaddr), zsendmanynotevalue + zaddrremaining)

        # send node 2 zaddr on node 1 to taddr
        # This requires that node 1 be unlocked, which triggers caching of
        # uncached nullifiers.
        self.nodes[1].walletpassphrase("test", 600)
        mytaddr1 = self.nodes[1].getnewaddress()
        recipients = []
        recipients.append({"address": mytaddr1, "amount": 1.0})
        myopid = self.nodes[1].z_sendmany(myzaddr, recipients)
        self._wait_for_async_success(self.nodes[1], myopid)

        self.sync_all()
        self.nodes[1].generate(1)
        self.sync_all()

        # check zaddr balance
        # Now that the encrypted wallet has been unlocked, the note nullifiers
        # have been cached and spent notes can be detected. Thus the two wallets
        # are in agreement once more.
        zsendmany3notevalue = Decimal('1.0')
        zaddrremaining2 = zaddrremaining - zsendmany3notevalue - zsendmanyfee
        assert_equal(self.nodes[1].z_getbalance(myzaddr), zaddrremaining2)
        assert_equal(self.nodes[2].z_getbalance(myzaddr), zaddrremaining2)

        # Test viewing keys

        node3mined = Decimal('250.0')
        assert_equal({k: Decimal(v) for k, v in self.nodes[3].z_gettotalbalance().items()}, {
            'transparent': node3mined,
            'private': zsendmany2notevalue,
            'total': node3mined + zsendmany2notevalue,
        })

        # add node 1 address and node 2 viewing key to node 3
        myzvkey = self.nodes[2].z_exportviewingkey(myzaddr)
        self.nodes[3].importaddress(mytaddr1)
        self.nodes[3].z_importviewingkey(myzvkey)

        # Check the address has been imported: it is watch-only, so it only
        # appears when z_listaddresses is asked to include watch-only entries
        assert_equal(myzaddr in self.nodes[3].z_listaddresses(), False)
        assert_equal(myzaddr in self.nodes[3].z_listaddresses(True), True)

        # Node 3 should see the same received notes as node 2
        assert_equal(
            self.nodes[2].z_listreceivedbyaddress(myzaddr),
            self.nodes[3].z_listreceivedbyaddress(myzaddr))

        # Node 3's balances should be unchanged without explicitly requesting
        # to include watch-only balances
        assert_equal({k: Decimal(v) for k, v in self.nodes[3].z_gettotalbalance().items()}, {
            'transparent': node3mined,
            'private': zsendmany2notevalue,
            'total': node3mined + zsendmany2notevalue,
        })

        # Wallet can't cache nullifiers for notes received by addresses it only has a
        # viewing key for, and therefore can't detect spends. So it sees a balance
        # corresponding to the sum of all notes the address received.
        # TODO: Fix this during the Sapling upgrade (via #2277)
        assert_equal({k: Decimal(v) for k, v in self.nodes[3].z_gettotalbalance(1, True).items()}, {
            'transparent': node3mined + Decimal('1.0'),
            'private': zsendmany2notevalue + zsendmanynotevalue + zaddrremaining + zaddrremaining2,
            'total': node3mined + Decimal('1.0') + zsendmany2notevalue + zsendmanynotevalue + zaddrremaining + zaddrremaining2,
        })

        # Check individual balances reflect the above
        assert_equal(self.nodes[3].z_getbalance(mytaddr1), Decimal('1.0'))
        assert_equal(self.nodes[3].z_getbalance(myzaddr), zsendmanynotevalue + zaddrremaining + zaddrremaining2)
Esempio n. 60
0
 def setup_network(self):
     """Set up the test network: a single node started with
     -checkmempool and -debug=mempool."""
     # Just need one node for this test
     args = ["-checkmempool", "-debug=mempool"]
     self.nodes = []
     self.nodes.append(start_node(0, self.options.tmpdir, args))
     self.is_network_split = False