Example #1
    def create_chain_with_staleblocks(self):
        # Create stale blocks in manageable sized chunks
        print "Mine 24 (stale) blocks on Node 1, followed by 25 (main chain) block reorg from Node 0, for 12 rounds"

        for j in xrange(12):
            # Disconnect node 0 so it can mine a longer reorg chain without knowing about node 1's soon-to-be-stale chain
            # Node 2 stays connected, so it hears about the stale blocks and then reorgs when node 0 reconnects
            # Stopping node 0 also clears its mempool, so it doesn't have node1's transactions to accidentally mine
            stop_node(self.nodes[0],0)
            self.nodes[0]=start_node(0, self.options.tmpdir, ["-debug","-maxreceivebuffer=20000","-blockmaxsize=999000", "-checkblocks=5"], timewait=900)
            # Mine 24 blocks in node 1
            self.utxo = self.nodes[1].listunspent()
            for i in xrange(24):
                if j == 0:
                    self.mine_full_block(self.nodes[1],self.address[1])
                else:
                    self.nodes[1].generate(1) #tx's already in mempool from previous disconnects

            # Reorg back with 25 block chain from node 0
            self.utxo = self.nodes[0].listunspent()
            for i in xrange(25): 
                self.mine_full_block(self.nodes[0],self.address[0])

            # Create connections in this order so both nodes can see the reorg at the same time
            connect_nodes(self.nodes[1], 0)
            connect_nodes(self.nodes[2], 0)
            sync_blocks(self.nodes[0:3])

        print "Usage can be over target because of high stale rate:", calc_usage(self.prunedir)
Example #2
 def run_test(self):
     self.nodes[0].generate(3)
     stop_node(self.nodes[0], 0)
     wait_bitcoinds()
     self.nodes[0]=start_node(0, self.options.tmpdir, ["-debug", "-reindex", "-checkblockindex=1"])
     assert_equal(self.nodes[0].getblockcount(), 3)
     print "Success"
Example #3
 def run_test(self):
     self.nodes[0].generate(3)
     stop_node(self.nodes[0], 0)
     wait_litecoinzds()
     self.nodes[0]=start_node(0, self.options.tmpdir, ["-debug", "-reindex", "-checkblockindex=1"])
     assert_equal(self.nodes[0].getblockcount(), 3)
     print "Success"
Example #4
    def excessiveblocksize_test(self):
        self.log.info("Testing -excessiveblocksize")

        self.log.info("  Set to twice the default, i.e. %d bytes" %
                      (2 * LEGACY_MAX_BLOCK_SIZE))
        stop_node(self.nodes[0], 0)
        self.extra_args = [[
            "-excessiveblocksize=%d" % (2 * LEGACY_MAX_BLOCK_SIZE)
        ]]
        self.nodes[0] = start_node(0, self.options.tmpdir, self.extra_args[0])
        self.check_excessive(2 * LEGACY_MAX_BLOCK_SIZE)
        # Check for EB correctness in the subver string
        self.check_subversion("/Commercium:.*\(EB2\.0; .*\)/")

        self.log.info(
            "  Attempt to set below legacy limit of 1MB - try %d bytes" %
            LEGACY_MAX_BLOCK_SIZE)
        outputchecker = OutputChecker()
        stop_node(self.nodes[0], 0)
        try:
            self.extra_args = [[
                "-excessiveblocksize=%d" % LEGACY_MAX_BLOCK_SIZE
            ]]
            self.nodes[0] = start_node(0,
                                       self.options.tmpdir,
                                       self.extra_args[0],
                                       stderr_checker=outputchecker)
        except Exception as e:
            assert (outputchecker.contains(
                'Error: Excessive block size must be > 1,000,000 bytes (1MB)'))
            assert_equal(
                'commerciumd exited with status 1 during initialization',
                str(e))
        else:
            raise AssertionError("Must not accept excessiveblocksize"
                                 " value < %d bytes" % LEGACY_MAX_BLOCK_SIZE)

        self.log.info("  Attempt to set below blockmaxsize (mining limit)")
        outputchecker = OutputChecker()
        try:
            self.extra_args = [[
                '-blockmaxsize=1500000', '-excessiveblocksize=1300000'
            ]]
            self.nodes[0] = start_node(0,
                                       self.options.tmpdir,
                                       self.extra_args[0],
                                       stderr_checker=outputchecker)
        except Exception as e:
            assert (outputchecker.contains('Error: ' +
                                           MAX_GENERATED_BLOCK_SIZE_ERROR))
            assert_equal(
                'commerciumd exited with status 1 during initialization',
                str(e))
        else:
            raise AssertionError('Must not accept excessiveblocksize'
                                 ' below blockmaxsize')

        # Make sure we leave the test with a node running as this is what the
        # framework expects.
        self.nodes[0] = start_node(0, self.options.tmpdir, [])
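The two failure cases above share one pattern: start the node with bad arguments, expect startup to throw, and then check both the captured stderr and the exception message. A condensed sketch of that pattern is shown below; it reuses the start_node/OutputChecker helpers from this example, and the method name assert_start_fails_with is hypothetical.

    # Sketch only: wraps the try/except/else pattern from the example above.
    # Assumes the same start_node()/OutputChecker helpers; the name is hypothetical.
    def assert_start_fails_with(self, extra_args, expected_stderr):
        checker = OutputChecker()
        try:
            start_node(0, self.options.tmpdir, extra_args,
                       stderr_checker=checker)
        except Exception as e:
            assert checker.contains(expected_stderr)
            assert 'exited with status 1' in str(e)
        else:
            raise AssertionError("node unexpectedly started with %r" % (extra_args,))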
Example #5
    def run_test(self):
        ###########################
        # setban/listbanned tests #
        ###########################
        assert_equal(len(self.nodes[1].getpeerinfo()), 2)  # node1 should have 2 connections to node0 at this point
        self.nodes[1].setban("127.0.0.1", "add")
        assert wait_until(lambda: len(self.nodes[1].getpeerinfo()) == 0, timeout=10)
        assert_equal(len(self.nodes[1].getpeerinfo()), 0)  # all nodes must be disconnected at this point
        assert_equal(len(self.nodes[1].listbanned()), 1)
        self.nodes[1].clearbanned()
        assert_equal(len(self.nodes[1].listbanned()), 0)
        self.nodes[1].setban("127.0.0.0/24", "add")
        assert_equal(len(self.nodes[1].listbanned()), 1)
        # This will throw an exception because 127.0.0.1 is within range 127.0.0.0/24
        assert_raises_jsonrpc(-23, "IP/Subnet already banned", self.nodes[1].setban, "127.0.0.1", "add")
        # This will throw an exception because 127.0.0.1/42 is not a real subnet
        assert_raises_jsonrpc(-30, "Error: Invalid IP/Subnet", self.nodes[1].setban, "127.0.0.1/42", "add")
        assert_equal(len(self.nodes[1].listbanned()), 1)  # still only one banned ip because 127.0.0.1 is within the range of 127.0.0.0/24
        # This will throw an exception because 127.0.0.1 was not added above
        assert_raises_jsonrpc(-30, "Error: Unban failed", self.nodes[1].setban, "127.0.0.1", "remove")
        assert_equal(len(self.nodes[1].listbanned()), 1)
        self.nodes[1].setban("127.0.0.0/24", "remove")
        assert_equal(len(self.nodes[1].listbanned()), 0)
        self.nodes[1].clearbanned()
        assert_equal(len(self.nodes[1].listbanned()), 0)

        # test persisted banlist
        self.nodes[1].setban("127.0.0.0/32", "add")
        self.nodes[1].setban("127.0.0.0/24", "add")
        self.nodes[1].setban("192.168.0.1", "add", 1)  # ban for 1 seconds
        self.nodes[1].setban("2001:4d48:ac57:400:cacf:e9ff:fe1d:9c63/19", "add", 1000)  # ban for 1000 seconds
        listBeforeShutdown = self.nodes[1].listbanned()
        assert_equal("192.168.0.1/32", listBeforeShutdown[2]['address'])
        assert wait_until(lambda: len(self.nodes[1].listbanned()) == 3, timeout=10)

        stop_node(self.nodes[1], 1)

        self.nodes[1] = start_node(1, self.options.tmpdir)
        listAfterShutdown = self.nodes[1].listbanned()
        assert_equal("127.0.0.0/24", listAfterShutdown[0]['address'])
        assert_equal("127.0.0.0/32", listAfterShutdown[1]['address'])
        assert_equal("/19" in listAfterShutdown[2]['address'], True)

        # Clear ban lists
        self.nodes[1].clearbanned()
        connect_nodes_bi(self.nodes, 0, 1)

        ###########################
        # RPC disconnectnode test #
        ###########################
        address1 = self.nodes[0].getpeerinfo()[0]['addr']
        self.nodes[0].disconnectnode(address=address1)
        assert wait_until(lambda: len(self.nodes[0].getpeerinfo()) == 1, timeout=10)
        assert not [node for node in self.nodes[0].getpeerinfo() if node['addr'] == address1]

        connect_nodes_bi(self.nodes, 0, 1)  # reconnect the node
        assert [node for node in self.nodes[0].getpeerinfo() if node['addr'] == address1]
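The timed bans above (1 second and 1000 seconds) are what make the persistence check interesting: the 1-second ban is expected to drop out on its own, which is why the test waits for listbanned() to shrink to three entries before shutting down. A hedged sketch of waiting for one specific ban to expire, using the same wait_until helper, is shown below; the function name is hypothetical.

# Sketch only: wait until a timed ban on `subnet` has expired, using the
# same wait_until() helper as the example above. The name is hypothetical.
def wait_for_ban_expiry(node, subnet, timeout=10):
    def ban_gone():
        return all(entry['address'] != subnet for entry in node.listbanned())
    assert wait_until(ban_gone, timeout=timeout)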
Example #6
    def run_test(self):
        tmpdir = self.options.tmpdir

        # Make sure we use hd, keep masterkeyid
        masterkeyid = self.nodes[1].getwalletinfo()['hdmasterkeyid']
        assert_equal(len(masterkeyid), 40)

        # Import a non-HD private key in the HD wallet
        non_hd_add = self.nodes[0].getnewaddress()
        self.nodes[1].importprivkey(self.nodes[0].dumpprivkey(non_hd_add))

        # This should be enough to keep the master key and the non-HD key
        self.nodes[1].backupwallet(tmpdir + "hd.bak")
        #self.nodes[1].dumpwallet(tmpdir + "hd.dump")

        # Derive some HD addresses and remember the last
        # Also send funds to each address
        logging.info("Derive HD addresses ...")
        self.nodes[0].generate(101)
        hd_add = None
        num_hd_adds = 300
        for i in range(num_hd_adds):
            hd_add = self.nodes[1].getnewaddress()
            hd_info = self.nodes[1].validateaddress(hd_add)
            assert_equal(hd_info["hdkeypath"], "m/0'/0'/" + str(i + 1) + "'")
            assert_equal(hd_info["hdmasterkeyid"], masterkeyid)
            self.nodes[0].sendtoaddress(hd_add, 1)
            self.nodes[0].generate(1)
        self.nodes[0].sendtoaddress(non_hd_add, 1)
        self.nodes[0].generate(1)

        self.sync_all()
        assert_equal(self.nodes[1].getbalance(), num_hd_adds + 1)

        logging.info("Restore backup ...")
        stop_node(self.nodes[1], 1)
        os.remove(self.options.tmpdir + "/node1/regtest/wallet.dat")
        shutil.copyfile(tmpdir + "hd.bak",
                        tmpdir + "/node1/regtest/wallet.dat")
        self.nodes[1] = start_node(1, self.options.tmpdir, self.node_args[1])

        # Assert that derivation is deterministic
        logging.info("Check derivation...")
        hd_add_2 = None
        for _ in range(num_hd_adds):
            hd_add_2 = self.nodes[1].getnewaddress()
            hd_info_2 = self.nodes[1].validateaddress(hd_add_2)
            assert_equal(hd_info_2["hdkeypath"], "m/0'/0'/" + str(_ + 1) + "'")
            assert_equal(hd_info_2["hdmasterkeyid"], masterkeyid)
        assert_equal(hd_add, hd_add_2)

        # Needs rescan
        logging.info("Rescan ...")
        stop_node(self.nodes[1], 1)
        self.nodes[1] = start_node(1, self.options.tmpdir,
                                   self.node_args[1] + ['-rescan'])
        assert_equal(self.nodes[1].getbalance(), num_hd_adds + 1)
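The restore sequence here (stop the node, replace wallet.dat with the backup, restart, then restart once more with -rescan to recover the balance) can be condensed into a single helper. A minimal sketch follows; it assumes the same stop_node/start_node utilities and node<i>/regtest directory layout shown above (os and shutil are already imported by the example), and the method name restore_wallet_backup is hypothetical.

    # Sketch only: condenses the stop / replace wallet.dat / restart-with-rescan
    # steps from the example above. Assumes the same stop_node()/start_node()
    # helpers and datadir layout; the method name is hypothetical.
    def restore_wallet_backup(self, index, backup_path, extra_args=None):
        wallet_path = os.path.join(self.options.tmpdir,
                                   "node%d" % index, "regtest", "wallet.dat")
        stop_node(self.nodes[index], index)
        os.remove(wallet_path)
        shutil.copyfile(backup_path, wallet_path)
        # -rescan so the restored keys pick up their historical transactions
        self.nodes[index] = start_node(index, self.options.tmpdir,
                                       list(extra_args or []) + ['-rescan'])
        return self.nodes[index]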
Example #7
 def reindex(self, justchainstate=False):
     self.nodes[0].generate(3)
     blockcount = self.nodes[0].getblockcount()
     stop_node(self.nodes[0], 0)
     wait_bitcoinds()
     self.nodes[0]=start_node(0, self.options.tmpdir, ["-debug", "-reindex-chainstate" if justchainstate else "-reindex", "-checkblockindex=1"])
     while self.nodes[0].getblockcount() < blockcount:
         time.sleep(0.1)
     assert_equal(self.nodes[0].getblockcount(), blockcount)
     print("Success")
Example #8
    def run_test (self):
        tmpdir = self.options.tmpdir

        # Make sure we use hd, keep masterkeyid
        masterkeyid = self.nodes[1].getwalletinfo()['hdmasterkeyid']
        assert_equal(len(masterkeyid), 40)

        # Import a non-HD private key in the HD wallet
        non_hd_add = self.nodes[0].getnewaddress()
        self.nodes[1].importprivkey(self.nodes[0].dumpprivkey(non_hd_add))

        # This should be enough to keep the master key and the non-HD key 
        self.nodes[1].backupwallet(tmpdir + "hd.bak")
        #self.nodes[1].dumpwallet(tmpdir + "hd.dump")

        # Derive some HD addresses and remember the last
        # Also send funds to each address
        logging.info("Derive HD addresses ...")
        self.nodes[0].generate(101)
        hd_add = None
        num_hd_adds = 300
        for i in range(num_hd_adds):
            hd_add = self.nodes[1].getnewaddress()
            hd_info = self.nodes[1].validateaddress(hd_add)
            assert_equal(hd_info["hdkeypath"], "m/0'/0'/"+str(i+1)+"'")
            assert_equal(hd_info["hdmasterkeyid"], masterkeyid)
            self.nodes[0].sendtoaddress(hd_add, 1)
            self.nodes[0].generate(1)
        self.nodes[0].sendtoaddress(non_hd_add, 1)
        self.nodes[0].generate(1)

        self.sync_all()
        assert_equal(self.nodes[1].getbalance(), num_hd_adds + 1)

        logging.info("Restore backup ...")
        stop_node(self.nodes[1], 1)
        os.remove(self.options.tmpdir + "/node1/regtest/wallet.dat")
        shutil.copyfile(tmpdir + "hd.bak", tmpdir + "/node1/regtest/wallet.dat")
        self.nodes[1] = start_node(1, self.options.tmpdir, self.node_args[1])

        # Assert that derivation is deterministic
        logging.info ("Check derivation...")
        hd_add_2 = None
        for _ in range(num_hd_adds):
            hd_add_2 = self.nodes[1].getnewaddress()
            hd_info_2 = self.nodes[1].validateaddress(hd_add_2)
            assert_equal(hd_info_2["hdkeypath"], "m/0'/0'/"+str(_+1)+"'")
            assert_equal(hd_info_2["hdmasterkeyid"], masterkeyid)
        assert_equal(hd_add, hd_add_2)

        # Needs rescan
        logging.info("Rescan ...")
        stop_node(self.nodes[1], 1)
        self.nodes[1] = start_node(1, self.options.tmpdir, self.node_args[1] + ['-rescan'])
        assert_equal(self.nodes[1].getbalance(), num_hd_adds + 1)
Example #9
    def reorg_test(self):
        # Node 1 will mine a 300 block chain starting 287 blocks back from Node 0 and Node 2's tip
        # This will cause Node 2 to do a reorg requiring 288 blocks of undo data to the reorg_test chain
        # Reboot node 1 to clear its mempool (hopefully make the invalidate faster)
        # Lower the block max size so we don't keep mining all our big mempool transactions (from disconnected blocks)
        stop_node(self.nodes[1],1)
        self.nodes[1]=start_node(1, self.options.tmpdir, ["-debug","-maxreceivebuffer=20000","-blockmaxsize=5000", "-checkblocks=5", "-disablesafemode"], timewait=900)

        height = self.nodes[1].getblockcount()
        print "Current block height:", height

        invalidheight = height-287
        badhash = self.nodes[1].getblockhash(invalidheight)
        print "Invalidating block at height:",invalidheight,badhash
        self.nodes[1].invalidateblock(badhash)

        # We've now switched to our previously mined 24-block fork on node 1, but that's not what we want
        # So invalidate that fork as well, until we're on the same chain as node 0/2 (but at an ancestor 288 blocks ago)
        mainchainhash = self.nodes[0].getblockhash(invalidheight - 1)
        curhash = self.nodes[1].getblockhash(invalidheight - 1)
        while curhash != mainchainhash:
            self.nodes[1].invalidateblock(curhash)
            curhash = self.nodes[1].getblockhash(invalidheight - 1)

        assert(self.nodes[1].getblockcount() == invalidheight - 1)
        print "New best height", self.nodes[1].getblockcount()

        # Reboot node1 to clear those giant tx's from mempool
        stop_node(self.nodes[1],1)
        self.nodes[1]=start_node(1, self.options.tmpdir, ["-debug","-maxreceivebuffer=20000","-blockmaxsize=5000", "-checkblocks=5", "-disablesafemode"], timewait=900)

        print "Generating new longer chain of 300 more blocks"
        self.nodes[1].generate(300)

        print "Reconnect nodes"
        connect_nodes(self.nodes[0], 1)
        connect_nodes(self.nodes[2], 1)
        sync_blocks(self.nodes[0:3])

        print "Verify height on node 2:",self.nodes[2].getblockcount()
        print "Usage possibly still high bc of stale blocks in block files:", calc_usage(self.prunedir)

        print "Mine 220 more blocks so we have requisite history (some blocks will be big and cause pruning of previous chain)"
        self.nodes[0].generate(220) #node 0 has many large tx's in its mempool from the disconnects
        sync_blocks(self.nodes[0:3])

        usage = calc_usage(self.prunedir)
        print "Usage should be below target:", usage
        if (usage > 550):
            raise AssertionError("Pruning target not being met")

        return invalidheight,badhash
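The while curhash != mainchainhash loop is the core trick here: it keeps invalidating node 1's own block at the fork height until node 1 sits on the same ancestor as nodes 0 and 2. A hedged sketch of that step as a standalone helper is shown below; the function name rewind_to_main_chain is hypothetical.

# Sketch only: extracted from the invalidation loop above. Repeatedly
# invalidates `node`'s block at `height` until it matches the hash that
# `reference_node` reports for the same height. The name is hypothetical.
def rewind_to_main_chain(node, reference_node, height):
    mainchainhash = reference_node.getblockhash(height)
    curhash = node.getblockhash(height)
    while curhash != mainchainhash:
        node.invalidateblock(curhash)
        curhash = node.getblockhash(height)
    # Mirrors the example's final check: the tip is now the shared ancestor
    assert node.getblockcount() == height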
Example #10
    def run_test(self):
        # test default wallet location
        assert os.path.isfile(os.path.join(self.options.tmpdir, "node0", "regtest", "wallet.dat"))

        # test alternative wallet file name in datadir
        stop_node(self.nodes[0], 0)
        self.nodes[0] = start_node(0, self.options.tmpdir, ["-wallet=altwallet.dat"])
        assert os.path.isfile(os.path.join(self.options.tmpdir, "node0", "regtest", "altwallet.dat"))

        # test wallet file outside datadir
        tempname = os.path.join(self.options.tmpdir, "outsidewallet.dat")
        stop_node(self.nodes[0], 0)
        self.nodes[0] = start_node(0, self.options.tmpdir, ["-wallet=%s" % tempname])
        assert os.path.isfile(tempname)

        # test the case where absolute path does not exist
        assert not os.path.isdir("/this_directory_must_not_exist")
        invalidpath = os.path.join("/this_directory_must_not_exist/", "foo.dat")
        stop_node(self.nodes[0], 0)
        assert_start_raises_init_error(0, "-wallet=%s" % invalidpath,
            "Error: Absolute path %s does not exist")

        # relative path does not exist
        invalidpath = os.path.join("wallet", "foo.dat")
        assert_start_raises_init_error(0, "-wallet=%s" % invalidpath,
            "Error: Relative path %s does not exist")

        # create dir and retry
        os.mkdir(os.path.join(self.options.tmpdir, "node0", "regtest", "wallet"))
        self.nodes[0] = start_node(0, self.options.tmpdir, ["-wallet=%s" % invalidpath])
Example #11
    def run_test(self):
        # test default log file name
        assert os.path.isfile(
            os.path.join(self.options.tmpdir, "node0", "regtest", "debug.log"))

        # test alternative log file name in datadir
        stop_node(self.nodes[0], 0)
        self.nodes[0] = start_node(0, self.options.tmpdir,
                                   ["-debuglogfile=foo.log"])
        assert os.path.isfile(
            os.path.join(self.options.tmpdir, "node0", "regtest", "foo.log"))

        # test alternative log file name outside datadir
        tempname = os.path.join(self.options.tmpdir, "foo.log")
        stop_node(self.nodes[0], 0)
        self.nodes[0] = start_node(0, self.options.tmpdir,
                                   ["-debuglogfile=%s" % tempname])
        assert os.path.isfile(tempname)

        # check that invalid log (relative) will cause error
        invdir = os.path.join(self.options.tmpdir, "node0", "regtest", "foo")
        invalidname = os.path.join("foo", "foo.log")
        stop_node(self.nodes[0], 0)
        assert_start_raises_init_error(0, "-debuglogfile=%s" % (invalidname),
                                       "Error: Could not open debug log file")
        assert not os.path.isfile(os.path.join(invdir, "foo.log"))

        # check that invalid log (relative) works after path exists
        os.mkdir(invdir)
        self.nodes[0] = start_node(0, self.options.tmpdir,
                                   ["-debuglogfile=%s" % (invalidname)])
        assert os.path.isfile(os.path.join(invdir, "foo.log"))

        # check that invalid log (absolute) will cause error
        stop_node(self.nodes[0], 0)
        invdir = os.path.join(self.options.tmpdir, "foo")
        invalidname = os.path.join(invdir, "foo.log")
        assert_start_raises_init_error(0, "-debuglogfile=%s" % invalidname,
                                       "Error: Could not open debug log file")

        # check that invalid log (absolute) works after path exists
        os.mkdir(invdir)
        self.nodes[0] = start_node(0, self.options.tmpdir,
                                   ["-debuglogfile=%s" % invalidname])
        assert os.path.isfile(os.path.join(invdir, "foo.log"))
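Examples #10 and #11 both exercise the same two-step check with assert_start_raises_init_error: startup must fail while the target directory is missing, and must succeed once it has been created. A compact sketch of that shared pattern is below; it assumes the assert_start_raises_init_error/start_node helpers these examples already use (os is imported in both), and the method name is hypothetical.

    # Sketch only: shared "fail before mkdir, succeed after" pattern from the
    # two examples above. Helper functions from those examples are assumed to
    # be available; this method name is hypothetical.
    def assert_needs_existing_dir(self, option, missing_dir, expected_error):
        # Startup must fail while the directory is missing ...
        assert_start_raises_init_error(0, option, expected_error)
        # ... and succeed once it exists.
        os.mkdir(missing_dir)
        self.nodes[0] = start_node(0, self.options.tmpdir, [option])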
Example #12
    def run_test(self):
        parent = self.nodes[0]
        parent2 = self.nodes[1]
        sidechain = self.nodes[2]
        sidechain2 = self.nodes[3]

        parent.generate(101)
        sidechain.generate(101)

        addrs = sidechain.getpeginaddress()
        addr = addrs["mainchain_address"]
        print('addrs', addrs)
        print(parent.validateaddress(addr))
        txid1 = parent.sendtoaddress(addr, 24)
        # 10+2 confirms required to get into mempool and confirm
        parent.generate(1)
        time.sleep(2)
        proof = parent.gettxoutproof([txid1])

        raw = parent.getrawtransaction(txid1)
        print('raw', parent.getrawtransaction(txid1, True))

        print("Attempting peg-in")
        # First attempt fails the consensus check but gives useful result
        try:
            pegtxid = sidechain.claimpegin(raw, proof)
            raise Exception(
                "Peg-in should not be mature enough yet, need another block.")
        except JSONRPCException as e:
            print('RPC ERROR:', e.error['message'])
            assert (
                "Peg-in Bitcoin transaction needs more confirmations to be sent."
                in e.error["message"])

        # Second attempt simply doesn't hit mempool bar
        parent.generate(10)
        try:
            pegtxid = sidechain.claimpegin(raw, proof)
            raise Exception(
                "Peg-in should not be mature enough yet, need another block.")
        except JSONRPCException as e:
            print('RPC ERROR:', e.error['message'])
            assert (
                "Peg-in Bitcoin transaction needs more confirmations to be sent."
                in e.error["message"])

        try:
            pegtxid = sidechain.createrawpegin(raw, proof, 'AEIOU')
            raise Exception("Peg-in with non-hex claim_script should fail.")
        except JSONRPCException as e:
            print('RPC ERROR:', e.error['message'])
            assert ("Given claim_script is not hex." in e.error["message"])

        # Should fail due to non-matching wallet address
        try:
            scriptpubkey = sidechain.validateaddress(
                get_new_unconfidential_address(sidechain))["scriptPubKey"]
            pegtxid = sidechain.claimpegin(raw, proof, scriptpubkey)
            raise Exception(
                "Peg-in with non-matching claim_script should fail.")
        except JSONRPCException as e:
            print('RPC ERROR:', e.error['message'])
            assert (
                "Given claim_script does not match the given Bitcoin transaction."
                in e.error["message"])

        # 12 confirms allows in mempool
        parent.generate(1)
        # Should succeed via wallet lookup for address match, and when given
        pegtxid1 = sidechain.claimpegin(raw, proof)

        # Will invalidate the block that confirms this transaction later
        self.sync_all()
        blockhash = sidechain2.generate(1)
        self.sync_all()
        sidechain.generate(5)

        tx1 = sidechain.gettransaction(pegtxid1)

        print('tx1', tx1)
        if "confirmations" in tx1 and tx1["confirmations"] == 6:
            print("Peg-in is confirmed: Success!")
        else:
            raise Exception("Peg-in confirmation has failed.")

        # Look at pegin fields
        decoded = sidechain.decoderawtransaction(tx1["hex"])
        assert decoded["vin"][0]["is_pegin"] == True
        assert len(decoded["vin"][0]["pegin_witness"]) > 0
        # Check that there's sufficient fee for the peg-in
        vsize = decoded["vsize"]
        fee_output = decoded["vout"][1]
        fallbackfee_pervbyte = Decimal("0.00001") / Decimal("1000")
        assert fee_output["scriptPubKey"]["type"] == "fee"
        assert fee_output["value"] >= fallbackfee_pervbyte * vsize

        # Quick reorg checks of pegs
        sidechain.invalidateblock(blockhash[0])
        if sidechain.gettransaction(pegtxid1)["confirmations"] != 0:
            raise Exception(
                "Peg-in didn't unconfirm after invalidateblock call.")
        # Re-enters block
        sidechain.generate(1)
        if sidechain.gettransaction(pegtxid1)["confirmations"] != 1:
            raise Exception("Peg-in should have one confirm on side block.")
        sidechain.reconsiderblock(blockhash[0])
        if sidechain.gettransaction(pegtxid1)["confirmations"] != 6:
            raise Exception("Peg-in should be back to 6 confirms.")

        # Do multiple claims in mempool
        n_claims = 6

        print("Flooding mempool with many small claims")
        pegtxs = []
        sidechain.generate(101)

        # Do mixture of raw peg-in and automatic peg-in tx construction
        # where raw creation is done on another node
        for i in range(n_claims):
            addrs = sidechain.getpeginaddress()
            txid = parent.sendtoaddress(addrs["mainchain_address"], 1)
            parent.generate(1)
            proof = parent.gettxoutproof([txid])
            raw = parent.getrawtransaction(txid)
            if i % 2 == 0:
                parent.generate(11)
                pegtxs += [sidechain.claimpegin(raw, proof)]
            else:
                # The raw API doesn't check for the additional 2 confirmation buffer
                # So we only get 10 confirms then send off. Miners will add to block anyways.

                # Don't mature whole way yet to test signing immature peg-in input
                parent.generate(8)
                # Wallet in sidechain2 gets funds instead of sidechain
                raw_pegin = sidechain2.createrawpegin(
                    raw, proof, addrs["claim_script"])["hex"]
                # First node should also be able to make a valid transaction with or without 3rd arg
                # since this wallet originated the claim_script itself
                sidechain.createrawpegin(raw, proof, addrs["claim_script"])
                sidechain.createrawpegin(raw, proof)
                signed_pegin = sidechain.signrawtransaction(raw_pegin)
                assert (signed_pegin["complete"])
                assert ("warning"
                        in signed_pegin)  # warning for immature peg-in
                # fully mature them now
                parent.generate(1)
                pegtxs += [sidechain.sendrawtransaction(signed_pegin["hex"])]

        self.sync_all()
        sidechain2.generate(1)
        for i, pegtxid in enumerate(pegtxs):
            if i % 2 == 0:
                tx = sidechain.gettransaction(pegtxid)
            else:
                tx = sidechain2.gettransaction(pegtxid)
            if "confirmations" not in tx or tx["confirmations"] == 0:
                raise Exception("Peg-in confirmation has failed.")

        print("Test pegout")
        self.test_pegout(get_new_unconfidential_address(parent), sidechain)

        print("Test pegout P2SH")
        parent_chain_addr = get_new_unconfidential_address(parent)
        parent_pubkey = parent.validateaddress(parent_chain_addr)["pubkey"]
        parent_chain_p2sh_addr = parent.createmultisig(
            1, [parent_pubkey])["address"]
        self.test_pegout(parent_chain_p2sh_addr, sidechain)

        print("Test pegout Garbage")
        parent_chain_addr = "garbage"
        try:
            self.test_pegout(parent_chain_addr, sidechain)
            raise Exception("A garbage address should fail.")
        except JSONRPCException as e:
            assert ("Invalid Bitcoin address" in e.error["message"])

        print("Test pegout Garbage valid")
        prev_txid = sidechain.sendtoaddress(sidechain.getnewaddress(), 1)
        sidechain.generate(1)
        pegout_chain = 'a' * 64
        pegout_hex = 'b' * 500
        inputs = [{"txid": prev_txid, "vout": 0}]
        outputs = {"vdata": [pegout_chain, pegout_hex]}
        rawtx = sidechain.createrawtransaction(inputs, outputs)
        raw_pegout = sidechain.decoderawtransaction(rawtx)

        assert 'vout' in raw_pegout and len(raw_pegout['vout']) > 0
        pegout_tested = False
        for output in raw_pegout['vout']:
            scriptPubKey = output['scriptPubKey']
            if 'type' in scriptPubKey and scriptPubKey['type'] == 'nulldata':
                assert ('pegout_hex' in scriptPubKey
                        and 'pegout_asm' in scriptPubKey
                        and 'pegout_type' in scriptPubKey
                        and 'pegout_chain' in scriptPubKey
                        and 'pegout_reqSigs' not in scriptPubKey
                        and 'pegout_addresses' not in scriptPubKey)
                assert scriptPubKey['pegout_type'] == 'nonstandard'
                assert scriptPubKey['pegout_chain'] == pegout_chain
                assert scriptPubKey['pegout_hex'] == pegout_hex
                pegout_tested = True
                break
        assert pegout_tested

        print(
            "Now test failure to validate peg-ins based on intermittant bitcoind rpc failure"
        )
        stop_node(self.nodes[1], 1)
        txid = parent.sendtoaddress(addr, 1)
        parent.generate(12)
        proof = parent.gettxoutproof([txid])
        raw = parent.getrawtransaction(txid)
        stuck_peg = sidechain.claimpegin(raw, proof)
        sidechain.generate(1)
        print("Waiting to ensure block is being rejected by sidechain2")
        time.sleep(5)

        assert (sidechain.getblockcount() != sidechain2.getblockcount())

        print("Restarting parent2")
        self.nodes[1] = start_node(1,
                                   self.options.tmpdir,
                                   self.extra_args[1],
                                   binary=self.binary,
                                   chain=self.parent_chain,
                                   cookie_auth=True)
        parent2 = self.nodes[1]
        connect_nodes_bi(self.nodes, 0, 1)

        # Don't make a block, race condition when pegin-invalid block
        # is awaiting further validation, nodes reject subsequent blocks
        # even ones they create
        print(
            "Now waiting for node to re-evaluate peg-in witness failed block... should take a few seconds"
        )
        self.sync_all()
        print("Completed!\n")
        print("Now send funds out in two stages, partial, and full")
        some_btc_addr = get_new_unconfidential_address(parent)
        bal_1 = sidechain.getwalletinfo()["balance"]["bitcoin"]
        try:
            sidechain.sendtomainchain(some_btc_addr, bal_1 + 1)
            raise Exception("Sending out too much; should have failed")
        except JSONRPCException as e:
            assert ("Insufficient funds" in e.error["message"])

        assert (sidechain.getwalletinfo()["balance"]["bitcoin"] == bal_1)
        try:
            sidechain.sendtomainchain(some_btc_addr + "b", bal_1 - 1)
            raise Exception("Sending to invalid address; should have failed")
        except JSONRPCException as e:
            assert ("Invalid Bitcoin address" in e.error["message"])

        assert (sidechain.getwalletinfo()["balance"]["bitcoin"] == bal_1)
        try:
            sidechain.sendtomainchain("1Nro9WkpaKm9axmcfPVp79dAJU1Gx7VmMZ",
                                      bal_1 - 1)
            raise Exception(
                "Sending to mainchain address when should have been testnet; should have failed"
            )
        except JSONRPCException as e:
            assert ("Invalid Bitcoin address" in e.error["message"])

        assert (sidechain.getwalletinfo()["balance"]["bitcoin"] == bal_1)

        peg_out_txid = sidechain.sendtomainchain(some_btc_addr, 1)

        peg_out_details = sidechain.decoderawtransaction(
            sidechain.getrawtransaction(peg_out_txid))
        # peg-out output, change output, and explicit fee output
        assert (len(peg_out_details["vout"]) == 3)
        found_pegout_value = False
        for output in peg_out_details["vout"]:
            if "value" in output and output["value"] == 1:
                found_pegout_value = True
        assert (found_pegout_value)

        bal_2 = sidechain.getwalletinfo()["balance"]["bitcoin"]
        # Make sure balance went down
        assert (bal_2 + 1 < bal_1)

        sidechain.sendtomainchain(some_btc_addr, bal_2, True)

        assert ("bitcoin" not in sidechain.getwalletinfo()["balance"])

        print('Success!')
Example #13
    def run_test(self):
        self.log.info("Test setban and listbanned RPCs")

        self.log.info("setban: successfully ban single IP address")
        # node1 should have 2 connections to node0 at this point
        assert_equal(len(self.nodes[1].getpeerinfo()), 2)
        self.nodes[1].setban("127.0.0.1", "add")
        wait_until(lambda: len(self.nodes[1].getpeerinfo()) == 0)
        # all nodes must be disconnected at this point
        assert_equal(len(self.nodes[1].getpeerinfo()), 0)
        assert_equal(len(self.nodes[1].listbanned()), 1)

        self.log.info("clearbanned: successfully clear ban list")
        self.nodes[1].clearbanned()
        assert_equal(len(self.nodes[1].listbanned()), 0)
        self.nodes[1].setban("127.0.0.0/24", "add")

        self.log.info("setban: fail to ban an already banned subnet")
        assert_equal(len(self.nodes[1].listbanned()), 1)
        assert_raises_jsonrpc(
            -23, "IP/Subnet already banned", self.nodes[1].setban, "127.0.0.1", "add")

        self.log.info("setban: fail to ban an invalid subnet")
        assert_raises_jsonrpc(
            -30, "Error: Invalid IP/Subnet", self.nodes[1].setban, "127.0.0.1/42", "add")
        # still only one banned ip because 127.0.0.1 is within the range of
        # 127.0.0.0/24
        assert_equal(len(self.nodes[1].listbanned()), 1)

        self.log.info("setban remove: fail to unban a non-banned subnet")
        assert_raises_jsonrpc(
            -30, "Error: Unban failed", self.nodes[1].setban, "127.0.0.1", "remove")
        assert_equal(len(self.nodes[1].listbanned()), 1)

        self.log.info("setban remove: successfully unban subnet")
        self.nodes[1].setban("127.0.0.0/24", "remove")
        assert_equal(len(self.nodes[1].listbanned()), 0)
        self.nodes[1].clearbanned()
        assert_equal(len(self.nodes[1].listbanned()), 0)

        self.log.info("setban: test persistence across node restart")
        self.nodes[1].setban("127.0.0.0/32", "add")
        self.nodes[1].setban("127.0.0.0/24", "add")
        # ban for 1 second
        self.nodes[1].setban("192.168.0.1", "add", 1)
        # ban for 1000 seconds
        self.nodes[1].setban(
            "2001:4d48:ac57:400:cacf:e9ff:fe1d:9c63/19", "add", 1000)
        listBeforeShutdown = self.nodes[1].listbanned()
        assert_equal("192.168.0.1/32", listBeforeShutdown[2]['address'])
        wait_until(lambda: len(self.nodes[1].listbanned()) == 3)

        stop_node(self.nodes[1], 1)

        self.nodes[1] = start_node(1, self.options.tmpdir)
        listAfterShutdown = self.nodes[1].listbanned()
        assert_equal("127.0.0.0/24", listAfterShutdown[0]['address'])
        assert_equal("127.0.0.0/32", listAfterShutdown[1]['address'])
        assert_equal("/19" in listAfterShutdown[2]['address'], True)

        # Clear ban lists
        self.nodes[1].clearbanned()
        connect_nodes_bi(self.nodes, 0, 1)

        self.log.info("Test disconnectnode RPCs")

        self.log.info(
            "disconnectnode: fail to disconnect when calling with address and nodeid")
        address1 = self.nodes[0].getpeerinfo()[0]['addr']
        node1 = self.nodes[0].getpeerinfo()[0]['addr']
        assert_raises_jsonrpc(
            -32602, "Only one of address and nodeid should be provided.",
            self.nodes[0].disconnectnode, address=address1, nodeid=node1)

        self.log.info(
            "disconnectnode: fail to disconnect when calling with junk address")
        assert_raises_jsonrpc(-29, "Node not found in connected nodes",
                              self.nodes[0].disconnectnode, address="221B Baker Street")

        self.log.info(
            "disconnectnode: successfully disconnect node by address")
        address1 = self.nodes[0].getpeerinfo()[0]['addr']
        self.nodes[0].disconnectnode(address=address1)
        wait_until(lambda: len(self.nodes[0].getpeerinfo()) == 1)
        assert not [node for node in self.nodes[0]
                    .getpeerinfo() if node['addr'] == address1]

        self.log.info("disconnectnode: successfully reconnect node")
        # reconnect the node
        connect_nodes_bi(self.nodes, 0, 1)
        assert_equal(len(self.nodes[0].getpeerinfo()), 2)
        assert [node for node in self.nodes[0]
                .getpeerinfo() if node['addr'] == address1]

        self.log.info(
            "disconnectnode: successfully disconnect node by node id")
        id1 = self.nodes[0].getpeerinfo()[0]['id']
        self.nodes[0].disconnectnode(nodeid=id1)
        wait_until(lambda: len(self.nodes[0].getpeerinfo()) == 1)
        assert not [node for node in self.nodes[
            0].getpeerinfo() if node['id'] == id1]
Example #14
    def run_test(self):
        # stop node and read wallet.dat
        stop_node(self.nodes[0], 0)
        fresh_wallet_content = self.load_wallet_content(0)

        self.nodes[0] = start_node(0, self.options.tmpdir, ["-elysium"])
        self.connect_to_other(0)

        super().run_test()

        # generate sigma property
        owner = self.addrs[0]

        sigma_start_block = 550
        self.nodes[0].generatetoaddress(
            sigma_start_block - self.nodes[0].getblockcount(), owner)

        self.nodes[0].elysium_sendissuancefixed(owner, 1, 1, 0, '', '',
                                                'Test Sigma', '', '', '3', 1)

        self.nodes[0].generate(10)
        sigmaProperty = 3

        self.nodes[0].elysium_sendcreatedenomination(owner, sigmaProperty, '1')
        self.nodes[0].generate(10)

        # generate two coins and spend one of them
        self.nodes[0].elysium_sendmint(owner, sigmaProperty, {"0": 2})
        self.nodes[0].generate(10)

        for _ in range(10):
            self.nodes[0].mint(1)

        self.nodes[0].generate(10)

        self.nodes[0].elysium_sendspend(owner, sigmaProperty, 0)
        self.nodes[0].generate(10)

        sync_blocks(self.nodes)

        # stop, clear state and restore fresh wallet
        stop_node(self.nodes[0], 0)
        self.clear_datadir(0)

        walletfile = self.get_walletfile(0)
        with open(walletfile, 'wb+') as wf:
            wf.write(fresh_wallet_content)

        # start and sync
        self.nodes[0] = start_node(0, self.options.tmpdir, ["-elysium"])
        self.connect_to_other(0)

        sync_blocks(self.nodes)

        # verify state
        unspents = self.nodes[0].elysium_listmints()
        assert_equal(1, len(unspents))
        assert_equal(
            '2', self.nodes[0].elysium_getbalance(owner,
                                                  sigmaProperty)['balance'])

        self.nodes[0].elysium_sendspend(owner, sigmaProperty, 0)
        self.nodes[0].generate(10)

        unspents = self.nodes[0].elysium_listmints()
        assert_equal(0, len(unspents))
        assert_equal(
            '3', self.nodes[0].elysium_getbalance(owner,
                                                  sigmaProperty)['balance'])
Example #15
    def start_mn(self, mining_node_num, hot_node_num, cold_nodes,
                 num_of_nodes):

        mn_ids = dict()
        mn_aliases = dict()
        mn_collateral_addresses = dict()

        #1
        for ind, num in enumerate(cold_nodes):
            collateral_address = self.nodes[hot_node_num].getnewaddress()
            print(
                f"{ind}: Sending {self.collateral} coins to node {hot_node_num}; collateral address {collateral_address} ..."
            )
            collateral_txid = self.nodes[mining_node_num].sendtoaddress(
                collateral_address, self.collateral, "", "", False)

            self.sync_all()
            self.nodes[mining_node_num].generate(1)
            self.sync_all()

            assert_equal(self.nodes[hot_node_num].getbalance(),
                         self.collateral * (ind + 1))

            # print("node {0} collateral outputs".format(hot_node_num))
            # print(self.nodes[hot_node_num].masternode("outputs"))

            collateralvin = self.nodes[hot_node_num].masternode(
                "outputs")[collateral_txid]
            mn_ids[num] = str(collateral_txid) + "-" + str(collateralvin)
            mn_collateral_addresses[num] = collateral_address

        #2
        print(f"Stopping node {hot_node_num}...")
        stop_node(self.nodes[hot_node_num], hot_node_num)

        #3
        print(f"Creating masternode.conf for node {hot_node_num}...")
        for num, key in cold_nodes.items():
            mn_alias = f"mn{num}"
            c_txid, c_vin = mn_ids[num].split('-')
            print(
                f"{mn_alias}  127.0.0.1:{p2p_port(num)} {key} {c_txid} {c_vin}"
            )
            self.create_masternode_conf(mn_alias, hot_node_num,
                                        self.options.tmpdir, c_txid, c_vin,
                                        key, p2p_port(num))
            mn_aliases[num] = mn_alias

        #4
        print(f"Starting node {hot_node_num}...")
        self.nodes[hot_node_num] = start_node(
            hot_node_num,
            self.options.tmpdir,
            ["-debug=masternode", "-txindex=1", "-reindex"],
            timewait=900)
        for i in range(num_of_nodes):
            if i != hot_node_num:
                connect_nodes_bi(self.nodes, hot_node_num, i)

        print("Waiting 90 seconds...")
        time.sleep(90)
        print(f"Checking sync status of node {hot_node_num}...")
        assert_equal(self.nodes[hot_node_num].mnsync("status")["IsSynced"],
                     True)
        assert_equal(self.nodes[hot_node_num].mnsync("status")["IsFailed"],
                     False)

        #5
        for mn_alias in mn_aliases.values():
            print(f"Enabling MN {mn_alias}...")
            res = self.nodes[hot_node_num].masternode("start-alias", mn_alias)
            print(res)
            assert_equal(res["alias"], mn_alias)
            assert_equal(res["result"], "successful")
            time.sleep(1)

        print("Waiting for PRE_ENABLED...")
        for ind, num in enumerate(mn_ids):
            wait = 30 if ind == 0 else 0
            self.wait_for_mn_state(wait, 10, "PRE_ENABLED",
                                   self.nodes[0:num_of_nodes], mn_ids[num])

        print("Waiting for ENABLED...")
        for ind, num in enumerate(mn_ids):
            wait = 120 if ind == 0 else 0
            self.wait_for_mn_state(wait, 20, "ENABLED",
                                   self.nodes[0:num_of_nodes], mn_ids[num])

        return mn_ids, mn_aliases, mn_collateral_addresses
Example #16
    def run_test(self):
        self.log.info("test -blocknotify")
        block_count = 10
        blocks = self.nodes[1].generate(block_count)

        # wait at most 10 seconds for expected file size before reading the content
        wait_until(lambda: os.path.isfile(self.block_filename) and os.stat(
            self.block_filename).st_size >= (block_count * 65),
                   timeout=10)

        # file content should equal the generated blocks hashes
        with open(self.block_filename, 'r') as f:
            assert_equal(sorted(blocks), sorted(f.read().splitlines()))

        self.log.info("test -walletnotify")
        # wait at most 10 seconds for expected file size before reading the content
        wait_until(lambda: os.path.isfile(self.tx_filename) and os.stat(
            self.tx_filename).st_size >= (block_count * 65),
                   timeout=10)

        # file content should equal the generated transaction hashes
        txids_rpc = list(
            map(lambda t: t['txid'],
                self.nodes[1].listtransactions("*", block_count)))
        with open(self.tx_filename, 'r') as f:
            assert_equal(sorted(txids_rpc), sorted(f.read().splitlines()))
        os.remove(self.tx_filename)

        self.log.info("test -walletnotify after rescan")
        # restart node to rescan to force wallet notifications
        stop_node(self.nodes[1], 1)
        self.nodes[1] = start_node(1, self.options.tmpdir, self.extra_args[1])

        connect_nodes_bi(self.nodes, 0, 1)

        wait_until(lambda: os.path.isfile(self.tx_filename) and os.stat(
            self.tx_filename).st_size >= (block_count * 65),
                   timeout=10)

        # file content should equal the generated transaction hashes
        txids_rpc = list(
            map(lambda t: t['txid'],
                self.nodes[1].listtransactions("*", block_count)))
        with open(self.tx_filename, 'r') as f:
            assert_equal(sorted(txids_rpc), sorted(f.read().splitlines()))

        # Mine another 41 up-version blocks. -alertnotify should trigger on the 51st.
        self.log.info("test -alertnotify")
        self.nodes[1].generate(41)
        self.sync_all()

        # Give bitcoind 10 seconds to write the alert notification
        wait_until(lambda: os.path.isfile(self.alert_filename) and os.path.
                   getsize(self.alert_filename),
                   timeout=10)

        with open(self.alert_filename, 'r', encoding='utf8') as f:
            alert_text = f.read()

        # Mine more up-version blocks, should not get more alerts:
        self.nodes[1].generate(2)
        self.sync_all()

        with open(self.alert_filename, 'r', encoding='utf8') as f:
            alert_text2 = f.read()

        self.log.info(
            "-alertnotify should not continue notifying for more unknown version blocks"
        )
        assert_equal(alert_text, alert_text2)
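Both the -blocknotify and -walletnotify checks follow the same recipe: wait with wait_until for the notification file to reach the expected size, then compare its sorted lines against the hashes reported over RPC. A hedged sketch of that comparison as a small helper is shown below; it assumes the same wait_until/assert_equal utilities, and the function names are hypothetical.

import os

# Sketch only: wait for a notification file to contain at least
# len(expected) lines, then compare the sets of hashes, mirroring the
# -blocknotify / -walletnotify checks above. Names are hypothetical.
def _lines(filename):
    with open(filename, 'r') as f:
        return f.read().splitlines()

def assert_notification_lines(filename, expected, timeout=10):
    wait_until(lambda: os.path.isfile(filename)
               and len(_lines(filename)) >= len(expected),
               timeout=timeout)
    assert_equal(sorted(expected), sorted(_lines(filename)))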
Example #17
    def run_test(self):
        tests = ['cache', 'sync', 'ping', 'restart', 'spent']
        num_of_nodes = 3

        print("=== Test MN basics ===")
        print("Mining blocks on node 1...")
        self.nodes[1].generate(100)
        self.sync_all()

        self.nodes[1].generate(100)
        self.sync_all()

        assert_equal(self.nodes[1].getbalance(),
                     self._reward * 100)  # node_1 has 100 blocks over maturity

        print("=== Test MN activation ===")
        print("Sending 1000 coins to node 2...")
        collateraladdr = self.nodes[2].getnewaddress()
        collateraltxid = self.nodes[1].sendtoaddress(collateraladdr, 1000, "",
                                                     "", False)
        self.sync_all()
        self.nodes[1].generate(1)
        self.sync_all()

        assert_equal(self.nodes[2].getbalance(),
                     1000)  # node 2 now holds the 1000-coin collateral

        print("node 2 collateral outputs")
        print(self.nodes[2].masternode("outputs"))

        collateralvin = self.nodes[2].masternode("outputs")[collateraltxid]

        print("Stopping node 2...")
        stop_node(self.nodes[2], 2)

        mnId = str(collateraltxid) + "-" + str(collateralvin)

        print("Creating masternode.conf for node 2...")
        create_masternode_conf(
            2, self.options.tmpdir, collateraltxid, collateralvin,
            "91sY9h4AQ62bAhNk1aJ7uJeSnQzSFtz7QmW5imrKmiACm7QJLXe", p2p_port(0))

        print("Starting node 2...")
        self.nodes[2] = start_node(
            2,
            self.options.tmpdir,
            ["-debug=masternode", "-txindex=1", "-reindex"],
            timewait=900)
        connect_nodes_bi(self.nodes, 2, 0)
        connect_nodes_bi(self.nodes, 2, 1)

        print("Wating 90 seconds...")
        time.sleep(90)

        print("Checking sync status of node 2...")
        # print(self.nodes[2].mnsync("status"))
        assert_equal(self.nodes[2].mnsync("status")["IsBlockchainSynced"],
                     True)
        assert_equal(self.nodes[2].mnsync("status")["IsMasternodeListSynced"],
                     True)
        assert_equal(self.nodes[2].mnsync("status")["IsSynced"], True)
        assert_equal(self.nodes[2].mnsync("status")["IsFailed"], False)

        print("Enabling MN...")
        res = self.nodes[2].masternode("start-alias", "mn1")
        print(res)
        assert_equal(res["alias"], "mn1")
        assert_equal(res["result"], "successful")

        wait_for_it(30, 10, "PRE_ENABLED", self.nodes[0:num_of_nodes], mnId)
        wait_for_it(120, 20, "ENABLED", self.nodes[0:num_of_nodes], mnId)

        #print("Test sync after crash")
        # 1. kill (not gracefully) node0 (masternode)
        # 2. start node0 again
        # 3. Check all nodes
        # wait_for_it(120, 20, "ENABLED", self.nodes[0:num_of_nodes], mnId)

        # tests = ['cache', 'sync', 'ping', 'restart', 'spent']
        if 'cache' in tests:
            print("=== Test cache save/load ===")
            print("Stopping node 1...")
            stop_node(self.nodes[1], 1)
            print("Starting node 1...")
            self.nodes.append(
                start_node(1, self.options.tmpdir, ["-debug=masternode"]))
            connect_nodes_bi(self.nodes, 1, 0)
            connect_nodes_bi(self.nodes, 1, 2)

            wait_for_it(10, 10, "ENABLED", self.nodes[0:num_of_nodes], mnId)

            #Test disk cache 2
            print("Stopping node 0 - Masternode...")
            stop_node(self.nodes[0], 0)
            print("Starting node 0 as Masternode...")
            self.nodes.append(
                start_node(0, self.options.tmpdir, [
                    "-debug=masternode", "-masternode", "-txindex=1",
                    "-reindex",
                    "-masternodeprivkey=91sY9h4AQ62bAhNk1aJ7uJeSnQzSFtz7QmW5imrKmiACm7QJLXe"
                ]))
            connect_nodes_bi(self.nodes, 0, 1)
            connect_nodes_bi(self.nodes, 0, 2)

            wait_for_it(20, 10, "ENABLED", self.nodes[0:num_of_nodes], mnId)

        # tests = ['cache', 'sync', 'ping', 'restart', 'spent']
        if 'sync' in tests:
            print("=== Test MN list sync ===")
            print("Test new node sync")
            print("Starting node 3...")
            self.nodes.append(
                start_node(3, self.options.tmpdir, ["-debug=masternode"]))
            connect_nodes_bi(self.nodes, 3, 0)
            connect_nodes_bi(self.nodes, 3, 1)
            connect_nodes_bi(self.nodes, 3, 2)
            num_of_nodes = 4

            wait_for_it(20, 10, "ENABLED", self.nodes[0:num_of_nodes], mnId)

        # tests = ['cache', 'sync', 'ping', 'restart', 'spent']
        if 'ping' in tests:
            print("=== Test Ping ===")
            print("Stopping node 0 - Masternode...")
            stop_node(self.nodes[0], 0)

            wait_for_it(150, 50, "EXPIRED", self.nodes[1:num_of_nodes], mnId)

            print("Starting node 0 as Masternode again...")
            self.nodes.append(
                start_node(0, self.options.tmpdir, [
                    "-debug=masternode", "-masternode", "-txindex=1",
                    "-reindex",
                    "-masternodeprivkey=91sY9h4AQ62bAhNk1aJ7uJeSnQzSFtz7QmW5imrKmiACm7QJLXe"
                ]))
            connect_nodes_bi(self.nodes, 0, 1)
            connect_nodes_bi(self.nodes, 0, 2)
            if num_of_nodes > 3:
                connect_nodes_bi(self.nodes, 0, 3)

            wait_for_it(120, 20, "ENABLED", self.nodes[0:num_of_nodes], mnId)

        # tests = ['cache', 'sync', 'ping', 'restart', 'spent']
        if 'restart' in tests:
            print("=== Test 'restart required' ===")
            print("Stopping node 0 - Masternode...")
            stop_node(self.nodes[0], 0)

            wait_for_it(150, 50, "EXPIRED", self.nodes[1:num_of_nodes], mnId)
            wait_for_it(360, 30, "NEW_START_REQUIRED",
                        self.nodes[1:num_of_nodes], mnId)

            print("Starting node 0 as Masternode again...")
            self.nodes.append(
                start_node(0, self.options.tmpdir, [
                    "-debug=masternode", "-masternode", "-txindex=1",
                    "-reindex",
                    "-masternodeprivkey=91sY9h4AQ62bAhNk1aJ7uJeSnQzSFtz7QmW5imrKmiACm7QJLXe"
                ]))
            connect_nodes_bi(self.nodes, 0, 1)
            connect_nodes_bi(self.nodes, 0, 2)
            if num_of_nodes > 3:
                connect_nodes_bi(self.nodes, 0, 3)

            print("Enabling node 0 as MN again (start-alias from node 2)...")
            res = self.nodes[2].masternode("start-alias", "mn1")
            print(res)
            assert_equal(res["alias"], "mn1")
            assert_equal(res["result"], "successful")

            # wait_for_it(30, 10, "PRE_ENABLED", self.nodes[0:num_of_nodes], mnId, 6)
            wait_for_it(120, 20, "ENABLED", self.nodes[0:num_of_nodes], mnId,
                        3)

        # tests = ['cache', 'sync', 'ping', 'restart', 'spent']
        if 'spent' in tests:
            print("=== Test MN Spent ===")

            assert_equal(self.nodes[2].getbalance(), 1000)
            usp = self.nodes[2].listlockunspent()
            print("{0}-{1}".format(usp[0]['txid'], usp[0]['vout']))
            assert_equal(usp[0]['txid'], collateraltxid)
            assert_equal(Decimal(usp[0]['vout']), Decimal(collateralvin))

            print("Unlocking locked output...")
            locked = [{"txid": usp[0]['txid'], "vout": usp[0]['vout']}]
            assert_equal(self.nodes[2].lockunspent(True, locked), True)

            print("Sending 100 coins from node 2 to node 1...")
            newaddr = self.nodes[1].getnewaddress()
            newtxid = self.nodes[2].sendtoaddress(newaddr, 100, "", "", False)
            self.sync_all()
            self.nodes[1].generate(1)
            self.sync_all()

            newbal = self.nodes[2].getbalance()
            print(newbal)
            assert_greater_than(Decimal("1000"), Decimal(newbal))

            print(self.nodes[0].masternode("status")["status"])
            wait_for_it(10, 10, "OUTPOINT_SPENT", self.nodes[0:num_of_nodes],
                        mnId, 3)

            for _ in range(10):
                result = self.nodes[0].masternode("status")["status"]
                if result != "Not capable masternode: Masternode not in masternode list":
                    print(result)
                    print('Waiting 20 seconds...')
                    time.sleep(20)
                else:
                    break

            print(self.nodes[0].masternode("status")["status"])
            assert_equal(
                self.nodes[0].masternode("status")["status"],
                "Not capable masternode: Masternode not in masternode list")

        print("All set...")
Ejemplo n.º 21
0
    def run_test(self):
        print "Mining blocks..."

        self.nodes[0].generate(4)

        walletinfo = self.nodes[0].getwalletinfo()
        assert_equal(walletinfo['immature_balance'], self._reward * 4)
        assert_equal(walletinfo['balance'], 0)

        self.sync_all()
        self.nodes[1].generate(101)
        self.sync_all()

        assert_equal(self.nodes[0].getbalance(), self._reward * 4)
        assert_equal(self.nodes[1].getbalance(), self._reward)
        assert_equal(self.nodes[2].getbalance(), 0)
        assert_equal(self.nodes[3].getbalance(), 0)

        check_value_pool(self.nodes[0], 'sprout', 0)
        check_value_pool(self.nodes[1], 'sprout', 0)
        check_value_pool(self.nodes[2], 'sprout', 0)
        check_value_pool(self.nodes[3], 'sprout', 0)

        errorString = ""
        # # Send will fail because we are enforcing the consensus rule that
        # # coinbase utxos can only be sent to a zaddr.
        # try:
        #     self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 1)
        # except JSONRPCException,e:
        #     errorString = e.error['message']
        # assert_equal("Coinbase funds can only be sent to a zaddr" in errorString, True)

        # Prepare to send taddr->zaddr
        mytaddr = self.nodes[0].getnewaddress()
        myzaddr = self.nodes[0].z_getnewaddress()

        # Node 3 will test that watch only address utxos are not selected
        self.nodes[3].importaddress(mytaddr)
        recipients = [{"address": myzaddr, "amount": Decimal('1')}]
        myopid = self.nodes[3].z_sendmany(mytaddr, recipients)
        errorString = ""
        status = None
        opids = [myopid]
        timeout = 10
        for x in xrange(1, timeout):
            results = self.nodes[3].z_getoperationresult(opids)
            if len(results) == 0:
                time.sleep(1)
            else:
                status = results[0]["status"]
                errorString = results[0]["error"]["message"]
                break
        assert_equal("failed", status)
        assert_equal("no UTXOs found for taddr from address" in errorString,
                     True)
        stop_node(self.nodes[3], 3)
        self.nodes.pop()

        # This send will fail because our wallet does not allow any change when protecting a coinbase utxo,
        # as it's currently not possible to specify a change address in z_sendmany.
        recipients = []
        recipients.append({"address": myzaddr, "amount": Decimal('1.23456')})
        errorString = ""
        myopid = self.nodes[0].z_sendmany(mytaddr, recipients)
        opids = []
        opids.append(myopid)
        timeout = 10
        status = None
        for x in xrange(1, timeout):
            results = self.nodes[0].z_getoperationresult(opids)
            if len(results) == 0:
                time.sleep(1)
            else:
                status = results[0]["status"]
                errorString = results[0]["error"]["message"]

                # Test that the returned status object contains a params field with the operation's input parameters
                assert_equal(results[0]["method"], "z_sendmany")
                params = results[0]["params"]
                assert_equal(params["fee"], self._fee)  # default
                assert_equal(params["minconf"], Decimal('1'))  # default
                assert_equal(params["fromaddress"], mytaddr)
                assert_equal(params["amounts"][0]["address"], myzaddr)
                assert_equal(params["amounts"][0]["amount"],
                             Decimal('1.23456'))
                break
        assert_equal("failed", status)
        assert_equal("wallet does not allow any change" in errorString, True)

        # This send will succeed.  We send two coinbase utxos totalling 20.0 less a fee of 0.10000, with no change.
        shieldvalue = self._reward * 2 - self._fee
        recipients = []
        recipients.append({"address": myzaddr, "amount": shieldvalue})
        myopid = self.nodes[0].z_sendmany(mytaddr, recipients)
        mytxid = wait_and_assert_operationid_status(self.nodes[0], myopid)
        self.sync_all()
        self.nodes[1].generate(1)
        self.sync_all()

        # Verify that debug=zrpcunsafe logs params, and that full txid is associated with opid
        logpath = self.options.tmpdir + "/node0/regtest/debug.log"
        logcounter = 0
        with open(logpath, "r") as myfile:
            logdata = myfile.readlines()
        for logline in logdata:
            if myopid + ": z_sendmany initialized" in logline and mytaddr in logline and myzaddr in logline:
                assert_equal(logcounter, 0)  # verify order of log messages
                logcounter = logcounter + 1
            if myopid + ": z_sendmany finished" in logline and mytxid in logline:
                assert_equal(logcounter, 1)
                logcounter = logcounter + 1
        assert_equal(logcounter, 2)

        # check balances (the z_sendmany consumes 3 coinbase utxos)
        resp = self.nodes[0].z_gettotalbalance()
        assert_equal(Decimal(resp["transparent"]), self._reward * 2)
        assert_equal(Decimal(resp["private"]), self._reward * 2 - self._fee)
        assert_equal(Decimal(resp["total"]), self._reward * 4 - self._fee)

        # The Sprout value pool should reflect the send
        sproutvalue = shieldvalue
        check_value_pool(self.nodes[0], 'sprout', sproutvalue)

        # A custom fee of 0 is okay.  Here the node will send the note value back to itself.
        recipients = []
        recipients.append({
            "address": myzaddr,
            "amount": self._reward * 2 - self._fee
        })
        myopid = self.nodes[0].z_sendmany(myzaddr, recipients, 1,
                                          Decimal('0.0'))
        mytxid = wait_and_assert_operationid_status(self.nodes[0], myopid)
        self.sync_all()
        self.nodes[1].generate(1)
        self.sync_all()
        resp = self.nodes[0].z_gettotalbalance()
        assert_equal(Decimal(resp["transparent"]), self._reward * 2)
        assert_equal(Decimal(resp["private"]), self._reward * 2 - self._fee)
        assert_equal(Decimal(resp["total"]), self._reward * 4 - self._fee)

        # The Sprout value pool should be unchanged
        check_value_pool(self.nodes[0], 'sprout', sproutvalue)

        # convert note to transparent funds
        unshieldvalue = self._reward
        recipients = []
        recipients.append({"address": mytaddr, "amount": unshieldvalue})
        myopid = self.nodes[0].z_sendmany(myzaddr, recipients)
        mytxid = wait_and_assert_operationid_status(self.nodes[0], myopid)
        assert (mytxid is not None)
        self.sync_all()

        # check that priority of the tx sending from a zaddr is not 0
        mempool = self.nodes[0].getrawmempool(True)
        assert (Decimal(mempool[mytxid]['startingpriority']) >=
                Decimal('1000000000000'))

        self.nodes[1].generate(1)
        self.sync_all()

        # check balances
        sproutvalue -= unshieldvalue + self._fee
        resp = self.nodes[0].z_gettotalbalance()
        assert_equal(Decimal(resp["transparent"]), self._reward * 3)
        assert_equal(Decimal(resp["private"]), self._reward - self._fee * 2)
        assert_equal(Decimal(resp["total"]), self._reward * 4 - self._fee * 2)
        check_value_pool(self.nodes[0], 'sprout', sproutvalue)

        # z_sendmany will return an error if there is transparent change output considered dust.
        # UTXO selection in z_sendmany sorts in ascending order, so smallest utxos are consumed first.
        # At this point in time, unspent notes all have a value of self._reward and standard z_sendmany fee is self._fee.
        recipients = []
        amount = self._reward - self._fee - self._atoshi  # this leaves change at 1 zatoshi less than dust threshold
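        # Sanity check: assuming a single transparent input worth self._reward
        # (as the expected error message below implies), the change would be
        #   self._reward - amount - self._fee
        # = self._reward - (self._reward - self._fee - self._atoshi) - self._fee
        # = self._atoshi, i.e. 1 zatoshi, just below the dust threshold.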
        recipients.append({
            "address": self.nodes[0].getnewaddress(),
            "amount": amount
        })
        myopid = self.nodes[0].z_sendmany(mytaddr, recipients)
        wait_and_assert_operationid_status(
            self.nodes[0], myopid, "failed",
            "Insufficient transparent funds, have " + str(self._reward00) +
            ", need 0.00053 more to avoid creating invalid change output 0.00001 (dust threshold is 0.00054)"
        )

        # Send will fail because send amount is too big, even when including coinbase utxos
        errorString = ""
        try:
            self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 99999)
        except JSONRPCException, e:
            errorString = e.error['message']
Ejemplo n.º 22
0
    def run_test(self):
        self.log.info("Test setban and listbanned RPCs")

        self.log.info("setban: successfully ban single IP address")
        # node1 should have 2 connections to node0 at this point
        assert_equal(len(self.nodes[1].getpeerinfo()), 2)
        self.nodes[1].setban("127.0.0.1", "add")
        wait_until(lambda: len(self.nodes[1].getpeerinfo()) == 0)
        # all nodes must be disconnected at this point
        assert_equal(len(self.nodes[1].getpeerinfo()), 0)
        assert_equal(len(self.nodes[1].listbanned()), 1)

        self.log.info("clearbanned: successfully clear ban list")
        self.nodes[1].clearbanned()
        assert_equal(len(self.nodes[1].listbanned()), 0)
        self.nodes[1].setban("127.0.0.0/24", "add")

        self.log.info("setban: fail to ban an already banned subnet")
        assert_equal(len(self.nodes[1].listbanned()), 1)
        assert_raises_jsonrpc(-23, "IP/Subnet already banned",
                              self.nodes[1].setban, "127.0.0.1", "add")

        self.log.info("setban: fail to ban an invalid subnet")
        assert_raises_jsonrpc(-30, "Error: Invalid IP/Subnet",
                              self.nodes[1].setban, "127.0.0.1/42", "add")
        # still only one banned ip because 127.0.0.1 is within the range of
        # 127.0.0.0/24
        assert_equal(len(self.nodes[1].listbanned()), 1)

        self.log.info("setban remove: fail to unban a non-banned subnet")
        assert_raises_jsonrpc(-30, "Error: Unban failed", self.nodes[1].setban,
                              "127.0.0.1", "remove")
        assert_equal(len(self.nodes[1].listbanned()), 1)

        self.log.info("setban remove: successfully unban subnet")
        self.nodes[1].setban("127.0.0.0/24", "remove")
        assert_equal(len(self.nodes[1].listbanned()), 0)
        self.nodes[1].clearbanned()
        assert_equal(len(self.nodes[1].listbanned()), 0)

        self.log.info("setban: test persistence across node restart")
        self.nodes[1].setban("127.0.0.0/32", "add")
        self.nodes[1].setban("127.0.0.0/24", "add")
        # ban for 1 second
        self.nodes[1].setban("192.168.0.1", "add", 1)
        # ban for 1000 seconds
        self.nodes[1].setban("2001:4d48:ac57:400:cacf:e9ff:fe1d:9c63/19",
                             "add", 1000)
        listBeforeShutdown = self.nodes[1].listbanned()
        assert_equal("192.168.0.1/32", listBeforeShutdown[2]['address'])
        wait_until(lambda: len(self.nodes[1].listbanned()) == 3)

        stop_node(self.nodes[1], 1)

        self.nodes[1] = start_node(1, self.options.tmpdir)
        listAfterShutdown = self.nodes[1].listbanned()
        assert_equal("127.0.0.0/24", listAfterShutdown[0]['address'])
        assert_equal("127.0.0.0/32", listAfterShutdown[1]['address'])
        assert_equal("/19" in listAfterShutdown[2]['address'], True)

        # Clear ban lists
        self.nodes[1].clearbanned()
        connect_nodes_bi(self.nodes, 0, 1)

        self.log.info("Test disconnectnode RPCs")

        self.log.info(
            "disconnectnode: fail to disconnect when calling with address and nodeid"
        )
        address1 = self.nodes[0].getpeerinfo()[0]['addr']
        node1 = self.nodes[0].getpeerinfo()[0]['addr']
        assert_raises_jsonrpc(
            -32602,
            "Only one of address and nodeid should be provided.",
            self.nodes[0].disconnectnode,
            address=address1,
            nodeid=node1)

        self.log.info(
            "disconnectnode: fail to disconnect when calling with junk address"
        )
        assert_raises_jsonrpc(-29,
                              "Node not found in connected nodes",
                              self.nodes[0].disconnectnode,
                              address="221B Baker Street")

        self.log.info(
            "disconnectnode: successfully disconnect node by address")
        address1 = self.nodes[0].getpeerinfo()[0]['addr']
        self.nodes[0].disconnectnode(address=address1)
        wait_until(lambda: len(self.nodes[0].getpeerinfo()) == 1)
        assert not [
            node
            for node in self.nodes[0].getpeerinfo() if node['addr'] == address1
        ]

        self.log.info("disconnectnode: successfully reconnect node")
        # reconnect the node
        connect_nodes_bi(self.nodes, 0, 1)
        assert_equal(len(self.nodes[0].getpeerinfo()), 2)
        assert [
            node for node in self.nodes[0].getpeerinfo()
            if node['addr'] == address1
        ]

        self.log.info(
            "disconnectnode: successfully disconnect node by node id")
        id1 = self.nodes[0].getpeerinfo()[0]['id']
        self.nodes[0].disconnectnode(nodeid=id1)
        wait_until(lambda: len(self.nodes[0].getpeerinfo()) == 1)
        assert not [
            node for node in self.nodes[0].getpeerinfo() if node['id'] == id1
        ]
Ejemplo n.º 23
0
    def run_test(self):
        tests = ['cache', 'sync', 'ping', 'restart', 'spent', 'fee']

        print("=== Test MN basics ===")
        self.mining_enough(1, 2)

        print("=== Test MN activation ===")
        cold_nodes = {k: v for k, v in enumerate(private_keys_list)}
        mn_ids, mn_aliases, _ = self.start_mn(self.mining_node_num,
                                              self.hot_node_num, cold_nodes,
                                              self.total_number_of_nodes)
        mn_id = mn_ids[self.cold_node_num]
        mn_alias = mn_aliases[self.cold_node_num]

        # tests = ['cache', 'sync', 'ping', 'restart', 'spent', "fee"]
        if 'fee' in tests:
            self.storagefee_tests()

        #print("Test sync after crash")
        # 1. kill (not gracefully) node0 (masternode)
        # 2. start node0 again
        # 3. Check all nodes
        # self.wait_for_mn_state(120, 20, "ENABLED", self.nodes[0:self.total_number_of_nodes], mn_id)

        # tests = ['cache', 'sync', 'ping', 'restart', 'spent', "fee"]
        if 'cache' in tests:
            print("=== Test cache save/load ===")
            print("Stopping node 1...")
            stop_node(self.nodes[1], 1)
            print("Starting node 1...")
            self.nodes.append(
                start_node(1, self.options.tmpdir, ["-debug=masternode"]))
            connect_nodes_bi(self.nodes, 1, 0)
            connect_nodes_bi(self.nodes, 1, 2)
            self.sync_all()

            self.wait_for_mn_state(10, 10, "ENABLED",
                                   self.nodes[0:self.total_number_of_nodes],
                                   mn_id)

            #Test disk cache 2
            print("Stopping node 0 - Masternode...")
            stop_node(self.nodes[0], 0)
            print("Starting node 0 as Masternode...")
            self.nodes.append(
                start_node(0, self.options.tmpdir, [
                    "-debug=masternode", "-masternode", "-txindex=1",
                    "-reindex",
                    "-masternodeprivkey=91sY9h4AQ62bAhNk1aJ7uJeSnQzSFtz7QmW5imrKmiACm7QJLXe"
                ]))
            connect_nodes_bi(self.nodes, 0, 1)
            connect_nodes_bi(self.nodes, 0, 2)
            self.sync_all()

            self.wait_for_mn_state(20, 10, "ENABLED",
                                   self.nodes[0:self.total_number_of_nodes],
                                   mn_id)

        # tests = ['cache', 'sync', 'ping', 'restart', 'spent', "fee"]
        if 'sync' in tests:
            print("=== Test MN list sync ===")
            print("Test new node sync")
            print("Starting node 3...")
            self.nodes.append(
                start_node(3, self.options.tmpdir, ["-debug=masternode"]))
            connect_nodes_bi(self.nodes, 3, 0)
            connect_nodes_bi(self.nodes, 3, 1)
            connect_nodes_bi(self.nodes, 3, 2)
            self.total_number_of_nodes = 4
            self.sync_all()

            self.wait_for_mn_state(20, 10, "ENABLED",
                                   self.nodes[0:self.total_number_of_nodes],
                                   mn_id)

        # tests = ['cache', 'sync', 'ping', 'restart', 'spent', "fee"]
        if 'ping' in tests:
            print("=== Test Ping ===")
            print("Stopping node 0 - Masternode...")
            stop_node(self.nodes[0], 0)

            self.wait_for_mn_state(150, 50, "EXPIRED",
                                   self.nodes[1:self.total_number_of_nodes],
                                   mn_id)

            print("Starting node 0 as Masternode again...")
            self.nodes.append(
                start_node(0, self.options.tmpdir, [
                    "-debug=masternode", "-masternode", "-txindex=1",
                    "-reindex",
                    "-masternodeprivkey=91sY9h4AQ62bAhNk1aJ7uJeSnQzSFtz7QmW5imrKmiACm7QJLXe"
                ]))
            connect_nodes_bi(self.nodes, 0, 1)
            connect_nodes_bi(self.nodes, 0, 2)
            if self.total_number_of_nodes > 3:
                connect_nodes_bi(self.nodes, 0, 3)
            self.sync_all()

            self.wait_for_mn_state(120, 20, "ENABLED",
                                   self.nodes[0:self.total_number_of_nodes],
                                   mn_id)

        # tests = ['cache', 'sync', 'ping', 'restart', 'spent', "fee"]
        if 'restart' in tests:
            print("=== Test 'restart required' ===")
            print("Stopping node 0 - Masternode...")
            stop_node(self.nodes[0], 0)

            self.wait_for_mn_state(150, 50, "EXPIRED",
                                   self.nodes[1:self.total_number_of_nodes],
                                   mn_id)
            self.wait_for_mn_state(360, 30, "NEW_START_REQUIRED",
                                   self.nodes[1:self.total_number_of_nodes],
                                   mn_id)
            # In regtest, a masternode that has been in NEW_START_REQUIRED for longer than 10 minutes is no longer shown in the list.
            self.wait_for_mn_state(240, 30, "",
                                   self.nodes[1:self.total_number_of_nodes],
                                   mn_id)

            print("Starting node 0 as Masternode again...")
            self.nodes.append(
                start_node(0, self.options.tmpdir, [
                    "-debug=masternode", "-masternode", "-txindex=1",
                    "-reindex",
                    "-masternodeprivkey=91sY9h4AQ62bAhNk1aJ7uJeSnQzSFtz7QmW5imrKmiACm7QJLXe"
                ]))
            connect_nodes_bi(self.nodes, 0, 1)
            connect_nodes_bi(self.nodes, 0, 2)
            if self.total_number_of_nodes > 3:
                connect_nodes_bi(self.nodes, 0, 3)
            self.sync_all()

            print("Enabling node 0 as MN again (start-alias from node 2)...")
            res = self.nodes[2].masternode("start-alias", mn_alias)
            print(res)
            assert_equal(res["alias"], mn_alias)
            assert_equal(res["result"], "successful")

            # self.wait_for_mn_state(30, 10, "PRE_ENABLED", self.nodes[0:self.total_number_of_nodes], mn_id, 6)
            self.wait_for_mn_state(120, 20, "ENABLED",
                                   self.nodes[0:self.total_number_of_nodes],
                                   mn_id, 5)

        # tests = ['cache', 'sync', 'ping', 'restart', 'spent', "fee"]
        if 'spent' in tests:
            print("=== Test MN Spent ===")

            assert_equal(self.nodes[2].getbalance(), self.collateral)
            usp = self.nodes[2].listlockunspent()
            print("{0}-{1}".format(usp[0]['txid'], usp[0]['vout']))
            collateral_outpoint = mn_id.split('-')
            assert_equal(usp[0]['txid'], collateral_outpoint[0])
            assert_equal(Decimal(usp[0]['vout']),
                         Decimal(collateral_outpoint[1]))

            print("Unlocking locked output...")
            locked = [{"txid": usp[0]['txid'], "vout": usp[0]['vout']}]
            assert_equal(self.nodes[2].lockunspent(True, locked), True)

            print("Sending 100 coins from node 2 to node 1...")
            newaddr = self.nodes[1].getnewaddress()
            self.nodes[2].sendtoaddress(newaddr, 100, "", "", False)
            self.sync_all()
            self.nodes[1].generate(1)
            self.sync_all()

            balance = self.nodes[2].getbalance()
            print(balance)
            assert_greater_than(Decimal(str(self.collateral)),
                                Decimal(balance))

            print(self.nodes[0].masternode("status")["status"])
            # self.wait_for_mn_state(10, 10, "OUTPOINT_SPENT", self.nodes[0:self.total_number_of_nodes], mn_id, 3)

            for _ in range(10):
                result = self.nodes[0].masternode("status")["status"]
                if result != "Not capable masternode: Masternode not in masternode list":
                    print(result)
                    print('Waiting 20 seconds...')
                    time.sleep(20)
                else:
                    break

            print(self.nodes[0].masternode("status")["status"])
            assert_equal(
                self.nodes[0].masternode("status")["status"],
                "Not capable masternode: Masternode not in masternode list")

        print("All set...")
Ejemplo n.º 24
0
    def run_test(self):
        ###########################
        # setban/listbanned tests #
        ###########################
        assert_equal(len(self.nodes[2].getpeerinfo()),
                     4)  #we should have 4 nodes at this point
        self.nodes[2].setban("127.0.0.1", "add")
        time.sleep(3)  #wait till the nodes are disconnected
        assert_equal(len(self.nodes[2].getpeerinfo()),
                     0)  #all nodes must be disconnected at this point
        assert_equal(len(self.nodes[2].listbanned()), 1)
        self.nodes[2].clearbanned()
        assert_equal(len(self.nodes[2].listbanned()), 0)
        self.nodes[2].setban("127.0.0.0/24", "add")
        assert_equal(len(self.nodes[2].listbanned()), 1)
        try:
            self.nodes[2].setban(
                "127.0.0.1", "add"
            )  #throws exception because 127.0.0.1 is within range 127.0.0.0/24
        except:
            pass
        assert_equal(
            len(self.nodes[2].listbanned()), 1
        )  #still only one banned ip because 127.0.0.1 is within the range of 127.0.0.0/24
        try:
            self.nodes[2].setban("127.0.0.1", "remove")
        except:
            pass
        assert_equal(len(self.nodes[2].listbanned()), 1)
        self.nodes[2].setban("127.0.0.0/24", "remove")
        assert_equal(len(self.nodes[2].listbanned()), 0)
        self.nodes[2].clearbanned()
        assert_equal(len(self.nodes[2].listbanned()), 0)

        ##test persisted banlist
        self.nodes[2].setban("127.0.0.0/32", "add")
        self.nodes[2].setban("127.0.0.0/24", "add")
        self.nodes[2].setban("192.168.0.1", "add", 1)  #ban for 1 seconds
        self.nodes[2].setban("2001:4d48:ac57:400:cacf:e9ff:fe1d:9c63/19",
                             "add", 1000)  #ban for 1000 seconds
        listBeforeShutdown = self.nodes[2].listbanned()
        assert_equal("192.168.0.1/32",
                     listBeforeShutdown[2]['address'])  #must be here
        time.sleep(2)  #make 100% sure the 1-second ban on 192.168.0.1 has expired

        #stop node
        stop_node(self.nodes[2], 2)

        self.nodes[2] = start_node(2, self.options.tmpdir)
        listAfterShutdown = self.nodes[2].listbanned()
        assert_equal("127.0.0.0/24", listAfterShutdown[0]['address'])
        assert_equal("127.0.0.0/32", listAfterShutdown[1]['address'])
        assert_equal("/19" in listAfterShutdown[2]['address'], True)

        ###########################
        # RPC disconnectnode test #
        ###########################
        url = urllib.parse.urlparse(self.nodes[1].url)
        self.nodes[0].disconnectnode(url.hostname + ":" + str(p2p_port(1)))
        time.sleep(2)  #disconnecting a node needs a little bit of time
        for node in self.nodes[0].getpeerinfo():
            assert (node['addr'] != url.hostname + ":" + str(p2p_port(1)))

        connect_nodes_bi(self.nodes, 0, 1)  #reconnect the node
        found = False
        for node in self.nodes[0].getpeerinfo():
            if node['addr'] == url.hostname + ":" + str(p2p_port(1)):
                found = True
        assert (found)
Ejemplo n.º 25
0
class AsyncProofVerifierTest(BitcoinTestFramework):
    def setup_chain(self):
        print("Initializing test directory " + self.options.tmpdir)
        initialize_chain_clean(self.options.tmpdir, NUMB_OF_NODES)

    def setup_network(self, split=False):
        self.nodes = start_nodes(
            NUMB_OF_NODES,
            self.options.tmpdir,
            extra_args=[
                [
                    "-forcelocalban", "-sccoinsmaturity=0", '-logtimemicros=1',
                    '-debug=sc', '-debug=py', '-debug=mempool', '-debug=net',
                    '-debug=bench', '-debug=cert'
                ],
                [
                    "-forcelocalban", "-sccoinsmaturity=0", '-logtimemicros=1',
                    '-debug=sc', '-debug=py', '-debug=mempool', '-debug=net',
                    '-debug=bench', '-debug=cert'
                ],
                # Skip proof verification for the last node
                [
                    "-forcelocalban", "-skipscproof", "-sccoinsmaturity=0",
                    '-logtimemicros=1', '-debug=sc', '-debug=py',
                    '-debug=mempool', '-debug=net', '-debug=bench',
                    '-debug=cert'
                ]
            ])

        connect_nodes_bi(self.nodes, 0, 1)
        connect_nodes_bi(self.nodes, 1, 2)
        self.is_network_split = split
        self.sync_all()

    # Retrieves the first unspent UTXO from a node excluding the ones spent as input
    # of another transaction.
    #
    # This is particularly useful when we cannot rely on the nodes being in sync
    # because we cannot call the sync_all() function.
    def get_first_unspent_utxo_excluding(self, node_index,
                                         excluding_transaction_ids):

        recently_spent = []

        # Save all the inputs spent by the last transaction
        for txid in excluding_transaction_ids:
            last_tx_vin = self.nodes[node_index].getrawtransaction(txid,
                                                                   1)['vin']

            for input_entry in last_tx_vin:
                recently_spent.append(
                    (input_entry['txid'], input_entry['vout']))

        # Take the first unspent UTXO from the requested node
        list_unspent = self.nodes[node_index].listunspent()
        counter = 0
        utxo = list_unspent[counter]

        # Loop until we find an unspent UTXO not included in the input list of the excluding transaction
        while (utxo['txid'], utxo['vout']) in recently_spent:
            counter = counter + 1
            utxo = list_unspent[counter]

        return utxo
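    # Example usage (hypothetical variable names): after node 0 sends a
    # transaction that the other nodes may not have seen yet, pick a UTXO it
    # did not already spend:
    #   utxo = self.get_first_unspent_utxo_excluding(0, [recent_txid])
    #   inputs = [{"txid": utxo['txid'], "vout": utxo['vout']}]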

    def run_test(self):
        '''
        Verify that the async proof verifier for sidechain proofs works as expected.
        '''

        # Prepare some coins
        self.nodes[0].generate(MINIMAL_SC_HEIGHT / 2 + 1)
        self.sync_all()

        # Generate some coins on node 2
        self.nodes[2].generate(MINIMAL_SC_HEIGHT / 2 + 1)
        self.sync_all()

        sc_address = "0000000000000000000000000000000000000000000000000000000000000abc"
        sc_epoch_len = EPOCH_LENGTH
        sc_cr_amount = Decimal('12.00000000')

        cert_mc_test = CertTestUtils(self.options.tmpdir, self.options.srcdir)
        csw_mc_test = CSWTestUtils(self.options.tmpdir, self.options.srcdir)

        # generate wCertVk and constant
        vk = cert_mc_test.generate_params("sc")
        csw_vk = csw_mc_test.generate_params("sc")
        constant = generate_random_field_element_hex()

        sc_cr = []
        sc_cr.append({
            "version": 0,
            "epoch_length": sc_epoch_len,
            "amount": sc_cr_amount,
            "address": sc_address,
            "wCertVk": vk,
            "wCeasedVk": csw_vk,
            "constant": constant
        })

        rawtx = self.nodes[0].createrawtransaction([], {}, [], sc_cr)
        funded_tx = self.nodes[0].fundrawtransaction(rawtx)
        sig_raw_tx = self.nodes[0].signrawtransaction(funded_tx['hex'])
        final_raw_tx = self.nodes[0].sendrawtransaction(sig_raw_tx['hex'])
        self.sync_all()

        decoded_tx = self.nodes[1].getrawtransaction(final_raw_tx, 1)
        scid = decoded_tx['vsc_ccout'][0]['scid']
        scid_swapped = swap_bytes(scid)
        mark_logs("created SC id: {}".format(scid), self.nodes, DEBUG_MODE)

        # Advance one epoch
        mark_logs("\nLet 1 epoch pass by...", self.nodes, DEBUG_MODE)

        cert1, epoch_number = advance_epoch(cert_mc_test, self.nodes[0],
                                            self.sync_all, scid, "sc",
                                            constant, sc_epoch_len)

        mark_logs(
            "\n==> certificate for SC epoch {} {}".format(epoch_number, cert1),
            self.nodes, DEBUG_MODE)

        # Check that the certificate is in the mempool
        mark_logs("Check certificate is in mempool...", self.nodes, DEBUG_MODE)
        assert_true(cert1 in self.nodes[0].getrawmempool())
        assert_true(cert1 in self.nodes[1].getrawmempool())

        # Generate blocks to reach the next epoch
        mark_logs("\nLet another epoch pass by...", self.nodes, DEBUG_MODE)
        self.nodes[0].generate(sc_epoch_len)
        self.sync_all()

        # Check that the certificate is not in the mempool anymore
        mark_logs("Check certificate is not in mempool anymore...", self.nodes,
                  DEBUG_MODE)
        assert_false(cert1 in self.nodes[0].getrawmempool())
        assert_false(cert1 in self.nodes[1].getrawmempool())

        epoch_number, epoch_cum_tree_hash = get_epoch_data(
            scid, self.nodes[0], sc_epoch_len)
        cert_quality = 1
        cert_fee = Decimal("0.00001")
        ft_fee = 0
        mbtr_fee = 0

        # Manually create a certificate with invalid proof to test the ban mechanism
        # mark_logs("\nTest the node ban mechanism by sending a certificate with invalid proof", self.nodes, DEBUG_MODE)

        # Create an invalid proof by providing the wrong epoch_number
        proof = cert_mc_test.create_test_proof("sc", scid_swapped,
                                               epoch_number + 1, cert_quality,
                                               mbtr_fee, ft_fee,
                                               epoch_cum_tree_hash, constant,
                                               [], [])

        try:
            # The send_certificate call should succeed since proof verification is disabled on node 2
            invalid_cert = self.nodes[2].sc_send_certificate(
                scid, epoch_number, cert_quality, epoch_cum_tree_hash, proof,
                [], ft_fee, mbtr_fee, cert_fee)
        except JSONRPCException, e:
            error_string = e.error['message']
            print "Send certificate failed with reason {}".format(error_string)
            assert (False)

        mark_logs(
            "\n==> certificate for SC epoch {} {}".format(
                epoch_number, invalid_cert), self.nodes, DEBUG_MODE)

        # Check that the certificate is in node 2 mempool
        assert_true(invalid_cert in self.nodes[2].getrawmempool())

        # Wait until the other nodes process the certificate relayed by node 2
        mark_logs(
            "\nWait for the certificate to be relayed by node 2 and processd by node 1",
            self.nodes, DEBUG_MODE)
        time.sleep(MEMPOOL_LONG_WAIT_TIME)

        # Check that the other nodes didn't accept the certificate containing the wrong proof
        mark_logs(
            "\nCheck that node 1 and node 2 didn't receive/accept the invalid certificate",
            self.nodes, DEBUG_MODE)
        assert_false(invalid_cert in self.nodes[0].getrawmempool())
        assert_false(invalid_cert in self.nodes[1].getrawmempool())

        # Check that node 1 (the only node connected to node 2) has banned node 2
        mark_logs("\nCheck that node 1 has banned node 2", self.nodes,
                  DEBUG_MODE)
        assert_equal(len(self.nodes[1].listbanned()), 1)
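        # Note: node 2 runs with -skipscproof, so it accepts and relays the
        # certificate without verifying the proof; node 1 verifies it
        # asynchronously, rejects it and (having -forcelocalban set) bans its
        # only peer, node 2, so the certificate never reaches node 0's mempool.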

        # Remove node 2 from banned list
        self.nodes[0].clearbanned()
        self.nodes[1].clearbanned()

        mark_logs("\nStop node 2", self.nodes, DEBUG_MODE)
        stop_node(self.nodes[2], 2)
        self.nodes.pop()

        self.sync_all()

        # Create the valid proof
        proof = cert_mc_test.create_test_proof("sc", scid_swapped,
                                               epoch_number, cert_quality,
                                               mbtr_fee, ft_fee,
                                               epoch_cum_tree_hash, constant,
                                               [], [])

        try:
            cert2 = self.nodes[0].sc_send_certificate(scid, epoch_number,
                                                      cert_quality,
                                                      epoch_cum_tree_hash,
                                                      proof, [], ft_fee,
                                                      mbtr_fee, cert_fee)
        except JSONRPCException, e:
            error_string = e.error['message']
            print "Send certificate failed with reason {}".format(error_string)
            assert (False)
Ejemplo n.º 26
0
class WalletProtectCoinbaseTest(BitcoinTestFramework):
    def setup_chain(self):
        print("Initializing test directory " + self.options.tmpdir)
        initialize_chain_clean(self.options.tmpdir, 4)

    # Start nodes with -regtestprotectcoinbase to set fCoinbaseMustBeProtected to true.
    def setup_network(self, split=False):
        self.nodes = start_nodes(
            4,
            self.options.tmpdir,
            extra_args=[['-regtestprotectcoinbase', '-debug=zrpcunsafe']] * 4)
        connect_nodes_bi(self.nodes, 0, 1)
        connect_nodes_bi(self.nodes, 1, 2)
        connect_nodes_bi(self.nodes, 0, 2)
        connect_nodes_bi(self.nodes, 0, 3)
        self.is_network_split = False
        self.sync_all()

    # Returns txid if operation was a success or None
    def wait_and_assert_operationid_status(self,
                                           myopid,
                                           in_status='success',
                                           in_errormsg=None):
        print('waiting for async operation {}'.format(myopid))
        opids = []
        opids.append(myopid)
        timeout = 300
        status = None
        errormsg = None
        txid = None
        for x in xrange(1, timeout):
            results = self.nodes[0].z_getoperationresult(opids)
            if len(results) == 0:
                time.sleep(1)
            else:
                status = results[0]["status"]
                if status == "failed":
                    errormsg = results[0]['error']['message']
                elif status == "success":
                    txid = results[0]['result']['txid']
                break
        print('...returned status: {}'.format(status))
        assert_equal(in_status, status)
        if errormsg is not None:
            assert (in_errormsg is not None)
            assert_equal(in_errormsg in errormsg, True)
            print('...returned error: {}'.format(errormsg))
        return txid

    def run_test(self):
        print "Mining blocks..."

        self.nodes[0].generate(4)

        walletinfo = self.nodes[0].getwalletinfo()
        assert_equal(walletinfo['immature_balance'], 40)
        assert_equal(walletinfo['balance'], 0)

        self.sync_all()
        self.nodes[1].generate(101)
        self.sync_all()

        assert_equal(self.nodes[0].getbalance(), 40)
        assert_equal(self.nodes[1].getbalance(), 10)
        assert_equal(self.nodes[2].getbalance(), 0)
        assert_equal(self.nodes[3].getbalance(), 0)

        check_value_pool(self.nodes[0], 'sprout', 0)
        check_value_pool(self.nodes[1], 'sprout', 0)
        check_value_pool(self.nodes[2], 'sprout', 0)
        check_value_pool(self.nodes[3], 'sprout', 0)

        # Send will fail because we are enforcing the consensus rule that
        # coinbase utxos can only be sent to a zaddr.
        errorString = ""
        try:
            self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 1)
        except JSONRPCException, e:
            errorString = e.error['message']
        assert_equal(
            "Coinbase funds can only be sent to a zaddr" in errorString, True)

        # Prepare to send taddr->zaddr
        mytaddr = self.nodes[0].getnewaddress()
        myzaddr = self.nodes[0].z_getnewaddress()

        # Node 3 will test that watch only address utxos are not selected
        self.nodes[3].importaddress(mytaddr)
        recipients = [{"address": myzaddr, "amount": Decimal('1')}]
        myopid = self.nodes[3].z_sendmany(mytaddr, recipients)
        errorString = ""
        status = None
        opids = [myopid]
        timeout = 10
        for x in xrange(1, timeout):
            results = self.nodes[3].z_getoperationresult(opids)
            if len(results) == 0:
                time.sleep(1)
            else:
                status = results[0]["status"]
                errorString = results[0]["error"]["message"]
                break
        assert_equal("failed", status)
        assert_equal("no UTXOs found for taddr from address" in errorString,
                     True)
        stop_node(self.nodes[3], 3)
        self.nodes.pop()

        # This send will fail because our wallet does not allow any change when protecting a coinbase utxo,
        # as it's currently not possible to specify a change address in z_sendmany.
        recipients = []
        recipients.append({
            "address": myzaddr,
            "amount": Decimal('1.23456789')
        })
        errorString = ""
        myopid = self.nodes[0].z_sendmany(mytaddr, recipients)
        opids = []
        opids.append(myopid)
        timeout = 10
        status = None
        for x in xrange(1, timeout):
            results = self.nodes[0].z_getoperationresult(opids)
            if len(results) == 0:
                time.sleep(1)
            else:
                status = results[0]["status"]
                errorString = results[0]["error"]["message"]

                # Test that the returned status object contains a params field with the operation's input parameters
                assert_equal(results[0]["method"], "z_sendmany")
                params = results[0]["params"]
                assert_equal(params["fee"], Decimal('0.0001'))  # default
                assert_equal(params["minconf"], Decimal('1'))  # default
                assert_equal(params["fromaddress"], mytaddr)
                assert_equal(params["amounts"][0]["address"], myzaddr)
                assert_equal(params["amounts"][0]["amount"],
                             Decimal('1.23456789'))
                break
        assert_equal("failed", status)
        assert_equal("wallet does not allow any change" in errorString, True)

        # This send will succeed.  We send two coinbase utxos totalling 20.0 less a fee of 0.00010000, with no change.
        shieldvalue = Decimal('20.0') - Decimal('0.0001')
        recipients = []
        recipients.append({"address": myzaddr, "amount": shieldvalue})
        myopid = self.nodes[0].z_sendmany(mytaddr, recipients)
        mytxid = wait_and_assert_operationid_status(self.nodes[0], myopid)
        self.sync_all()
        self.nodes[1].generate(1)
        self.sync_all()

        # Verify that debug=zrpcunsafe logs params, and that full txid is associated with opid
        logpath = self.options.tmpdir + "/node0/regtest/debug.log"
        logcounter = 0
        with open(logpath, "r") as myfile:
            logdata = myfile.readlines()
        for logline in logdata:
            if myopid + ": z_sendmany initialized" in logline and mytaddr in logline and myzaddr in logline:
                assert_equal(logcounter, 0)  # verify order of log messages
                logcounter = logcounter + 1
            if myopid + ": z_sendmany finished" in logline and mytxid in logline:
                assert_equal(logcounter, 1)
                logcounter = logcounter + 1
        assert_equal(logcounter, 2)

        # check balances (the z_sendmany consumes 3 coinbase utxos)
        resp = self.nodes[0].z_gettotalbalance()
        assert_equal(Decimal(resp["transparent"]), Decimal('20.0'))
        assert_equal(Decimal(resp["private"]), Decimal('19.9999'))
        assert_equal(Decimal(resp["total"]), Decimal('39.9999'))

        # The Sprout value pool should reflect the send
        sproutvalue = shieldvalue
        check_value_pool(self.nodes[0], 'sprout', sproutvalue)

        # A custom fee of 0 is okay.  Here the node will send the note value back to itself.
        recipients = []
        recipients.append({"address": myzaddr, "amount": Decimal('19.9999')})
        myopid = self.nodes[0].z_sendmany(myzaddr, recipients, 1,
                                          Decimal('0.0'))
        mytxid = wait_and_assert_operationid_status(self.nodes[0], myopid)
        self.sync_all()
        self.nodes[1].generate(1)
        self.sync_all()
        resp = self.nodes[0].z_gettotalbalance()
        assert_equal(Decimal(resp["transparent"]), Decimal('20.0'))
        assert_equal(Decimal(resp["private"]), Decimal('19.9999'))
        assert_equal(Decimal(resp["total"]), Decimal('39.9999'))

        # The Sprout value pool should be unchanged
        check_value_pool(self.nodes[0], 'sprout', sproutvalue)

        # convert note to transparent funds
        unshieldvalue = Decimal('10.0')
        recipients = []
        recipients.append({"address": mytaddr, "amount": unshieldvalue})
        myopid = self.nodes[0].z_sendmany(myzaddr, recipients)
        mytxid = wait_and_assert_operationid_status(self.nodes[0], myopid)
        assert (mytxid is not None)
        self.sync_all()

        # check that priority of the tx sending from a zaddr is not 0
        mempool = self.nodes[0].getrawmempool(True)
        assert (Decimal(mempool[mytxid]['startingpriority']) >=
                Decimal('1000000000000'))

        self.nodes[1].generate(1)
        self.sync_all()

        # check balances
        sproutvalue -= unshieldvalue + Decimal('0.0001')
        resp = self.nodes[0].z_gettotalbalance()
        assert_equal(Decimal(resp["transparent"]), Decimal('30.0'))
        assert_equal(Decimal(resp["private"]), Decimal('9.9998'))
        assert_equal(Decimal(resp["total"]), Decimal('39.9998'))
        check_value_pool(self.nodes[0], 'sprout', sproutvalue)

        # z_sendmany will return an error if there is transparent change output considered dust.
        # UTXO selection in z_sendmany sorts in ascending order, so smallest utxos are consumed first.
        # At this point in time, unspent notes all have a value of 10.0 and standard z_sendmany fee is 0.0001.
        recipients = []
        amount = Decimal('10.0') - Decimal('0.00010000') - Decimal(
            '0.00000001'
        )  # this leaves change at 1 zatoshi less than dust threshold
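        # Sanity check: with a single 10.0 input the change would be
        #   10.0 - 9.99989999 - 0.0001 = 0.00000001 (1 zatoshi),
        # which is below the 0.00000054 dust threshold quoted in the expected
        # error message below.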
        recipients.append({
            "address": self.nodes[0].getnewaddress(),
            "amount": amount
        })
        myopid = self.nodes[0].z_sendmany(mytaddr, recipients)
        wait_and_assert_operationid_status(
            self.nodes[0], myopid, "failed",
            "Insufficient transparent funds, have 10.00, need 0.00000053 more to avoid creating invalid change output 0.00000001 (dust threshold is 0.00000054)"
        )

        # Send will fail because send amount is too big, even when including coinbase utxos
        errorString = ""
        try:
            self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 99999)
        except JSONRPCException, e:
            errorString = e.error['message']
Ejemplo n.º 27
0
    def excessiveblocksize_test(self):
        print("Testing -excessiveblocksize")

        print("  Set to twice the default, i.e. %d bytes" %
              (2 * LEGACY_MAX_BLOCK_SIZE))
        stop_node(self.nodes[0], 0)
        self.extra_args = [["-excessiveblocksize=%d" %
                            (2 * LEGACY_MAX_BLOCK_SIZE)]]
        self.nodes[0] = start_node(0, self.options.tmpdir,
                                   self.extra_args[0])
        self.check_excessive(2 * LEGACY_MAX_BLOCK_SIZE)
        # Check for EB correctness in the subver string
        self.check_subversion("/Bitcoin ABC:.*\(EB2\.0\)/")
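        # Note: the EB value in the user agent advertises the excessive block
        # size in MB, so 2 * LEGACY_MAX_BLOCK_SIZE = 2,000,000 bytes shows up
        # as "(EB2.0)".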

        print("  Attempt to set below legacy limit of 1MB - try %d bytes" %
              LEGACY_MAX_BLOCK_SIZE)
        outputchecker = OutputChecker()
        stop_node(self.nodes[0], 0)
        try:
            self.extra_args = [["-excessiveblocksize=%d" % LEGACY_MAX_BLOCK_SIZE]]
            self.nodes[0] = start_node(0, self.options.tmpdir,
                                       self.extra_args[0],
                                       stderr_checker=outputchecker)
        except Exception as e:
            assert(outputchecker.contains(
                'Error: Excessive block size must be > 1,000,000 bytes (1MB)'))
            assert_equal('bitcoind exited with status 1 during initialization', str(e))
        else:
            raise AssertionError("Must not accept excessiveblocksize"
                                 " value < %d bytes" % LEGACY_MAX_BLOCK_SIZE)

        print("  Attempt to set below blockmaxsize (mining limit)")
        outputchecker = OutputChecker()
        try:
            self.extra_args = [['-blockmaxsize=1500000',
                                '-excessiveblocksize=1300000']]
            self.nodes[0] = start_node(0, self.options.tmpdir,
                                       self.extra_args[0],
                                       stderr_checker=outputchecker)
        except Exception as e:
            assert(outputchecker.contains('Error: ' + MAX_GENERATED_BLOCK_SIZE_ERROR))
            assert_equal('bitcoind exited with status 1 during initialization', str(e))
        else:
            raise AssertionError('Must not accept excessiveblocksize'
                                 ' below blockmaxsize')

        # Make sure that allowsmallgeneratedblocksize doesn't help here
        outputchecker = OutputChecker()
        try:
            self.extra_args = [['-blockmaxsize=1500000',
                                '-excessiveblocksize=1300000',
                                '-allowsmallgeneratedblocksize']]
            self.nodes[0] = start_node(0, self.options.tmpdir,
                                       self.extra_args[0],
                                       stderr_checker=outputchecker)
        except Exception as e:
            assert(outputchecker.contains('Error: ' + MAX_GENERATED_BLOCK_SIZE_ERROR))
            assert_equal('bitcoind exited with status 1 during initialization', str(e))
        else:
            raise AssertionError('Must not accept excessiveblocksize'
                                 ' below blockmaxsize')

        print("  Attempt to set blockmaxsize below 1MB")
        outputchecker = OutputChecker()
        try:
            self.extra_args = [["-blockmaxsize=%d" % LEGACY_MAX_BLOCK_SIZE]]
            self.nodes[0] = start_node(0, self.options.tmpdir,
                                       self.extra_args[0],
                                       stderr_checker=outputchecker)
        except Exception as e:
            assert(outputchecker.contains('Error: ' + MAX_GENERATED_BLOCK_SIZE_ERROR))
            assert_equal('bitcoind exited with status 1 during initialization', str(e))
        else:
            raise AssertionError('Must not accept excessiveblocksize'
                                 ' below blockmaxsize')

        outputchecker = OutputChecker()
        self.extra_args = [["-blockmaxsize=%d" % LEGACY_MAX_BLOCK_SIZE,
                            "-allowsmallgeneratedblocksize"]]
        self.nodes[0] = start_node(0, self.options.tmpdir,
                                   self.extra_args[0],
                                   stderr_checker=outputchecker)
        assert(outputchecker.contains('Warning: ' + MAX_GENERATED_BLOCK_SIZE_ERROR))
Ejemplo n.º 28
0
    def run_test(self):
        # Before we connect anything, we first set the time on the node
        # to be in the past, otherwise things break because the CNode
        # time counters can't be reset backward after initialization
        old_time = int(time.time() - 60 * 60 * 24 * 9)
        self.nodes[0].setmocktime(old_time)

        # Generate some old blocks
        self.nodes[0].generate(260)

        # test_nodes[0] will only request old blocks
        # test_nodes[1] will only request new blocks
        # test_nodes[2] will test resetting the counters
        test_nodes = []
        connections = []

        for i in range(3):
            test_nodes.append(TestNode())
            connections.append(
                NodeConn('127.0.0.1',
                         p2p_port(0),
                         self.nodes[0],
                         test_nodes[i],
                         protocol_version=BLOSSOM_PROTO_VERSION))
            test_nodes[i].add_connection(connections[i])

        NetworkThread().start()  # Start up network handling in another thread
        [x.wait_for_verack() for x in test_nodes]

        # Test logic begins here

        # Now mine a big block
        self.mine_full_block(self.nodes[0], self.nodes[0].getnewaddress())

        # Store the hash; we'll request this later
        big_old_block = self.nodes[0].getbestblockhash()
        old_block_size = self.nodes[0].getblock(big_old_block, True)['size']
        big_old_block = int(big_old_block, 16)

        # Advance to two days ago
        self.nodes[0].setmocktime(int(time.time()) - 2 * 60 * 60 * 24)

        # Generate interim blocks. Due to the "max MTP" soft-forked rule, block timestamps
        # can be no more than 1.5 hours ahead of the chain tip's MTP. Thus we need to mine
        # enough blocks to advance the MTP forward to the desired mocked time.
        self.nodes[0].generate(1000)

        # Mine one more block, so that the prior block looks old
        self.mine_full_block(self.nodes[0], self.nodes[0].getnewaddress())

        # We'll be requesting this new block too
        big_new_block = self.nodes[0].getbestblockhash()
        big_new_block = int(big_new_block, 16)

        # test_nodes[0] will test what happens if we just keep requesting
        # the same big old block too many times (expect: disconnect)

        getdata_request = msg_getdata()
        getdata_request.inv.append(CInv(2, big_old_block))

        max_bytes_per_day = 2200 * 1024 * 1024
        daily_buffer = 1152 * 2000000
        max_bytes_available = max_bytes_per_day - daily_buffer
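        # Illustrative arithmetic for the constants above (the number of successful
        # requests is then max_bytes_available // old_block_size, measured at runtime):
        #   max_bytes_per_day = 2200 MiB                = 2,306,867,200 bytes
        #   daily_buffer      = 1152 blocks * 2,000,000 = 2,304,000,000 bytes
        #   max_bytes_available                         =     2,867,200 bytes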
        success_count = max_bytes_available / old_block_size

        # ~2304MB will be reserved for relaying new blocks, so expect this to
        # succeed for ~14 tries.
        for i in range(int(success_count)):
            test_nodes[0].send_message(getdata_request)
            test_nodes[0].sync_with_ping()
            assert_equal(test_nodes[0].block_receive_map[big_old_block], i + 1)

        assert_equal(len(self.nodes[0].getpeerinfo()), 3)
        # At most a couple more tries should succeed (depending on how long
        # the test has been running so far).
        for i in range(3):
            test_nodes[0].send_message(getdata_request)
        test_nodes[0].wait_for_disconnect()
        assert_equal(len(self.nodes[0].getpeerinfo()), 2)
        print("Peer 0 disconnected after downloading old block too many times")

        # Requesting the current block on test_nodes[1] should succeed indefinitely,
        # even when over the max upload target.
        # We'll try 200 times
        getdata_request.inv = [CInv(2, big_new_block)]
        for i in range(200):
            test_nodes[1].send_message(getdata_request)
            test_nodes[1].sync_with_ping()
            assert_equal(test_nodes[1].block_receive_map[big_new_block], i + 1)

        print("Peer 1 able to repeatedly download new block")

        # But if test_nodes[1] tries for an old block, it gets disconnected too.
        getdata_request.inv = [CInv(2, big_old_block)]
        test_nodes[1].send_message(getdata_request)
        test_nodes[1].wait_for_disconnect()
        assert_equal(len(self.nodes[0].getpeerinfo()), 1)

        print("Peer 1 disconnected after trying to download old block")

        print("Advancing system time on node to clear counters...")

        # If we advance the node's clock past the 24-hour window, the counters should
        # reset, and test_nodes[2] should be able to retrieve the old block.
        self.nodes[0].setmocktime(int(time.time()))
        test_nodes[2].sync_with_ping()
        test_nodes[2].send_message(getdata_request)
        test_nodes[2].sync_with_ping()
        assert_equal(test_nodes[2].block_receive_map[big_old_block], 1)

        print("Peer 2 able to download old block")

        [c.disconnect_node() for c in connections]

        # Stop and start node 0 with 1MB maxuploadtarget, whitelist 127.0.0.1
        print("Restarting nodes with -whitelist=127.0.0.1")
        stop_node(self.nodes[0], 0)
        self.nodes[0] = start_node(
            0,
            self.options.tmpdir,
            [
                "-debug",
                '-nuparams=2bb40e60:1',  # Blossom
                "-whitelist=127.0.0.1",
                "-maxuploadtarget=1",
            ])
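        # Assumption, based on standard bitcoind option semantics: -maxuploadtarget
        # is interpreted in MiB per 24h window, and whitelisted peers are exempt from
        # the upload-target check when serving blocks, which the rest of this test
        # relies on.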

        #recreate/reconnect 3 test nodes
        test_nodes = []
        connections = []

        for i in range(3):
            test_nodes.append(TestNode())
            connections.append(
                NodeConn('127.0.0.1',
                         p2p_port(0),
                         self.nodes[0],
                         test_nodes[i],
                         protocol_version=BLOSSOM_PROTO_VERSION))
            test_nodes[i].add_connection(connections[i])

        NetworkThread().start()  # Start up network handling in another thread
        [x.wait_for_verack() for x in test_nodes]

        #retrieve 20 blocks which should be enough to break the 1MB limit
        getdata_request.inv = [CInv(2, big_new_block)]
        for i in range(20):
            test_nodes[1].send_message(getdata_request)
            test_nodes[1].sync_with_ping()
            assert_equal(test_nodes[1].block_receive_map[big_new_block], i + 1)

        getdata_request.inv = [CInv(2, big_old_block)]
        test_nodes[1].send_message(getdata_request)
        test_nodes[1].wait_for_disconnect()
        assert_equal(len(self.nodes[0].getpeerinfo()),
                     3)  #node is still connected because of the whitelist

        print(
            "Peer 1 still connected after trying to download old block (whitelisted)"
        )

        [c.disconnect_node() for c in connections]
Ejemplo n.º 29
0
    def run_test(self):
        node = self.nodes[0]

        self.mocktime = int(time.time())

        print("Test block finalization...")
        node.generate(10)
        tip = node.getbestblockhash()
        node.finalizeblock(tip)
        assert_equal(node.getbestblockhash(), tip)
        assert_equal(node.getfinalizedblockhash(), tip)

        def wait_for_tip(node, tip):
            def check_tip():
                return node.getbestblockhash() == tip

            wait_until(check_tip)
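            # wait_until is assumed to be the test framework's polling helper: it
            # keeps re-evaluating check_tip until it returns True or a timeout expires.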

        alt_node = self.nodes[1]
        wait_for_tip(alt_node, tip)

        alt_node.invalidateblock(tip)
        # We will use this later
        fork_block = alt_node.getbestblockhash()

        # Node 0 should not accept alt_node's whole chain, even though it is longer,
        # because node 0's tip is finalized.
        # Headers are not accepted if their previous block is invalid:
        #    - The first block from alt_node has the same height as the node's tip but is on a
        #    minority chain; its status is "valid-headers"
        #    - The second block from alt_node has a height greater than the node's tip and is
        #    marked invalid because the node's tip is finalized
        #    - Later blocks from alt_node are rejected because their previous block is invalid
        #
        # Expected state:
        #
        # On alt_node:
        #                           >(210)->(211)-> // ->(218 tip)
        #                          /
        # (200)->(201)-> // ->(209)->(210 invalid)
        #
        # On node:
        #                           >(210 valid-headers)->(211 invalid)->(212 to 218 dropped)
        #                          /
        # (200)->(201)-> // ->(209)->(210 finalized, tip)

        def wait_for_block(node, block, status="invalid"):
            def check_block():
                for tip in node.getchaintips():
                    if tip["hash"] == block:
                        assert (tip["status"] != "active")
                        return tip["status"] == status
                return False

            wait_until(check_block)

        # The first block header is accepted with status "valid-headers"
        alt_node.generate(1)
        wait_for_block(node, alt_node.getbestblockhash(), "valid-headers")

        # The second block header is accepted but marked invalid
        alt_node.generate(1)
        invalid_block = alt_node.getbestblockhash()
        wait_for_block(node, invalid_block)

        # Later block headers are rejected
        for i in range(2, 9):
            alt_node.generate(1)
            assert_raises_rpc_error(-5, RPC_BLOCK_NOT_FOUND_ERROR,
                                    node.getblockheader,
                                    alt_node.getbestblockhash())

        assert_equal(node.getbestblockhash(), tip)
        assert_equal(node.getfinalizedblockhash(), tip)
        print("Test that an invalid block cannot be finalized...")
        assert_raises_rpc_error(-20, RPC_FINALIZE_INVALID_BLOCK_ERROR,
                                node.finalizeblock, invalid_block)

        print(
            "Test that invalidating a finalized block moves the finalization backward..."
        )

        # Node's finalized block will be invalidated, which causes the finalized block to
        # move to the previous block.
        #
        # Expected state:
        #
        # On alt_node:
        #                                                 >(210)->(211)-> // ->(218 tip)
        #                                                /
        # (200)->(201)-> // ->(208 auto-finalized)->(209)->(210 invalid)
        #
        # On node:
        #                                     >(210 valid-headers)->(211 invalid)->(212 to 218 dropped)
        #                                    /
        # (200)->(201)-> // ->(209 finalized)->(210 tip)
        node.invalidateblock(tip)
        node.reconsiderblock(tip)

        assert_equal(node.getbestblockhash(), tip)
        assert_equal(node.getfinalizedblockhash(), fork_block)
        assert_equal(
            alt_node.getfinalizedblockhash(),
            node.getblockheader(
                node.getfinalizedblockhash())['previousblockhash'])
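        # Per the diagram above, node finalized block 209 while alt_node
        # auto-finalized its parent (block 208), hence the previousblockhash check.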

        # The node will now accept that chain since the finalized block moved back.
        # Generate a new block on alt_node to trigger a getheaders from node.
        # The earlier blocks at heights 212-218 were dropped because their previous block was invalid.
        #
        # Expected state:
        #
        # On alt_node:
        #                                          >(210)->(211)-> // ->(218)->(219 tip)
        #                                         /
        # (200)->(201)-> // ->(209 auto-finalized)->(210 invalid)
        #
        # On node:
        #                                     >(210)->(211)->(212)-> // ->(218)->(219 tip)
        #                                    /
        # (200)->(201)-> // ->(209 finalized)->(210)
        node.reconsiderblock(invalid_block)

        alt_node_tip = alt_node.generate(1)[-1]
        wait_for_tip(node, alt_node_tip)

        assert_equal(node.getbestblockhash(), alt_node.getbestblockhash())
        assert_equal(node.getfinalizedblockhash(), fork_block)
        assert_equal(alt_node.getfinalizedblockhash(), fork_block)

        print("Trigger reorg via block finalization...")
        # Finalize node tip to reorg
        #
        # Expected state:
        #
        # On alt_node:
        #                                          >(210)->(211)-> // ->(218)->(219 tip)
        #                                         /
        # (200)->(201)-> // ->(209 auto-finalized)->(210 invalid)
        #
        # On node:
        #                           >(210 invalid)-> // ->(219 invalid)
        #                          /
        # (200)->(201)-> // ->(209)->(210 finalized, tip)
        node.finalizeblock(tip)
        assert_equal(node.getfinalizedblockhash(), tip)

        print("Try to finalize a block on a competiting fork...")
        assert_raises_rpc_error(-20, RPC_FINALIZE_INVALID_BLOCK_ERROR,
                                node.finalizeblock,
                                alt_node.getbestblockhash())
        assert_equal(node.getfinalizedblockhash(), tip)

        print("Check auto-finalization occurs as the tip move forward...")
        # Reconsider alt_node tip then generate some more blocks on alt_node.
        # Auto-finalization will occur on both chains.
        #
        # Expected state:
        #
        # On alt_node:
        #                           >(210)->(211)-> // ->(219 auto-finalized)-> // ->(229 tip)
        #                          /
        # (200)->(201)-> // ->(209)->(210 invalid)
        #
        # On node:
        #                           >(210)->(211)-> // ->(219 auto-finalized)-> // ->(229 tip)
        #                          /
        # (200)->(201)-> // ->(209)->(210 invalid)
        node.reconsiderblock(alt_node.getbestblockhash())
        block_to_autofinalize = alt_node.generate(1)[-1]
        alt_node_new_tip = alt_node.generate(9)[-1]
        wait_for_tip(node, alt_node_new_tip)

        assert_equal(node.getbestblockhash(), alt_node.getbestblockhash())
        assert_equal(node.getfinalizedblockhash(), alt_node_tip)
        assert_equal(alt_node.getfinalizedblockhash(), alt_node_tip)

        print("Try to finalize a block on an already finalized chain...")
        # Finalizing a block of an already finalized chain should have no effect
        block_218 = node.getblockheader(alt_node_tip)['previousblockhash']
        node.finalizeblock(block_218)
        assert_equal(node.getfinalizedblockhash(), alt_node_tip)

        print("Make sure reconsidering block move the finalization point...")
        # Reconsidering the tip will move back the finalized block on node
        #
        # Expected state:
        #
        # On alt_node:
        #                           >(210)->(211)-> // ->(219 auto-finalized)-> // ->(229 tip)
        #                          /
        # (200)->(201)-> // ->(209)->(210 invalid)
        #
        # On node:
        #                                     >(210)->(211)-> // ->(219)-> // ->(229 tip)
        #                                    /
        # (200)->(201)-> // ->(209 finalized)->(210)
        node.reconsiderblock(tip)

        assert_equal(node.getbestblockhash(), alt_node_new_tip)
        assert_equal(node.getfinalizedblockhash(), fork_block)

        ### TEST FINALIZATION DELAY ###

        print("Check that finalization delay prevents eclipse attacks")
        # Because there has been no delay since the beginning of this test,
        # there should have been no auto-finalization on delay_node.
        #
        # Expected state:
        #
        # On alt_node:
        #                           >(210)->(211)-> // ->(219 auto-finalized)-> // ->(229 tip)
        #                          /
        # (200)->(201)-> // ->(209)->(210 invalid)
        #
        # On delay_node:
        #                           >(210)->(211)-> // ->(219)-> // ->(229 tip)
        #                          /
        # (200)->(201)-> // ->(209)->(210)
        delay_node = self.nodes[2]
        wait_for_tip(delay_node, alt_node_new_tip)
        assert_equal(delay_node.getfinalizedblockhash(), str())

        print(
            "Check that finalization delay does not prevent auto-finalization")
        # Expire the delay, then generate 1 new block with alt_node to
        # update the tip on all chains.
        # Because the finalization delay is expired, auto-finalization
        # should occur.
        #
        # Expected state:
        #
        # On alt_node:
        #                           >(220 auto-finalized)-> // ->(230 tip)
        #                          /
        # (200)->(201)-> // ->(209)->(210 invalid)
        #
        # On delay_node:
        #                           >(220 auto-finalized)-> // ->(230 tip)
        #                          /
        # (200)->(201)-> // ->(209)->(210)
        self.mocktime += self.finalization_delay
        set_node_times([delay_node], self.mocktime)
        new_tip = alt_node.generate(1)[-1]
        wait_for_tip(delay_node, new_tip)

        assert_equal(alt_node.getbestblockhash(), new_tip)
        assert_equal(node.getfinalizedblockhash(), block_to_autofinalize)
        assert_equal(alt_node.getfinalizedblockhash(), block_to_autofinalize)

        print("Check that finalization delay is effective on node boot")
        # Restart the node so that its blocks have no header-received time.
        stop_node(self.nodes[2], 2)
        self.nodes[2] = start_node(2, self.options.tmpdir)
        delay_node = self.nodes[2]

        # There should be no finalized block (getfinalizedblockhash returns an empty string)
        assert_equal(delay_node.getfinalizedblockhash(), str())

        # Generate 20 blocks with no delay. This should not trigger auto-finalization.
        #
        # Expected state:
        #
        # On delay_node:
        #                           >(220)-> // ->(250 tip)
        #                          /
        # (200)->(201)-> // ->(209)->(210)
        blocks = delay_node.generate(20)
        reboot_autofinalized_block = blocks[10]
        new_tip = blocks[-1]
        wait_for_tip(delay_node, new_tip)

        assert_equal(delay_node.getfinalizedblockhash(), str())

        # Now let the finalization delay expire, then generate one more block.
        # This should resume auto-finalization.
        #
        # Expected state:
        #
        # On delay_node:
        #                           >(220)-> // ->(241 auto-finalized)-> // ->(251 tip)
        #                          /
        # (200)->(201)-> // ->(209)->(210)
        self.mocktime += self.finalization_delay
        set_node_times([delay_node], self.mocktime)
        new_tip = delay_node.generate(1)[-1]
        wait_for_tip(delay_node, new_tip)

        assert_equal(delay_node.getfinalizedblockhash(),
                     reboot_autofinalized_block)
Ejemplo n.º 30
0
    def run_test(self):
        super().run_test()

        sigma_start_block = 550
        self.nodes[0].generate(sigma_start_block -
                               self.nodes[0].getblockcount())

        # generate mints to spend
        for _ in range(0, 10):
            self.nodes[0].mint(1)

        self.nodes[0].generate(10)
        self.sync_all()

        # create a sigma-enabled property with denominations (1, 2)
        balance = '1000000'
        self.nodes[0].elysium_sendissuancefixed(self.addrs[0], 1, 1, 0, '', '',
                                                'Sigma', '', '', balance, 1)

        self.nodes[0].generate(1)
        sigma_property = 3
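        # Assumption: property ids 1 and 2 are reserved, so the token issued above is
        # expected to be assigned property id 3.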

        self.nodes[0].elysium_sendcreatedenomination(self.addrs[0],
                                                     sigma_property, '1')
        self.nodes[0].generate(1)

        self.nodes[0].elysium_sendcreatedenomination(self.addrs[0],
                                                     sigma_property, '2')
        self.nodes[0].generate(10)

        # mint 4 coins
        self.nodes[0].elysium_sendmint(self.addrs[0], sigma_property, {
            0: 2,
            1: 2
        })

        # spend 2 coins, leaving 2 coins remaining
        self.nodes[0].generate(1)
        self.nodes[0].elysium_sendspend(self.addrs[0], sigma_property, 0)
        self.nodes[0].elysium_sendspend(self.addrs[0], sigma_property, 1)

        self.nodes[0].generate(1)

        # mint 2 more coins
        unconfirmed_txid = self.nodes[0].elysium_sendmint(
            self.addrs[0], sigma_property, {
                0: 1,
                1: 1
            })
        raw_unconfirmed = self.nodes[0].getrawtransaction(unconfirmed_txid)

        # check before reindex
        self.sync_all()
        confirmed_mints = self.nodes[0].elysium_listmints()
        unconfirmed_mints = self.nodes[0].elysium_listpendingmints()

        assert_equal(2, len(confirmed_mints))
        assert_equal(2, len(unconfirmed_mints))

        blockcount = self.nodes[0].getblockcount()

        # restart with reindexing
        stop_node(self.nodes[0], 0)
        self.nodes[0] = start_node(0, self.options.tmpdir,
                                   ['-elysium', '-reindex'])

        while self.nodes[0].getblockcount() < blockcount:
            time.sleep(0.1)
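        # The loop above simply polls until the -reindex replay has caught back up to
        # the height recorded before the restart.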

        connect_nodes(self.nodes[0], 1)

        reindexed_confirmed_mints = self.nodes[0].elysium_listmints()
        self.compare_mints(confirmed_mints, reindexed_confirmed_mints)

        reindexed_unconfirmed_mints = self.nodes[0].elysium_listpendingmints()
        self.compare_mints(unconfirmed_mints, reindexed_unconfirmed_mints)

        # spend remaining mints
        self.nodes[0].elysium_sendspend(self.addrs[0], sigma_property, 0)
        self.nodes[0].elysium_sendspend(self.addrs[0], sigma_property, 1)

        self.nodes[0].generate(1)

        # all mints should be spent
        remaining_mints = self.nodes[0].elysium_listmints()
        assert_equal(0, len(remaining_mints))

        # re-broadcast the unconfirmed mint and confirm the remaining coins
        self.nodes[0].clearmempool()
        self.nodes[0].sendrawtransaction(raw_unconfirmed)
        self.nodes[0].generate(1)

        new_confirmed_mints = self.nodes[0].elysium_listmints()
        self.compare_mints(unconfirmed_mints, new_confirmed_mints)

        self.nodes[0].elysium_sendspend(self.addrs[0], sigma_property, 0)
        self.nodes[0].elysium_sendspend(self.addrs[0], sigma_property, 1)

        self.nodes[0].generate(1)

        remaining_mints = self.nodes[0].elysium_listmints()
        assert_equal(0, len(remaining_mints))

        # all mints are spent, so the elysium balance should be the same as before
        assert_equal(
            balance,
            self.nodes[0].elysium_getbalance(self.addrs[0],
                                             sigma_property)['balance'])
Ejemplo n.º 31
0
 def stop_three(self):
     stop_node(self.nodes[0], 0)
     stop_node(self.nodes[1], 1)
     stop_node(self.nodes[2], 2)
Ejemplo n.º 33
0
    def test_40300409(self):
        # Sample aliases to be tested
        mn_aliases_to_be_modified = ["mn0", "mn6"]
        mn_hot_node = "mn13"

        # Store the external address of each modified
        # MN for easier identification at assertion time
        mn_ext_add_to_modified_p2p = {}

        print("Wait a min!")
        time.sleep(60)
        mns = self.nodes[0].masternodelist("extra")
        # Modifying masternode #1 mn0
        self.modify_masternode_conf_extP2P(mn_aliases_to_be_modified[0],
                                           self.hot_node_num,
                                           self.options.tmpdir,
                                           mn_ext_add_to_modified_p2p)

        time.sleep(60)
        print("Enabling MN {}...".format(mn_aliases_to_be_modified[0]))
        time.sleep(1)

        # Modifying masternode #2 mn6
        self.modify_masternode_conf_extP2P(mn_aliases_to_be_modified[1],
                                           self.hot_node_num,
                                           self.options.tmpdir,
                                           mn_ext_add_to_modified_p2p)
        time.sleep(1)

        print("Stoping and re-enabling MN hot node {}...".format(mn_hot_node))
        #Stopping hot_node
        stop_node(self.nodes[self.hot_node_num], self.hot_node_num)

        print(f"Starting node {self.hot_node_num}...")
        self.nodes[self.hot_node_num] = start_node(
            self.hot_node_num,
            self.options.tmpdir,
            ["-debug=masternode", "-txindex=1", "-reindex"],
            timewait=900)
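        # Reconnect the restarted hot node to every other node in the test network.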
        for i in range(len(self.nodes)):
            if i != self.hot_node_num:
                connect_nodes_bi(self.nodes, self.hot_node_num, i)

        time.sleep(90)
        print(f"Checking sync status of node {self.hot_node_num}...")
        assert_equal(
            self.nodes[self.hot_node_num].mnsync("status")["IsSynced"], True)
        assert_equal(
            self.nodes[self.hot_node_num].mnsync("status")["IsFailed"], False)

        for mn_alias in mn_aliases_to_be_modified:
            print(f"Enabling MN {mn_alias}...")
            res = self.nodes[self.hot_node_num].masternode(
                "start-alias", mn_alias)
            print(res)
            assert_equal(res["alias"], mn_alias)
            assert_equal(res["result"], "successful")
            time.sleep(1)

        print("Waiting 90 seconds...")
        time.sleep(90)
        print("Hopefully we have be then new P2PAddresses")
        mns = self.nodes[self.hot_node_num].masternodelist("extra")
        for out in mns:
            if (mns[out]["extAddress"] in mn_ext_add_to_modified_p2p):
                assert_equal(
                    mns[out]["extP2P"],
                    mn_ext_add_to_modified_p2p[mns[out]["extAddress"]])