Example 1
    def excessiveblocksize_test(self):
        """Check startup handling of the -excessiveblocksize option.

        A value of twice the legacy 1MB limit must be accepted and shown
        in the subversion string; a value at the legacy limit, or below
        the mining limit (-blockmaxsize), must abort initialization.
        """
        self.log.info("Testing -excessiveblocksize")

        # Restart the node with EB set to twice the legacy maximum.
        self.log.info("  Set to twice the default, i.e. %d bytes" %
                      (2 * LEGACY_MAX_BLOCK_SIZE))
        stop_node(self.nodes[0], 0)
        self.extra_args = [
            ["-excessiveblocksize=%d" % (2 * LEGACY_MAX_BLOCK_SIZE)]]
        self.nodes[0] = start_node(0, self.options.tmpdir, self.extra_args[0])
        self.check_excessive(2 * LEGACY_MAX_BLOCK_SIZE)
        # The configured EB value is advertised in the subver string.
        self.check_subversion(r"/Commercium:.*\(EB2\.0; .*\)/")

        self.log.info(
            "  Attempt to set below legacy limit of 1MB - try %d bytes" %
            LEGACY_MAX_BLOCK_SIZE)
        checker = OutputChecker()
        stop_node(self.nodes[0], 0)
        self.extra_args = [
            ["-excessiveblocksize=%d" % LEGACY_MAX_BLOCK_SIZE]]
        try:
            self.nodes[0] = start_node(0, self.options.tmpdir,
                                       self.extra_args[0],
                                       stderr_checker=checker)
        except Exception as e:
            # Startup must abort with the expected stderr message.
            assert checker.contains(
                'Error: Excessive block size must be > 1,000,000 bytes (1MB)')
            assert_equal(
                'commerciumd exited with status 1 during initialization',
                str(e))
        else:
            raise AssertionError(
                "Must not accept excessiveblocksize value < %d bytes" %
                LEGACY_MAX_BLOCK_SIZE)

        self.log.info("  Attempt to set below blockmaxsize (mining limit)")
        checker = OutputChecker()
        self.extra_args = [
            ['-blockmaxsize=1500000', '-excessiveblocksize=1300000']]
        try:
            self.nodes[0] = start_node(0, self.options.tmpdir,
                                       self.extra_args[0],
                                       stderr_checker=checker)
        except Exception as e:
            assert checker.contains('Error: ' + MAX_GENERATED_BLOCK_SIZE_ERROR)
            assert_equal(
                'commerciumd exited with status 1 during initialization',
                str(e))
        else:
            raise AssertionError(
                'Must not accept excessiveblocksize below blockmaxsize')

        # Leave the test with a running node, as the framework expects.
        self.nodes[0] = start_node(0, self.options.tmpdir, [])
Example 2
    def excessiveblocksize_test(self):
        """Exercise the -excessiveblocksize startup option.

        Verifies that:
        - twice the legacy 1MB limit is accepted and advertised in the
          node's subversion string,
        - a value at the legacy 1MB limit is rejected at startup,
        - an excessiveblocksize below -blockmaxsize is rejected, with or
          without -allowsmallgeneratedblocksize,
        - a 1MB -blockmaxsize is rejected unless
          -allowsmallgeneratedblocksize is given, in which case the
          message is emitted as a warning instead of an error.
        """
        print("Testing -excessiveblocksize")

        print("  Set to twice the default, i.e. %d bytes" %
              (2 * LEGACY_MAX_BLOCK_SIZE))
        stop_node(self.nodes[0], 0)
        self.extra_args = [["-excessiveblocksize=%d" %
                            (2 * LEGACY_MAX_BLOCK_SIZE)]]
        self.nodes[0] = start_node(0, self.options.tmpdir,
                                   self.extra_args[0])
        self.check_excessive(2 * LEGACY_MAX_BLOCK_SIZE)
        # Check for EB correctness in the subver string
        self.check_subversion("/Bitcoin ABC:.*\(EB2\.0\)/")

        print("  Attempt to set below legacy limit of 1MB - try %d bytes" %
              LEGACY_MAX_BLOCK_SIZE)
        outputchecker = OutputChecker()
        stop_node(self.nodes[0], 0)
        try:
            self.extra_args = [["-excessiveblocksize=%d" % LEGACY_MAX_BLOCK_SIZE]]
            self.nodes[0] = start_node(0, self.options.tmpdir,
                                       self.extra_args[0],
                                       stderr_checker=outputchecker)
        except Exception as e:
            # Startup must abort with the expected stderr message.
            assert(outputchecker.contains(
                'Error: Excessive block size must be > 1,000,000 bytes (1MB)'))
            assert_equal('bitcoind exited with status 1 during initialization', str(e))
        else:
            raise AssertionError("Must not accept excessiveblocksize"
                                 " value < %d bytes" % LEGACY_MAX_BLOCK_SIZE)

        print("  Attempt to set below blockmaxsize (mining limit)")
        outputchecker = OutputChecker()
        try:
            self.extra_args = [['-blockmaxsize=1500000',
                                '-excessiveblocksize=1300000']]
            self.nodes[0] = start_node(0, self.options.tmpdir,
                                       self.extra_args[0],
                                       stderr_checker=outputchecker)
        except Exception as e:
            assert(outputchecker.contains('Error: ' + MAX_GENERATED_BLOCK_SIZE_ERROR))
            assert_equal('bitcoind exited with status 1 during initialization', str(e))
        else:
            raise AssertionError('Must not accept excessiveblocksize'
                                 ' below blockmaxsize')

        # Make sure that allowsmallgeneratedblocksize doesn't help here
        outputchecker = OutputChecker()
        try:
            self.extra_args = [['-blockmaxsize=1500000',
                                '-excessiveblocksize=1300000',
                                '-allowsmallgeneratedblocksize']]
            self.nodes[0] = start_node(0, self.options.tmpdir,
                                       self.extra_args[0],
                                       stderr_checker=outputchecker)
        except Exception as e:
            assert(outputchecker.contains('Error: ' + MAX_GENERATED_BLOCK_SIZE_ERROR))
            assert_equal('bitcoind exited with status 1 during initialization', str(e))
        else:
            raise AssertionError('Must not accept excessiveblocksize'
                                 ' below blockmaxsize')

        print("  Attempt to set blockmaxsize below 1MB")
        outputchecker = OutputChecker()
        try:
            self.extra_args = [["-blockmaxsize=%d" % LEGACY_MAX_BLOCK_SIZE]]
            self.nodes[0] = start_node(0, self.options.tmpdir,
                                       self.extra_args[0],
                                       stderr_checker=outputchecker)
        except Exception as e:
            assert(outputchecker.contains('Error: ' + MAX_GENERATED_BLOCK_SIZE_ERROR))
            assert_equal('bitcoind exited with status 1 during initialization', str(e))
        else:
            # Fixed a copy-pasted failure message: this case rejects a too
            # small -blockmaxsize; no -excessiveblocksize is involved.
            raise AssertionError('Must not accept blockmaxsize below 1MB'
                                 ' without -allowsmallgeneratedblocksize')

        # With -allowsmallgeneratedblocksize the same blockmaxsize is
        # accepted, but the node still emits the message as a warning.
        outputchecker = OutputChecker()
        self.extra_args = [["-blockmaxsize=%d" % LEGACY_MAX_BLOCK_SIZE,
                            "-allowsmallgeneratedblocksize"]]
        self.nodes[0] = start_node(0, self.options.tmpdir,
                                   self.extra_args[0],
                                   stderr_checker=outputchecker)
        assert(outputchecker.contains('Warning: ' + MAX_GENERATED_BLOCK_SIZE_ERROR))
Example 3
    def setup_network(self):
        """Spin up three nodes that mine with different block parameters.

        node0 first works alone, repeatedly splitting its coinbase outputs
        into many small low-priority UTXOs that later drive transaction
        generation.  node1 and node2 are connected only afterwards so the
        fee estimates are not skewed by the splitting transactions.
        """
        self.nodes = []
        # node0 mines the blocks used for input splitting.
        self.nodes.append(
            start_node(0, self.options.tmpdir,
                       ["-maxorphantx=1000", "-whitelist=127.0.0.1"]))

        print("This test is time consuming, please be patient")
        print(
            "Splitting inputs to small size so we can generate low priority tx's"
        )
        self.txouts = []
        self.txouts2 = []
        # Split a coinbase into two transaction puzzle outputs.
        split_inputs(self.nodes[0], self.nodes[0].listunspent(0),
                     self.txouts, True)

        # Confirm everything currently sitting in the mempool.
        while self.nodes[0].getrawmempool():
            self.nodes[0].generate(1)

        # Ping-pong the outputs between txouts and txouts2 five times,
        # doubling the pool of available UTXOs twice on every pass.
        # txouts/txouts2 track the UTXOs since the wallet does not.
        for _ in range(5):
            # Double txouts into txouts2.
            while self.txouts:
                split_inputs(self.nodes[0], self.txouts, self.txouts2)
            while self.nodes[0].getrawmempool():
                self.nodes[0].generate(1)
            # Double txouts2 back into txouts.
            while self.txouts2:
                split_inputs(self.nodes[0], self.txouts2, self.txouts)
            while self.nodes[0].getrawmempool():
                self.nodes[0].generate(1)
        print("Finished splitting")

        # Only now connect the other nodes, so the fee estimates were not
        # affected by the splitting transactions above.
        # node1 mines small blocks that are still bigger than the expected
        # transaction rate, and allows free transactions.
        # NOTE: CreateNewBlock starts counting block size at 1,000 bytes,
        # so 17k leaves room for roughly 110 transactions.
        node1args = ["-blockprioritysize=1500", "-blockmaxsize=17000",
                     "-maxorphantx=1000", "-debug=estimatefee",
                     "-allowsmallgeneratedblocksize"]
        self.nodes.append(
            start_node(1, self.options.tmpdir, node1args,
                       stderr_checker=OutputChecker()))
        connect_nodes(self.nodes[1], 0)

        # node2 is a stingy miner: its blocks hold only ~55 transactions.
        node2args = ["-blockprioritysize=0", "-blockmaxsize=8000",
                     "-maxorphantx=1000", "-allowsmallgeneratedblocksize"]
        self.nodes.append(
            start_node(2, self.options.tmpdir, node2args,
                       stderr_checker=OutputChecker()))
        connect_nodes(self.nodes[0], 2)
        connect_nodes(self.nodes[2], 1)

        self.is_network_split = False
        self.sync_all()
Example 4
    def reorg_test(self):
        """Reorg node 2 onto a new, longer chain mined by node 1.

        Invalidates back to 287 blocks below the shared tip on node 1,
        mines a 300-block replacement chain, reconnects the nodes, and
        checks that pruning keeps disk usage below the target.

        Returns:
            (invalidheight, badhash): height and hash of the first
            invalidated block, so the caller can reconsider it later.
        """
        # Node 1 will mine a 300 block chain starting 287 blocks back from Node
        # 0 and Node 2's tip. This will cause Node 2 to do a reorg requiring
        # 288 blocks of undo data to the reorg_test chain. Reboot node 1 to
        # clear its mempool (hopefully make the invalidate faster). Lower the
        # block max size so we don't keep mining all our big mempool
        # transactions (from disconnected blocks)
        self.stop_node(1)
        self.nodes[1] = start_node(
            1,
            self.options.tmpdir, [
                "-maxreceivebuffer=20000", "-blockmaxsize=5000",
                "-checkblocks=5", "-disablesafemode"
            ],
            timewait=900,
            stderr_checker=OutputChecker())

        height = self.nodes[1].getblockcount()
        self.log.info("Current block height: %d" % height)

        # Invalidate 287 blocks below the tip; the reorg will then require
        # 288 blocks of undo data on the following nodes.
        invalidheight = height - 287
        badhash = self.nodes[1].getblockhash(invalidheight)
        self.log.info("Invalidating block %s at height %d" %
                      (badhash, invalidheight))
        self.nodes[1].invalidateblock(badhash)

        # We've now switched to our previously mined-24 block fork on node 1, but thats not what we want.
        # So invalidate that fork as well, until we're on the same chain as
        # node 0/2 (but at an ancestor 288 blocks ago)
        mainchainhash = self.nodes[0].getblockhash(invalidheight - 1)
        curhash = self.nodes[1].getblockhash(invalidheight - 1)
        while curhash != mainchainhash:
            self.nodes[1].invalidateblock(curhash)
            curhash = self.nodes[1].getblockhash(invalidheight - 1)

        assert (self.nodes[1].getblockcount() == invalidheight - 1)
        self.log.info("New best height: %d" % self.nodes[1].getblockcount())

        # Reboot node1 to clear those giant tx's from mempool
        # NOTE(review): this arg list passes both -blockmaxsize=5000 and
        # -blockmaxsize=1000000; confirm which value the option parser
        # honors (typically the last one wins) and drop the redundant flag.
        self.stop_node(1)
        self.nodes[1] = start_node(
            1,
            self.options.tmpdir, [
                "-maxreceivebuffer=20000", "-blockmaxsize=5000",
                "-checkblocks=5", "-disablesafemode", "-blockmaxsize=1000000"
            ],
            timewait=900,
            stderr_checker=OutputChecker())

        self.log.info("Generating new longer chain of 300 more blocks")
        self.nodes[1].generate(300)

        self.log.info("Reconnect nodes")
        connect_nodes(self.nodes[0], 1)
        connect_nodes(self.nodes[2], 1)
        sync_blocks(self.nodes[0:3], timeout=120)

        self.log.info("Verify height on node 2: %d" %
                      self.nodes[2].getblockcount())
        self.log.info(
            "Usage possibly still high bc of stale blocks in block files: %d" %
            calc_usage(self.prunedir))

        self.log.info(
            "Mine 220 more blocks so we have requisite history (some blocks will be big and cause pruning of previous chain)"
        )
        for i in range(22):
            # This can be slow, so do this in multiple RPC calls to avoid
            # RPC timeouts.
            # node 0 has many large tx's in its mempool from the disconnects
            self.nodes[0].generate(10)
        sync_blocks(self.nodes[0:3], timeout=300)

        # Pruning should have kept block-file disk usage under the target.
        usage = calc_usage(self.prunedir)
        self.log.info("Usage should be below target: %d" % usage)
        if (usage > 550):
            raise AssertionError("Pruning target not being met")

        return invalidheight, badhash