Example 1
    def test_height_min(self):
        if not os.path.isfile(os.path.join(self.prunedir, "blk00000.dat")):
            raise AssertionError("blk00000.dat is missing, pruning too early")
        self.log.info("Success")
        self.log.info(
            "Though we're already using more than 550MiB, current usage: {}".
            format(calc_usage(self.prunedir)))
        self.log.info(
            "Mining 25 more blocks should cause the first block file to be pruned"
        )
        # Pruning doesn't run until we're allocating another chunk; 20 full
        # blocks past the height cutoff will ensure this
        for i in range(25):
            mine_big_block(self.nodes[0], self.utxo_cache_0)

        # Wait for blk00000.dat to be pruned
        wait_until(lambda: not os.path.isfile(
            os.path.join(self.prunedir, "blk00000.dat")),
                   timeout=30)

        self.log.info("Success")
        usage = calc_usage(self.prunedir)
        self.log.info("Usage should be below target: {}".format(usage))
        if usage > 550:
            raise AssertionError("Pruning target not being met")
Example 2
    def create_chain_with_staleblocks(self):
        # Create stale blocks in manageable sized chunks
        self.log.info(
            "Mine 24 (stale) blocks on Node 1, followed by 25 (main chain) block reorg from Node 0, for 12 rounds")

        for j in range(12):
            # Disconnect node 0 so it can mine a longer reorg chain without
            # knowing about node 1's soon-to-be-stale chain
            # Node 2 stays connected, so it hears about the stale blocks and
            # then reorgs when node 0 reconnects
            # Stopping node 0 also clears its mempool, so it doesn't have
            # node 1's transactions to accidentally mine
            self.stop_node(0)
            self.start_node(0, extra_args=self.full_node_default_args)
            # Mine 24 blocks in node 1
            for i in range(24):
                if j == 0:
                    mine_big_block(self.nodes[1], self.utxo_cache_1)
                else:
                    # Add node 1's wallet transactions back to the mempool,
                    # to keep the mined blocks from being too small.
                    self.nodes[1].resendwallettransactions()
                    # txs already in the mempool from previous disconnects
                    self.nodes[1].generate(1)

            # Reorg back with 25 block chain from node 0
            for i in range(25):
                mine_big_block(self.nodes[0], self.utxo_cache_0)

            # Create connections in this order so both nodes see the reorg
            # at the same time
            connect_nodes(self.nodes[1], self.nodes[0])
            connect_nodes(self.nodes[2], self.nodes[0])
            sync_blocks(self.nodes[0:3])

        self.log.info("Usage can be over target because of high stale rate: {}".format(
                      calc_usage(self.prunedir)))
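
Both this and the other pruning examples report disk usage through calc_usage. A plausible minimal version, assuming the helper simply sums the sizes of the files in the prune directory and reports MiB (the framework's actual implementation may differ in detail):

    import os

    def calc_usage(blockdir):
        # Total size, in MiB, of the files directly inside the block directory.
        return sum(
            os.path.getsize(os.path.join(blockdir, f))
            for f in os.listdir(blockdir)
            if os.path.isfile(os.path.join(blockdir, f))) / (1024 * 1024)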
Example 3
    def test_height_min(self):
        if not os.path.isfile(os.path.join(self.prunedir, "blk00000.dat")):
            raise AssertionError("blk00000.dat is missing, pruning too early")
        self.log.info("Success")
        self.log.info(
            "Though we're already using more than 550MiB, current usage: %d" %
            calc_usage(self.prunedir))
        self.log.info(
            "Mining 25 more blocks should cause the first block file to be pruned"
        )
        # Pruning doesn't run until we're allocating another chunk; 20 full
        # blocks past the height cutoff will ensure this
        for i in range(25):
            mine_big_block(self.nodes[0], self.utxo_cache_0)

        waitstart = time.time()
        while os.path.isfile(os.path.join(self.prunedir, "blk00000.dat")):
            time.sleep(0.1)
            if time.time() - waitstart > 30:
                raise AssertionError(
                    "blk00000.dat not pruned when it should be")

        self.log.info("Success")
        usage = calc_usage(self.prunedir)
        self.log.info("Usage should be below target: %d" % usage)
        if usage > 550:
            raise AssertionError("Pruning target not being met")
Example 4
    def create_big_chain(self):
        # Start by creating some coinbases we can spend later
        self.nodes[1].generate(200)
        sync_blocks(self.nodes[0:2])
        self.nodes[0].generate(150)
        # Then mine enough full blocks to create more than 550MiB of data
        for i in range(645):
            mine_big_block(self.nodes[0], self.utxo_cache_0)

        sync_blocks(self.nodes[0:5])
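
A quick check on the sizing in create_big_chain, assuming mine_big_block yields blocks of roughly 0.9 MB each (the exact size depends on the framework's transaction padding):

    # 645 big blocks at ~0.9 MB each comfortably exceed the 550 MiB target:
    approx_bytes = 645 * 900000                 # = 580,500,000 bytes (~553 MiB)
    assert approx_bytes > 550 * 1024 * 1024     # 550 MiB = 576,716,800 bytes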
Example 5
    def run_test(self):
        # Before we connect anything, we first set the time on the node
        # to be in the past, otherwise things break because the CNode
        # time counters can't be reset backward after initialization
        old_time = int(time.time() - 2 * 60 * 60 * 24 * 7)
        self.nodes[0].setmocktime(old_time)

        # Generate some old blocks
        self.nodes[0].generate(130)

        # p2p_conns[0] will only request old blocks
        # p2p_conns[1] will only request new blocks
        # p2p_conns[2] will test resetting the counters
        p2p_conns = []

        for _ in range(3):
            p2p_conns.append(self.nodes[0].add_p2p_connection(TestP2PConn()))

        # Now mine a big block
        mine_big_block(self.nodes[0], self.utxo_cache)

        # Store the hash; we'll request this later
        big_old_block = self.nodes[0].getbestblockhash()
        old_block_size = self.nodes[0].getblock(big_old_block, True)['size']
        big_old_block = int(big_old_block, 16)

        # Advance to two days ago
        self.nodes[0].setmocktime(int(time.time()) - 2 * 60 * 60 * 24)

        # Mine one more block, so that the prior block looks old
        mine_big_block(self.nodes[0], self.utxo_cache)

        # We'll be requesting this new block too
        big_new_block = self.nodes[0].getbestblockhash()
        big_new_block = int(big_new_block, 16)

        # p2p_conns[0] will test what happens if we just keep requesting the
        # same big old block too many times (expect: disconnect)

        getdata_request = msg_getdata()
        getdata_request.inv.append(CInv(MSG_BLOCK, big_old_block))

        max_bytes_per_day = 200 * 1024 * 1024
        daily_buffer = 144 * LEGACY_MAX_BLOCK_SIZE
        max_bytes_available = max_bytes_per_day - daily_buffer
        success_count = max_bytes_available // old_block_size

        # 144MB will be reserved for relaying new blocks, so expect this to
        # succeed for ~70 tries.
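        # Worked numbers, assuming LEGACY_MAX_BLOCK_SIZE == 1,000,000 bytes:
        #   max_bytes_per_day   = 200 * 1024 * 1024 = 209,715,200 bytes
        #   daily_buffer        = 144 * 1,000,000   = 144,000,000 bytes
        #   max_bytes_available =                      65,715,200 bytes
        # With big blocks of roughly 0.9 MB each, about 70 getdata requests
        # fit in the remaining budget.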
        for i in range(success_count):
            p2p_conns[0].send_and_ping(getdata_request)
            assert_equal(p2p_conns[0].block_receive_map[big_old_block], i + 1)

        assert_equal(len(self.nodes[0].getpeerinfo()), 3)
        # At most a couple more tries should succeed (depending on how long
        # the test has been running so far).
        for i in range(3):
            p2p_conns[0].send_message(getdata_request)
        p2p_conns[0].wait_for_disconnect()
        assert_equal(len(self.nodes[0].getpeerinfo()), 2)
        self.log.info(
            "Peer 0 disconnected after downloading old block too many times")

        # Requesting the current block on p2p_conns[1] should succeed indefinitely,
        # even when over the max upload target.
        # We'll try 200 times
        getdata_request.inv = [CInv(MSG_BLOCK, big_new_block)]
        for i in range(200):
            p2p_conns[1].send_and_ping(getdata_request)
            assert_equal(p2p_conns[1].block_receive_map[big_new_block], i + 1)

        self.log.info("Peer 1 able to repeatedly download new block")

        # But if p2p_conns[1] tries for an old block, it gets disconnected
        # too.
        getdata_request.inv = [CInv(MSG_BLOCK, big_old_block)]
        p2p_conns[1].send_message(getdata_request)
        p2p_conns[1].wait_for_disconnect()
        assert_equal(len(self.nodes[0].getpeerinfo()), 1)

        self.log.info("Peer 1 disconnected after trying to download old block")

        self.log.info("Advancing system time on node to clear counters...")

        # If we advance the time by 24 hours, then the counters should reset,
        # and p2p_conns[2] should be able to retrieve the old block.
        self.nodes[0].setmocktime(int(time.time()))
        p2p_conns[2].sync_with_ping()
        p2p_conns[2].send_and_ping(getdata_request)
        assert_equal(p2p_conns[2].block_receive_map[big_old_block], 1)

        self.log.info("Peer 2 able to download old block")

        self.nodes[0].disconnect_p2ps()

        self.log.info("Restarting node 0 with download permission"
                      " and 1MB maxuploadtarget")
        self.restart_node(0, [
            "[email protected]", "-maxuploadtarget=1",
            "-blockmaxsize=999000"
        ])

        # Reconnect to self.nodes[0]
        self.nodes[0].add_p2p_connection(TestP2PConn())

        # retrieve 20 blocks which should be enough to break the 1MB limit
        getdata_request.inv = [CInv(MSG_BLOCK, big_new_block)]
        for i in range(20):
            self.nodes[0].p2p.send_and_ping(getdata_request)
            assert_equal(self.nodes[0].p2p.block_receive_map[big_new_block],
                         i + 1)

        getdata_request.inv = [CInv(MSG_BLOCK, big_old_block)]
        self.nodes[0].p2p.send_and_ping(getdata_request)

        self.log.info(
            "Peer still connected after trying to download old block (download permission)"
        )
        peer_info = self.nodes[0].getpeerinfo()
        # node is still connected
        assert_equal(len(peer_info), 1)
        assert_equal(peer_info[0]['permissions'], ['download'])
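
Both upload-target examples count served blocks through block_receive_map on the test connection. A minimal sketch of what such a connection class can look like, assuming the framework's P2PInterface base class and its on_inv/on_block message hooks:

    from collections import defaultdict

    class TestP2PConn(P2PInterface):
        def __init__(self):
            super().__init__()
            self.block_receive_map = defaultdict(int)

        def on_inv(self, message):
            # Ignore inv announcements; blocks are requested explicitly.
            pass

        def on_block(self, message):
            # Count each received block, keyed by its hash as an integer,
            # matching the int(block_hash, 16) keys used in the tests.
            message.block.calc_sha256()
            self.block_receive_map[message.block.sha256] += 1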
Example 6
    def run_test(self):
        # Before we connect anything, we first set the time on the node
        # to be in the past, otherwise things break because the CNode
        # time counters can't be reset backward after initialization
        old_time = int(time.time() - 2 * 60 * 60 * 24 * 7)
        self.nodes[0].setmocktime(old_time)

        # Generate some old blocks
        self.nodes[0].generate(130)

        # test_nodes[0] will only request old blocks
        # test_nodes[1] will only request new blocks
        # test_nodes[2] will test resetting the counters
        test_nodes = []
        connections = []

        for i in range(3):
            test_nodes.append(TestNode())
            connections.append(
                NodeConn('127.0.0.1', p2p_port(0), self.nodes[0],
                         test_nodes[i]))
            test_nodes[i].add_connection(connections[i])

        # Start up network handling in another thread
        NetworkThread().start()
        [x.wait_for_verack() for x in test_nodes]

        # Test logic begins here

        # Now mine a big block
        mine_big_block(self.nodes[0], self.utxo_cache)

        # Store the hash; we'll request this later
        big_old_block = self.nodes[0].getbestblockhash()
        old_block_size = self.nodes[0].getblock(big_old_block, True)['size']
        big_old_block = int(big_old_block, 16)

        # Advance to two days ago
        self.nodes[0].setmocktime(int(time.time()) - 2 * 60 * 60 * 24)

        # Mine one more block, so that the prior block looks old
        mine_big_block(self.nodes[0], self.utxo_cache)

        # We'll be requesting this new block too
        big_new_block = self.nodes[0].getbestblockhash()
        big_new_block = int(big_new_block, 16)

        # test_nodes[0] will test what happens if we just keep requesting the
        # same big old block too many times (expect: disconnect)

        getdata_request = msg_getdata()
        getdata_request.inv.append(CInv(2, big_old_block))

        max_bytes_per_day = 200 * 1024 * 1024
        daily_buffer = 144 * LEGACY_MAX_BLOCK_SIZE
        max_bytes_available = max_bytes_per_day - daily_buffer
        success_count = max_bytes_available // old_block_size

        # 144MB will be reserved for relaying new blocks, so expect this to
        # succeed for ~70 tries.
        for i in range(success_count):
            test_nodes[0].send_message(getdata_request)
            test_nodes[0].sync_with_ping()
            assert_equal(test_nodes[0].block_receive_map[big_old_block], i + 1)

        assert_equal(len(self.nodes[0].getpeerinfo()), 3)
        # At most a couple more tries should succeed (depending on how long
        # the test has been running so far).
        for i in range(3):
            test_nodes[0].send_message(getdata_request)
        test_nodes[0].wait_for_disconnect()
        assert_equal(len(self.nodes[0].getpeerinfo()), 2)
        self.log.info(
            "Peer 0 disconnected after downloading old block too many times")

        # Requesting the current block on test_nodes[1] should succeed indefinitely,
        # even when over the max upload target.
        # We'll try 200 times
        getdata_request.inv = [CInv(2, big_new_block)]
        for i in range(200):
            test_nodes[1].send_message(getdata_request)
            test_nodes[1].sync_with_ping()
            assert_equal(test_nodes[1].block_receive_map[big_new_block], i + 1)

        self.log.info("Peer 1 able to repeatedly download new block")

        # But if test_nodes[1] tries for an old block, it gets disconnected
        # too.
        getdata_request.inv = [CInv(2, big_old_block)]
        test_nodes[1].send_message(getdata_request)
        test_nodes[1].wait_for_disconnect()
        assert_equal(len(self.nodes[0].getpeerinfo()), 1)

        self.log.info("Peer 1 disconnected after trying to download old block")

        self.log.info("Advancing system time on node to clear counters...")

        # If we advance the time by 24 hours, then the counters should reset,
        # and test_nodes[2] should be able to retrieve the old block.
        self.nodes[0].setmocktime(int(time.time()))
        test_nodes[2].sync_with_ping()
        test_nodes[2].send_message(getdata_request)
        test_nodes[2].sync_with_ping()
        assert_equal(test_nodes[2].block_receive_map[big_old_block], 1)

        self.log.info("Peer 2 able to download old block")

        [c.disconnect_node() for c in connections]

        # stop and start node 0 with 1MB maxuploadtarget, whitelist 127.0.0.1
        self.log.info("Restarting nodes with -whitelist=127.0.0.1")
        self.stop_node(0)
        self.start_node(0, [
            "-whitelist=127.0.0.1", "-maxuploadtarget=1",
            "-blockmaxsize=999000"
        ])

        # recreate/reconnect a test node
        test_nodes = [TestNode()]
        connections = [
            NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], test_nodes[0])
        ]
        test_nodes[0].add_connection(connections[0])

        NetworkThread().start()  # Start up network handling in another thread
        test_nodes[0].wait_for_verack()

        # retrieve 20 blocks which should be enough to break the 1MB limit
        getdata_request.inv = [CInv(2, big_new_block)]
        for i in range(20):
            test_nodes[0].send_message(getdata_request)
            test_nodes[0].sync_with_ping()
            assert_equal(test_nodes[0].block_receive_map[big_new_block], i + 1)

        getdata_request.inv = [CInv(2, big_old_block)]
        test_nodes[0].send_and_ping(getdata_request)
        # node is still connected because of the whitelist
        assert_equal(len(self.nodes[0].getpeerinfo()), 1)

        self.log.info(
            "Peer still connected after trying to download old block (whitelisted)"
        )
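
As a sanity check on the final phase of both run_test variants, assuming -maxuploadtarget is expressed in MiB per 24 hours as in Bitcoin Core:

    # -maxuploadtarget=1 leaves a daily serving budget of only ~1 MiB.
    max_bytes_per_day = 1 * 1024 * 1024   # 1,048,576 bytes
    served = 20 * 1000000                 # 20 requests for a ~1 MB block
    assert served > max_bytes_per_day
    # The peer is served anyway: the whitelist / download permission exempts
    # it from the upload target, which is exactly what the tests assert.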