Example 1
    def get_tests(self):
        node = self.nodes[0]
        self.chain.set_genesis_hash(int(node.getbestblockhash(), 16))

        # shorthand for the block-creation function
        block = self.chain.next_block

        block(0)
        yield self.accepted()

        test, out, _ = prepare_init_chain(self.chain, 99, 100)

        yield test

        # Sending blocks of exactly the maximal size causes neither disconnection nor banning (we can still reconnect)
        block(1, spend=out[0], block_size=self.excessive_block_size)
        yield self.accepted()

        # Sending an oversized block causes disconnection and banning (we cannot reconnect within the 10-second bantime)
        assert not self.test.test_nodes[0].closed
        block(2, spend=out[1], block_size=self.excessive_block_size + 1)
        # Before sending the oversized block, no node is banned
        assert_equal(len(self.nodes[0].listbanned()), 0)
        self.test.connections[0].send_message(msg_block(self.chain.tip))
        self.test.wait_for_disconnections()
        assert self.test.test_nodes[0].closed  # disconnected
        # Afterwards, the list of banned nodes is no longer empty
        assert len(self.nodes[0].listbanned()) > 0
        logger.info("Banned nodes: {}".format(self.nodes[0].listbanned()))

        # Try to reconnect while still banned: the handshake should time out
        self.restart_network()
        has_been_banned = False
        try:
            self.test.wait_for_verack(5)
        except Exception:
            has_been_banned = True
        assert has_been_banned
        logger.info("Test banning excessive block size: PASS")

        time.sleep(10)  # make sure at least the 10-second bantime has passed
        # The ban list should have been cleared by now
        assert_equal(len(self.nodes[0].listbanned()), 0)
        # Rewind past the bad block and reconnect to the node
        self.chain.set_tip(1)
        self.restart_network()
        self.test.wait_for_verack(5)

        # Check we can still mine a good size block
        block(3, spend=out[1], block_size=self.excessive_block_size)
        yield self.accepted()
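
A note on prepare_init_chain: Example 1 uses this helper to mature the coinbase and collect spendable outputs, while Example 2 below performs the equivalent setup inline. The following is a minimal sketch reconstructed from that inline version; the framework's actual helper (in particular its third return value, which Example 1 ignores) may differ:

    def prepare_init_chain(chain, num_blocks, num_outputs):
        # Mature the coinbase: mine num_blocks blocks, remembering each
        # block's spendable coinbase output as we go.
        test = TestInstance(sync_every_block=False)
        for i in range(num_blocks):
            chain.next_block(5000 + i)
            test.blocks_and_transactions.append([chain.tip, True])
            chain.save_spendable_output()

        # Collect spendable outputs now to avoid cluttering the code later on
        out = [chain.get_spendable_output() for _ in range(num_outputs)]

        # The third return value is assumed here to be the chain tip
        return test, out, chain.tip
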
Example 2

    def get_tests(self):
        node = self.nodes[0]
        self.chain.set_genesis_hash(int(node.getbestblockhash(), 16))

        # shorthand for the block-creation function
        block = self.chain.next_block

        # Create a new block
        block(0)
        self.chain.save_spendable_output()
        yield self.accepted()

        # Now we need that block to mature so we can spend the coinbase.
        test = TestInstance(sync_every_block=False)
        for i in range(99):
            block(5000 + i)
            test.blocks_and_transactions.append([self.chain.tip, True])
            self.chain.save_spendable_output()
        yield test

        # collect spendable outputs now to avoid cluttering the code later on
        out = []
        for i in range(100):
            out.append(self.chain.get_spendable_output())

        # Sending blocks of exactly the maximal size causes neither disconnection nor banning (we can still reconnect)
        block(1, spend=out[0], block_size=self.excessive_block_size)
        yield self.accepted()
        assert not self.test.test_nodes[0].closed
        # Reconnect to confirm we were not banned
        self.test.clear_all_connections()
        self.test.add_all_connections(self.nodes)
        NetworkThread().start()
        self.test.wait_for_verack(5)

        # Sending an oversized block causes disconnection and banning (we cannot reconnect within the 10-second bantime)
        assert not self.test.test_nodes[0].closed
        block(2, spend=out[1], block_size=self.excessive_block_size + 1)
        # Before sending the oversized block, no node is banned
        assert_equal(len(self.nodes[0].listbanned()), 0)
        self.test.connections[0].send_message(msg_block(self.chain.tip))
        self.test.wait_for_disconnections()
        assert self.test.test_nodes[0].closed  # disconnected
        # Afterwards, the list of banned nodes is no longer empty
        assert len(self.nodes[0].listbanned()) > 0
        logger.info("Banned nodes: {}".format(self.nodes[0].listbanned()))

        # Try to reconnect while still banned: the handshake should time out
        self.restart_network()
        has_been_banned = False
        try:
            self.test.wait_for_verack(5)
        except Exception:
            has_been_banned = True
        assert has_been_banned
        logger.info("Test banning excessive block size: PASS")

        time.sleep(10)  # make sure at least the 10-second bantime has passed
        # The ban list should have been cleared by now
        assert_equal(len(self.nodes[0].listbanned()), 0)
        # Rewind past the bad block and reconnect to the node
        self.chain.set_tip(1)
        self.test.clear_all_connections()
        self.test.add_all_connections(self.nodes)
        NetworkThread().start()
        self.test.wait_for_verack(5)

        # Check we can still mine a good size block
        block(3, spend=out[1], block_size=self.excessive_block_size)
        yield self.accepted()
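
A note on restart_network: Example 1 calls self.restart_network() where Example 2 spells the reconnection out by hand. Below is a minimal sketch of such a method, mirroring the inline sequence used in Example 2; it is a reconstruction, not necessarily the framework's actual implementation:

    def restart_network(self):
        # Drop all existing P2P connections, open fresh ones,
        # and restart the network message-handling thread.
        self.test.clear_all_connections()
        self.test.add_all_connections(self.nodes)
        NetworkThread().start()
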
Example 3
    def run_test(self):
        inv_items = []
        block_priority_block_msg_pos = []
        default_block_msg_pos = []
        last_msg_pos = self.num_txns + 1  # position of the final message (num_txns transactions + 1 block)

        # Initial node setup
        extra_args = [
            '-maxnonstdtxvalidationduration=100000',
            '-maxtxnvalidatorasynctasksrunduration=100001'
        ]
        with self.run_node_with_connections("Setup node", 0, extra_args,
                                            1) as connections:
            conn = connections[0]

            # Create and send some transactions to the node
            node = self.nodes[0]
            node.generate(100)
            funding_tx = make_funding_transaction(node)
            tx_generator = transaction_generator(funding_tx)
            for tx in islice(tx_generator, self.num_txns):
                inv_items.append(CInv(1, tx.sha256))  # inv type 1 = TX
                conn.send_message(msg_tx(tx))
            # Wait for every transaction to be accepted into the mempool
            wait_until(lambda: node.getmempoolinfo()['size'] == self.num_txns,
                       timeout=240)

        # Restart node with associations
        associations_stream_policies = [
            BlockPriorityStreamPolicy(),
            DefaultStreamPolicy(),
            BlockPriorityStreamPolicy(),
            DefaultStreamPolicy()
        ]
        extra_args = [
            '-whitelist=127.0.0.1', '-maxnonstdtxvalidationduration=100000',
            '-maxtxnvalidatorasynctasksrunduration=100001'
        ]
        with self.run_node_with_associations(
                "Test block priority",
                0,
                extra_args,
                associations_stream_policies,
                cb_class=MyAssociationCB) as associations:
            # Wait for node to fully reinitialise itself
            node = self.nodes[0]
            wait_until(lambda: node.getmempoolinfo()['size'] == self.num_txns,
                       timeout=180)

            # Send MEMPOOL request so node will accept our GETDATA for transactions in the mempool
            for association in associations:
                association.send_message(msg_mempool())
                # This request will result in us requesting all the txns. Wait until that finishes and
                # then reset our message counts in preparation for the real test to come.
                wait_until(
                    lambda: association.callbacks.msg_count == self.num_txns)
                association.callbacks.reset_msg_counts()

            # Send GETDATA to request txns and a block, with the block as the last item in the list
            blockhash = int(node.getbestblockhash(), 16)
            inv_items.append(CInv(2, blockhash))  # inv type 2 = BLOCK
            for association in associations:
                association.send_message(msg_getdata(inv_items))

            # Wait for all GETDATA requests to have a response
            for association in associations:
                wait_until(lambda: association.callbacks.block_count == 1)

                # Remember at what position we got the block msg for the different policies
                policy = association.stream_policy
                if type(policy) is BlockPriorityStreamPolicy:
                    block_priority_block_msg_pos.append(
                        association.callbacks.block_msg_position)
                    logger.info("BlockPriority policy block received at position {}".format(
                        association.callbacks.block_msg_position))
                elif type(policy) is DefaultStreamPolicy:
                    default_block_msg_pos.append(
                        association.callbacks.block_msg_position)
                    logger.info("Default policy block received at position {}".format(
                        association.callbacks.block_msg_position))

            # For the DEFAULT policy, the block will have been received last (because it was requested last)
            for pos in default_block_msg_pos:
                assert_equal(pos, last_msg_pos)
            # For the BLOCKPRIORITY policy, the block should have been received sooner (this is possibly
            # slightly racy, but it's been very safe on all systems I've tried it on)
            avg_pos = sum(block_priority_block_msg_pos) / len(
                block_priority_block_msg_pos)
            assert_greater_than(last_msg_pos, avg_pos)

            # Generate a new block to trigger a block INV and wait for the INV
            node.generate(1)
            for association in associations:
                wait_until(lambda: association.callbacks.block_inv_stream_type
                           != StreamType.UNKNOWN)

                # Verify that BlockPriority associations got the block INV over the high-priority stream
                if type(association.stream_policy) is BlockPriorityStreamPolicy:
                    assert_equal(association.callbacks.block_inv_stream_type,
                                 StreamType.DATA1)
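
A note on MyAssociationCB: the example reads several counters from association.callbacks without showing the class itself. The sketch below is a purely illustrative reconstruction inferred from those reads; the hook names (on_tx, on_block, on_block_inv), the local StreamType stand-in, and the callback contract are all assumptions, since the real class plugs into the test framework's association callback machinery:

    from enum import Enum

    class StreamType(Enum):  # stand-in for the framework's StreamType
        UNKNOWN = 0
        GENERAL = 1
        DATA1 = 2

    class MyAssociationCB:
        def __init__(self):
            self.msg_count = 0            # TX/BLOCK messages received so far
            self.block_count = 0          # BLOCK messages received so far
            self.block_msg_position = -1  # 1-based arrival position of the block msg
            self.block_inv_stream_type = StreamType.UNKNOWN

        def reset_msg_counts(self):
            self.msg_count = 0
            self.block_count = 0

        def on_tx(self, stream_type, message):
            self.msg_count += 1

        def on_block(self, stream_type, message):
            self.msg_count += 1
            self.block_count += 1
            self.block_msg_position = self.msg_count

        def on_block_inv(self, stream_type, message):
            # Remember which stream delivered the block INV
            self.block_inv_stream_type = stream_type

With num_txns transactions requested before the block, a block that is served strictly in request order arrives at position num_txns + 1, which is exactly the last_msg_pos the test asserts for the DEFAULT policy.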