Example #1
    def snapshot_test(self, ignite_version):
        """
        Basic snapshot test.
        """
        version = IgniteVersion(ignite_version)

        ignite_config = IgniteConfiguration(
            version=version,
            data_storage=DataStorageConfiguration(default=DataRegionConfiguration(persistent=True)),
            metric_exporter='org.apache.ignite.spi.metric.jmx.JmxMetricExporterSpi'
        )

        nodes = IgniteService(self.test_context, ignite_config, num_nodes=len(self.test_context.cluster) - 1)
        nodes.start()

        control_utility = ControlUtility(nodes)
        control_utility.activate()

        loader_config = IgniteConfiguration(client_mode=True, version=version, discovery_spi=from_ignite_cluster(nodes))

        loader = IgniteApplicationService(
            self.test_context,
            loader_config,
            java_class_name="org.apache.ignite.internal.ducktest.tests.snapshot_test.DataLoaderApplication",
            params={"start": 0, "cacheName": self.CACHE_NAME, "interval": 500_000, "valueSizeKb": 1}
        )
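The snippet is cut off right after the loader is created. A minimal sketch of how such a test might continue, assuming the loader is simply run to completion and that ControlUtility exposes a snapshot_create helper wrapping `control.sh --snapshot create` (both are assumptions, not taken from the snippet):

        loader.run()  # assumption: run DataLoaderApplication until it finishes

        # Assumption: ControlUtility wraps `control.sh --snapshot create <name>`.
        control_utility.snapshot_create("test_snapshot")

        control_utility.idle_verify()  # same consistency check as used in Example #11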
Example #2
    def upgrade_test(self, versions, ignite_version):
        """
        Basic upgrade test.
        """
        versions = sorted(list(map(IgniteVersion, versions)))

        self.logger.info(f"Testing: {versions}")

        service = IgniteApplicationService(
            self.test_context,
            config=None,  # will be defined later.
            java_class_name="org.apache.ignite.internal.ducktest.tests.persistence_upgrade_test."
                            "DataLoaderAndCheckerApplication"
        )

        for version in versions:
            service.config = IgniteConfiguration(
                data_storage=DataStorageConfiguration(default=DataRegionConfiguration(persistence_enabled=True)),
                version=version
            )

            service.params = {"check": service.stopped}  # first iteration loads data, later iterations verify it on the newer version

            service.start(clean=False)

            control_utility = ControlUtility(service)
            control_utility.activate()

            service.stop()
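Such an upgrade test is typically driven by a version matrix supplied through a decorator. A hypothetical parametrization using ducktape's @parametrize is sketched below; the class name, base class and version strings are illustrative assumptions, not taken from the snippet:

from ducktape.mark import parametrize

class PersistenceUpgradeTest(IgniteTest):  # hypothetical class name; IgniteTest comes from the ducktests framework
    """Sketch: how upgrade_test above might be parametrized."""

    @parametrize(versions=["2.9.1", "2.11.1"], ignite_version="2.11.1")
    def upgrade_test(self, versions, ignite_version):
        ...  # body as shown in Example #2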
Example #3
    def test_baseline_autoadjust(self, ignite_version):
        """
        Test baseline auto adjustment.
        """
        blt_size = self.NUM_NODES - 2
        servers = self.__start_ignite_nodes(ignite_version, blt_size)

        control_utility = ControlUtility(servers)
        control_utility.activate()

        # Add node.
        control_utility.enable_baseline_auto_adjust(2000)
        new_node = self.__start_ignite_nodes(ignite_version,
                                             1,
                                             join_cluster=servers)
        blt_size += 1

        wait_until(lambda: len(control_utility.baseline()) == blt_size,
                   timeout_sec=5)

        baseline = control_utility.baseline()
        self.__check_nodes_in_baseline(new_node.nodes, baseline)

        # Add node when auto adjust disabled.
        control_utility.disable_baseline_auto_adjust()
        old_topology = control_utility.cluster_state().topology_version
        new_node = self.__start_ignite_nodes(ignite_version,
                                             1,
                                             join_cluster=servers)

        wait_until(lambda: control_utility.cluster_state().topology_version !=
                   old_topology,
                   timeout_sec=5)
        baseline = control_utility.baseline()
        self.__check_nodes_not_in_baseline(new_node.nodes, baseline)
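Examples #3, #4, #8 and #9 call private helpers that are not included in the listing. A minimal sketch of what they might look like, reconstructed from the call sites only (the method bodies, and the consistent_id attribute, are assumptions):

    def __start_ignite_nodes(self, version, num_nodes, join_cluster=None):
        # Assumption: persistence is enabled so that the baseline topology applies.
        config = IgniteConfiguration(
            cluster_state="INACTIVE",
            version=IgniteVersion(version),
            data_storage=DataStorageConfiguration(default=DataRegionConfiguration(persistence_enabled=True)))

        if join_cluster:
            # Join the already running cluster instead of forming a new one.
            config = config._replace(discovery_spi=from_ignite_cluster(join_cluster))

        servers = IgniteService(self.test_context, config, num_nodes=num_nodes)
        servers.start()

        return servers

    @staticmethod
    def __check_baseline_size(baseline, size):
        assert len(baseline) == size, f"Unexpected baseline size: {len(baseline)}, expected: {size}"

    @staticmethod
    def __check_nodes_in_baseline(nodes, baseline):
        # Assumption: baseline entries and cluster nodes both expose a consistent_id.
        baseline_ids = {item.consistent_id for item in baseline}

        for node in nodes:
            assert node.consistent_id in baseline_ids

    @staticmethod
    def __check_nodes_not_in_baseline(nodes, baseline):
        baseline_ids = {item.consistent_id for item in baseline}

        for node in nodes:
            assert node.consistent_id not in baseline_ids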
Example #4
    def test_baseline_set(self, ignite_version):
        """
        Test baseline set.
        """
        blt_size = self.NUM_NODES - 2
        servers = self.__start_ignite_nodes(ignite_version, blt_size)

        control_utility = ControlUtility(servers)
        control_utility.activate()

        # Check baseline of activated cluster.
        baseline = control_utility.baseline()
        self.__check_baseline_size(baseline, blt_size)
        self.__check_nodes_in_baseline(servers.nodes, baseline)

        # Set baseline using a list of consistent IDs.
        new_node = self.__start_ignite_nodes(ignite_version, 1, join_cluster=servers)
        control_utility.set_baseline(servers.nodes + new_node.nodes)
        blt_size += 1

        baseline = control_utility.baseline()
        self.__check_baseline_size(baseline, blt_size)
        self.__check_nodes_in_baseline(new_node.nodes, baseline)

        # Set baseline using topology version.
        new_node = self.__start_ignite_nodes(ignite_version, 1, join_cluster=servers)
        _, version, _ = control_utility.cluster_state()
        control_utility.set_baseline(version)
        blt_size += 1

        baseline = control_utility.baseline()
        self.__check_baseline_size(baseline, blt_size)
        self.__check_nodes_in_baseline(new_node.nodes, baseline)
Example #5
    def test_node_join(self, ignite_version, backups, cache_count, entry_count,
                       entry_size, preloaders, thread_pool_size, batch_size,
                       batches_prefetch_count, throttle):
        """
        Tests rebalance on node join.
        """

        reb_params = RebalanceParams(
            trigger_event=TriggerEvent.NODE_JOIN,
            backups=backups,
            cache_count=cache_count,
            entry_count=entry_count,
            entry_size=entry_size,
            preloaders=preloaders,
            thread_pool_size=thread_pool_size,
            batch_size=batch_size,
            batches_prefetch_count=batches_prefetch_count,
            throttle=throttle,
            persistent=True)

        ignites = start_ignite(self.test_context, ignite_version, reb_params)

        control_utility = ControlUtility(ignites)

        control_utility.activate()

        preload_time = preload_data(
            self.test_context,
            ignites.config._replace(
                client_mode=True, discovery_spi=from_ignite_cluster(ignites)),
            rebalance_params=reb_params)

        new_node = IgniteService(
            self.test_context,
            ignites.config._replace(
                discovery_spi=from_ignite_cluster(ignites)),
            num_nodes=1)
        new_node.start()

        control_utility.add_to_baseline(new_node.nodes)

        await_and_check_rebalance(new_node)

        nodes = ignites.nodes.copy()

        nodes.append(new_node.nodes[0])

        result = get_result(new_node.nodes, preload_time, cache_count,
                            entry_count, entry_size)

        control_utility.deactivate()

        self.logger.debug(
            f'DB size after rebalance: {get_database_size_mb(nodes, ignites.database_dir)}'
        )

        return result
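get_database_size_mb (and get_result) come from the shared rebalance test utilities and are not part of the listing. A minimal sketch of what get_database_size_mb could look like, assuming sizes are gathered with du over each node's database directory (the body is an assumption; only the name and arguments come from the calls above):

def get_database_size_mb(nodes, database_dir):
    """Return the size of the Ignite database directory, in megabytes, per node."""
    sizes = {}

    for node in nodes:
        # Assumption: ducktape cluster nodes allow running shell commands via account.ssh_output.
        out = node.account.ssh_output(f"du -sm {database_dir} | cut -f1")
        sizes[node.account.hostname] = int(out.decode().strip())

    return sizes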
Example #6
    def test_node_left(self, ignite_version, backups, cache_count, entry_count,
                       entry_size, preloaders, thread_pool_size, batch_size,
                       batches_prefetch_count, throttle):
        """
        Tests rebalance on node left.
        """

        reb_params = RebalanceParams(
            trigger_event=TriggerEvent.NODE_LEFT,
            backups=backups,
            cache_count=cache_count,
            entry_count=entry_count,
            entry_size=entry_size,
            preloaders=preloaders,
            thread_pool_size=thread_pool_size,
            batch_size=batch_size,
            batches_prefetch_count=batches_prefetch_count,
            throttle=throttle,
            persistent=True)

        ignites = start_ignite(self.test_context, ignite_version, reb_params)

        control_utility = ControlUtility(ignites)

        control_utility.activate()

        preload_time = preload_data(
            self.test_context,
            ignites.config._replace(
                client_mode=True, discovery_spi=from_ignite_cluster(ignites)),
            rebalance_params=reb_params)

        self.logger.debug(
            f'DB size before rebalance: {get_database_size_mb(ignites.nodes, ignites.database_dir)}'
        )

        node = ignites.nodes[-1]

        ignites.stop_node(node)
        assert ignites.wait_node(node)

        control_utility.remove_from_baseline([node])

        await_and_check_rebalance(ignites)

        result = get_result(ignites.nodes[:-1], preload_time, cache_count,
                            entry_count, entry_size)

        control_utility.deactivate()

        self.logger.debug(
            f'DB size after rebalance: {get_database_size_mb(ignites.nodes, ignites.database_dir)}'
        )

        return result
Example #7
    def test_server_config_options(self, ignite_version):
        """
        Test to make sure a non-default, non-trivial Ignite node configuration XML file is generated correctly.
        """
        ignite = IgniteService(self.test_context,
                               get_server_config(ignite_version),
                               1,
                               jvm_opts="-DCELL=1")
        ignite.start()

        control_utility = ControlUtility(ignite)
        control_utility.activate()

        ignite.stop()
Example #8
    def test_baseline_add_remove(self, ignite_version):
        """
        Test add and remove nodes from baseline.
        """
        blt_size = self.NUM_NODES - 1
        servers = self.__start_ignite_nodes(ignite_version, blt_size)

        control_utility = ControlUtility(servers)

        control_utility.activate()

        # Add node to baseline.
        new_node = self.__start_ignite_nodes(ignite_version,
                                             1,
                                             join_cluster=servers)
        control_utility.add_to_baseline(new_node.nodes)
        blt_size += 1

        baseline = control_utility.baseline()
        self.__check_baseline_size(baseline, blt_size)
        self.__check_nodes_in_baseline(new_node.nodes, baseline)

        # Expected failure (removing an online node from the baseline is not allowed).
        try:
            control_utility.remove_from_baseline(new_node.nodes)

            assert False, "Removal of an online node from the baseline should fail!"
        except ControlUtilityError:
            pass

        # Remove an offline node from the baseline.
        new_node.stop()

        servers.await_event("Node left topology",
                            timeout_sec=30,
                            from_the_beginning=True)

        control_utility.remove_from_baseline(new_node.nodes)
        blt_size -= 1

        baseline = control_utility.baseline()
        self.__check_baseline_size(baseline, blt_size)
        self.__check_nodes_not_in_baseline(new_node.nodes, baseline)
Example #9
    def test_activate_deactivate(self, ignite_version):
        """
        Test activate and deactivate cluster.
        """
        servers = self.__start_ignite_nodes(ignite_version, self.NUM_NODES)

        control_utility = ControlUtility(servers)

        control_utility.activate()

        state, _, _ = control_utility.cluster_state()

        assert state.lower() == 'active', 'Unexpected state %s' % state

        control_utility.deactivate()

        state, _, _ = control_utility.cluster_state()

        assert state.lower() == 'inactive', 'Unexpected state %s' % state
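cluster_state() is unpacked into three values here and accessed as .topology_version in Example #3. A minimal sketch of the return shape this implies, assuming a namedtuple (the third field name is an assumption):

from collections import namedtuple

# Shape inferred from `state, _, _ = control_utility.cluster_state()` and
# `control_utility.cluster_state().topology_version`; "baseline" is a guess.
ClusterState = namedtuple("ClusterState", ["state", "topology_version", "baseline"])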
Example #10
    def node_join_historical_test(self, ignite_version, backups, cache_count,
                                  entry_count, entry_size, preloaders,
                                  thread_pool_size, batch_size,
                                  batches_prefetch_count, throttle):
        """
        Test historical rebalance.
        """

        preload_entries = 10_000

        reb_params = RebalanceParams(
            trigger_event=TriggerEvent.NODE_JOIN,
            backups=backups,
            cache_count=cache_count,
            entry_count=entry_count,
            entry_size=entry_size,
            preloaders=preloaders,
            thread_pool_size=thread_pool_size,
            batch_size=batch_size,
            batches_prefetch_count=batches_prefetch_count,
            throttle=throttle,
            persistent=True,
            # Force historical (WAL-based) rebalance instead of full rebalance.
            jvm_opts=[
                '-DIGNITE_PDS_WAL_REBALANCE_THRESHOLD=0',
                '-DIGNITE_PREFER_WAL_REBALANCE=true'
            ])

        ignites = start_ignite(self.test_context, ignite_version, reb_params)

        control_utility = ControlUtility(ignites)
        control_utility.activate()

        preloader_config = ignites.config._replace(
            client_mode=True, discovery_spi=from_ignite_cluster(ignites))

        preloader = IgniteApplicationService(
            self.test_context,
            preloader_config,
            java_class_name="org.apache.ignite.internal.ducktest.tests.rebalance."
                            "DataGenerationApplication",
            params={
                "backups": 1,
                "cacheCount": 1,
                "entrySize": 1,
                "from": 0,
                "to": preload_entries
            })

        preloader.run()
        preloader.free()

        control_utility.deactivate()
        control_utility.activate()

        node = ignites.nodes[-1]

        ignites.stop_node(node)
        assert ignites.wait_node(node)

        preload_time = preload_data(
            self.test_context,
            ignites.config._replace(
                client_mode=True, discovery_spi=from_ignite_cluster(ignites)),
            rebalance_params=reb_params)

        control_utility.deactivate()
        control_utility.activate()

        self.logger.debug(
            f'DB size before rebalance: {get_database_size_mb(ignites.nodes, ignites.database_dir)}'
        )

        ignites.start_node(node)
        ignites.await_started()

        rebalance_nodes = [node]

        await_and_check_rebalance(ignites, rebalance_nodes, False)

        result = get_result(rebalance_nodes, preload_time, cache_count,
                            entry_count, entry_size)

        control_utility.deactivate()

        self.logger.debug(
            f'DB size after rebalance: {get_database_size_mb(ignites.nodes, ignites.database_dir)}'
        )

        return result
Example #11
    def test_logging(self, ignite_version):
        """
        Tests that logging goes to the correct file (consistency.log) when the default AI config is used.
        """
        cfg_filename = "ignite-default-log4j.xml"

        ignites = IgniteApplicationService(
            self.test_context,
            IgniteConfiguration(
                version=IgniteVersion(ignite_version),
                cluster_state="INACTIVE",
                properties=self.PROPERTIES,
                log4j_config=cfg_filename  # default AI config (copied and patched below)
            ),
            java_class_name="org.apache.ignite.internal.ducktest.tests.control_utility.InconsistentNodeApplication",
            params={
                "cacheName": self.CACHE_NAME,
                "amount": 1024,
                "parts": 1,
                "tx": False
            },
            startup_timeout_sec=180,
            num_nodes=len(self.test_context.cluster))

        for node in ignites.nodes:  # copying default AI config with log path replacement
            ignites.init_persistent(node)

            cfg_file = f"{ignites.config_dir}/{cfg_filename}"

            ignites.exec_command(node, f"cp {ignites.home_dir}/config/ignite-log4j.xml {cfg_file}")

            orig = "${IGNITE_HOME}/work/log".replace('/', '\\/')
            fixed = ignites.log_dir.replace('/', '\\/')

            ignites.exec_command(node, f"sed -i 's/{orig}/{fixed}/g' {cfg_file}")

        ignites.start()

        control_utility = ControlUtility(ignites)

        control_utility.activate()

        ignites.await_event("APPLICATION_STREAMING_FINISHED", 60, from_the_beginning=True)

        try:
            control_utility.idle_verify()  # making sure we have broken data
            raise IgniteExecutionException("Fail.")
        except AssertionError:
            pass

        control_utility.check_consistency(f"repair {self.CACHE_NAME} 0")  # checking/repairing

        message = "Cache consistency violations recorded."

        ignites.await_event(message, 60, from_the_beginning=True, log_file="consistency.log")

        try:
            ignites.await_event(message, 10, from_the_beginning=True)
            raise IgniteExecutionException("Fail.")
        except TimeoutError:
            pass