def check_numeric_version():
    """ Checks numeric version. """
    plain = IgniteVersion('2.99.1')
    prefixed = IgniteVersion('ignite-2.99.1')
    forked = IgniteVersion('fork-2.99.1')

    # A concrete numeric version is never a development build.
    for ver in (plain, prefixed, forked, LATEST):
        assert not ver.is_dev

    assert plain == prefixed

    # todo solve comparability issues and uncomment the following
    # with pytest.raises(Exception):
    #     assert plain != forked  # incomparable

    assert plain.version == prefixed.version == forked.version == [2, 99, 1]

    assert plain.project == prefixed.project == LATEST.project == "ignite"
    assert forked.project == "fork"

    assert str(plain) == str(prefixed) == 'ignite-2.99.1'
    assert str(forked) == 'fork-2.99.1'
def check_dev_version():
    """ Check developer version. """
    dev = IgniteVersion('dev')
    ignite_dev = IgniteVersion('ignite-dev')
    fork_dev = IgniteVersion('fork-dev')

    # Every 'dev' spelling is a development build.
    for ver in (DEV_BRANCH, dev, ignite_dev, fork_dev):
        assert ver.is_dev

    assert DEV_BRANCH == dev == ignite_dev

    # pylint: disable=W0511
    # todo solve comparability issues and uncomment the following
    # with pytest.raises(Exception):
    #     assert DEV_BRANCH != fork_dev  # incomparable

    assert DEV_BRANCH.version == dev.version == ignite_dev.version == fork_dev.version

    assert DEV_BRANCH.project == dev.project == ignite_dev.project == "ignite"
    assert fork_dev.project == "fork"

    # A dev build must be newer than the release it is based on.
    sep = __version__.find('-')
    if sep > 0:
        released = IgniteVersion(__version__[:sep])

        assert dev > released
        assert ignite_dev > released
        assert dev.version > released.version
        assert ignite_dev.version > released.version

    assert dev > LATEST
    assert dev.version > LATEST.version
    assert ignite_dev > LATEST
    assert ignite_dev.version > LATEST.version
    assert DEV_BRANCH > LATEST
    assert DEV_BRANCH.version > LATEST.version

    # pylint: disable=W0511
    # todo solve comparability issues and uncomment the following
    # with pytest.raises(Exception):
    #     assert fork_dev != LATEST  # incomparable
    assert fork_dev.version != LATEST.version

    assert str(dev) == str(ignite_dev) == str(DEV_BRANCH) == 'ignite-dev'
    assert str(fork_dev) == 'fork-dev'
def start_cell(self, version, jvm_opts, discovery_spi=None, modules=None, nodes_cnt=NODES_PER_CELL):
    """ Starts cell. """
    # Fall back to plain TCP discovery when no SPI is supplied.
    spi = TcpDiscoverySpi() if discovery_spi is None else discovery_spi

    config = IgniteConfiguration(
        version=IgniteVersion(version),
        properties=self.properties(),
        cluster_state="INACTIVE",
        failure_detection_timeout=self.FAILURE_DETECTION_TIMEOUT,
        discovery_spi=spi)

    ignites = IgniteService(self.test_context, config, num_nodes=nodes_cnt, modules=modules,
                            jvm_opts=jvm_opts, startup_timeout_sec=180)

    ignites.start_async()

    return ignites
def __start_ignite_nodes(self, version, num_nodes, timeout_sec=60, join_cluster=None):
    """
    Start inactive Ignite server nodes with one persistent and one in-memory data region.

    :param version: Ignite version string.
    :param num_nodes: Number of server nodes to start.
    :param timeout_sec: Startup timeout in seconds.
    :param join_cluster: Optional running cluster whose discovery SPI the new nodes should join.
    :return: Started IgniteService.
    """
    config = IgniteConfiguration(
        cluster_state="INACTIVE",
        version=IgniteVersion(version),
        data_storage=DataStorageConfiguration(
            default=DataRegionConfiguration(name='persistent', persistence_enabled=True),
            regions=[DataRegionConfiguration(name='in-memory', persistence_enabled=False,
                                             max_size=100 * 1024 * 1024)]))

    if join_cluster:
        # BUGFIX: namedtuple._replace returns a NEW instance; the original code discarded
        # the result, so the discovery SPI was silently never applied. Rebind the config.
        config = config._replace(discovery_spi=from_ignite_cluster(join_cluster))

    servers = IgniteService(self.test_context, config=config, num_nodes=num_nodes,
                            startup_timeout_sec=timeout_sec)
    servers.start()

    return servers
def snapshot_test(self, ignite_version):
    """ Basic snapshot test. """
    version = IgniteVersion(ignite_version)

    server_config = IgniteConfiguration(
        version=version,
        data_storage=DataStorageConfiguration(default=DataRegionConfiguration(persistent=True)),
        metric_exporter='org.apache.ignite.spi.metric.jmx.JmxMetricExporterSpi')

    # One node of the cluster is reserved for the loader application.
    nodes = IgniteService(self.test_context, server_config,
                          num_nodes=len(self.test_context.cluster) - 1)
    nodes.start()

    control_utility = ControlUtility(nodes)
    control_utility.activate()

    loader_config = IgniteConfiguration(client_mode=True, version=version,
                                        discovery_spi=from_ignite_cluster(nodes))

    loader = IgniteApplicationService(
        self.test_context,
        loader_config,
        java_class_name="org.apache.ignite.internal.ducktest.tests.snapshot_test.DataLoaderApplication",
        params={"start": 0, "cacheName": self.CACHE_NAME, "interval": 500_000, "valueSizeKb": 1})
def test_config_add_to_result(self, ignite_version, is_ignite_service):
    """
    Test that the config file is in config directory
    and Service.logs contains the config directory to add to the result.
    """
    cfg = IgniteConfiguration(version=IgniteVersion(ignite_version))

    if is_ignite_service:
        ignite = IgniteService(self.test_context, cfg, num_nodes=1)
    else:
        ignite = IgniteApplicationService(
            self.test_context,
            cfg,
            java_class_name="org.apache.ignite.internal.ducktest.tests.self_test.TestKillableApplication")

    ignite.start()

    assert ignite.logs.get('config').get('path') == ignite.config_dir
    assert ignite.config_file.startswith(ignite.config_dir)

    # Both the main config and the log config must be present in the config directory.
    for cfg_path in (ignite.config_file, ignite.log_config_file):
        ignite.nodes[0].account.ssh(f'ls {ignite.config_dir} | grep {os.path.basename(cfg_path)}')

    ignite.stop()
def start_cell_with_prepared_txs(self, version, cell_id, discovery_spi, modules,
                                 col_cnt=0, noncol_cnt=0, multi_cnt=0):
    """ Starts cell with prepared transactions. """
    nodes = self.start_cell(
        version,
        ['-D' + CellularAffinity.ATTRIBUTE + '=' + cell_id],
        discovery_spi,
        modules,
        CellularAffinity.NODES_PER_CELL - 1)

    spi = from_ignite_cluster(nodes) if discovery_spi is None else discovery_spi

    # Last server node at the cell.
    prepared_tx_streamer = IgniteApplicationService(
        self.test_context,
        IgniteConfiguration(version=IgniteVersion(version),
                            properties=self.properties(),
                            failure_detection_timeout=self.FAILURE_DETECTION_TIMEOUT,
                            discovery_spi=spi),
        java_class_name="org.apache.ignite.internal.ducktest.tests.cellular_affinity_test."
                        "CellularPreparedTxStreamer",
        params={"cacheName": CellularAffinity.CACHE_NAME,
                "attr": CellularAffinity.ATTRIBUTE,
                "cell": cell_id,
                "colocatedTxCnt": col_cnt,
                "multiTxCnt": multi_cnt,
                "noncolocatedTxCnt": noncol_cnt},
        jvm_opts=['-D' + CellularAffinity.ATTRIBUTE + '=' + cell_id],
        modules=modules,
        startup_timeout_sec=180)

    # Starts the last server node and creates prepared txs on it.
    prepared_tx_streamer.start_async()

    return nodes, prepared_tx_streamer
def test_simple_services_start_stop(self, ignite_version):
    """
    Tests plain services start and stop (termination vs self-termination).
    """
    version = IgniteVersion(ignite_version)

    ignites = IgniteService(self.test_context, IgniteConfiguration(version=version), num_nodes=1)
    ignites.start()

    client = IgniteService(self.test_context, IgniteClientConfiguration(version=version), num_nodes=1)
    client.start()

    # Application that must be stopped explicitly.
    node1 = IgniteApplicationService(
        self.test_context,
        IgniteClientConfiguration(version=version, discovery_spi=from_ignite_cluster(ignites)),
        java_class_name="org.apache.ignite.internal.ducktest.tests.self_test.TestKillableApplication",
        startup_timeout_sec=180)

    # Application that terminates by itself.
    node2 = IgniteApplicationService(
        self.test_context,
        IgniteClientConfiguration(version=version, discovery_spi=from_ignite_cluster(ignites)),
        java_class_name="org.apache.ignite.internal.ducktest.tests.self_test.TestSelfKillableApplication",
        startup_timeout_sec=180)

    node1.start()
    node2.run()

    node1.stop()
    client.stop()
    ignites.stop()
def test_change_users(self, ignite_version):
    """
    Test add, update and remove user
    """
    config = IgniteConfiguration(
        cluster_state="INACTIVE",
        auth_enabled=True,
        version=IgniteVersion(ignite_version),
        data_storage=DataStorageConfiguration(
            default=DataRegionConfiguration(persistent=True)),
        client_connector_configuration=ClientConnectorConfiguration())

    servers = IgniteService(self.test_context, config=config, num_nodes=self.NUM_NODES - 1)
    servers.start()

    ControlUtility(cluster=servers,
                   username=DEFAULT_AUTH_USERNAME,
                   password=DEFAULT_AUTH_PASSWORD).activate()

    client_cfg = IgniteThinClientConfiguration(
        addresses=servers.nodes[0].account.hostname + ":" +
        str(config.client_connector_configuration.port),
        version=IgniteVersion(ignite_version),
        username=DEFAULT_AUTH_USERNAME,
        password=DEFAULT_AUTH_PASSWORD)

    # Add a new user.
    check_authenticate(servers, TEST_USERNAME, TEST_PASSWORD, True)
    self.run_with_creds(client_cfg, ADD_USER, TEST_USERNAME, TEST_PASSWORD)
    check_authenticate(servers, TEST_USERNAME, TEST_PASSWORD)

    # Update the user's password.
    check_authenticate(servers, TEST_USERNAME, TEST_PASSWORD2, True)
    self.run_with_creds(client_cfg, UPDATE_USER, TEST_USERNAME, TEST_PASSWORD2)
    check_authenticate(servers, TEST_USERNAME, TEST_PASSWORD, True)
    check_authenticate(servers, TEST_USERNAME, TEST_PASSWORD2)

    # Remove the user.
    self.run_with_creds(client_cfg, REMOVE_USER, TEST_USERNAME, free=False)
    check_authenticate(servers, TEST_USERNAME, TEST_PASSWORD2, True)
def apply(self, seed_context, context_list):
    """
    Mark contexts as ignored when the injected Ignite version satisfies the condition.

    :param seed_context: Seed test context (part of the decorator interface).
    :param context_list: Test contexts the decorator is applied to.
    :return: The same context list with updated ``ignore`` flags.
    """
    assert len(context_list) > 0, "ignore if decorator is not being applied to any test cases"

    for ctx in context_list:
        if self.variable_name in ctx.injected_args:
            version = ctx.injected_args[self.variable_name]

            # BUGFIX: assertion message previously read "'%s'n injected args" (typo).
            assert isinstance(version, str), "'%s' in injected args must be a string" % (self.variable_name,)

            ctx.ignore = ctx.ignore or self.condition(IgniteVersion(version), ctx.globals)

    return context_list
def test_ignite_start_stop(self, ignite_version):
    """
    Test that IgniteService correctly start and stop
    """
    ignite = IgniteService(
        self.test_context,
        IgniteConfiguration(version=IgniteVersion(ignite_version)),
        num_nodes=1)

    # Removed a leftover debug print of the test context.
    ignite.start()
    ignite.stop()
def test_2_nodes_fail_sequential_zk(self, ignite_version, load_type):
    """
    Test node failure scenario with ZooKeeperSpi not allowing to fail nodes in a row.
    """
    config = DiscoveryTestConfig(version=IgniteVersion(ignite_version),
                                 nodes_to_kill=2,
                                 with_zk=True,
                                 load_type=ClusterLoad.construct_from(load_type),
                                 sequential_failure=True)

    return self._perform_node_fail_scenario(config)
def _check_injection(context_list, *, versions, global_args=None, pairs=False):
    """
    Check that the contexts were parametrized with the expected Ignite versions.
    Global arguments, when present, take precedence over the decorator's versions.
    """
    def as_pair(item):
        # Convert a 2-element version spec into a pair of IgniteVersion objects.
        return IgniteVersion(item[0]), IgniteVersion(item[1])

    if global_args:
        global_versions = global_args['ignite_versions']

        if isinstance(global_versions, str):
            expected = [IgniteVersion(global_versions)]
        elif isinstance(global_versions, tuple):
            expected = [tuple(IgniteVersion(v) for v in global_versions)]
        elif pairs:
            expected = [as_pair(item) for item in global_versions]
        else:
            expected = [IgniteVersion(v) for v in global_versions]
    elif pairs:
        expected = [as_pair(item) for item in versions]
    else:
        expected = [IgniteVersion(v) for v in versions]

    assert len(context_list) == len(expected)

    for i, ctx in enumerate(sorted(context_list, key=lambda c: c.function())):
        assert ctx.function() == expected[i]
def test_sequential_failure_tcp(self, ignite_version, load_type, net_part: IgniteService.NetPart):
    """
    Test 2 nodes sequential failure scenario with TcpDiscoverySpi.
    """
    config = DiscoveryTestConfig(version=IgniteVersion(ignite_version),
                                 nodes_to_kill=2,
                                 load_type=ClusterLoad.construct_from(load_type),
                                 sequential_failure=True,
                                 net_part=net_part)

    return self._perform_node_fail_scenario(config)
def test_sequential_failure_tcp_no_recovery(self, ignite_version, load_type):
    """
    Test 2 nodes sequential failure scenario with TcpDiscoverySpi. The connection
    recovery is disabled.
    """
    config = DiscoveryTestConfig(version=IgniteVersion(ignite_version),
                                 nodes_to_kill=2,
                                 load_type=ClusterLoad.construct_from(load_type),
                                 sequential_failure=True,
                                 disable_conn_recovery=True)

    return self._perform_node_fail_scenario(config)
def test_nonsequential_failure_tcp(self, ignite_version, nodes_to_kill, load_type,
                                   net_part: IgniteService.NetPart):
    """
    Test nodes failure scenario with TcpDiscoverySpi not allowing nodes to fail in a row.
    """
    config = DiscoveryTestConfig(version=IgniteVersion(ignite_version),
                                 nodes_to_kill=nodes_to_kill,
                                 load_type=ClusterLoad.construct_from(load_type),
                                 sequential_failure=False,
                                 net_part=net_part)

    return self._perform_node_fail_scenario(config)
def __start_ignite_nodes(self, version, num_nodes, timeout_sec=60):
    """ Start an active Ignite cluster with a single transactional cache. """
    cache = CacheConfiguration(name=self.CACHE_NAME, atomicity_mode='TRANSACTIONAL')

    config = IgniteConfiguration(cluster_state="ACTIVE",
                                 version=IgniteVersion(version),
                                 caches=[cache])

    servers = IgniteService(self.test_context, config=config, num_nodes=num_nodes,
                            startup_timeout_sec=timeout_sec)
    servers.start()

    return servers
def __start_tx_app(self, version, servers, *, client_mode=True, **kwargs):
    """ Start the long-running-transactions generator application against the given servers. """
    config = IgniteConfiguration(version=IgniteVersion(version),
                                 client_mode=client_mode,
                                 discovery_spi=from_ignite_cluster(servers))

    app = IgniteApplicationService(
        self.test_context,
        config=config,
        java_class_name='org.apache.ignite.internal.ducktest.tests.control_utility'
                        '.LongRunningTransactionsGenerator',
        params=kwargs)

    app.start()

    return app
def test_ssl_connection(self, ignite_version):
    """
    Test that IgniteService, IgniteApplicationService correctly start and stop with ssl configurations.
    And check ControlUtility with ssl arguments.
    """
    shared_root = get_shared_root_path(self.test_context.globals)

    server_ssl = SslParams(shared_root, key_store_jks=DEFAULT_SERVER_KEYSTORE)

    server_configuration = IgniteConfiguration(
        version=IgniteVersion(ignite_version),
        ssl_params=server_ssl,
        connector_configuration=ConnectorConfiguration(ssl_enabled=True, ssl_params=server_ssl))

    ignite = IgniteService(self.test_context, server_configuration, num_nodes=2,
                           startup_timeout_sec=180)

    # Client reuses the server configuration but with its own keystore and no connector.
    client_configuration = server_configuration._replace(
        client_mode=True,
        ssl_params=SslParams(shared_root, key_store_jks=DEFAULT_CLIENT_KEYSTORE),
        connector_configuration=None)

    app = IgniteApplicationService(
        self.test_context,
        client_configuration,
        java_class_name="org.apache.ignite.internal.ducktest.tests.smoke_test.SimpleApplication",
        startup_timeout_sec=180)

    admin_ssl = SslParams(shared_root, key_store_jks=DEFAULT_ADMIN_KEYSTORE)
    control_utility = ControlUtility(cluster=ignite, ssl_params=admin_ssl)

    ignite.start()
    app.start()

    control_utility.cluster_state()

    app.stop()
    ignite.stop()
def test_logs_rotation(self, ignite_version):
    """
    Test logs rotation after ignite service restart.
    """
    def line_count(service, filename):
        # Number of lines in the given log file on the service's first node.
        node = service.nodes[0]
        log_file = os.path.join(service.log_dir, filename)
        return list(node.account.ssh_capture(f'cat {log_file} | wc -l', callback=int))[0]

    def log_files_count(service):
        # Number of ignite.log* files (current + rotated) on the first node.
        node = service.nodes[0]
        return list(node.account.ssh_capture(f'ls {service.log_dir}/ignite.log* | wc -l',
                                             callback=int))[0]

    ignites = IgniteService(
        self.test_context,
        IgniteConfiguration(version=IgniteVersion(ignite_version)),
        num_nodes=1)
    ignites.start()

    num_restarts = 6
    for i in range(num_restarts - 1):
        ignites.stop()

        old_cnt = line_count(ignites, "ignite.log")
        assert old_cnt > 0

        ignites.start(clean=False)

        new_cnt = line_count(ignites, "ignite.log")
        assert new_cnt > 0

        # Check that there is no new entry in the rotated file.
        assert old_cnt == line_count(ignites, f"ignite.log.{i + 1}")

    assert log_files_count(ignites) == num_restarts
def start_ignite(test_context, ignite_version: str, rebalance_params: RebalanceParams) -> IgniteService:
    """
    Start IgniteService:

    :param test_context: Test context.
    :param ignite_version: Ignite version.
    :param rebalance_params: Rebalance parameters.
    :return: IgniteService.
    """
    node_count = test_context.available_cluster_size - rebalance_params.preloaders

    if rebalance_params.persistent:
        data_storage = DataStorageConfiguration(
            max_wal_archive_size=2 * rebalance_params.data_region_max_size,
            default=DataRegionConfiguration(
                persistence_enabled=True,
                max_size=rebalance_params.data_region_max_size))
    else:
        data_storage = DataStorageConfiguration(
            default=DataRegionConfiguration(max_size=rebalance_params.data_region_max_size))

    node_config = IgniteConfiguration(
        version=IgniteVersion(ignite_version),
        data_storage=data_storage,
        metric_exporters={"org.apache.ignite.spi.metric.jmx.JmxMetricExporterSpi"},
        rebalance_thread_pool_size=rebalance_params.thread_pool_size,
        rebalance_batch_size=rebalance_params.batch_size,
        rebalance_batches_prefetch_count=rebalance_params.batches_prefetch_count,
        rebalance_throttle=rebalance_params.throttle)

    # One node is held back when no trigger event is configured (it joins later).
    num_nodes = node_count if rebalance_params.trigger_event else node_count - 1

    ignites = IgniteService(test_context, config=node_config, num_nodes=num_nodes,
                            jvm_opts=rebalance_params.jvm_opts)
    ignites.start()

    return ignites
def start_tx_streamer(self, version, cell, discovery_spi, modules):
    """ Starts transaction streamer. """
    config = IgniteClientConfiguration(version=IgniteVersion(version),
                                       properties=self.properties(),
                                       discovery_spi=discovery_spi)

    return IgniteApplicationService(
        self.test_context,
        config,
        java_class_name="org.apache.ignite.internal.ducktest.tests.cellular_affinity_test.CellularTxStreamer",
        params={"cacheName": CellularAffinity.CACHE_NAME,
                "attr": CellularAffinity.ATTRIBUTE,
                "cell": cell,
                "warmup": 10000},
        modules=modules,
        startup_timeout_sec=180)
def test_thin_client_compatibility(self, server_version, thin_client_version):
    """
    Thin client compatibility test.
    """
    server_config = IgniteConfiguration(
        version=IgniteVersion(server_version),
        client_connector_configuration=ClientConnectorConfiguration())

    ignite = IgniteService(self.test_context, server_config, 1)

    addresses = ignite.nodes[0].account.hostname + ":" + \
        str(server_config.client_connector_configuration.port)

    thin_clients = IgniteApplicationService(
        self.test_context,
        IgniteThinClientConfiguration(addresses=addresses,
                                      version=IgniteVersion(thin_client_version)),
        java_class_name=self.JAVA_CLIENT_CLASS_NAME,
        num_nodes=1)

    ignite.start()
    thin_clients.run()
    ignite.stop()
def test_assertion_convertion(self, ignite_version):
    """
    Test to make sure Java assertions are converted to python exceptions
    """
    server_configuration = IgniteConfiguration(version=IgniteVersion(ignite_version))

    app = IgniteApplicationService(
        self.test_context,
        server_configuration,
        java_class_name="org.apache.ignite.internal.ducktest.tests.smoke_test.AssertionApplication")

    try:
        app.start()
    except IgniteExecutionException as ex:
        # The Java-side AssertionError must surface as an IgniteExecutionException.
        assert str(ex) == "Java application execution failed. java.lang.AssertionError"
    else:
        # No exception means the conversion failed — the test must fail.
        app.stop()
        assert False
def test_ignite_app_start_stop(self, ignite_version):
    """
    Test that IgniteService and IgniteApplicationService correctly start and stop
    """
    server_configuration = IgniteConfiguration(version=IgniteVersion(ignite_version))

    ignite = IgniteService(self.test_context, server_configuration, num_nodes=1)

    client_configuration = server_configuration._replace(
        client_mode=True,
        discovery_spi=from_ignite_cluster(ignite))

    app = IgniteApplicationService(
        self.test_context,
        client_configuration,
        java_class_name="org.apache.ignite.internal.ducktest.tests.smoke_test.SimpleApplication")

    ignite.start()
    app.start()

    app.stop()
    ignite.stop()
def test_distribution(self, ignite_version):
    """
    Tests Cellular Affinity scenario (partition distribution).
    """
    cell1 = self.start_cell(ignite_version, ['-D' + CellularAffinity.ATTRIBUTE + '=1'])

    # Subsequent cells join through the first cell's discovery SPI.
    discovery_spi = from_ignite_cluster(cell1)

    cell2 = self.start_cell(ignite_version, ['-D' + CellularAffinity.ATTRIBUTE + '=2'],
                            discovery_spi)
    cell3 = self.start_cell(ignite_version,
                            ['-D' + CellularAffinity.ATTRIBUTE + '=XXX', '-DRANDOM=42'],
                            discovery_spi)

    for cell in (cell1, cell2, cell3):
        cell.await_started()

    ControlUtility(cell1).activate()

    checker = IgniteApplicationService(
        self.test_context,
        IgniteClientConfiguration(version=IgniteVersion(ignite_version),
                                  discovery_spi=from_ignite_cluster(cell1)),
        java_class_name="org.apache.ignite.internal.ducktest.tests.cellular_affinity_test.DistributionChecker",
        params={"cacheName": CellularAffinity.CACHE_NAME,
                "attr": CellularAffinity.ATTRIBUTE,
                "nodesPerCell": self.NODES_PER_CELL})

    checker.run()
def test_logging(self, ignite_version):
    """
    Tests logging goes to the correct file (consistency.log) when default AI config used.
    """
    cfg_filename = "ignite-default-log4j.xml"

    ignites = IgniteApplicationService(
        self.test_context,
        IgniteConfiguration(
            version=IgniteVersion(ignite_version),
            cluster_state="INACTIVE",
            properties=self.PROPERTIES,
            log4j_config=cfg_filename),  # Default AI config (will be generated below).
        java_class_name="org.apache.ignite.internal.ducktest.tests.control_utility.InconsistentNodeApplication",
        params={"cacheName": self.CACHE_NAME, "amount": 1024, "parts": 1, "tx": False},
        startup_timeout_sec=180,
        num_nodes=len(self.test_context.cluster))

    # Copying default AI config with log path replacement on every node.
    for node in ignites.nodes:
        ignites.init_persistent(node)

        cfg_file = f"{ignites.config_dir}/{cfg_filename}"
        ignites.exec_command(node, f"cp {ignites.home_dir}/config/ignite-log4j.xml {cfg_file}")

        # Escape slashes so the paths survive the sed substitution below.
        orig = "${IGNITE_HOME}/work/log".replace('/', '\\/')
        fixed = ignites.log_dir.replace('/', '\\/')

        ignites.exec_command(node, f"sed -i 's/{orig}/{fixed}/g' {cfg_file}")

    ignites.start()

    control_utility = ControlUtility(ignites)
    control_utility.activate()

    ignites.await_event("APPLICATION_STREAMING_FINISHED", 60, from_the_beginning=True)

    # Making sure we have broken data: idle_verify must fail.
    try:
        control_utility.idle_verify()

        raise IgniteExecutionException("Fail.")
    except AssertionError:
        pass

    control_utility.check_consistency(f"repair {self.CACHE_NAME} 0")  # Checking/repairing.

    message = "Cache consistency violations recorded."

    ignites.await_event(message, 60, from_the_beginning=True, log_file="consistency.log")

    # The violation message must NOT appear in the default log — a timeout is expected here.
    try:
        ignites.await_event(message, 10, from_the_beginning=True)

        raise IgniteExecutionException("Fail.")
    except TimeoutError:
        pass
def test(self, ignite_version, load_type):
    """
    Tests PME-free switch scenario (node stop).
    """
    data = {}

    caches = [CacheConfiguration(name='test-cache', backups=2, atomicity_mode='TRANSACTIONAL')]

    l_type = LoadType.construct_from(load_type)

    # Checking PME (before 2.8) vs PME-free (2.8+) switch duration, but
    # focusing on switch duration (which depends on caches amount) when long_txs is false and
    # on waiting for previously started txs before the switch (which depends on txs duration)
    # when long_txs of true.
    if l_type is LoadType.EXTRA_CACHES:
        for idx in range(1, self.EXTRA_CACHES_AMOUNT):
            caches.append(CacheConfiguration(name="cache-%d" % idx, backups=2,
                                             atomicity_mode='TRANSACTIONAL'))

    config = IgniteConfiguration(version=IgniteVersion(ignite_version), caches=caches,
                                 cluster_state="INACTIVE")

    num_nodes = len(self.test_context.cluster) - 2

    self.test_context.logger.info("Nodes amount calculated as %d." % num_nodes)

    ignites = IgniteService(self.test_context, config, num_nodes=num_nodes)
    ignites.start()

    if IgniteVersion(ignite_version) >= V_2_8_0:
        ControlUtility(ignites).disable_baseline_auto_adjust()

    ControlUtility(ignites).activate()

    client_config = config._replace(
        client_mode=True,
        discovery_spi=from_ignite_cluster(ignites, slice(0, num_nodes - 1)))

    long_tx_streamer = IgniteApplicationService(
        self.test_context,
        client_config,
        java_class_name="org.apache.ignite.internal.ducktest.tests.pme_free_switch_test.LongTxStreamerApplication",
        params={"cacheName": "test-cache"},
        startup_timeout_sec=180)

    if l_type is LoadType.LONG_TXS:
        long_tx_streamer.start()

    single_key_tx_streamer = IgniteApplicationService(
        self.test_context,
        client_config,
        java_class_name="org.apache.ignite.internal.ducktest.tests.pme_free_switch_test."
                        "SingleKeyTxStreamerApplication",
        params={"cacheName": "test-cache", "warmup": 1000},
        startup_timeout_sec=180)
    single_key_tx_streamer.start()

    ignites.stop_node(ignites.nodes[num_nodes - 1])

    single_key_tx_streamer.await_event("Node left topology", 60, from_the_beginning=True)

    if l_type is LoadType.LONG_TXS:
        time.sleep(30)  # Keeping txs alive for 30 seconds.

        long_tx_streamer.stop_async()

        single_key_tx_streamer.await_event("Node left topology", 60, from_the_beginning=True)

    # Waiting for streaming continuation.
    single_key_tx_streamer.await_event("APPLICATION_STREAMED", 60)

    single_key_tx_streamer.stop()

    data["Worst latency (ms)"] = single_key_tx_streamer.extract_result("WORST_LATENCY")
    data["Streamed txs"] = single_key_tx_streamer.extract_result("STREAMED")
    data["Measure duration (ms)"] = single_key_tx_streamer.extract_result("MEASURE_DURATION")
    data["Server nodes"] = num_nodes

    return data
def get_server_config(ignite_version):
    """
    Build the Ignite server node configuration: persistent data region, cache templates
    with cell-colocated affinity, thin/connector settings and tuned communication SPI.
    """
    affinity = Bean(
        "org.apache.ignite.cache.affinity.rendezvous.RendezvousAffinityFunction",
        partitions=16384,
        affinityBackupFilter=Bean(
            "org.apache.ignite.cache.affinity.rendezvous.ClusterNodeAttributeColocatedBackupFilter",
            constructor_args=["CELL"]))

    cache_templates = [
        CacheConfiguration(name="PARTITIONED*",
                           cache_mode="PARTITIONED",
                           atomicity_mode="TRANSACTIONAL",
                           statistics_enabled=True,
                           affinity=affinity),
        CacheConfiguration(name="AffinityTemplate*",
                           cache_mode="PARTITIONED",
                           atomicity_mode="TRANSACTIONAL",
                           statistics_enabled=True,
                           affinity=affinity,
                           affinity_mapper=Bean(
                               "org.apache.ignite.internal.ducktest.tests.self_test.TestAffinityMapper")),
    ]

    data_storage = DataStorageConfiguration(
        checkpoint_frequency=10000,
        wal_history_size=2147483647,
        wal_segment_size=1024 * 1024 * 1024,
        wal_mode="LOG_ONLY",
        metrics_enabled=True,
        metrics_rate_time_interval=60000,
        wal_buffer_size=5242880,
        wal_compaction_enabled=True,
        default=DataRegionConfiguration(persistence_enabled=True,
                                        max_size=1024 * 1024 * 1024,
                                        metrics_enabled=True,
                                        metrics_rate_time_interval=1000))

    return IgniteConfiguration(
        version=IgniteVersion(ignite_version),
        data_storage=data_storage,
        client_connector_configuration=ClientConnectorConfiguration(
            thread_pool_size=10,
            thin_client_configuration=ThinClientConfiguration(
                max_active_compute_tasks_per_connection=100)),
        transaction_configuration=TransactionConfiguration(
            default_tx_timeout=300000,
            default_tx_isolation="READ_COMMITTED",
            tx_timeout_on_partition_map_exchange=120000),
        sql_schemas=["schema1", "schema2"],
        caches=cache_templates,
        metrics_log_frequency=30000,
        failure_detection_timeout=120000,
        rebalance_thread_pool_size=8,
        peer_class_loading_enabled=True,
        auto_activation_enabled=False,
        binary_configuration=BinaryConfiguration(compact_footer=True),
        communication_spi=TcpCommunicationSpi(idle_connection_timeout=600000,
                                              socket_write_timeout=30000,
                                              selectors_count=18,
                                              connections_per_node=4,
                                              use_paired_connections=True,
                                              message_queue_limit=0),
        connector_configuration=ConnectorConfiguration(idle_timeout=180000))
def ignite_start_stop(self, ignite_version, graceful_shutdown, nodes_num, static_clients_num,
                      temp_client, iteration_count, client_work_time):
    """
    Test for starting and stopping fat clients.
    """
    servers_count = nodes_num - static_clients_num - temp_client
    current_top_v = servers_count

    # Topology version expected after the test completes.
    fin_top_ver = servers_count + (2 * static_clients_num) + (2 * iteration_count * temp_client)

    server_cfg = IgniteConfiguration(
        version=IgniteVersion(ignite_version),
        caches=[CacheConfiguration(name=self.CACHE_NAME, backups=1,
                                   atomicity_mode='TRANSACTIONAL')])

    ignite = IgniteService(self.test_context, server_cfg, num_nodes=servers_count)

    control_utility = ControlUtility(ignite)

    client_cfg = server_cfg._replace(client_mode=True)

    static_clients = IgniteApplicationService(
        self.test_context,
        client_cfg,
        java_class_name=self.JAVA_CLIENT_CLASS_NAME,
        num_nodes=static_clients_num,
        params={"cacheName": self.CACHE_NAME, "pacing": self.PACING})

    temp_clients = IgniteApplicationService(
        self.test_context,
        client_cfg,
        java_class_name=self.JAVA_CLIENT_CLASS_NAME,
        num_nodes=temp_client,
        params={"cacheName": self.CACHE_NAME, "pacing": self.PACING})

    ignite.start()

    static_clients.start()
    current_top_v += static_clients_num

    check_topology(control_utility, current_top_v)

    # Start / stop temp_clients node. Check cluster.
    for i in range(iteration_count):
        self.logger.info(f'Starting iteration: {i}.')

        temp_clients.start()
        current_top_v += temp_client

        await_event(static_clients, f'ver={current_top_v}, locNode=')
        check_topology(control_utility, current_top_v)

        await_event(temp_clients, f'clients={static_clients_num + temp_client}')

        time.sleep(client_work_time)

        if graceful_shutdown:
            temp_clients.stop()
        else:
            temp_clients.kill()

        current_top_v += temp_client
        await_event(static_clients, f'ver={current_top_v}, locNode=')

    static_clients.stop()

    check_topology(control_utility, fin_top_ver)