def setup_test(self, **kwargs):
    """
    Prepare the cluster before a test: deploy server config, compute JVM
    options (optionally extended per test), push them to the nodes, deploy
    helper jars and start auxiliary services (Zookeeper, JMX utility).
    """
    self.ignite.set_node_option('*', 'config', self.get_server_config())

    # Test-specific JVM flags collected from keyword arguments.
    extra_flags = []
    if 'exchange_history_size' in kwargs:
        extra_flags.append(
            '-DIGNITE_EXCHANGE_HISTORY_SIZE=%d' % int(kwargs['exchange_history_size']))

    self.jvm_options = (self.get_extended_jvm_options(extra_flags)
                        if extra_flags
                        else self.get_default_jvm_options())

    if not is_enabled(self.config.get('enable_jfr_and_gc_logs', False)):
        # Same options for every node.
        self.ignite.set_node_option('*', 'jvm_options', self.jvm_options)
    else:
        # GC logging is per node: each one gets its own gc-<node_id>.log.
        for node_id in self.ignite.nodes.keys():
            gc_flags = [
                "-Xloggc:%s/gc-%s.log" % (self.config['rt']['remote']['test_dir'], node_id),
                "-XX:+PrintGCDetails",
                "-verbose:gc",
                "-XX:+UseParNewGC",
                "-XX:+UseConcMarkSweepGC",
                "-XX:+PrintGCDateStamps",
            ]
            self.ignite.set_node_option(node_id, 'jvm_options', self.jvm_options + gc_flags)

    self.util_copy_piclient_and_test_tools_models_to_libs()

    if self.get_context_variable('zookeeper_enabled'):
        self.zoo.start()
    self.ignite.jmx.start_utility()
def start_node(self, node_id):
    """
    Start a node, retrying when Zookeeper discovery is in use.

    With Zookeeper, nodes restarted too quickly can fail with
    "Failed to add node to topology because it has the same hash code for
    partitioned affinity...". A subsequent start attempt succeeds, so in
    that mode the node is started through the retry helper.

    :param node_id: id of the node to start
    :return:
    """
    if is_enabled(self.config.get('zookeeper_nodes_restart')) \
            or is_enabled(self.config.get('zookeeper_enabled')):
        self._util_start_node_several_times(node_id)
        return

    # Plain start: additional nodes use their dedicated entry point.
    if self.ignite.is_additional_node(node_id):
        self.ignite.start_additional_nodes(node_id)
    else:
        self.ignite.start_node(node_id)
def initialize_config(self):
    """
    Read rebalance-test options from self.config into attributes and
    resolve incompatible option combinations.
    """
    self.historical_rebalance = self.config.get('historical_rebalance', False)
    self.metrics_idle = self.config.get('metrics_idle', 30)
    self.with_loading = self.config.get('with_loading', False)
    self.idle_verify = is_enabled(self.config.get('idle_verify'))
    self.load_type = self.config.get('load_type', DEFAULT_LOAD_TYPE)
    self.single_cache = 'single_cache' in self.config
    self.parts_distribution = self.config.get('partition_distribution', None)
    self.with_no_rebalance_cache = self.config.get('with_no_rebalance_cache', False)
    self.jfr_settings = self.config.get('jfr_settings', None)

    # Historical rebalance cannot run together with loading - drop loading.
    if self.with_loading and self.historical_rebalance:
        log_print('There is no support historical rebalance with loading. Skipping loading.')
        self.with_loading = False

    if self.idle_verify and self.with_loading:
        log_print('Skipping idle_verify parameter because of with_loading used', color='yellow')
        # BUGFIX: the message above announced that idle_verify is skipped,
        # but the flag was previously left True; clear it so the actual
        # behavior matches the log (mirrors the with_loading reset above).
        self.idle_verify = False
def _wait_cluster_ready(self):
    """
    Block until the cluster is quiescent and ready for the test: no
    snapshot activity, no clients in topology, JMX utility running;
    optionally disables baseline auto-adjustment.
    """
    # With PITR + snapshots both on, background snapshot activity must
    # finish before the test proceeds.
    if self.get_context_variable('pitr_enabled') \
            and self.get_context_variable('snapshots_enabled'):
        self.su.wait_no_snapshots_activity_in_cluster()

    self.group_names = PiClientIgniteUtils.collect_cache_group_names(
        self.ignite, self.get_client_config())

    if not self.ignite.jmx.is_started():
        self.ignite.jmx.start_utility()

    # Wait until no client nodes remain in the topology snapshot.
    self.ignite.wait_for_topology_snapshot(client_num=0)

    if is_enabled(self.config.get('disable_baseline_autoadjustment')):
        log_print("Going to disable baseline autoadjustment", color='green')
        if self.cu.is_baseline_autoajustment_supported():
            self.cu.disable_baseline_autoajustment()
            log_print("Baseline autoadjustment disabled", color='green')

    log_print(repr(self.ignite), color='debug')
def generate_app_config(self, clusters):
    """
    Generate per-node Ignite config sets for every cluster, plus one
    DR-free client config per cluster for piclient, and create the remote
    FS store directories if configured.

    :param clusters: iterable of cluster descriptors (with .id and .nodes)
    :return: the same clusters, with node.config_name filled in
    """
    discovery_port_prefix = 4750
    communication_port_prefix = 4710
    create_dir = []
    for cluster in clusters:
        for node in cluster.nodes:
            fs_store_dir = None
            if self.fs_store_path:
                fs_store_dir = '{}/fs_store_{}{}'.format(
                    self.tiden.config['rt']['remote']['test_dir'], cluster.id, node.id)
                # BUGFIX: use `mkdir -p` (as the sibling generator does) so the
                # command is idempotent and does not fail when the directory
                # already exists or parents are missing.
                create_dir.append('mkdir -p {}'.format(fs_store_dir))
            config_name = f'cluster_{cluster.id}_node_{node.id}'
            node.config_name = config_name
            self.create_app_config_set(
                Ignite, config_name,
                config_type=node.node_type,
                deploy=True,
                consistent_id=True,
                caches='caches.xml',
                disabled_cache_configs=True,
                zookeeper_enabled=False,
                addresses=self.tiden.config['environment']['server_hosts'],
                discovery_port_prefix=discovery_port_prefix,
                communication_port_prefix=communication_port_prefix,
                node=node,
                ssl_enabled=is_enabled(self.tiden.config.get('ssl_enabled')),
                pds_enabled=PDS_ENABLED,
                fs_store_path=True,
                fs_store_path_value=fs_store_dir,
                logger=True,
                logger_path='%s/ignite-log4j2.xml' % self.tiden.config['rt']['remote']['test_module_dir'],
                custom_conflict_resolver=True,
            )
        # Generate a config without sender/receiver settings for piclient.
        piclient_node = copy(cluster.nodes[1])
        piclient_node.sender_nodes = []
        piclient_node.receiver_nodes = []
        config_name = f'cluster_{cluster.id}_node_without_dr'
        self.create_app_config_set(
            Ignite, config_name,
            config_type='client',
            deploy=True,
            consistent_id=True,
            caches='caches.xml',
            disabled_cache_configs=True,
            zookeeper_enabled=False,
            addresses=self.tiden.config['environment']['server_hosts'],
            discovery_port_prefix=discovery_port_prefix,
            communication_port_prefix=communication_port_prefix,
            node=piclient_node,
            pds_enabled=PDS_ENABLED,
            ssl_enabled=is_enabled(self.tiden.config.get('ssl_enabled')),
            logger=True,
            logger_path='%s/ignite-log4j2.xml' % self.tiden.config['rt']['remote']['test_module_dir'],
            custom_conflict_resolver=True,
        )
        # Each cluster gets its own port range.
        discovery_port_prefix += 1
        communication_port_prefix += 1
    if self.fs_store_path:
        self.tiden.ssh.exec(create_dir)
    return clusters
def initialize_config(self):
    """Pull the options this suite uses out of the test configuration."""
    cfg = self.config
    self.metrics_idle = cfg.get('metrics_idle', 30)
    self.with_loading = cfg.get('with_loading', False)
    self.idle_verify = is_enabled(cfg.get('idle_verify'))
    self.jfr_settings = cfg.get('jfr_settings', None)
def generate_app_config(self, clusters, events_enabled=True, configuration_ruler=None, **kwargs):
    """
    Build per-node Ignite config sets for each cluster and one DR-free
    client config per cluster for piclient; creates remote FS store
    directories for sender nodes.

    :param clusters: cluster descriptors to generate configs for
    :param events_enabled: forwarded to every generated config set
    :param configuration_ruler: optional callable (cluster, node) -> dict of
        extra config-set kwargs; defaults to group_names=['group1', 'dr']
    :return: the same clusters, with config names and piclient_config set
    """
    discovery_port_prefix = 4750
    communication_port_prefix = 4710
    fs_store_path = True
    mkdir_commands = []

    for cluster in clusters:
        for node in cluster.nodes:
            fs_store_dir = None
            if fs_store_path and node.is_sender():
                fs_store_dir = '{}/fs_store_{}{}'.format(
                    self.test_class.config['rt']['remote']['test_dir'],
                    cluster.id, node.id)
                mkdir_commands.append('mkdir -p {}'.format(fs_store_dir))

            config_name = f'cluster_{cluster.id}_node_{node.id}'
            node.config_name = config_name

            # Extra kwargs come either from the caller-supplied ruler or a
            # default group layout.
            if configuration_ruler is not None:
                additional_configs = configuration_ruler(cluster, node)
            else:
                additional_configs = {'group_names': ['group1', 'dr']}

            self.test_class.create_app_config_set(
                Ignite, config_name,
                config_type=[node.node_type, 'caches'],
                deploy=True,
                consistent_id=True,
                caches=f'caches_{config_name}.xml',
                disabled_cache_configs=True,
                zookeeper_enabled=False,
                addresses=cluster.get_server_hosts(),
                node=node,
                discovery_port_prefix=discovery_port_prefix,
                communication_port_prefix=communication_port_prefix,
                ssl_enabled=is_enabled(self.test_class.config.get('ssl_enabled')),
                fs_store_path=True,
                fs_store_path_value=fs_store_dir,
                events_enabled=events_enabled,
                additional_configs=['caches.tmpl.xml'],
                **additional_configs)

        # A copy of node 1 stripped of DR roles serves as the piclient config.
        piclient_node = copy(cluster.nodes[1])
        piclient_node.sender_nodes = []
        piclient_node.receiver_nodes = []
        config_name = f'cluster_{cluster.id}_node_without_dr'
        self.test_class.create_app_config_set(
            Ignite, config_name,
            config_type='client',
            deploy=True,
            consistent_id=True,
            caches=f'caches_{config_name}.xml',
            disabled_cache_configs=True,
            zookeeper_enabled=False,
            addresses=cluster.get_server_hosts(),
            node=piclient_node,
            discovery_port_prefix=discovery_port_prefix,
            communication_port_prefix=communication_port_prefix,
            ssl_enabled=is_enabled(self.test_class.config.get('ssl_enabled')),
            events_enabled=events_enabled,
            group_names=['group1'],
            additional_configs=['caches.tmpl.xml'])
        cluster.piclient_config = Ignite.config_builder.get_config(
            'client', config_set_name=config_name)

        # Each cluster gets its own port range.
        discovery_port_prefix += 1
        communication_port_prefix += 1

    if fs_store_path:
        self.test_class.ssh.exec(mkdir_commands)
    return clusters
def setup(self):
    """
    Register the test contexts (default, backup, fitness, diff_pool) with
    their config variables, then run the base suite setup and attach the
    tiden logger.
    """
    self.ignite.jmx.rebalance_collect_timeout = 5

    # Default context: settings driven entirely by the test config.
    default_context = self.contexts['default']
    default_context.add_context_variables(
        snapshots_enabled=is_enabled(self.config.get('snapshots_enabled')),
        pitr_enabled=is_enabled(self.config.get('pitr_enabled')),
        zookeeper_enabled=is_enabled(self.config.get('zookeeper_enabled')),
        # Zookeeper connection string only when Zookeeper is enabled.
        zoo_connection=self.zoo._get_zkConnectionString() if is_enabled(
            self.config.get('zookeeper_enabled')) else None,
        rebalance_pool_size=int(self.config.get('rebalance_pool_size', 4)),
        wal_segment_size=int(self.config.get('wal_segment_size', 1000000)),
        num_wal_segments=int(self.config.get('num_wal_segments', 5)),
        logger_enabled=False,
    )

    # context with configs for backup flickering test
    backup_context = self.create_test_context('backup')
    backup_context.add_config('caches.tmpl.xml', 'caches_backup.xml')
    self.prepare_backup_test_config(self.config)
    backup_context.add_context_variables(
        snapshots_enabled=is_enabled(self.config.get('snapshots_enabled')),
        pitr_enabled=is_enabled(self.config.get('pitr_enabled')),
        zookeeper_enabled=is_enabled(self.config.get('zookeeper_enabled')),
        zoo_connection=self.zoo._get_zkConnectionString() if is_enabled(
            self.config.get('zookeeper_enabled')) else None,
        rebalance_pool_size=int(self.config.get('rebalance_pool_size', 4)),
        cache_group_mult=int(self.config.get('cache_group_mult', 3)),
        # NOTE: larger WAL defaults than the default context (history 10000,
        # 20 segments).
        wal_history_size=int(self.config.get('wal_history_size', 10000)),
        num_wal_segments=int(self.config.get('num_wal_segments', 20)),
        caches_file='backup',
        test_config=self.config,
        logger_enabled=True,
        logger_path='%s/ignite-log4j2.xml' % self.config['rt']['remote']['test_module_dir'],
    )

    # context with configs for 24 Hour Fitness
    fitness_context = self.create_test_context('fitness')
    fitness_context.add_context_variables(
        # Snapshots/PITR are forced on here and Zookeeper forced off,
        # regardless of the test config.
        snapshots_enabled=True,
        pitr_enabled=True,
        zookeeper_enabled=False,
        caches_file='fitness',
        rebalance_pool_size=int(self.config.get('rebalance_pool_size', 8)),
        wal_segment_size=int(self.config.get('wal_segment_size', 1000000)),
        num_wal_segments=int(self.config.get('num_wal_segments', 5)),
        logger_enabled=True,
        logger_path='%s/ignite-log4j2.xml' % self.config['rt']['remote']['test_module_dir'],
    )

    # Context identical to default except rebalance pool is pinned to 1
    # (single-threaded rebalance).
    diff_pool_context = self.create_test_context('diff_pool')
    diff_pool_context.add_context_variables(
        snapshots_enabled=is_enabled(self.config.get('snapshots_enabled')),
        pitr_enabled=is_enabled(self.config.get('pitr_enabled')),
        zookeeper_enabled=is_enabled(self.config.get('zookeeper_enabled')),
        zoo_connection=self.zoo._get_zkConnectionString() if is_enabled(
            self.config.get('zookeeper_enabled')) else None,
        rebalance_pool_size=1,
        wal_segment_size=int(self.config.get('wal_segment_size', 1000000)),
        num_wal_segments=int(self.config.get('num_wal_segments', 5)),
        logger_enabled=False,
    )

    super().setup()
    self.logger = get_logger('tiden')
    self.logger.set_suite('[TestRebalance]')