Example #1
    def _print_wait_for(self, message, node_idxs, time, timeout, done):
        nodes_str = ', '.join(str(node_id) for node_id in node_idxs)
        log_put(
            f"Waiting for '{message}' at nodes [{nodes_str}], {time}/{timeout} sec"
        )
        if done:
            stdout.flush()  # assumes `from sys import stdout` at module level
            log_print('')
    def teardown_testcase(self):
        for ignite in self.get_app_by_type('ignite'):
            ignite.kill_nodes()
            ignite.delete_lfs()

            log_put("Cleanup Ignite LFS ... ")
            commands = {}
            for node_idx in ignite.nodes.keys():
                host = ignite.nodes[node_idx]['host']
                if commands.get(host) is None:
                    commands[host] = [
                        'rm -rf %s/work/*' % ignite.nodes[node_idx]['ignite_home']
                    ]
                else:
                    commands[host].append('rm -rf %s/work/*' % ignite.nodes[node_idx]['ignite_home'])
            results = self.tiden.ssh.exec(commands)
            print(results)
            log_put("Ignite LFS deleted.")
            log_print()
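
The LFS cleanup block above (kill the nodes, delete the LFS, then rm -rf each node's work directory over SSH) is repeated verbatim in every scenario below. A minimal sketch of how it could be pulled into a shared helper, grouping the per-host commands with collections.defaultdict; the helper name and the ssh parameter are illustrative, not part of the framework:

    from collections import defaultdict

    def cleanup_ignite_lfs(ignite, ssh):
        """Hypothetical helper: wipe work/* on every host of the grid."""
        log_put("Cleanup Ignite LFS ... ")
        commands = defaultdict(list)
        for node in ignite.nodes.values():
            # one command list per host, so each host gets a single exec call
            commands[node['host']].append('rm -rf %s/work/*' % node['ignite_home'])
        results = ssh.exec(dict(commands))
        print(results)
        log_put("Ignite LFS deleted.")
        log_print()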
Example #3
    def run(self, artifact_name):
        """
        Run the scenario for the given artifact.

        :param artifact_name: artifact name from the artifact configuration file
        """
        super().run(artifact_name)

        log_print("Running putAll() benchmark with config: %s" % self.config,
                  color='green')

        version, ignite = self.test_class.start_ignite_grid(artifact_name,
                                                            activate=True)

        self.start_probes(artifact_name)

        warmup_runs, prod_runs = self._get_number_of_runs()

        time_result = 0

        with PiClient(ignite, self.test_class.client_config) as piclient:
            cache_names = piclient.get_ignite().cacheNames()
            data_size = int(self.config.get('data_size'))

            log_print("Running {} iterations".format(warmup_runs + prod_runs))
            for i in range(0, warmup_runs + prod_runs):
                self.write_time_event('iteration_%s start' % i)
                warmup_iteration = i < warmup_runs

                log_print("Running iteration %s (%s)" %
                          (i, 'warmup' if warmup_iteration else 'prod'))

                log_print("Loading %s values per cache into %s caches" %
                          (data_size *
                           (i + 1) - data_size * i, cache_names.size()))

                async_operations = []
                self.write_time_event('iteration_%s create putall' % i)
                for cache_name in cache_names.toArray():
                    async_operation = create_async_operation(
                        create_put_all_operation,
                        cache_name,
                        data_size * i,
                        data_size * (i + 1),
                        int(self.config.get('put_all_batch_size')),
                        value_type=ModelTypes.VALUE_ACCOUNT.value)
                    async_operations.append(async_operation)
                    async_operation.evaluate()

                for async_op in async_operations:
                    async_op.getResult()

                    # accumulate elapsed time only for production (non-warmup) iterations
                    if not warmup_iteration:
                        op = async_op.getOperation()
                        time_result += op.getEndTime() - op.getStartTime()

                self.write_time_event('iteration_%s putall done' % i)

            log_print("Loading done")

        ignite.cu.deactivate()

        self.stop_probes(time_results=float(time_result) / prod_runs)

        self.results['evaluated'] = True

        ignite.kill_nodes()
        ignite.delete_lfs()

        log_put("Cleanup Ignite LFS ... ")
        commands = {}
        for node_idx in ignite.nodes.keys():
            host = ignite.nodes[node_idx]['host']
            if commands.get(host) is None:
                commands[host] = [
                    'rm -rf %s/work/*' % ignite.nodes[node_idx]['ignite_home']
                ]
            else:
                commands[host].append('rm -rf %s/work/*' %
                                      ignite.nodes[node_idx]['ignite_home'])
        results = self.test_class.tiden.ssh.exec(commands)
        print(results)
        log_put("Ignite LFS deleted.")
        log_print()
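
Each iteration above draws keys from the half-open range [data_size * i, data_size * (i + 1)), so iterations load disjoint key ranges and every iteration writes exactly data_size fresh values per cache. A tiny self-contained illustration (the data_size value is arbitrary):

    data_size = 1000  # illustrative value
    for i in range(3):
        start, end = data_size * i, data_size * (i + 1)
        print(f"iteration {i}: keys [{start}, {end}) -> {end - start} values per cache")
    # iteration 0: keys [0, 1000) -> 1000 values per cache
    # iteration 1: keys [1000, 2000) -> 1000 values per cache
    # iteration 2: keys [2000, 3000) -> 1000 values per cache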
Example #4
    def run(self, artifact_name):
        """
        Run the scenario for the given artifact.

        :param artifact_name: artifact name from the artifact configuration file
        """
        super().run(artifact_name)

        log_print("Running putAll() benchmark with config: %s" % self.config,
                  color='green')

        caches_list_file = ('caches_%s.xml' % PUT_ALL_CONFIG_SET
                            if not self.config.get('many_parts')
                            else 'caches_many_parts.xml')
        print_detailed_cache_info = self.config.get(
            'print_detailed_cache_info')

        version = self.test_class.tiden.config['artifacts'][artifact_name][
            'ignite_version']
        try:
            self.test_class.create_app_config_set(
                Ignite,
                PUT_ALL_CONFIG_SET,
                caches_list_file=caches_list_file,
                deploy=True,
                logger=False,
                wal_segment_size=self.test_class.consumption_config.get(
                    'wal_segment_size', 64 * 1024 * 1024),
                logger_path='%s/ignite-log4j2.xml' %
                self.test_class.tiden.config['rt']['remote']['test_module_dir'],
                disabled_cache_configs=False,
                zookeeper_enabled=False,
                checkpoint_read_lock_timeout=self.read_lock_property_value(
                    version),
                # caches related variables
                additional_configs=[
                    'caches.tmpl.xml',
                ],
                part_32=self.test_class.consumption_config.get('part_32', 32),
                part_64=self.test_class.consumption_config.get('part_64', 64),
                part_128=self.test_class.consumption_config.get(
                    'part_128', 128),
            )

            version, ignite = self.test_class.start_ignite_grid(
                artifact_name, activate=True, config_set=PUT_ALL_CONFIG_SET)

            self.start_probes(artifact_name)

            warmup_runs, prod_runs = self._get_number_of_runs()

            time_results = list()
            per_cache_results = {}

            client_config = Ignite.config_builder.get_config(
                'client', config_set_name=PUT_ALL_CONFIG_SET)
            with PiClient(ignite, client_config) as piclient:
                cache_names = piclient.get_ignite().cacheNames()
                data_size = int(self.config.get('data_size'))

                log_print("Running {} iterations".format(warmup_runs +
                                                         prod_runs))
                for i in range(0, warmup_runs + prod_runs):
                    # print message to all nodes log
                    if i == warmup_runs:
                        create_message_operation(
                            'Checkpoint started PRODUCTION RUN STARTED'
                        ).evaluate()

                    self.write_time_event('iteration_%s start' % i)
                    warmup_iteration = i < warmup_runs

                    log_print("Running iteration %s (%s)" %
                              (i, 'warmup' if warmup_iteration else 'prod'))

                    log_print("Loading %s values per cache into %s caches" %
                              (data_size *
                               (i + 1) - data_size * i, cache_names.size()))

                    async_operations = {}
                    self.write_time_event('iteration_%s create putall' % i)
                    for cache_name in cache_names.toArray():
                        async_operation = create_async_operation(
                            create_put_all_operation,
                            cache_name,
                            data_size * i,
                            data_size * (i + 1),
                            int(self.config.get('put_all_batch_size')),
                            value_type=ModelTypes.VALUE_ACCOUNT.value)
                        async_operations[cache_name] = async_operation
                        async_operation.evaluate()

                    for cache_name, async_op in async_operations.items():
                        async_op.getResult()

                        # accumulate timings only for production (non-warmup) iterations
                        if not warmup_iteration:
                            op = async_op.getOperation()
                            loading_time = op.getEndTime() - op.getStartTime()

                            if cache_name in per_cache_results:
                                per_cache_results[cache_name] += loading_time
                            else:
                                per_cache_results[cache_name] = loading_time

                            time_results.append(loading_time)

                    self.write_time_event('iteration_%s putall done' % i)

                log_print("Loading done")

                if print_detailed_cache_info:
                    log_print("Per cache results:")

                    for key in sorted(per_cache_results.keys()):
                        print("%s: %s" % (key, per_cache_results[key]))

            ignite.cu.deactivate()

            self.stop_probes(time_results=time_results)

            self.results['evaluated'] = True

            ignite.kill_nodes()
            ignite.delete_lfs()

            log_put("Cleanup Ignite LFS ... ")
            commands = {}
            for node_idx in ignite.nodes.keys():
                host = ignite.nodes[node_idx]['host']
                if commands.get(host) is None:
                    commands[host] = [
                        'rm -rf %s/work/*' %
                        ignite.nodes[node_idx]['ignite_home']
                    ]
                else:
                    commands[host].append(
                        'rm -rf %s/work/*' %
                        ignite.nodes[node_idx]['ignite_home'])
            results = self.test_class.tiden.ssh.exec(commands)
            print(results)
            log_put("Ignite LFS deleted.")
            log_print()
        finally:
            # remove config set
            self.test_class.remove_app_config_set(Ignite, PUT_ALL_CONFIG_SET)
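
The per-cache bookkeeping above (the "if cache_name in per_cache_results" branch) can be collapsed with collections.defaultdict, which starts every key at 0. A minimal sketch with made-up cache names and timings:

    from collections import defaultdict

    per_cache_results = defaultdict(int)
    for cache_name, loading_time in [('cache-1', 120), ('cache-2', 95), ('cache-1', 130)]:
        per_cache_results[cache_name] += loading_time
    print(dict(per_cache_results))  # {'cache-1': 250, 'cache-2': 95}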
Example #5
    def run_snapshot(self, artifact_name, snapshot_type):
        """
        Run the snapshot scenario for the given artifact.

        :param artifact_name: artifact name from the artifact configuration file
        :param snapshot_type: 'inc' for incremental, 'full' for full snapshot
        """
        super().run(artifact_name)

        log_print("Running snapshot benchmark with config: %s" % self.config,
                  color='green')

        version = self.test_class.tiden.config['artifacts'][artifact_name][
            'ignite_version']
        incremental_snapshot = snapshot_type == 'inc'
        try:
            self.test_class.create_app_config_set(
                Ignite,
                SNAPSHOT_CONFIG_SET,
                caches_list_file='caches_%s.xml' % SNAPSHOT_CONFIG_SET,
                deploy=True,
                snapshots_enabled=True,
                logger=False,
                wal_segment_size=self.test_class.consumption_config.get(
                    'wal_segment_size', 64 * 1024 * 1024),
                logger_path='%s/ignite-log4j2.xml' %
                self.test_class.tiden.config['rt']['remote']['test_module_dir'],
                disabled_cache_configs=False,
                zookeeper_enabled=False,
                checkpoint_read_lock_timeout=self.read_lock_property_value(
                    version),
                # caches related variables
                additional_configs=[
                    'caches.tmpl.xml',
                ],
                part_32=self.config.get('part_32', 32),
                part_64=self.config.get('part_64', 64),
                part_128=self.config.get('part_128', 128),
                # artifact config variables
                **self.artifact_config_variables,
            )

            version, ignite = self.test_class.start_ignite_grid(
                artifact_name,
                activate=True,
                config_set=SNAPSHOT_CONFIG_SET,
                jvm_options=self.artifact_jvm_properties)

            time_results = list()
            directory_size = list()

            self.start_probes(artifact_name)

            client_config = Ignite.config_builder.get_config(
                'client', config_set_name=SNAPSHOT_CONFIG_SET)
            PiClientIgniteUtils.load_data_with_putall(
                ignite,
                client_config,
                end_key=int(self.config.get('data_size')))

            if incremental_snapshot:
                ignite.su.snapshot_utility('snapshot', '-type=full')

            # total iterations = warmup runs + production runs
            warmup_runs, prod_runs = self._get_number_of_runs()

            log_print("Running {} iterations".format(warmup_runs + prod_runs))
            for i in range(0, warmup_runs + prod_runs):
                self.write_time_event('iteration_%s start' % i)

                warmup_iteration = i < warmup_runs

                log_print("Running iteration %s (%s)" %
                          (i, 'warmup' if warmup_iteration else 'prod'))

                ignite.su.snapshot_utility('snapshot',
                                           f'-type={snapshot_type}')

                latest_snapshot_id = ignite.su.snapshots[-1]['id']
                snapshot_folder = list(
                    SnapshotScenario.util_find_snapshot_folders_on_fs(
                        ignite, latest_snapshot_id).values())[0]
                dir_size = get_nodes_directory_size(
                    ignite, self.test_class.ssh,
                    'work/snapshot/%s' % snapshot_folder)

                m = re.search(
                    r'Command \[SNAPSHOT\] successfully finished in (\d+) seconds',
                    ignite.su.latest_utility_output)

                if incremental_snapshot:
                    # TODO: use remove operation after dr-master merge
                    with PiClient(ignite, client_config) as piclient:
                        ignite_instance = piclient.get_ignite()
                        for cache_name in ignite_instance.cacheNames().toArray():
                            ignite_instance.cache(cache_name).removeAll()

                    PiClientIgniteUtils.load_data_with_putall(
                        ignite,
                        client_config,
                        end_key=int(self.config.get('data_size')))

                # record results only for production (non-warmup) iterations
                if not warmup_iteration:
                    assert m, 'Unable to get snapshot time execution'

                    time_results.append(int(m.group(1)))
                    directory_size.append(int(dir_size))

                self.write_time_event('iteration_%s stop' % i)

            ignite.cu.deactivate()

            self.stop_probes(time_results=time_results,
                             avg_snapshot_dir_size=directory_size,
                             seconds=True)

            self.results['evaluated'] = True

            ignite.kill_nodes()
            ignite.delete_lfs()

            log_put("Cleanup Ignite LFS ... ")
            commands = {}
            for node_idx in ignite.nodes.keys():
                host = ignite.nodes[node_idx]['host']
                if commands.get(host) is None:
                    commands[host] = [
                        'rm -rf %s/work/*' %
                        ignite.nodes[node_idx]['ignite_home']
                    ]
                else:
                    commands[host].append(
                        'rm -rf %s/work/*' %
                        ignite.nodes[node_idx]['ignite_home'])
            results = self.test_class.tiden.ssh.exec(commands)
            print(results)
            log_put("Ignite LFS deleted.")
            log_print()
        finally:
            # remove config set
            self.test_class.remove_app_config_set(Ignite, SNAPSHOT_CONFIG_SET)
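
The snapshot duration is scraped from the snapshot-utility output with a regular expression rather than returned through an API. A self-contained sketch of just that parsing step; the function name is illustrative, and the message format is the one the regex above expects:

    import re

    def parse_snapshot_seconds(utility_output):
        """Return the reported snapshot duration in seconds, or None if absent."""
        m = re.search(
            r'Command \[SNAPSHOT\] successfully finished in (\d+) seconds',
            utility_output)
        return int(m.group(1)) if m else None

    print(parse_snapshot_seconds(
        'Command [SNAPSHOT] successfully finished in 42 seconds'))  # prints 42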
Example #6
    def run(self, artifact_name):
        """
        Run the scenario for the given artifact.

        :param artifact_name: artifact name from the artifact configuration file
        """
        super().run(artifact_name)

        log_print("Running streamer() benchmark with config: %s" % self.config,
                  color='green')

        version = self.test_class.tiden.config['artifacts'][artifact_name][
            'ignite_version']
        try:
            xml_config_set_name = 'caches_%s.xml' % STREAMER_CONFIG_SET
            self.test_class.create_app_config_set(
                Ignite,
                STREAMER_CONFIG_SET,
                caches_list_file=xml_config_set_name,
                deploy=True,
                logger=False,
                wal_segment_size=self.test_class.consumption_config.get(
                    'wal_segment_size', 64 * 1024 * 1024),
                logger_path='%s/ignite-log4j2.xml' %
                self.test_class.tiden.config['rt']['remote']['test_module_dir'],
                disabled_cache_configs=False,
                zookeeper_enabled=False,
                checkpoint_read_lock_timeout=self.read_lock_property_value(
                    version),
                # caches related variables
                additional_configs=[
                    'caches.tmpl.xml',
                ],
                part_32=self.test_class.consumption_config.get('part_32', 32),
                part_64=self.test_class.consumption_config.get('part_64', 64),
                part_128=self.test_class.consumption_config.get(
                    'part_128', 128),
            )

            version, ignite = self.test_class.start_ignite_grid(
                artifact_name, activate=True, config_set=STREAMER_CONFIG_SET)
            self.start_probes(artifact_name)

            # total iterations = warmup runs + production runs
            warmup_runs, prod_runs = self._get_number_of_runs()

            time_results = list()

            client_config = Ignite.config_builder.get_config(
                'client', config_set_name=STREAMER_CONFIG_SET)
            with PiClient(ignite, client_config) as piclient:
                cache_names = piclient.get_ignite().cacheNames()
                data_size = int(self.config.get('data_size'))

                log_print("Running {} iterations".format(warmup_runs +
                                                         prod_runs))
                for i in range(0, warmup_runs + prod_runs):
                    self.write_time_event('iteration_%s start' % i)

                    warmup_iteration = i < warmup_runs

                    log_print("Running iteration %s (%s)" %
                              (i, 'warmup' if warmup_iteration else 'prod'))

                    log_print("Loading %s values per cache into %s caches" %
                              (data_size *
                               (i + 1) - data_size * i, cache_names.size()))

                    async_operations = []
                    self.write_time_event('iteration_%s create streamer' % i)
                    for cache_name in cache_names.toArray():
                        async_operation = create_async_operation(
                            create_streamer_operation,
                            cache_name,
                            data_size * i,
                            data_size * (i + 1),
                            value_type=ModelTypes.VALUE_ACCOUNT.value)
                        async_operations.append(async_operation)
                        async_operation.evaluate()

                    for async_op in async_operations:
                        async_op.getResult()

                        # record timings only for production (non-warmup) iterations
                        if not warmup_iteration:
                            op = async_op.getOperation()
                            time_results.append(op.getEndTime() - op.getStartTime())

                    self.write_time_event('iteration_%s streamer done' % i)

                log_print("Loading done")

            ignite.cu.deactivate()

            self.stop_probes(time_results=time_results)

            self.results['evaluated'] = True

            ignite.kill_nodes()
            ignite.delete_lfs()

            log_put("Cleanup Ignite LFS ... ")
            commands = {}
            for node_idx in ignite.nodes.keys():
                host = ignite.nodes[node_idx]['host']
                if commands.get(host) is None:
                    commands[host] = [
                        'rm -rf %s/work/*' %
                        ignite.nodes[node_idx]['ignite_home']
                    ]
                else:
                    commands[host].append(
                        'rm -rf %s/work/*' %
                        ignite.nodes[node_idx]['ignite_home'])
            results = self.test_class.tiden.ssh.exec(commands)
            print(results)
            log_put("Ignite LFS deleted.")
            log_print()

        finally:
            # remove config set
            self.test_class.remove_app_config_set(Ignite, STREAMER_CONFIG_SET)
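
All three scenarios above share the same config-set lifecycle: create and deploy the set, run the benchmark, and let the finally block remove the set even when the run fails. A stripped-down skeleton of that pattern, with framework calls as in the examples and the benchmark body elided:

    try:
        # creation sits inside the try, so a failure while building or
        # deploying the config set still triggers the cleanup below
        self.test_class.create_app_config_set(
            Ignite, STREAMER_CONFIG_SET,
            caches_list_file='caches_%s.xml' % STREAMER_CONFIG_SET,
            deploy=True)
        version, ignite = self.test_class.start_ignite_grid(
            artifact_name, activate=True, config_set=STREAMER_CONFIG_SET)
        ...  # benchmark iterations, probes, node teardown
    finally:
        self.test_class.remove_app_config_set(Ignite, STREAMER_CONFIG_SET)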