Example #1
    def test_blinking_clients_clean_lfs(self):
        """
        IGN-9159 (IGNITE-7165)

        Rebalancing is cancelled when a client node joins.
        Rebalancing can take hours, and it restarts each time a client node joins.
        :return:
        """
        self.wait_for_running_clients_num(client_num=0, timeout=120)

        for iteration in range(0, self.iterations):
            log_print("Iteration {}/{}".format(str(iteration + 1),
                                               str(self.iterations)),
                      color='blue')

            self.assert_nodes_alive()

            self.ignite.kill_node(2)
            self.cleanup_lfs(2)
            self.start_node(2)

            # restart clients 4 times: entering the PiClient context starts
            # a client node, leaving it stops the client again
            for restart_time in range(0, 4):
                with PiClient(self.ignite, self.get_client_config()):
                    pass

            self.ignite.jmx.wait_for_finish_rebalance(self.rebalance_timeout,
                                                      self.group_names)
            tiden_assert_equal(
                0,
                self.ignite.find_exception_in_logs('java.lang.AssertionError'),
                "# of AssertionError")
Example #2
    def start_ignite_grid(self, name, cluster, activate=True, already_nodes=0):
        app = self.test_class.get_app(name)
        for node in cluster.nodes:
            node_config = Ignite.config_builder.get_config(node.node_type, config_set_name=node.config_name)
            if node.node_type == 'client':
                # we start client nodes as additional nodes.
                # NB: this overrides cluster.node.id at first run(!)
                node.id = app.add_additional_nodes(node_config, num_nodes=1, server_hosts=[node.host])[0]
            else:
                app.set_node_option(node.id, 'config', node_config)
                app.set_node_option(node.id, 'host', node.host)

        artifact_name = self.test_class.get_app_artifact_name(app.name)

        artifact_cfg = self.test_class.config['artifacts'][artifact_name]

        app.reset()
        log_print("Ignite ver. {}, revision {}".format(artifact_cfg['ignite_version'], artifact_cfg['ignite_revision']))

        app.start_nodes(*cluster.get_server_node_ids(), already_nodes=already_nodes, other_nodes=already_nodes)

        additional_nodes = cluster.get_client_node_ids()
        if additional_nodes:
            app.start_additional_nodes(additional_nodes, client_nodes=True)

        if activate:
            app.cu.activate(activate_on_particular_node=1)

        if not app.jmx.is_started():
            app.jmx.start_utility()

        return app
Example #3
    def restart_empty_grid_with_nodes_count(self, nodes_count):
        self.cu.deactivate()
        util_sleep_for_a_while(5)
        current_nodes = self.ignite.get_alive_default_nodes()
        self.ignite.stop_nodes()
        util_sleep_for_a_while(5)
        self.delete_lfs()
        additional_nodes_count = nodes_count - len(current_nodes)

        if additional_nodes_count < 0:
            print_blue('Going to remove nodes %s' %
                       current_nodes[additional_nodes_count:])
            for node_id in current_nodes[additional_nodes_count:]:
                current_nodes.remove(node_id)
                # if self.ignite.nodes.get(node_id):
                #     del self.ignite.nodes[node_id]

        log_print('Going to start nodes {}'.format(current_nodes))
        self.ignite.start_nodes(*current_nodes)

        if additional_nodes_count > 0:
            additional_nodes_count = nodes_count - len(current_nodes)
            print_blue('Starting %s additional nodes' % additional_nodes_count)
            node_id = list(
                self.ignite.add_additional_nodes(self.get_server_config(),
                                                 additional_nodes_count))
            self.ignite.start_additional_nodes(node_id)

        self.cu.activate()
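The negative additional_nodes_count above doubles as a slice index: current_nodes[additional_nodes_count:] is exactly the surplus tail of the list. A minimal standalone sketch of that trick, with illustrative values and no Tiden dependencies:

current_nodes = [1, 2, 3, 4, 5]              # alive node ids (illustrative)
nodes_count = 3                              # desired grid size
surplus = nodes_count - len(current_nodes)   # -2
if surplus < 0:
    to_remove = current_nodes[surplus:]      # [4, 5] - the two extra nodes
    current_nodes = current_nodes[:surplus]  # [1, 2, 3]
print(to_remove, current_nodes)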
Example #4
    def setup_testcase(self):
        if self.lfs_stored:
            self.restore_lfs('snapshot_util', timeout=1200)
        self.setup_testcase_without_start_gid()

        activation_timeout = 60
        if self.get_context_variable('sbt_model_enabled'):
            activation_timeout = 200
        self.start_grid(timeout=activation_timeout,
                        activate_on_particular_node=1)

        if not self.lfs_stored:
            if self.get_context_variable('dynamic_cache_enabled'):
                self.start_caches_dynamic(
                    caches_file_name=self.get_context_variable('caches'),
                    batch_size=10000)

            if self.get_context_variable('sbt_model_enabled'):
                PiClientIgniteUtils.load_data_with_txput_sbt_model(
                    self.config,
                    self.ignite,
                    self.get_client_config(),
                    only_caches_batch=None,
                    end_key=int(self.max_key * self.load_multiplier))

            else:
                PiClientIgniteUtils.load_data_with_streamer(
                    self.ignite,
                    self.get_client_config(),
                    end_key=int(self.max_key * self.load_multiplier),
                    allow_overwrite=True)
        log_print(repr(self.ignite), color='debug')
Example #5
    def test_blinking_node_with_extra_node_in_blt(self):
        # IGNITE-8893
        with PiClient(self.ignite, self.get_client_config()):
            self.ignite.start_additional_nodes(
                self.ignite.add_additional_nodes(self.get_server_config()))

            for iteration in range(0, self.iterations):
                log_print("Iteration {}/{}".format(str(iteration + 1),
                                                   str(self.iterations)),
                          color='blue')

                self.assert_nodes_alive()

                self.ignite.kill_node(2)

                util_sleep(10)

                self.ignite.start_node(2)

                self.ignite.jmx.wait_for_finish_rebalance(
                    self.rebalance_timeout, self.group_names)
                util_sleep(60)

        tiden_assert_equal(
            0, self.ignite.find_exception_in_logs('java.lang.AssertionError'),
            "# of AssertionError")
Example #6
    def test_with_repeated_test_and_fail_on_iteration_3(self):
        log_print('This is just a fake test to check the repeated decorator!!!')
        self._mock_test_execution(method_name=inspect.stack()[0].function)
        self.count += 1

        if self.count == 3:
            raise TidenException('Exception on iteration 3')
Example #7
    def test_host_ignite(self):
        self.start_grid('host')
        nodes_num = len(self.ignite.get_alive_default_nodes())
        self.ignite.cu.activate()
        self.ignite.cu.control_utility('--baseline')

        self.load_data_with_streamer(
            start_key=1,
            end_key=1000,
            value_type=
            'org.apache.ignite.piclient.model.values.AllTypesIndexed',
            ignite=self.ignite)
        self.ignite.start_node_inside(1)
        self.ignite.wait_for_topology_snapshot(server_num=nodes_num - 1)

        util_sleep_for_a_while(10)
        self.ignite.start_node_inside(1)

        self.ignite.wait_for_topology_snapshot(server_num=nodes_num)
        self.load_data_with_streamer(
            start_key=1,
            end_key=1000,
            allow_overwrite=True,
            value_type=
            'org.apache.ignite.piclient.model.values.AllTypesIndexed',
            ignite=self.ignite)
        log_print('Done')
Example #8
    def start_ignite_grid(self, name, activate=False, already_nodes=0, config_set='base', jvm_options=None):
        app = self.get_app(name)
        app.set_node_option('*', 'config',
                            Ignite.config_builder.get_config('server', config_set_name=config_set))

        if jvm_options:
            app.set_node_option('*', 'jvm_options', jvm_options)

        artifact_cfg = self.tiden.config['artifacts'][app.name]

        app.reset()
        version = artifact_cfg['ignite_version']
        log_print("Ignite ver. %s, revision %s" % (
            version,
            artifact_cfg['ignite_revision']
        ))

        app.set_snapshot_timeout(360)
        app.set_grid_name(name)
        app.set_additional_grid_name(False)

        # copy piclient to work directories
        self.util_exec_on_all_hosts(app, [
            'cp %s %s/libs/' % (self.tiden.config['artifacts']['piclient']['remote_path'],
                                self.tiden.config['artifacts'][name]['remote_path']),
        ])

        app.start_nodes(already_nodes=already_nodes)

        if activate:
            app.cu.activate(activate_on_particular_node=1)

        return version, app
Example #9
 def _wait_exchange_finished(self,
                             ignite,
                             major_topVer,
                             minor_topVer=0,
                             max_tries=100,
                             sleep_between_tries=5):
     n_tries = 0
     exchange_finished = False
     n_servers = None
     n_expected_servers = len(ignite.get_alive_default_nodes())
     while not exchange_finished and n_tries < max_tries:
         if n_tries > 0:
             sleep(sleep_between_tries)
         log_put(
             "Waiting for exchange topVer=%s to complete on: %s/%s servers, timeout %s/%s sec"
             % (
                 major_topVer,
                 '?' if n_servers is None else str(n_servers),
                 n_expected_servers,
                 str(n_tries * sleep_between_tries),
                 str(max_tries * sleep_between_tries),
             ))
         stdout.flush()
         self.exchanges = self._get_exchanges(ignite)
         exchange_finished, n_servers = self.exchanges.is_exchange_finished(
             major_topVer, minor_topVer, n_expected_servers)
         n_tries += 1
     log_print('')
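_wait_exchange_finished above is an instance of a generic poll-until-done loop: sleep between attempts, re-query state, stop on success or when attempts run out. A framework-free sketch of the same shape (all names here are illustrative, not part of tiden):

from time import sleep

def wait_for(condition, max_tries=100, sleep_between_tries=5):
    # Poll `condition` until it returns a truthy value or tries run out;
    # returns the last result (a falsy result means the wait timed out).
    result = None
    for n_tries in range(max_tries):
        if n_tries > 0:
            sleep(sleep_between_tries)
        result = condition()
        if result:
            break
    return result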
Example #10
    def _patch_config(self, input_config):
        env = deepcopy(environ)
        output_config = {}
        if len(self.expand_var_names) == 0:
            self._patch_config_with_env(input_config, output_config, env)
        else:
            orig_vars = {}
            for var_name in self.expand_var_names:
                orig_vars[var_name] = env[var_name]
                del env[var_name]
            for var_name in self.expand_var_names:
                var_value = environ.get(var_name, '')
                var_values = var_value.split(',')
                output_config = {}
                if len(var_values) == 0:
                    var_values = ['']
                for var_value in var_values:
                    env[var_name] = var_value
                    tmp_config = {}
                    self._patch_config_with_env(input_config, tmp_config, env)
                    mergedict(tmp_config, output_config)
                del env[var_name]
                input_config = output_config

        for var in self.missing_vars:
            log_print(
                f'WARN: environment variable {var} referenced in config is not set or empty',
                color='red')
        return output_config
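The expansion branch above is easiest to see on a toy config: a comma-separated variable yields one patched config per value, and the results are merged. A rough standalone illustration, where string.Template and dict.update stand in for the real _patch_config_with_env and mergedict (whose semantics are richer):

from os import environ
from string import Template

environ['HOSTS'] = 'host1,host2'
template = {'host_$HOSTS': {'enabled': True}}

merged = {}
for value in environ['HOSTS'].split(','):
    patched = {Template(key).substitute(HOSTS=value): val
               for key, val in template.items()}
    merged.update(patched)

print(merged)  # {'host_host1': {'enabled': True}, 'host_host2': {'enabled': True}}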
Example #11
    def teardown_testcase(self):
        self.logger.info('TestTeardown is called')

        print_debug(repr(self.ignite))
        log_print('All additional nodes: %s Alive additional nodes: %s' %
                  (self.ignite.get_all_additional_nodes(),
                   self.ignite.get_alive_additional_nodes()),
                  color='blue')

        for additional_node in self.ignite.get_alive_additional_nodes():
            self.ignite.kill_node(additional_node)

        # kill the jmx utility if it is running
        if self.ignite.jmx.is_started():
            self.ignite.jmx.kill_utility()

        self.stop_grid_hard()
        self.su.copy_utility_log()

        if self.get_context_variable('zookeeper_enabled'):
            self.zoo.stop_zookeeper()

        self.ignite.cleanup_work_dir()
        self.set_current_context()
        self.ignite.reset(hard=True)
        print_debug(repr(self.ignite))
Example #12
    def _wait_for_same_caches_size(self, piclient_master, piclient_replica, how_long=300, predicate=None):
        from datetime import datetime
        start = datetime.now()
        iteration = 0
        delay = 5
        while True:
            cache_mask = lambda x: '' in x  # matches every cache name by default
            if predicate:
                cache_mask = predicate
            master_sizes = self.get_caches_size(cache_mask, piclient=piclient_master, debug=True)
            replica_sizes = self.get_caches_size(cache_mask, piclient=piclient_replica, debug=True)

            if master_sizes == replica_sizes:
                break

            self._compare_dicts(master_sizes, replica_sizes, debug=False)
            util_sleep_for_a_while(delay)
            iteration += 1
            log_print('Waited for {} seconds. Master sizes={}, replica sizes={}'.format(iteration * delay, master_sizes,
                                                                                        replica_sizes))
            if (datetime.now() - start).seconds > how_long:
                self._compare_dicts(master_sizes, replica_sizes)
                raise TidenException('Cache sizes were not in sync after {} seconds.'.format(how_long))

        execution_time = (datetime.now() - start).seconds
        log_print('Cache sizes got in sync within {} seconds.'.format(execution_time))
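Stripped of the cache plumbing, the method above is a deadline-based convergence wait. A minimal framework-free sketch of the same control flow (names are illustrative; TimeoutError stands in for TidenException):

from datetime import datetime
from time import sleep

def wait_until_equal(get_master, get_replica, how_long=300, delay=5):
    # Re-read both sides until they match or the deadline passes.
    start = datetime.now()
    while True:
        master, replica = get_master(), get_replica()
        if master == replica:
            return master
        if (datetime.now() - start).seconds > how_long:
            raise TimeoutError(f'not in sync after {how_long} seconds: {master} != {replica}')
        sleep(delay)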
Example #13
    def test_two_blinking_nodes_clean_lfs(self):
        with PiClient(self.ignite, self.get_client_config()):

            with ExitStack() as stack:
                if is_enabled(self.config.get('zookeeper_enabled')) and \
                        is_enabled(self.config.get('zookeeper_nodes_restart')):
                    stack.enter_context(ZkNodesRestart(self.zoo, 2))

                for iteration in range(0, self.iterations):
                    log_print("Iteration {}/{}".format(str(iteration + 1),
                                                       str(self.iterations)),
                              color='blue')

                    self.assert_nodes_alive()

                    self.ignite.kill_node(2)
                    self.cleanup_lfs(2)
                    # self.ignite.start_node(2)
                    self.start_node(2)

                    self.ignite.kill_node(3)

                    util_sleep(10)

                    # self.ignite.start_node(3)
                    self.start_node(3)

                    self.ignite.jmx.wait_for_finish_rebalance(
                        self.rebalance_timeout, self.group_names)

        tiden_assert_equal(
            0, self.ignite.find_exception_in_logs('java.lang.AssertionError'),
            "# of AssertionError")
Example #14
    def test_blinking_node_baseline(self):
        # IGNITE-8879
        with PiClient(self.ignite, self.get_client_config()):
            for iteration in range(0, self.iterations):
                log_print("Iteration {}/{}".format(str(iteration + 1),
                                                   str(self.iterations)),
                          color='blue')

                self.assert_nodes_alive()

                self.ignite.kill_node(2)
                self.ignite.wait_for_topology_snapshot(
                    server_num=len(self.ignite.get_alive_default_nodes()))

                self.cu.set_current_topology_as_baseline()

                util_sleep(5)

                self.ignite.start_node(2)
                self.ignite.wait_for_topology_snapshot(
                    server_num=len(self.ignite.get_alive_default_nodes()))
                self.cu.set_current_topology_as_baseline()

                self.ignite.jmx.wait_for_finish_rebalance(
                    self.rebalance_timeout, self.group_names)

        tiden_assert_equal(
            0, self.ignite.find_exception_in_logs('java.lang.AssertionError'),
            "# of AssertionError")
Example #15
 def setup_testcase_no_grid_start(self):
     log_print('TestSetup is called', color='green')
     self.logger.info('TestSetup is called')
     self.ignite.set_activation_timeout(20)
     self.ignite.set_snapshot_timeout(120)
     self.util_copy_piclient_model_to_libs()
     self.su.clear_snapshots_list()
Example #16
 def before_hosts_setup(self, *args, **kwargs):
     self.correct_init = False
     auth_info = self.auth()
     if 'user' not in auth_info:
         log_print(
             'Incorrect SLACK_TOKEN for auth, please verify; received token - {}'
             .format(self.slack_token),
             color='red')
         self.correct_init = False
         return
     self.user = self.options.get('direct_name', auth_info['user'])
     self.correct_init = True
     self.log_print(
         "Connected to Slack, bot_name - {}, append to {}{}".format(
             self.bot_name, self.predicate, self.user))
     build_log = environ.get('BUILD_URL', False)
     self.build_log_format = "[{\"text\":\"Ver: %s, Log: %s\"}]" % (
         environ.get('IGNITE_VERSION', 'undef'),
         '{}console'.format(build_log) if build_log else 'local-run')
     self.git_branch_name = environ.get('BRANCH', 'undef')
     if self.git_branch_name == 'undef':
         try:
             self.git_branch_name = check_output(
                 ['git', 'rev-parse', '--abbrev-ref',
                  'HEAD']).decode("utf-8").rstrip()
         except Exception:
             self.git_branch_name = None
Example #17
    def util_deploy_pmi_tool(self):
        commands = {}

        hosts = self.ignite.config['environment'].get('client_hosts', [])
        pmi_remote_path = self.config['artifacts']['pmi_tool']['remote_path']
        ignite_remote_path = self.config['artifacts']['ignite']['remote_path']
        for host in hosts:
            if commands.get(host) is None:
                commands[host] = [
                    "cp %s/bin/pmi-utility.sh %s/bin/pmi-utility.sh" % (
                        pmi_remote_path, ignite_remote_path
                    ),
                    "chmod +x %s/bin/pmi-utility.sh" % ignite_remote_path,
                    "cp -r %s/bin/include/pmi-utility/ %s/bin/include/" % (
                        pmi_remote_path, ignite_remote_path
                    )
                ]
            else:
                commands[host].append('cp %s/bin/pmi-utility.sh %s/bin/pmi-utility.sh' % (
                    pmi_remote_path, ignite_remote_path))
                commands[host].append('chmod +x %s/bin/pmi-utility.sh' % ignite_remote_path)
                commands[host].append('cp -r %s/bin/include/pmi-utility/ %s/bin/include/' % (
                    pmi_remote_path, ignite_remote_path
                ))

        log_print(commands)
        results = self.ssh.exec(commands)
        log_print(results)
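The if/else above builds the same three commands in both branches; dict.setdefault collapses them. A standalone sketch of the command-dict construction with placeholder paths (SRC/DST and the host list are illustrative):

commands = {}
for host in ['host1', 'host1', 'host2']:  # the duplicate host exercises the append path
    commands.setdefault(host, []).extend([
        'cp SRC/bin/pmi-utility.sh DST/bin/pmi-utility.sh',
        'chmod +x DST/bin/pmi-utility.sh',
        'cp -r SRC/bin/include/pmi-utility/ DST/bin/include/',
    ])
print(commands)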
Example #18
    def check_snapshots_listing_on_all_nodes(self,
                                             snapshots,
                                             snapshot_path=None):
        path = 'work/snapshot/'
        if snapshot_path:
            path = snapshot_path
        output = self.run_on_all_nodes('ls %s' % path)

        if isinstance(snapshots, list):
            expecting_snapshots = list(snapshots)
        else:
            expecting_snapshots = [str(snapshots)]

        for node_id in output.keys():
            found = []
            snapshot_folders = [
                item for item in output[node_id].split('\n') if item
            ]

            tiden_assert_equal(len(expecting_snapshots), len(snapshot_folders),
                               'Number of folders in work/snapshot on server')

            for snapshot in expecting_snapshots:
                log_print(snapshot)
                found += [
                    folder for folder in snapshot_folders if snapshot in folder
                ]

            tiden_assert_equal(
                len(expecting_snapshots), len(found),
                'All folders in work/snapshot:\n%s\ncorrespond to expected snapshots: %s'
                % (','.join(found), ','.join(expecting_snapshots)))
Example #19
    def start_ignite_grid(self,
                          name,
                          num_servers=None,
                          activate=True,
                          pin_baseline=True,
                          pin_coordinator=False,
                          config_set='base',
                          run_id=0):
        def _calc_server_node_ids(ignite):
            """
            Due to current tiden AppTestCase limitations we
                - can't change servers_per_host during a test session
                - can't change the number of servers to a larger value than we already have
            because that would imply re-running Ignite.setup().

            So, for the suite we set num_server_hosts to the maximum over all tests, and during a test
            we start only a subset of the server nodes, starting from the first one.
            :return:
            """

            return [
                ignite.get_start_server_idx() + i * self.max_servers_per_host
                for i in range(num_servers)
            ]

        app = self.get_app(name)
        from apps.ignite import Ignite
        ignite = Ignite(app)
        ignite.set_grid_name('run%d' % run_id)

        self.server_config = Ignite.config_builder.get_config(
            'server', config_set_name=config_set)
        self.client_config = Ignite.config_builder.get_config(
            'client', config_set_name=config_set)
        ignite.set_node_option('*', 'config', self.server_config)
        ignite.reset()
        # ignite.activate_default_modules()

        log_print("Ignite ver. %s, revision %s" % (
            self.artifacts[ignite.name]['ignite_version'],
            self.artifacts[ignite.name]['ignite_revision'],
        ))

        if not num_servers:
            ignite.start_nodes()
        else:
            self.server_node_ids = _calc_server_node_ids(ignite)
            if pin_coordinator:
                coordinator_node = self.server_node_ids.pop(0)
                ignite.start_nodes(coordinator_node)
            ignite.start_nodes(*self.server_node_ids)

        if activate:
            ignite.cu.activate(activate_on_particular_node=1)

        if pin_baseline:
            ignite.cu.set_current_topology_as_baseline()

        return self.artifacts[ignite.name]['ignite_version'], app
Example #20
    def test_util_1_8_counters_detection_during_PME_node_from_baseline(self):
        """
        Tests that PME synchronises partition counters if conflicts are detected.
        :return:
        """
        self.load_data_with_streamer(end_key=1000,
                                     value_type=ModelTypes.VALUE_ALL_TYPES_INDEXED.value)

        with PiClient(self.ignite, self.get_client_config(), nodes_num=1) as piclient:

            caches_before_lrt = []
            for cache_name in piclient.get_ignite().cacheNames().toArray():
                caches_before_lrt.append(cache_name)

            cache_under_test = caches_before_lrt[0]
            log_print('Cache under test: %s' % cache_under_test, color='blue')

            operation = create_broke_data_entry_operation(cache_under_test, 1, True, 'counter')
            operation.evaluate()

            expected = ['Conflict partition']
            self.cu.control_utility('--cache idle_verify', all_required=expected)

            output = self.cu.latest_utility_output

            grp_id, part_id = None, None
            for line in output.split('\n'):
                m = search(r'Conflict partition: (PartitionKey|PartitionKeyV2) \[grpId=(\d+),.*partId=(\d+)\]', line)
                if m:
                    grp_id = m.group(2)
                    part_id = m.group(3)

            tiden_assert(grp_id and part_id,
                         'Expecting to find conflicts in output\n{}'.format(self.cu.latest_utility_output))

            # Start one more server node and change baseline to run PME
            log_print("Going to start additional node", color='green')
            self.ignite.add_additional_nodes(self.get_server_config(), 1)
            self.ignite.start_additional_nodes(self.ignite.get_all_additional_nodes())
            self.cu.control_utility('--baseline')
            self.cu.set_current_topology_as_baseline()
            self.cu.control_utility('--baseline')
            msg_in_log = self.find_in_node_log('Partition states validation has failed for group: %s'
                                               % cache_under_test, node_id=1)
            assert msg_in_log != []

            # Check there are no conflicts after PME
            util_sleep_for_a_while(30)
            self.cu.control_utility('--cache', 'idle_verify')

            # Stop one more server node and change baseline to run PME
            self.ignite.kill_node(self.ignite.get_alive_additional_nodes()[0])
            util_sleep_for_a_while(30)
            self.cu.control_utility('--baseline')
            self.cu.set_current_topology_as_baseline()
            self.cu.control_utility('--baseline')

            # Check there are no conflicts after PME
            self.cu.control_utility('--cache', 'idle_verify')
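The grpId/partId extraction in the middle of the test can be sanity-checked in isolation; a standalone sketch against a made-up idle_verify conflict line:

from re import search

line = 'Conflict partition: PartitionKeyV2 [grpId=1338, grpName=foo, partId=12]'  # illustrative sample
m = search(r'Conflict partition: (PartitionKey|PartitionKeyV2) \[grpId=(\d+),.*partId=(\d+)\]', line)
assert m and m.group(2) == '1338' and m.group(3) == '12'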
Example #21
 def util_change_snapshot_src(self,
                              snapshot_dir,
                              rename_dir=True,
                              repair=False):
     files = self.util_change_snapshot_src_for_remote_grid(
         snapshot_dir, rename_dir, repair)
     log_print(files)
     return files
Example #22
 def teardown_testcase(self):
     log_print('TestTeardown is called', color='green')
     self.ignite.set_activation_timeout(10)
     self.ignite.set_snapshot_timeout(30)
     self.stop_grid_hard()
     self.cleanup_lfs()
     self.remove_additional_nodes()
     self.set_current_context()
     self.reset_cluster()
     log_print(repr(self.ignite), color='debug')
Example #23
 def restart_clients(self, nodes, iterations):
     # note: range(1, iterations) restarts the clients (iterations - 1) times
     for i in range(1, iterations):
         log_print('Going to start client nodes %s' % nodes)
         self.ignite.start_additional_nodes(nodes,
                                            client_nodes=True,
                                            skip_topology_check=True)
         util_sleep(2)
         self.ignite.update_starting_node_attrs()
         log_print('Going to kill client nodes %s' % nodes)
         self.ignite.kill_nodes(*nodes)
Example #24
    def start_clusters(self, clusters):
        log_print('Starting clusters', color='debug')
        clusters[0].grid = self.start_ignite_grid(self.artifact_name + '_dc1', clusters[0])
        clusters[1].grid = self.start_ignite_grid(self.artifact_name + '_dc2', clusters[1])

        self.master = self.clusters[0].grid
        self.replica = self.clusters[1].grid
        self.master_client_config = \
            Ignite.config_builder.get_config('client', config_set_name='cluster_1_node_without_dr')
        self.replica_client_config = \
            Ignite.config_builder.get_config('client', config_set_name='cluster_2_node_without_dr')
Example #25
    def test_mixed_cluster_load_caches_old_server(self):
        """
        1. start mixed cluster (new version servers + old version servers)
        2. activate from new version control.sh
        3. start old version server
        4. add it to baseline
        5. smoke check:
        5.1. create dynamic caches from old server node
        5.2. do some load from old server node
        """

        self.ignite_new_version.cu.activate()
        created_caches = []
        self.server_config = Ignite.config_builder.get_config(
            'server', config_set_name='base')
        ignite = self.ignite_old_version
        with PiClient(ignite, self.server_config, nodes_num=1) as piclient:
            ignite.cu.add_node_to_baseline(
                ignite.get_node_consistent_id(piclient.node_ids[0]))

            dynamic_caches_factory = DynamicCachesFactory()
            async_ops = []
            for method in dynamic_caches_factory.dynamic_cache_configs:
                cache_name = "cache_group_%s" % method
                log_print('Loading {}...'.format(cache_name), color='green')

                ignite = piclient.get_ignite()

                ignite.getOrCreateCache(
                    getattr(dynamic_caches_factory, method)(cache_name))

                async_operation = create_async_operation(
                    create_put_all_operation,
                    cache_name,
                    1,
                    1001,
                    10,
                    value_type=self.data_model)
                async_ops.append(async_operation)
                async_operation.evaluate()
                created_caches.append(cache_name)

            log_print('Waiting async results...', color='debug')
            # wait for streamer to complete
            for async_op in async_ops:
                async_op.getResult()

            with TransactionalLoading(MixedTestLoadingAdapter(self),
                                      config_file=self.server_config,
                                      loading_profile=LoadingProfile(
                                          delay=1,
                                          transaction_timeout=100000)):
                sleep(60)
Example #26
 @staticmethod
 def _compare_dicts(dict_1: dict, dict_2: dict, debug=True):
     equals = True
     for key, value in dict_1.items():
         if key not in dict_2:
             log_print(f'Cache {key} is not found on replica \n{dict_2}')
             equals = False
         elif value != dict_2.get(key):
             if debug:
                 log_print(f'Values for cache {key} are not equal:\n master={value}\nreplica={dict_2.get(key)}',
                           color='debug')
             equals = False
     return equals
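An illustrative standalone check of the comparison semantics above, assuming the helper is reachable as a plain function (it takes no self; e.g. call it as a @staticmethod via its class):

master = {'cache_a': 100, 'cache_b': 200}   # illustrative cache sizes
replica = {'cache_a': 100, 'cache_b': 150}
assert _compare_dicts(master, replica, debug=False) is False   # 'cache_b' differs
assert _compare_dicts(master, dict(master)) is True            # identical copies match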
Example #27
 def run_ignite_grid(self):
     ignite_app = Ignite(self.get_app_by_type('ignite')[0])
     artifact_cfg = self.tiden.config['artifacts'][ignite_app.name]
     ignite_app.reset()
     log_print("Ignite ver. %s, revision %s" % (
         artifact_cfg['ignite_version'],
         artifact_cfg['ignite_revision'],
     ))
     ignite_app.start_nodes()
     ignite_app.cu.activate()
     sleep(10)
     ignite_app.cu.deactivate()
     ignite_app.stop_nodes()
Example #28
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

        self.artifacts = {}
        self.current_artifact = None

        if self.options.get('host') and self.options.get(
                'login') and self.options.get('password'):
            self.download_url = 'ftp://{}:{}@{}'.format(
                self.options.get('login'), self.options.get('password'),
                self.options.get('host'))
        else:
            raise TidenPluginException(
                'FTP credentials not found in FtpDownloader plugin configuration.'
            )

        for artifact_name, artifact in self.config['artifacts'].items():
            # artifact = self.config['artifacts'][artifact_name]

            if artifact['glob_path'].startswith('ftp'):
                log_print(
                    'Artifact {} ({}) will be downloaded from FTP'.format(
                        artifact_name, artifact['glob_path']),
                    color='blue')

                self.artifacts[artifact_name] = {
                    'file_name':
                    artifact['glob_path'].split('/')[-1],
                    'path_on_host':
                    '{}/{}'.format(self.config['remote']['suite_var_dir'],
                                   artifact_name),
                    'ftp_url':
                    '{}/{}'.format(self.download_url,
                                   artifact['glob_path'][4:]),
                    'artifact_on_host':
                    '{}/{}'.format(self.config['remote']['artifacts_dir'],
                                   artifact['glob_path'].split('/')[-1])
                }

                if artifact.get('repack'):
                    self.artifacts[artifact_name]['repack'] = True

                if artifact.get('remote_unzip'):
                    self.artifacts[artifact_name]['unzip'] = True

            else:
                log_print(
                    'Artifact {} ({}) will be uploaded from local host'.format(
                        artifact_name, artifact['glob_path']),
                    color='blue')
                continue
Example #29
    def remove_index_bin_files(self, node_id):
        """
        Remove all index.bin files, except those of the cache-ignite-sys-caches cache, for a particular node.
        :param node_id:
        :return:
        """
        if node_id in self.ignite.nodes.keys():
            host = self.ignite.nodes[node_id]['host']
            ignite_home = self.ignite.nodes[node_id]['ignite_home']

            commands = dict()
            dir_to_search = '{}/work/db/'.format(ignite_home)
            commands[host] = [
                'find {} -name \'index.bin\''.format(dir_to_search)
            ]

            output = self.ignite.ssh.exec(commands)
            files = [
                file for file in output[host][0].split('\n')
                if file and 'cache-ignite-sys-caches' not in file
            ]
            log_print(files, color='debug')
            log_print(commands, color='debug')
            commands[host] = [
                ';'.join(['rm {}'.format(file) for file in files])
            ]
            output = self.ignite.ssh.exec(commands)
            log_print(output, color='debug')
        else:
            log_print("Node id {} not found in server nodes {}".format(
                node_id, self.ignite.nodes.keys()),
                      color='red')
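The find-then-filter-then-rm round trip above could also be collapsed into a single remote command; a hedged sketch of the equivalent find invocation (the path is illustrative, and this one-liner is untested against the real layout):

dir_to_search = '/opt/ignite/work/db/'   # illustrative {ignite_home}/work/db
cmd = ("find {} -name 'index.bin' "
       "! -path '*cache-ignite-sys-caches*' -delete").format(dir_to_search)
print(cmd)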
Example #30
 def _build_maven_artifact(self, artifact_name, build_path, mvn_args):
     log_print(f"Building maven artifact '{artifact_name}' ...")
     build_path = abspath(build_path)
     current_directory = getcwd()
     timestamp = datetime.today().strftime('%Y-%m-%d-%H-%M-%S')
     self.build_log_name = join(build_path, f'maven-build-{timestamp}.log')
     chdir(build_path)
     try:
         rc = system(' '.join([
             self.mvn_command, mvn_args, f' > {self.build_log_name} 2>&1 '
         ]))
     finally:
         chdir(current_directory)
     return rc == 0
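os.system with a shell redirect works here, but it needs the chdir dance and hides quoting problems. A hedged alternative sketch using subprocess with the same "log everything to a file, return a success flag" contract (the function and its parameters are illustrative, not part of the original class):

import subprocess

def build_maven_artifact(build_path, mvn_command, mvn_args, log_path):
    # Run maven in build_path, send stdout+stderr to log_path, report success.
    with open(log_path, 'wb') as log:
        result = subprocess.run([mvn_command, *mvn_args.split()],
                                cwd=build_path, stdout=log,
                                stderr=subprocess.STDOUT)
    return result.returncode == 0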