def test_check_permissions_to_run_SCHEDULE_DELETE_command(self):
    """
    Run snapshot utility SCHEDULE -DELETE command against all permissions
    configuration (see self.auth_creds).
    :return:
    """
    self.util_enable_server_security()
    self.start_grid()
    # Pre-create one schedule per credential so each -delete below has a target.
    for auth_cred in self.auth_creds:
        self.su.snapshot_utility('SCHEDULE', '-command=create',
                                 '-name=sys_sched_%s' % auth_cred,
                                 '-full_frequency=daily')
    for auth_cred in self.auth_creds:
        self.util_enable_client_security(auth_cred)
        try:
            self.su.snapshot_utility(
                'SCHEDULE', '-delete',
                '-name=sys_sched_%s' % auth_cred,
            )
            print_green(
                'Snapshot Utility SCHEDULE -delete command could be executed with "%s" credentials '
                % auth_cred)
        except Exception:
            # Fixed log message: it previously said 'S0CHEDULE -create'
            # (typo plus wrong sub-command) although this branch reports a
            # failed SCHEDULE -delete. Dropped the unused 'as e' binding
            # and the redundant trailing 'pass'.
            print_red(
                'Snapshot Utility SCHEDULE -delete command could NOT be executed with "%s" credentials '
                % auth_cred)
def collect_metrics(self, start_time, end_time):
    """
    Collect Zabbix metrics for all known hosts over [start_time, end_time]
    and persist the result as a plot (when enabled) or as a text file.

    :param start_time: start of the measurement window.
    :param end_time: end of the measurement window.
    """
    # TODO: replace this with method that knows all hosts
    env = self.config['environment']
    hosts = get_host_list(env['server_hosts'],
                          env['client_hosts'],
                          env.get('coordinator_host', []))
    data = self.zabbix_api.collect_metrics_from_servers(
        hosts, self.metrics, start_time, stop_time=end_time)

    # Derive a suffix for the output name from the remote test directory,
    # when one is configured.
    remote_test_dir = self.config['rt']['remote'].get('test_dir')
    test_name = remote_test_dir.split('/')[-1] if remote_test_dir else ''

    out_dir = self.config['suite_var_dir']
    plotted = False
    if self.create_plot:
        # if we can't import matplotlib don't fail the tests
        try:
            self.create_simple_plot(data, out_dir,
                                    plot_config=self.plot_config,
                                    extend_name=test_name)
            plotted = True
        except ImportError:
            print_red(
                'Error: matplotlib module could not be imported. Result will be save into text file.'
            )
    if not plotted:
        # Fall back to (or default to) the plain-text result file.
        self.write_result_to_file(data, out_dir, extend_name=test_name)
def test_check_permissions_to_run_SNAPSHOT_command(self):
    """
    Run snapshot utility SNAPSHOT command against all permissions
    configuration (see self.auth_creds).
    :return:
    """
    self.util_enable_server_security()
    self.start_grid()
    self.load_data_with_streamer()
    # First pass: take a full snapshot per credential with server-side
    # security only, remembering the resulting snapshot ids.
    for credentials in self.auth_creds:
        self.su.snapshot_utility('SNAPSHOT', '-type=FULL')
        self.snapshot_ids[credentials] = self.get_last_snapshot_id()
    # Second pass: repeat with client-side security enabled per credential
    # and report whether the command was permitted.
    for credentials in self.auth_creds:
        self.util_enable_client_security(credentials)
        try:
            self.su.snapshot_utility('SNAPSHOT', '-type=FULL')
            self.snapshot_ids[credentials] = self.get_last_snapshot_id()
        except Exception:
            print_red(
                'Snapshot Utility SNAPSHOT command could NOT be executed with "%s" credentials '
                % credentials)
        else:
            print_green(
                'Snapshot Utility SNAPSHOT command could be executed with "%s" credentials '
                % credentials)
def util_change_snapshot_src_for_remote_grid(self, snapshot_dir, rename_dir=True, repair=False):
    """
    Rename snapshot files/directories on a remote grid host by prefixing
    them with '_test_' (or, with repair=True, build the reverse rename to
    restore them).

    :param snapshot_dir: directory to search for snapshot parts; in repair
           mode this is expected to be the list of paths returned by a
           previous (non-repair) call — TODO confirm against callers.
    :param rename_dir: when True rename the parent directory of each found
           'part-1.bin'; when False rename the file itself.
    :param repair: when True, skip the remote 'find' and construct the
           inverse ( '_test_' name -> original name ) rename pairs.
    :return: list of source paths that were moved.
    """
    host = None
    server_nodes = self.ignite.get_all_default_nodes(
    ) + self.ignite.get_all_additional_nodes()
    # NOTE(review): this loop leaves 'host' set to the LAST server node seen;
    # 'ignite_home' is assigned but never used afterwards (the search path
    # below uses self.ignite.ignite_home instead) — looks accidental, confirm.
    for node_id in self.ignite.nodes.keys():
        if node_id in server_nodes:
            host = self.ignite.nodes[node_id]['host']
            ignite_home = self.ignite.nodes[node_id]['ignite_home']
    if repair:
        # Build reverse pairs: '<dir>/_test_<name>' -> '<dir>/<name>'.
        f_to_rename = [('/'.join(line.split('/')[:-1]) + '/_test_' +
                        line.split('/')[-1], line) for line in snapshot_dir]
    else:
        commands = dict()
        dir_to_search = '%s/work/snapshot/' % self.ignite.ignite_home
        if snapshot_dir:
            dir_to_search = snapshot_dir
        # Locate every snapshot partition file under the search directory.
        commands[host] = ['find %s -name \'part-1.bin\'' % dir_to_search]
        log_print(commands)
        output = self.ignite.ssh.exec(commands)
        print_blue(output)
        files = [file for file in output[host][0].split('\n') if file]
        print_blue(files)
        if rename_dir:
            # Rename the directory containing part-1.bin:
            # '<...>/<dir>' -> '<...>/_test_<dir>'.
            f_to_rename = [('/'.join(line.split('/')[:-1]),
                            '/'.join(line.split('/')[:-2]) + '/_test_' +
                            line.split('/')[-2]) for line in files]
        else:
            # Rename the file itself: '<dir>/<name>' -> '<dir>/_test_<name>'.
            f_to_rename = [(line, '/'.join(line.split('/')[:-1]) + '/_test_' +
                            line.split('/')[-1]) for line in files]
    # Execute all renames in a single remote shell invocation.
    commands = set()
    remote_cmd = dict()
    files = []
    for src, dst in f_to_rename:
        commands.add('mv %s %s' % (src, dst))
        files.append(src)
    remote_cmd[host] = [';'.join(commands)]
    log_print(remote_cmd)
    output = self.ignite.ssh.exec(remote_cmd)
    log_print(output)
    print_red(remote_cmd)
    return files
def _set_baseline_few_times(self, times=2):
    """
    Try to set the current topology as baseline, retrying up to `times`
    attempts; each attempt is polled for up to 120 seconds via the
    control-utility log on the utility host.

    :param times: number of baseline-set attempts before giving up.
    :return: True when the baseline change completed.
    :raises TidenException: if no attempt completed within its timeout.
    """
    topology_changed = False
    lst_output = ''
    utility_baseline_log = 'control-utility-baseline.log'
    # Give the cluster a moment to settle before the first attempt.
    util_sleep_for_a_while(20)
    for _ in range(0, times):
        # Launch the baseline change in the background; progress is observed
        # by reading its log file over ssh.
        self.cu.set_current_topology_as_baseline(background=True,
                                                 log=utility_baseline_log)
        check_command = {
            self.cu.latest_utility_host: [
                'cat %s/%s' % (self.ignite.client_ignite_home,
                               utility_baseline_log)
            ]
        }
        timeout_counter = 0
        baseline_timeout = 120
        completed = False
        # Poll the utility log every 5 seconds until success, failure,
        # or timeout.
        while timeout_counter < baseline_timeout and not completed:
            lst_output = self.ignite.ssh.exec(check_command)[
                self.cu.latest_utility_host][0]
            log_put('Waiting for topology changed %s/%s' %
                    (timeout_counter, baseline_timeout))
            if 'Connection to cluster failed.' in lst_output:
                # Hard failure for this attempt; fall through to a retry.
                print_red('Utility unable to connect to cluster')
                break
            if 'Number of baseline nodes: ' in lst_output:
                # Utility printed the baseline summary — attempt succeeded.
                completed = True
                break
            util_sleep_for_a_while(5)
            timeout_counter += 5
        if completed:
            topology_changed = True
            break
    log_print()
    if not topology_changed:
        # Show the last utility output to aid debugging, then fail.
        print_red(lst_output)
        raise TidenException('Unable to change grid topology')
    return topology_changed
def get_caches_from_log(self, node_id):
    """
    Extract started cache names (and their cache group, when present)
    from a node's log.

    :param node_id: id of the node whose log is grepped.
    :return: list of (cache_name, group_name) tuples; group_name is None
             for caches started without a cache group.
    """
    cache_group_result = []
    # Raw strings: '\w' inside a plain string literal is an invalid escape
    # sequence (SyntaxWarning / future error on modern Python); regex
    # patterns should always be raw strings.
    regexp_str_caches = r'Started cache .*name=(\w+),'
    regexp_str_caches_groups = r'Started cache .*name=(\w+),.*group=(\w+),'
    output = self.ignite.grep_in_node_log(node_id, regexp_str_caches)
    for line in output.split('\n'):
        cache = search(regexp_str_caches, line)
        cache_group = search(regexp_str_caches_groups, line)
        if cache and not cache_group:
            # Cache started outside any group.
            cache_group_result.append((cache.group(1), None))
        elif cache_group:
            cache_group_result.append(
                (cache_group.group(1), cache_group.group(2)))
    print_red(cache_group_result)
    return cache_group_result
def _measurements_after_test(self, custom_event_name='test', skip_exch=0, skip_minor_exch=-1, max_tries=100, sleep_between_tries=10, num_partitions=1024):
    """
    Report partition-exchange timings for the topology version reached
    after a test event.

    Only ``custom_event_name`` influences the log line here; the remaining
    parameters are kept for interface compatibility and are unused in
    this implementation.

    :return: tuple (x1_time, x2_time) of exchange durations in msec.
    """
    major_ver = self.new_topVer
    minor_ver = 0
    x1 = self.exchanges.get_exchange_x1_time(major_ver, minor_ver)
    x2 = self.exchanges.get_exchange_x2_time(major_ver, minor_ver)
    print_red("Exchange [%d, %d] during %s: %d msec, %d msec"
              % (major_ver, minor_ver, custom_event_name, x1, x2))
    return x1, x2
def test_check_permissions_to_run_LIST_command(self):
    """
    Run snapshot utility LIST command against all permissions
    configuration (see self.auth_creds).
    :return:
    """
    self.util_enable_server_security()
    self.start_grid()
    # Attempt LIST once per credential and report whether it was permitted.
    for credentials in self.auth_creds:
        self.util_enable_client_security(credentials)
        try:
            self.su.snapshot_utility('LIST')
        except Exception:
            print_red(
                'Snapshot Utility LIST command could NOT be executed with "%s" credentials '
                % credentials)
        else:
            print_green(
                'Snapshot Utility LIST command could be executed with "%s" credentials '
                % credentials)
def get_caches_for_test(self):
    """
    Pick a random subset of caches for a test, expanded so that whenever
    any cache of a cache group is selected the WHOLE group is included.

    :return: set of cache names to use in the test.
    """
    caches = self.ignite.get_cache_names('cache_group')
    group_caches = {}          # group name -> list of its cache names
    caches_under_test = []
    groups = []
    # Random slice of caches (skipping the first one) as the initial pick.
    # NOTE(review): assumes len(caches) >= 3, otherwise randint(2, ...)
    # raises — confirm against the configured caches.
    tmp_caches_under_test = caches[1:randint(2, len(caches) - 1)]
    print_red(caches)
    caches_groups = self.get_caches_from_log(1)
    print_red('Caches to start %s' % tmp_caches_under_test)
    # Build the group -> caches mapping from the node-1 log.
    for cache_name, group_name in caches_groups:
        if group_name:
            if group_caches.get(group_name):
                group_caches[group_name].append(cache_name)
            else:
                group_caches[group_name] = [cache_name]
                groups.append(group_name)
    # Any group that overlaps the initial pick is pulled in entirely.
    for group in group_caches.keys():
        if set(group_caches[group]).intersection(
                set(tmp_caches_under_test)):
            caches_under_test += group_caches[group]
    caches_under_test += tmp_caches_under_test
    # if all caches are in caches_under_test just remove one group
    group_to_exclude = groups[randint(0, len(groups) - 1)]
    if len(set(caches_under_test)) == len(tmp_caches_under_test):
        caches_under_test = [
            cache for cache in caches_under_test
            if cache not in group_caches[group_to_exclude]
        ]
    print_red(caches_under_test)
    return set(caches_under_test)