def task_worker_load_nse_conditions(itask, nse_iset_number):
    """Load every non-se (non-strongly-equivalent) condition layer up to
    *nse_iset_number* into *itask*, waiting for each layer's
    transport-complete flag file to appear on disk.

    Polls missing flag files at 5-second intervals with a shared budget of
    10 sleeps across the whole call; when the budget runs out it returns
    False so the caller can re-check its own termination conditions before
    retrying (already-loaded layers are recorded and not reloaded).

    :param itask: task object; reads/updates loaded_non_se_condition_files,
        non_se_conditions, k_m_n, lp_type, is_use_extended_rules.
    :param nse_iset_number: highest layer index to load (inclusive).
    :return: True when all layers are loaded, False when the sleep budget
        was exhausted first.
    """
    sleep_cnt = 0
    for i in range(1, nse_iset_number + 1):
        # log the "waiting" message only once per layer
        first_print_debug_log = True
        if i not in itask.loaded_non_se_condition_files:
            complete_flag = isnse.get_transport_complete_flag_file(
                *itask.k_m_n, i)
            transport_complete = False
            while not transport_complete:
                if first_print_debug_log:
                    logging.info("waiting for transport complete file %s" % complete_flag)
                    first_print_debug_log = False
                # give up after 10 sleeps total so the caller can re-check
                # terminate flags; partial progress is kept in itask
                if sleep_cnt == 10:
                    return False
                if pathlib.Path(complete_flag).exists():
                    transport_complete = True
                else:
                    sleep_cnt += 1
                    time.sleep(5)
            nse_conditions = isnse.load_kmn_non_se_results(
                *itask.k_m_n, i, itask.lp_type, itask.is_use_extended_rules)
            itask.non_se_conditions.extend(nse_conditions)
            itask.loaded_non_se_condition_files.add(i)
    return True
def check_itasks_status(itasks, host_ips, task_queue, working_host_number):
    """Scan all itasks once and advance any whose current layer is complete.

    For a completed layer: flush buffered non-se conditions to a file, ship
    that file plus its transport-complete flag to every worker host, then
    either advance the task to the next layer or mark it finished.

    :param itasks: list of task objects with per-layer progress counters.
    :param host_ips: worker hosts that receive non-se result files.
    :param task_queue: queue handed to send_itasks_progress_info.
    :param working_host_number: current number of active worker hosts.
    :return: True when no task has remaining work after this pass.
    """
    is_finish = True
    for tid in range(len(itasks)):
        it = itasks[tid]
        if not it.is_task_finish:
            current_ne_number = it.working_ne_iset_numbers
            task_complete = it.incremental_task_complete_number[
                current_ne_number]
            task_total = it.incremental_task_number[current_ne_number]
            if task_complete == task_total:
                # layer complete: persist and broadcast its non-se results
                nse_file = it.flush_non_se_condition()
                isnse.transport_non_se_results([nse_file], host_ips)
                isnse.create_and_send_transport_complete_flag_file(
                    *it.k_m_n, current_ne_number, host_ips)
                send_itasks_progress_info(itasks, task_queue, working_host_number)
                # NOTE(review): an early-terminated task is skipped here
                # without setting is_task_finish, so is_finish can remain
                # True for it — confirm this matches the intended
                # termination protocol (the cls-based variant of this
                # function marks such tasks finished instead).
                if it.is_early_terminate():
                    continue
                if current_ne_number < it.max_ne:
                    it.working_ne_iset_numbers += 1
                    is_finish = False
                else:
                    it.is_task_finish = True
            else:
                is_finish = False
    return is_finish
def init_worker_host_nse_envs(isc_tasks):
    """Prepare the local non-se environment for each task.

    Clears stale transport-complete flag files for all layers, then marks
    layer 1 as complete and creates an empty layer-1 nse condition file.
    """
    for task in isc_tasks:
        k, m, n = task.k_m_n
        # drop any flags left over from a previous run
        isnse.clear_transport_complete_flag_files(k, m, n, task.min_ne, task.max_ne)
        # seed layer 1: flag it complete and touch an empty condition file
        isnse.create_transport_complete_flag_file(k, m, n, 1)
        layer1_file = isnse.get_nse_condition_file_path(
            k, m, n, 1, task.lp_type, task.is_use_extended_rules)
        pathlib.Path(layer1_file).touch()
def task_worker_load_nse_conditions(itask, nse_iset_number):
    """Blocking loader: wait until every non-se condition layer up to
    *nse_iset_number* (inclusive) has been transported, loading each layer
    not already recorded in *itask*.
    """
    for layer in range(1, nse_iset_number + 1):
        if layer in itask.loaded_non_se_condition_files:
            continue
        flag_path = pathlib.Path(
            isnse.get_transport_complete_flag_file(*itask.k_m_n, layer))
        # poll until the transport-complete marker shows up
        while not flag_path.exists():
            time.sleep(3)
        conditions = isnse.load_kmn_non_se_results(
            *itask.k_m_n, layer, itask.lp_type, itask.is_use_extended_rules)
        itask.non_se_conditions.extend(conditions)
        itask.loaded_non_se_condition_files.add(layer)
def process_kmn_itask_slice(cls, itask, task_slice, task_name, result_queue, is_check_valid_rules):
    """Process one search-task slice for *itask* and push results to
    *result_queue*.

    Ensures the non-se conditions of the previous layer are loaded
    (aborting if the task's early-terminate flag appears), runs the slice
    search, then enqueues se/nse condition batches and a stat record.

    :param task_slice: (itask_id, (ne_iset_number, left_zone_isets,
        left_iset_ids)).
    :return: True (also when the task terminated before searching).
    """
    time_fmt = "%Y-%m-%d %H:%M:%S.%f"
    itask_id = task_slice[0]
    task_params = task_slice[1]
    ne_iset_number = task_params[0]
    left_zone_isets = task_params[1]
    left_iset_ids = task_params[2]
    task_terminate_flag = isnse.get_task_early_terminate_flag_file(*itask.k_m_n)
    # conditions of the previous layer are the prerequisite for this one
    nse_iset_number = ne_iset_number - 1
    if nse_iset_number <= itask.rule_number:
        if nse_iset_number not in itask.loaded_non_se_condition_files:
            load_complete = False
            while not load_complete:
                # bail out if the task was terminated while we waited
                if pathlib.Path(task_terminate_flag).exists():
                    itask.is_task_finish = True
                    break
                load_complete = cls.task_worker_load_nse_conditions(itask, nse_iset_number)
    if itask.is_task_finish:
        return True
    start_time = datetime.now()
    start_time_str = start_time.strftime(time_fmt)[:-3]
    # right zone = full search space minus the slice's left zone
    right_zone_isets = set(itask.meta_data.search_space_iset_ids)
    right_zone_isets = right_zone_isets.difference(left_zone_isets)
    right_iset_number = ne_iset_number - len(left_iset_ids)
    msg_text = "%s: %d-%d-%d isc task: nonempty iset number %d, left zone length %d, left isets {%s}" % (
        task_name, *itask.k_m_n, ne_iset_number, len(left_zone_isets), cls.join_list_data(left_iset_ids))
    logging.info(msg_text)
    nse_cdt_cnt, check_cdt_cnt, task_number, semi_valid_skip_cnt, se_conditions_cache, nse_conditions_cache = \
        cls.search_task_slice(cls, itask, left_iset_ids, right_zone_isets, right_iset_number, is_check_valid_rules)
    # for sec in se_conditions_cache:
    # ship non-empty result batches back to the master
    if len(se_conditions_cache) > 0:
        result_queue.put((ITaskSignal.se_condition_signal, itask_id, se_conditions_cache))
    if len(nse_conditions_cache) > 0:
        result_queue.put((ITaskSignal.nse_condition_signal, itask_id, nse_conditions_cache))
    end_time = datetime.now()
    end_time_str = end_time.strftime(time_fmt)[:-3]
    msg_text = "%s: %d-%d-%d isc task: nonempty iset number %d, left zone length %d, left isets {%s}, start time %s, end time %s, find %d se conditions (no semi-valid rules), find %d non-se conditions" % (
        task_name, *itask.k_m_n, ne_iset_number, len(left_zone_isets), cls.join_list_data(left_iset_ids),
        start_time_str, end_time_str, len(se_conditions_cache), nse_cdt_cnt)
    logging.info(msg_text)
    result_queue.put(
        (ITaskSignal.stat_signal, itask_id, ne_iset_number, check_cdt_cnt,
         task_number, semi_valid_skip_cnt, (start_time, end_time)))
    return True
def task_worker_load_nse_conditions(itask, ne_iset_number):
    """Non-blocking loader: pull in every transported non-se layer strictly
    below *ne_iset_number*; return False as soon as one layer's
    transport-complete flag is missing (the caller retries later).
    """
    for layer in range(1, ne_iset_number):
        if layer in itask.loaded_non_se_condition_files:
            continue
        marker = isnse.get_transport_complete_flag_file(*itask.k_m_n, layer)
        if not pathlib.Path(marker).exists():
            # this layer has not arrived yet — stop without waiting
            return False
        itask.non_se_conditions.extend(
            isnse.load_kmn_non_se_results(
                *itask.k_m_n, layer, itask.lp_type, itask.is_use_extended_rules))
        itask.loaded_non_se_condition_files.add(layer)
    return True
def itask_slices_generator(cls, isc_config_file):
    """Generate and enqueue task slices for every configured itask, layer
    by layer, via the i4-meta slice generator; finish by flooding the
    queue with kill signals so all workers eventually exit.

    :param isc_config_file: path to the task configuration JSON.
    """
    # upper bound on a single slice's search-space size
    max_space_size = 10000000000
    msg_text = "%s init task slices generator ..." % str(cls)
    logging.info(msg_text)
    msg.send_message(msg_text)
    manager_tuple = SearchQueueManager.init_task_worker_queue_manager()
    task_queue = manager_tuple[1]
    isc_tasks_cfg = ITaskConfig(isc_config_file)
    isc_tasks = isc_tasks_cfg.isc_tasks
    for tid in range(len(isc_tasks)):
        it = isc_tasks[tid]
        min_ne = it.min_ne
        max_ne = it.max_ne
        rule_number = sum(it.k_m_n)
        isnse.clear_task_space_layer_finish_flag_files(
            *it.k_m_n, min_ne, max_ne)
        for ne_iset_number in range(min_ne, max_ne + 1):
            msg_text = "generating %d-%d-%d %d layer task slices" % (
                *it.k_m_n, ne_iset_number)
            logging.info(msg_text)
            cls.itask_slice_generator_by_i4_meta(ne_iset_number, tid, it, max_space_size, manager_tuple)
            # if ne_iset_number <= rule_number:
            #     cls.itask_slice_generator_by_i4_meta(ne_iset_number, tid, it, max_space_size, manager_tuple)
            # else:
            #     if not cls.check_itask_terminate_status(it):
            #         flag_file = isnse.get_task_space_layer_finish_flag_file(*it.k_m_n, ne_iset_number - 2)
            #         while not pathlib.Path(flag_file).exists():
            #             if cls.check_itask_terminate_status(it):
            #                 break
            #             time.sleep(1)
            #
            #     cls.itask_slice_generator_by_i4_meta(ne_iset_number, tid, it, max_space_size, manager_tuple)
    # NOTE(review): hard-coded host count; enough kill signals are queued
    # for up to working_hosts_number * 200 worker processes — confirm this
    # matches the deployment's actual worker count.
    working_hosts_number = 5
    for i in range(working_hosts_number * 200):
        task_queue.put((ITaskSignal.kill_signal, -1))
    logging.info("all itasks has been dispatched")
def check_itask_terminate_status(itask):
    """Refresh and return *itask*'s finish status from its on-disk
    early-terminate flag file; a task already marked finished stays
    finished without touching the filesystem.
    """
    if itask.is_task_finish:
        return True
    flag = isnse.get_task_early_terminate_flag_file(*itask.k_m_n)
    itask.is_task_finish = pathlib.Path(flag).exists()
    return itask.is_task_finish
def init_worker_host_nse_envs(isc_tasks):
    """Reset per-task non-se bookkeeping on this worker host.

    For each task: clear stale transport-complete flags, mark layer 1 as
    transported, create an empty layer-1 nse condition file, and clear any
    leftover early-terminate flag files.
    """
    for task in isc_tasks:
        kmn = task.k_m_n
        isnse.clear_transport_complete_flag_files(*kmn, task.min_ne, task.max_ne)
        # layer 1 is seeded up front with an empty condition file
        isnse.create_transport_complete_flag_file(*kmn, 1)
        pathlib.Path(
            isnse.get_nse_condition_file_path(
                *kmn, 1, task.lp_type, task.is_use_extended_rules)).touch()
        isnse.clear_task_terminate_flag_files(*kmn)
def check_itasks_finish_status(itasks):
    """Return True when every task is finished, treating an on-disk
    early-terminate flag as finished; stops at the first task that is
    neither finished nor flagged (marking flagged tasks along the way).
    """
    for itask in itasks:
        if itask.is_task_finish:
            continue
        flag = isnse.get_task_early_terminate_flag_file(*itask.k_m_n)
        if not pathlib.Path(flag).exists():
            return False
        itask.is_task_finish = True
    return True
def update_nse_files_to_new_host(host_ip, itasks):
    """Ship all known non-se condition files (and their flag files) to a
    newly joined worker host so it can catch up with the cluster state.

    For every task, every already-flushed layer contributes its condition
    file and its transport-complete flag; a finished task additionally
    contributes its early-terminate flag so the new host stops working on
    it immediately.

    :param host_ip: IP address of the newly joined host.
    :param itasks: all configured task objects.
    """
    nse_files = list()
    flag_files = list()
    for it in itasks:
        for i in it.non_se_condition_files:
            nse_files.append(
                isnse.get_nse_condition_file_path(
                    *it.k_m_n, i, it.lp_type, it.is_use_extended_rules))
            flag_files.append(
                isnse.get_transport_complete_flag_file(*it.k_m_n, i))
        if it.is_task_finish:
            # BUG FIX: this path was previously computed and discarded, so
            # finished tasks were never announced to new hosts; append it
            # so the terminate flag is transported too.
            flag_files.append(
                isnse.get_task_early_terminate_flag_file(*it.k_m_n))
    isnse.transport_non_se_results(nse_files, [host_ip])
    isnse.transport_non_se_results(flag_files, [host_ip])
def flush_non_se_condition(self):
    """Persist the buffered non-se conditions for the current working
    layer, fold them into the in-memory list, record the layer as
    flushed, and reset the buffer.

    :return: path of the non-se results file that was written.
    """
    layer = self.working_ne_iset_numbers
    buffered = self.non_se_conditions_buffer
    saved_file = isnse.save_kmn_non_se_results(
        self.k_m_n[0], self.k_m_n[1], self.k_m_n[2],
        layer, buffered, self.lp_type, self.is_use_extended_rules)
    # per-layer count of newly discovered non-se conditions
    self.hierarchical_new_non_se_condition_number[layer] = len(buffered)
    self.non_se_conditions.extend(buffered)
    self.non_se_condition_files.append(layer)
    # start a fresh buffer; `buffered` still references the flushed one
    self.non_se_conditions_buffer = list()
    return saved_file
def load_all_nse_condition_files(k_size, m_size, n_size, max_ne, lp_type="lpmln", is_use_extended_rules=False):
    """Load and concatenate the non-se conditions of every layer from 1 to
    *max_ne* (inclusive) for the given k-m-n task, printing a summary.

    :return: list of all loaded non-se conditions.
    """
    nse_conditions = []
    for layer in range(1, max_ne + 1):
        nse_conditions.extend(
            isnse.load_kmn_non_se_results(
                k_size, m_size, n_size, layer, lp_type, is_use_extended_rules))
    print("load %d nse conditions for %d-%d-%d %s-%s itask" % (len(nse_conditions), k_size, m_size, n_size, lp_type, str(is_use_extended_rules)))
    return nse_conditions
def check_itasks_status(cls, itasks, host_ips, manager_tuple, working_host_number):
    """Scan all itasks once; early-terminate stalled tasks and advance
    tasks whose current layer is complete.

    A task past layer 1 that reports no new se-conditions is terminated:
    its terminate flag is broadcast and it is marked finished. A completed
    layer has its non-se results flushed, transported and flagged before
    the task advances; at max_ne the task finishes and its terminate flag
    is broadcast as well.

    :return: True when no task has remaining work after this pass.
    """
    is_finish = True
    for tid in range(len(itasks)):
        it = itasks[tid]
        if not it.is_task_finish:
            current_ne_number = it.working_ne_iset_numbers
            # early termination: presumably no new se-conditions means
            # deeper layers are not worth searching — relies on
            # is_no_new_se_condition() semantics (defined elsewhere).
            if it.is_no_new_se_condition() and current_ne_number > 1:
                isnse.create_and_send_task_early_terminate_flag_file(
                    *it.k_m_n, current_ne_number, host_ips)
                it.is_task_finish = True
                it.save_progress_info()
                continue
            task_complete = it.hierarchical_task_complete_number[
                current_ne_number]
            task_total = it.hierarchical_task_number[current_ne_number]
            if task_complete == task_total:
                it.save_progress_info()
                # layer complete: persist and broadcast its non-se results
                nse_file = it.flush_non_se_condition()
                isnse.transport_non_se_results([nse_file], host_ips)
                isnse.create_and_send_transport_complete_flag_file(
                    *it.k_m_n, current_ne_number, host_ips)
                cls.send_itasks_progress_info(cls, itasks, manager_tuple, working_host_number, False)
                if current_ne_number < it.max_ne:
                    it.working_ne_iset_numbers += 1
                    is_finish = False
                else:
                    # final layer done: finish and tell workers to stop
                    it.is_task_finish = True
                    isnse.create_and_send_task_early_terminate_flag_file(
                        *it.k_m_n, current_ne_number, host_ips)
                    it.save_progress_info()
            else:
                is_finish = False
    return is_finish
def itask_slices_generator(cls, isc_config_file):
    """Produce and enqueue search-task slices for every configured itask.

    Layers with ne_iset_number <= rule_number are split with the plain
    Vandermonde generator over a capped left zone; deeper layers first
    wait for the finish flag of layer (ne_iset_number - 2) — unless the
    task has terminated — then use the merged near-uniform generator.
    Ends by flooding the queue with kill signals so workers exit.
    """
    # upper bound on a single slice's search-space size
    max_space_size = 100000000000
    msg_text = "%s init task slices generator ..." % str(cls)
    logging.info(msg_text)
    msg.send_message(msg_text)
    manager, task_queue, ht_task_queue, result_queue = \
        SearchQueueManager.init_task_worker_queue_manager()
    isc_tasks_cfg = ITaskConfig(isc_config_file)
    isc_tasks = isc_tasks_cfg.isc_tasks
    for tid in range(len(isc_tasks)):
        it = isc_tasks[tid]
        min_ne = it.min_ne
        max_ne = it.max_ne
        isnse.clear_task_space_layer_finish_flag_files(*it.k_m_n, min_ne, max_ne)
        left_zone_length = len(it.meta_data.search_i4_composed_iset_ids)
        search_isets = copy.deepcopy(it.meta_data.search_space_iset_ids)
        search_isets_length = len(search_isets)
        # cap the left zone so the number of left-side combinations stays bounded
        max_left_zone_length = 12
        if left_zone_length > max_left_zone_length:
            left_zone_length = 12
        rule_number = sum(it.k_m_n)
        left_zone_iset_ids = search_isets[0:left_zone_length]
        right_zone_iset_ids = search_isets[left_zone_length:]
        for ne_iset_number in range(min_ne, max_ne + 1):
            msg_text = "generating %d-%d-%d %d layer task slices" % (*it.k_m_n, ne_iset_number)
            logging.info(msg_text)
            if ne_iset_number <= rule_number:
                left_split = True
                task_slices = CombinationSearchingSpaceSplitter.vandermonde_generator(
                    left_zone_iset_ids, right_zone_iset_ids, ne_iset_number)
                for ts in task_slices:
                    # normalize the left component to a set before enqueueing
                    new_ts = (left_split, set(ts[0]), left_zone_length, ts[2])
                    task_queue.put((tid, new_ts))
            else:
                # wait for the finish flag of layer (ne_iset_number - 2)
                # before generating this layer, unless the task terminated
                if not cls.check_itask_terminate_status(it):
                    flag_file = isnse.get_task_space_layer_finish_flag_file(*it.k_m_n, ne_iset_number - 2)
                    while not pathlib.Path(flag_file).exists():
                        if cls.check_itask_terminate_status(it):
                            break
                        time.sleep(1)
                task_slices = CombinationSearchingSpaceSplitter.merge_small_near_uniform_vandermonde_generator(
                    left_zone_iset_ids, right_zone_iset_ids, ne_iset_number, max_space_size=max_space_size)
                ts_cnt = 0
                for ts in task_slices:
                    task_queue.put((tid, ts))
                    ts_cnt += 1
                    # periodically re-check termination so we stop flooding
                    # the queue with slices for a dead task
                    if ts_cnt % 10000 == 0 and cls.check_itask_terminate_status(it):
                        break
    # NOTE(review): hard-coded host count; enough kill signals are queued
    # for up to working_hosts_number * 200 worker processes — confirm this
    # matches the deployment's actual worker count.
    working_hosts_number = 5
    for i in range(working_hosts_number * 200):
        task_queue.put((ITaskSignal.kill_signal, -1))
    logging.info("all itasks has been dispatched")
def init_kmn_isc_task_master_from_config(cls, isc_config_file="isets-tasks.json", sleep_time=30):
    """Run the isc task master: dispatch slices, track worker hosts, and
    collect results until every task finishes.

    Starts the slice-generator and pre-task worker pools, then loops:
    periodically reports/saves progress, checks task status, and drains
    the result queue (sleeping *sleep_time* seconds when it is empty).
    Host join/leave events (+1/-1 in the result records) maintain the
    online-host set; newly joined hosts receive all existing nse files.
    After all tasks finish it waits for remaining workers to check out.

    :return: the list of task objects with final progress recorded.
    """
    start_time = datetime.now()
    manager, task_queue, ht_task_queue, result_queue = \
        SearchQueueManager.init_task_master_queue_manager()
    manager_tuple = (manager, task_queue, ht_task_queue, result_queue)
    localhost_ip = ssh.get_host_ip()
    isc_tasks_cfg = ITaskConfig(isc_config_file)
    isc_tasks = isc_tasks_cfg.isc_tasks
    for itask in isc_tasks:
        itask.init_task_numbers()
        isnse.clear_task_terminate_flag_files(*itask.k_m_n)
    ts_generator_pool = cls.init_task_slices_generator_pool(cls, isc_config_file)
    pre_pool = cls.init_pre_task_worker_pool(cls, isc_config_file, result_queue)
    working_hosts_number = 0
    msg_text = "isc task master start, load %d isc tasks from %s" % (len(isc_tasks), isc_config_file)
    logging.info(msg_text)
    msg.send_message(msg_text)
    sleep_cnt = 0
    online_hosts = set()
    progress_msg_cnt = 10
    task_finish = False
    print_loop = 100
    print_cnt = 0
    while not task_finish:
        print_cnt += 1
        # progress report every print_loop iterations ...
        if print_cnt == print_loop:
            cls.send_itasks_progress_info(cls, isc_tasks, manager_tuple, working_hosts_number, False)
            for it in isc_tasks:
                it.save_progress_info()
            sleep_cnt = 0
            print_cnt = 0
        # ... or after progress_msg_cnt consecutive idle sleeps
        if sleep_cnt == progress_msg_cnt:
            cls.send_itasks_progress_info(cls, isc_tasks, manager_tuple, working_hosts_number, False)
            for it in isc_tasks:
                it.save_progress_info()
            sleep_cnt = 0
            print_cnt = 0
        task_finish = cls.check_itasks_status(cls, isc_tasks, online_hosts, manager_tuple, working_hosts_number)
        if result_queue.empty():
            time.sleep(sleep_time)
            sleep_cnt += 1
            continue
        whn_diff = cls.process_result_queue(cls, result_queue, isc_tasks)
        whn_number = whn_diff[0]
        host_ip = whn_diff[1]
        working_hosts_number += whn_number
        if whn_number == 1:
            # a new remote host joined: sync existing nse files to it
            if host_ip != localhost_ip:
                online_hosts.add(host_ip)
                cls.update_nse_files_to_new_host(host_ip, isc_tasks)
        elif whn_number == -1:
            if host_ip != localhost_ip:
                online_hosts.remove(host_ip)
    ts_generator_pool.join()
    pre_pool.join()
    RawIConditionSearchWorker.send_worker_terminate_info(RawIConditionSearchWorker, localhost_ip, result_queue)
    # drain remaining worker check-out messages before reporting
    while working_hosts_number > 0:
        if sleep_cnt == 10:
            cls.send_itasks_progress_info(cls, isc_tasks, manager_tuple, working_hosts_number, True)
            sleep_cnt = 0
        if result_queue.empty():
            time.sleep(sleep_time)
            sleep_cnt += 1
            continue
        whn_diff = cls.process_result_queue(cls, result_queue, isc_tasks)
        working_hosts_number += whn_diff[0]
    end_time = datetime.now()
    msg_text = "isc tasks finish, running time: %s" % str(end_time - start_time)
    logging.info(msg_text)
    msg.send_message(msg=msg_text)
    for it in isc_tasks:
        it.task_finish()
        msg_text = it.get_final_detail_progress_info()
        logging.info(msg_text)
        msg.send_message(msg=msg_text)
        # attached_files.append(it.result_file)
    return isc_tasks
def init_kmn_isc_task_master_from_config(
        cls, isc_config_file="isets-tasks.json", sleep_time=30):
    """Run the i4 task master: dispatch slices, track worker hosts, and
    collect i4 results until every task's result record is complete.

    Builds one result record per task of the form
    [2**i4_iset_size - 1, 0, partial_results, result_file] (removing any
    stale result file first), then loops: periodically reports progress,
    checks i4 task status, and drains the result queue (sleeping
    *sleep_time* seconds when idle). Host join/leave events maintain the
    online-host set; afterwards it waits for workers to check out.

    :return: the list of task objects.
    """
    manager, task_queue, ht_task_queue, result_queue = \
        SearchQueueManager.init_task_master_queue_manager()
    manager_tuple = (manager, task_queue, ht_task_queue, result_queue)
    localhost_ip = ssh.get_host_ip()
    isc_tasks_cfg = ITaskConfig(isc_config_file)
    isc_tasks = isc_tasks_cfg.isc_tasks
    result_record = list()
    for itask in isc_tasks:
        isnse.clear_task_terminate_flag_files(*itask.k_m_n)
        i4_iset_size = len(itask.meta_data.search_i4_composed_iset_ids)
        file = i4u.get_kmn_i4_all_result_file(*itask.k_m_n)
        # start from a clean result file for this run
        if os.path.exists(file):
            os.remove(file)
        record = [2**i4_iset_size - 1, 0, list(), file]
        result_record.append(record)
    ts_generator_pool = cls.init_task_slices_generator_pool(
        cls, isc_config_file)
    pre_task_pool = cls.init_pre_task_worker_pool(cls, isc_config_file, result_queue)
    working_hosts_number = 0
    msg_text = "isc task master start, load %d isc tasks from %s" % (
        len(isc_tasks), isc_config_file)
    logging.info(msg_text)
    msg.send_message(msg_text)
    sleep_cnt = 0
    online_hosts = set()
    progress_msg_cnt = 10
    task_finish = False
    print_loop = 10
    print_cnt = 0
    while not task_finish:
        print_cnt += 1
        # progress report every print_loop iterations ...
        if print_cnt == print_loop:
            cls.send_itasks_progress_info(cls, result_record, manager_tuple, working_hosts_number, False)
            sleep_cnt = 0
            print_cnt = 0
        # ... or after progress_msg_cnt consecutive idle sleeps
        if sleep_cnt == progress_msg_cnt:
            cls.send_itasks_progress_info(cls, result_record, manager_tuple, working_hosts_number, False)
            sleep_cnt = 0
            print_cnt = 0
        task_finish = cls.check_i4_tasks_status(cls, result_record)
        if result_queue.empty():
            time.sleep(sleep_time)
            sleep_cnt += 1
            continue
        whn_diff = cls.process_i4_result_queue(cls, result_queue, result_record)
        whn_number = whn_diff[0]
        host_ip = whn_diff[1]
        working_hosts_number += whn_number
        if whn_number == 1:
            if host_ip != localhost_ip:
                online_hosts.add(host_ip)
        elif whn_number == -1:
            if host_ip != localhost_ip:
                online_hosts.remove(host_ip)
    ts_generator_pool.join()
    pre_task_pool.join()
    I4SearchWorker.send_worker_terminate_info(I4SearchWorker, localhost_ip, result_queue)
    # drain remaining worker check-out messages before reporting
    while working_hosts_number > 0:
        if sleep_cnt == 10:
            cls.send_itasks_progress_info(cls, result_record, manager_tuple, working_hosts_number, True)
            sleep_cnt = 0
        if result_queue.empty():
            time.sleep(sleep_time)
            sleep_cnt += 1
            continue
        whn_diff = cls.process_i4_result_queue(cls, result_queue, result_record)
        working_hosts_number += whn_diff[0]
    msg_text = "isc tasks finish!"
    logging.info(msg_text)
    msg.send_message(msg=msg_text)
    cls.send_itasks_progress_info(cls, result_record, manager_tuple, working_hosts_number, True)
    return isc_tasks
def kmn_isc_task_worker(isc_config_file="isets-tasks.json", worker_id=1, lp_type="lpmln", is_check_valid_rules=True, is_use_extended_rules=True):
    """Worker main loop: pull isc task slices from the master queue,
    search them for strongly-equivalent conditions, and push results back.

    Runs until the host lock file disappears or a kill signal is dequeued.
    For each slice it first loads the previous layer's non-se conditions
    (aborting the task if its early-terminate flag appears), then
    enumerates right-zone combinations, filters candidates, validates the
    survivors, and enqueues se/nse batches plus a stat record.

    :param isc_config_file: path to the task configuration JSON.
    :param worker_id: numeric id used in this worker's log name.
    """
    ISCFileTaskTerminationWorkerQueueManager.register("get_task_queue")
    ISCFileTaskTerminationWorkerQueueManager.register("get_result_queue")
    manager = ISCFileTaskTerminationWorkerQueueManager(
        address=(config.task_host, config.task_host_port),
        authkey=bytes(config.task_host_key, encoding="utf-8"))
    # NOTE(review): this unconditionally overrides the caller-supplied
    # is_check_valid_rules parameter — looks like a leftover debug switch;
    # confirm whether it is intentional.
    is_check_valid_rules = False
    manager.connect()
    task_queue = manager.get_task_queue()
    result_queue = manager.get_result_queue()
    worker_name = "worker-%d" % worker_id
    time_fmt = "%Y-%m-%d %H:%M:%S.%f"
    worker_host_name = config.worker_host_name
    msg_text = "task worker %s start!" % (worker_name)
    logging.info(msg_text)
    isc_tasks = ISCTaskConfig(isc_config_file, is_use_extended_rules)
    isc_tasks = isc_tasks.isc_tasks
    processed_task_slices_number = 0
    # layer 1 is considered pre-loaded for every task
    for it in isc_tasks:
        it.loaded_non_se_condition_files.add(1)
    first_print_debug_log = True
    while True:
        # the lock file doubles as a global "keep running" switch
        if not pathlib.Path(config.task_host_lock_file).exists():
            break
        if task_queue.empty():
            if first_print_debug_log:
                logging.info("waiting for isc task slices")
                first_print_debug_log = False
            time.sleep(20)
            continue
        itask = task_queue.get()
        if itask[0] == kill_signal:
            msg_text = "%s:%s isc task worker terminate ..." % (
                worker_host_name, worker_name)
            logging.info(msg_text)
            break
        isc_task_id = itask[0]
        task_params = itask[1]
        ne_iset_number = task_params[0]
        left_length = task_params[1]
        left_iset_ids = task_params[2]
        it = isc_tasks[isc_task_id]
        task_terminate_flag = isnse.get_task_early_terminate_flag_file(
            *it.k_m_n)
        # the previous layer's non-se conditions are needed as filters
        nse_iset_number = ne_iset_number - 1
        if nse_iset_number not in it.loaded_non_se_condition_files:
            load_complete = False
            while not load_complete:
                if pathlib.Path(task_terminate_flag).exists():
                    it.is_task_finish = True
                    break
                load_complete = task_worker_load_nse_conditions(
                    it, nse_iset_number)
        if it.is_task_finish:
            continue
        start_time = datetime.now()
        start_time_str = start_time.strftime(time_fmt)[:-3]
        k_size = it.k_m_n[0]
        m_size = it.k_m_n[1]
        n_size = it.k_m_n[2]
        se_iset_ids = it.meta_data.se_iset_ids
        right_iset_ids = se_iset_ids[left_length:]
        right_iset_number = ne_iset_number - len(left_iset_ids)
        # unknown_iset_number = len(se_iset_ids)
        task_name = worker_name + ("-task-%d" % processed_task_slices_number)
        msg_text = "%s: %d-%d-%d isc task: nonempty iset number %d, left zone length %d, left isets {%s}" % (
            task_name, k_size, m_size, n_size, ne_iset_number, left_length, join_list_data(left_iset_ids))
        logging.info(msg_text)
        se_cdt_cnt = 0
        nse_cdt_cnt = 0
        new_nse_cdt_cnt = 0
        check_cdt_cnt = 0
        se_conditions_cache = list()
        nse_conditions_cache = list()
        validator = ISetConditionValidator(
            lp_type=lp_type, is_use_extended_rules=is_use_extended_rules)
        # enumerate all right-zone completions of this slice's left zone
        task_iter = itertools.combinations(right_iset_ids, right_iset_number)
        task_number = 0
        for right_ti in task_iter:
            non_ne_ids = list()
            non_ne_ids.extend(left_iset_ids)
            non_ne_ids.extend(list(right_ti))
            non_ne_ids = set(non_ne_ids)
            task_number += 1
            # skip candidates containing a known non-se sub-part
            if check_contain_nse_subparts(non_ne_ids, it):
                nse_cdt_cnt += 1
                continue
            check_cdt_cnt += 1
            if not check_contain_i4_isets(non_ne_ids, it):
                continue
            is_contain_valid_rule, is_strongly_equivalent, condition = \
                validator.validate_kmn_extended_iset_condition_from_non_emtpy_iset_ids_return_icondition_obj(
                    non_ne_ids, k_size, m_size, n_size, is_check_valid_rule=is_check_valid_rules)
            # if not is_contain_valid_rule:
            if is_strongly_equivalent:
                se_conditions_cache.append(condition)
                se_cdt_cnt += 1
            else:
                nse_conditions_cache.append(condition)
                nse_cdt_cnt += 1
                new_nse_cdt_cnt += 1
        # for sec in se_conditions_cache:
        # ship non-empty result batches back to the master
        if se_cdt_cnt > 0:
            result_queue.put(
                (se_condition_signal, isc_task_id, se_conditions_cache))
        if new_nse_cdt_cnt > 0:
            result_queue.put(
                (nse_condition_signal, isc_task_id, nse_conditions_cache))
        end_time = datetime.now()
        end_time_str = end_time.strftime(time_fmt)[:-3]
        msg_text = "%s: %d-%d-%d isc task: nonempty iset number %d, left zone length %d, left isets {%s}, start time %s, end time %s, find %d se conditions (no semi-valid rules), find %d non-se conditions" % (
            task_name, k_size, m_size, n_size, ne_iset_number, left_length, join_list_data(left_iset_ids),
            start_time_str, end_time_str, se_cdt_cnt, nse_cdt_cnt)
        logging.info(msg_text)
        result_queue.put((stat_signal, isc_task_id, ne_iset_number,
                          check_cdt_cnt, task_number, (start_time, end_time)))
        processed_task_slices_number += 1
        first_print_debug_log = True
    logging.info("%s processes %d isc task slices" % (worker_name, processed_task_slices_number))