def sync_log_config_file(host_ips):
    username = config.ssh_user_name
    password = config.ssh_password
    port = 22
    file = [(r"W:\my_projects\lpmln_isets\logging.ini",
             "/home/wangbin/experiments/lpmln_isets/lpmln_isets/logging.ini")]
    for ip in host_ips:
        ssh.transport_files(ip, port, username, password, file)
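# The sync helpers in this module delegate the actual copy to ssh.transport_files.
# Below is a minimal sketch of what such a helper might look like, assuming it wraps
# paramiko's SFTP client; transport_files_sketch is a hypothetical name, and the real
# project helper may differ in signature and error handling.
import paramiko

def transport_files_sketch(host, port, username, password, file_pairs):
    transport = paramiko.Transport((host, port))
    try:
        transport.connect(username=username, password=password)
        sftp = paramiko.SFTPClient.from_transport(transport)
        # Each pair is (local_path, remote_path); remote directories must already exist.
        for local_path, remote_path in file_pairs:
            sftp.put(local_path, remote_path)
    finally:
        transport.close()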
def init_kmn_isc_task_workers(cls, isc_config_file="isets-tasks.json",
                              is_check_valid_rules=True, result_queue=None):
    payload = config.worker_payload
    worker_pool = Pool(payload)
    pathlib.Path(config.task_host_lock_file).touch()
    if result_queue is None:
        manager, task_queue, ht_task_queue, result_queue = \
            SearchQueueManager.init_task_worker_queue_manager()
    host_ip = ssh.get_host_ip()
    result_queue.put((ITaskSignal.add_worker_signal, config.worker_host_name, host_ip))
    logging.info("task worker host %s start ..." % config.worker_host_name)
    # initialize the non-equivalent condition directory files
    isc_tasks = ITaskConfig(isc_config_file)
    isc_tasks = isc_tasks.isc_tasks
    cls.init_worker_host_nse_envs(isc_tasks)
    for i in range(payload):
        worker_pool.apply_async(cls.kmn_isc_task_worker,
                                args=(cls, isc_config_file, i + 1, is_check_valid_rules))
    worker_pool.close()
    return worker_pool, result_queue, host_ip
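# Hypothetical caller sketch for the non-blocking variant above (run_worker_host is not
# part of the project). It mirrors the join / kill-signal sequence that the blocking
# variants of init_kmn_isc_task_workers below perform inline.
def run_worker_host(cls, isc_config_file="isets-tasks.json"):
    worker_pool, result_queue, host_ip = cls.init_kmn_isc_task_workers(cls, isc_config_file)
    worker_pool.join()
    result_queue.put((ITaskSignal.kill_signal, config.worker_host_name, host_ip))
    logging.info("task worker host %s exit ..." % config.worker_host_name)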
def transport_non_se_results(files, hosts):
    # print(files)
    # print(hosts)
    file_pairs = list()
    for f in files:
        file_pairs.append((f, f))
    for h in hosts:
        send_success = False
        while not send_success:
            try:
                ssh.transport_files(h, 22, config.ssh_user_name, config.ssh_password, file_pairs)
                send_success = True
            except Exception as e:
                print(e)
                send_success = False
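# The loop above retries forever on any exception. A bounded alternative with exponential
# backoff is sketched below; retry_transport is a hypothetical helper, not part of the
# project, and reuses the same ssh.transport_files call and module-level config/logging.
def retry_transport(host, file_pairs, attempts=5, base_delay=1.0):
    for attempt in range(1, attempts + 1):
        try:
            ssh.transport_files(host, 22, config.ssh_user_name, config.ssh_password, file_pairs)
            return True
        except Exception as e:
            logging.warning("transfer to %s failed (%s), attempt %d/%d", host, e, attempt, attempts)
            if attempt < attempts:
                # back off 1s, 2s, 4s, ... before the next attempt
                time.sleep(base_delay * (2 ** (attempt - 1)))
    return False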
def init_kmn_isc_task_workers(cls, isc_config_file="isets-tasks.json", is_check_valid_rules=True):
    payload = config.worker_payload
    worker_pool = Pool(payload)
    pathlib.Path(config.task_host_lock_file).touch()
    SearchWorkerQueueManger.register("get_task_queue")
    SearchWorkerQueueManger.register("get_result_queue")
    manager = SearchWorkerQueueManger(address=(config.task_host, config.task_host_port),
                                      authkey=bytes(config.task_host_key, encoding="utf-8"))
    manager.connect()
    result_queue = manager.get_result_queue()
    host_ip = ssh.get_host_ip()
    result_queue.put((ITaskSignal.add_worker_signal, config.worker_host_name, host_ip))
    logging.info("task worker host %s start ..." % config.worker_host_name)
    # initialize the non-equivalent condition directory files
    isc_tasks = ITaskConfig(isc_config_file)
    isc_tasks = isc_tasks.isc_tasks
    cls.init_worker_host_nse_envs(isc_tasks)
    for i in range(payload):
        worker_pool.apply_async(cls.kmn_isc_task_worker,
                                args=(cls, isc_config_file, i + 1, is_check_valid_rules))
    worker_pool.close()
    worker_pool.join()
    # if pathlib.Path(task_worker_host_lock_file).exists():
    result_queue.put((ITaskSignal.kill_signal, config.worker_host_name, host_ip))
    logging.info("task worker host %s send kill signal ..." % config.worker_host_name)
    logging.info("task worker host %s exit ..." % config.worker_host_name)
def init_kmn_isc_task_workers(isc_config_file="isets-tasks.json", lp_type="lpmln",
                              is_check_valid_rules=True, is_use_extended_rules=True):
    payload = config.worker_payload
    worker_pool = Pool(payload)
    pathlib.Path(config.task_host_lock_file).touch()
    ISCFileTaskTerminationWorkerQueueManager.register("get_task_queue")
    ISCFileTaskTerminationWorkerQueueManager.register("get_result_queue")
    manager = ISCFileTaskTerminationWorkerQueueManager(
        address=(config.task_host, config.task_host_port),
        authkey=bytes(config.task_host_key, encoding="utf-8"))
    manager.connect()
    result_queue = manager.get_result_queue()
    host_ip = ssh.get_host_ip()
    result_queue.put((add_worker_signal, config.worker_host_name, host_ip))
    logging.info("task worker host %s start ..." % config.worker_host_name)
    for i in range(payload):
        worker_pool.apply_async(kmn_isc_task_worker,
                                args=(isc_config_file, "worker-%d" % (i + 1), lp_type,
                                      is_check_valid_rules, is_use_extended_rules))
    worker_pool.close()
    worker_pool.join()
    # if pathlib.Path(task_worker_host_lock_file).exists():
    result_queue.put((kill_signal, config.worker_host_name, host_ip))
    logging.info("task worker host %s send kill signal ..." % config.worker_host_name)
    logging.info("task worker host %s exit ..." % config.worker_host_name)
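# Both worker initializers above follow the standard multiprocessing.managers client
# pattern: register the queue names without callables, then connect to a manager server
# that the master has already started. A self-contained sketch of that pattern follows;
# WorkerQueueManagerSketch and connect_worker_queues are hypothetical names.
from multiprocessing.managers import BaseManager

class WorkerQueueManagerSketch(BaseManager):
    pass

WorkerQueueManagerSketch.register("get_task_queue")
WorkerQueueManagerSketch.register("get_result_queue")

def connect_worker_queues(host, port, authkey):
    # connect() attaches to the running server; the getters return queue proxies
    manager = WorkerQueueManagerSketch(address=(host, port), authkey=authkey)
    manager.connect()
    return manager, manager.get_task_queue(), manager.get_result_queue()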
def transport_files(hostip, files):
    file_pairs = list()
    for f in files:
        file_pairs.append((f, f))
    ssh.transport_files(hostip, 22, config.ssh_user_name, config.ssh_password, file_pairs)
def init_kmn_isc_task_master_from_config(cls, isc_config_file="isets-tasks.json", sleep_time=30):
    start_time = datetime.now()
    manager, task_queue, ht_task_queue, result_queue = \
        SearchQueueManager.init_task_master_queue_manager()
    manager_tuple = (manager, task_queue, ht_task_queue, result_queue)
    localhost_ip = ssh.get_host_ip()
    isc_tasks_cfg = ITaskConfig(isc_config_file)
    isc_tasks = isc_tasks_cfg.isc_tasks
    for itask in isc_tasks:
        itask.init_task_numbers()
        isnse.clear_task_terminate_flag_files(*itask.k_m_n)
    ts_generator_pool = cls.init_task_slices_generator_pool(cls, isc_config_file)
    pre_pool = cls.init_pre_task_worker_pool(cls, isc_config_file, result_queue)
    working_hosts_number = 0
    msg_text = "isc task master start, load %d isc tasks from %s" % (len(isc_tasks), isc_config_file)
    logging.info(msg_text)
    msg.send_message(msg_text)
    sleep_cnt = 0
    online_hosts = set()
    progress_msg_cnt = 10
    task_finish = False
    print_loop = 100
    print_cnt = 0
    while not task_finish:
        print_cnt += 1
        if print_cnt == print_loop:
            cls.send_itasks_progress_info(cls, isc_tasks, manager_tuple, working_hosts_number, False)
            for it in isc_tasks:
                it.save_progress_info()
            sleep_cnt = 0
            print_cnt = 0
        if sleep_cnt == progress_msg_cnt:
            cls.send_itasks_progress_info(cls, isc_tasks, manager_tuple, working_hosts_number, False)
            for it in isc_tasks:
                it.save_progress_info()
            sleep_cnt = 0
            print_cnt = 0
        task_finish = cls.check_itasks_status(cls, isc_tasks, online_hosts,
                                              manager_tuple, working_hosts_number)
        if result_queue.empty():
            time.sleep(sleep_time)
            sleep_cnt += 1
            continue
        whn_diff = cls.process_result_queue(cls, result_queue, isc_tasks)
        whn_number = whn_diff[0]
        host_ip = whn_diff[1]
        working_hosts_number += whn_number
        if whn_number == 1:
            if host_ip != localhost_ip:
                online_hosts.add(host_ip)
                cls.update_nse_files_to_new_host(host_ip, isc_tasks)
        elif whn_number == -1:
            if host_ip != localhost_ip:
                online_hosts.remove(host_ip)
    ts_generator_pool.join()
    pre_pool.join()
    RawIConditionSearchWorker.send_worker_terminate_info(RawIConditionSearchWorker,
                                                         localhost_ip, result_queue)
    while working_hosts_number > 0:
        if sleep_cnt == 10:
            cls.send_itasks_progress_info(cls, isc_tasks, manager_tuple, working_hosts_number, True)
            sleep_cnt = 0
        if result_queue.empty():
            time.sleep(sleep_time)
            sleep_cnt += 1
            continue
        whn_diff = cls.process_result_queue(cls, result_queue, isc_tasks)
        working_hosts_number += whn_diff[0]
    end_time = datetime.now()
    msg_text = "isc tasks finish, running time: %s" % str(end_time - start_time)
    logging.info(msg_text)
    msg.send_message(msg=msg_text)
    for it in isc_tasks:
        it.task_finish()
        msg_text = it.get_final_detail_progress_info()
        logging.info(msg_text)
        msg.send_message(msg=msg_text)
        # attached_files.append(it.result_file)
    return isc_tasks
def sync_all_files(host_ips, files):
    username = config.ssh_user_name
    password = config.ssh_password
    port = 22
    for ip in host_ips:
        ssh.transport_files(ip, port, username, password, files)
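# Hypothetical usage sketch for the sync helpers above; the host IPs and file pairs
# are placeholders, not values taken from the project configuration.
def sync_example():
    worker_hosts = ["192.168.1.101", "192.168.1.102"]
    shared_files = [("/tmp/itask-results/itask-3-3-4.txt",
                     "/tmp/itask-results/itask-3-3-4.txt")]
    sync_all_files(worker_hosts, shared_files)
    sync_log_config_file(worker_hosts)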
def init_kmn_isc_task_master_from_config(isc_config_file="isets-tasks.json", sleep_time=30,
                                         is_use_extended_rules=True, is_frequent_log=False):
    start_time = datetime.now()
    ISCFileTaskTerminationMasterQueueManager.register("get_task_queue", callable=get_task_queue)
    ISCFileTaskTerminationMasterQueueManager.register("get_result_queue", callable=get_result_queue)
    manager = ISCFileTaskTerminationMasterQueueManager(
        address=(config.task_host, config.task_host_port),
        authkey=bytes(config.task_host_key, encoding="utf-8"))
    manager.start()
    task_queue = manager.get_task_queue()
    result_queue = manager.get_result_queue()
    localhost_ip = ssh.get_host_ip()
    task_generator = Pool(2)
    task_generator.apply_async(itask_slices_generator, args=(isc_config_file, is_use_extended_rules))
    task_generator.close()
    working_hosts_number = 0
    msg_text = "isc task master start, load isc tasks from %s" % (isc_config_file)
    logging.info(msg_text)
    msg.send_message(msg_text)
    isc_tasks_cfg = ISCTaskConfig(isc_config_file, is_use_extended_rules)
    isc_tasks = isc_tasks_cfg.isc_tasks
    for itask in isc_tasks:
        itask.init_task_numbers()
    sleep_cnt = 0
    online_hosts = set()
    progress_msg_cnt = 10
    task_finish = False
    print_loop = 100000
    print_cnt = 0
    while not task_finish:
        print_cnt += 1
        if print_cnt == print_loop:
            send_itasks_progress_info(isc_tasks, task_queue, working_hosts_number)
            sleep_cnt = 0
            print_cnt = 0
        if sleep_cnt == progress_msg_cnt:
            send_itasks_progress_info(isc_tasks, task_queue, working_hosts_number)
            sleep_cnt = 0
            print_cnt = 0
        task_finish = check_itasks_status(isc_tasks, online_hosts, task_queue, working_hosts_number)
        if result_queue.empty():
            time.sleep(sleep_time)
            sleep_cnt += 1
            continue
        whn_diff = process_result_queue(result_queue, isc_tasks)
        whn_number = whn_diff[0]
        host_ip = whn_diff[1]
        working_hosts_number += whn_number
        if whn_number == 1:
            if host_ip != localhost_ip:
                online_hosts.add(host_ip)
                update_nse_files_to_new_host(host_ip, isc_tasks)
        elif whn_number == -1:
            if host_ip != localhost_ip:
                online_hosts.remove(host_ip)
    msg_text = "all isc task slices are dispatched!"
    logging.info(msg_text)
    msg.send_message(msg_text)
    task_generator.join()
    while working_hosts_number > 0:
        if sleep_cnt == 10:
            msg_texts = dump_isc_task_results(isc_tasks)
            msg_text = "all isc tasks are dispatched, DO NOT add new worker! " \
                       "isc tasks progress info, remain %d task hosts: \n\t\t%s" \
                       % (working_hosts_number, "\n\t\t".join(msg_texts))
            logging.info(msg_text)
            msg.send_message(msg_text)
            sleep_cnt = 0
        if result_queue.empty():
            time.sleep(sleep_time)
            sleep_cnt += 1
            continue
        whn_diff = process_result_queue(result_queue, isc_tasks)
        working_hosts_number += whn_diff[0]
    msg_texts = []
    attached_files = []
    for it in isc_tasks:
        it.task_finish()
        msg_texts.append(it.get_final_detail_progress_info())
        attached_files.append(it.result_file)
    msg_text = "isc tasks finish! \n\t\t%s" % "\n\t\t".join(msg_texts)
    logging.info(msg_text)
    msg.send_message(msg=msg_text, attached_files=attached_files)
    return isc_tasks
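# The master above serves its queues through a BaseManager subclass whose registered
# callables return module-level queues. A self-contained sketch of that server-side
# pattern follows; every name here is hypothetical, not the project's own.
import queue
from multiprocessing.managers import BaseManager

_global_task_queue = queue.Queue()
_global_result_queue = queue.Queue()

def _get_global_task_queue():
    return _global_task_queue

def _get_global_result_queue():
    return _global_result_queue

class MasterQueueManagerSketch(BaseManager):
    pass

MasterQueueManagerSketch.register("get_task_queue", callable=_get_global_task_queue)
MasterQueueManagerSketch.register("get_result_queue", callable=_get_global_result_queue)

def start_master_queues(host, port, authkey):
    # start() launches a manager server process; workers connect() to the same
    # address/authkey and obtain proxies to these two queues
    manager = MasterQueueManagerSketch(address=(host, port), authkey=authkey)
    manager.start()
    return manager, manager.get_task_queue(), manager.get_result_queue()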
def init_kmn_isc_task_master_from_config(cls, isc_config_file="isets-tasks.json", sleep_time=30):
    manager, task_queue, ht_task_queue, result_queue = \
        SearchQueueManager.init_task_master_queue_manager()
    manager_tuple = (manager, task_queue, ht_task_queue, result_queue)
    localhost_ip = ssh.get_host_ip()
    ts_generator_pool = cls.init_task_slices_generator_pool(cls, isc_config_file)
    ht_pool = cls.init_pre_task_worker_pool(cls, isc_config_file, result_queue)
    working_hosts_number = 0
    # ht_checking_results = list()
    isc_tasks_cfg = ITaskConfig(isc_config_file)
    isc_tasks = isc_tasks_cfg.isc_tasks
    for itask in isc_tasks:
        itask.init_task_numbers()
        if os.path.exists(itask.result_file):
            os.remove(itask.result_file)
        # ht_checking_results.append(list())
    msg_text = "isc task master start, load %d isc tasks from %s" % (len(isc_tasks), isc_config_file)
    logging.info(msg_text)
    msg.send_message(msg_text)
    sleep_cnt = 0
    online_hosts = set()
    progress_msg_cnt = 10
    task_finish = False
    print_loop = 100
    print_cnt = 0
    while not task_finish:
        print_cnt += 1
        if print_cnt == print_loop:
            cls.send_itasks_progress_info(cls, isc_tasks, manager_tuple, working_hosts_number, False)
            sleep_cnt = 0
            print_cnt = 0
        if sleep_cnt == progress_msg_cnt:
            cls.send_itasks_progress_info(cls, isc_tasks, manager_tuple, working_hosts_number, False)
            sleep_cnt = 0
            print_cnt = 0
        task_finish = cls.check_itasks_status(cls, isc_tasks, online_hosts,
                                              manager_tuple, working_hosts_number)
        if result_queue.empty():
            time.sleep(sleep_time)
            sleep_cnt += 1
            continue
        whn_diff = cls.process_result_queue(cls, result_queue, isc_tasks)
        whn_number = whn_diff[0]
        host_ip = whn_diff[1]
        working_hosts_number += whn_number
        if whn_number == 1:
            if host_ip != localhost_ip:
                online_hosts.add(host_ip)
        elif whn_number == -1:
            if host_ip != localhost_ip:
                online_hosts.remove(host_ip)
    ts_generator_pool.join()
    ht_pool.join()
    HTCheckingWorker.send_worker_terminate_info(HTCheckingWorker, localhost_ip, result_queue)
    while working_hosts_number > 0:
        if sleep_cnt == 10:
            cls.send_itasks_progress_info(cls, isc_tasks, manager_tuple, working_hosts_number, True)
            sleep_cnt = 0
        if result_queue.empty():
            time.sleep(sleep_time)
            sleep_cnt += 1
            continue
        whn_diff = cls.process_result_queue(cls, result_queue, isc_tasks)
        working_hosts_number += whn_diff[0]
    msg_text = "isc tasks finish!"
    logging.info(msg_text)
    msg.send_message(msg=msg_text)
    for it in isc_tasks:
        it.dump_tmp_se_condition_saving_mem()
        msg_text = it.get_final_detail_progress_info()
        logging.info(msg_text)
        msg.send_message(msg=msg_text)
    return isc_tasks
def init_kmn_isc_task_master_from_config(cls, isc_config_file="isets-tasks.json", sleep_time=30):
    start_time = datetime.now()
    SearchMasterQueueManger.register("get_task_queue", callable=cls.get_global_task_queue)
    SearchMasterQueueManger.register("get_result_queue", callable=cls.get_global_result_queue)
    manager = SearchMasterQueueManger(address=(config.task_host, config.task_host_port),
                                      authkey=bytes(config.task_host_key, encoding="utf-8"))
    manager.start()
    task_queue = manager.get_task_queue()
    result_queue = manager.get_result_queue()
    localhost_ip = ssh.get_host_ip()
    task_generator = Pool(2)
    task_generator.apply_async(cls.itask_slices_generator, args=(cls, isc_config_file))
    task_generator.close()
    working_hosts_number = 0
    msg_text = "isc task master start, load isc tasks from %s" % (isc_config_file)
    logging.info(msg_text)
    msg.send_message(msg_text)
    isc_tasks_cfg = ITaskConfig(isc_config_file)
    isc_tasks = isc_tasks_cfg.isc_tasks
    for itask in isc_tasks:
        itask.init_task_numbers()
    sleep_cnt = 0
    online_hosts = set()
    progress_msg_cnt = 10
    task_finish = False
    print_loop = 100000
    print_cnt = 0
    while not task_finish:
        print_cnt += 1
        if print_cnt == print_loop:
            cls.send_itasks_progress_info(cls, isc_tasks, task_queue, working_hosts_number, False)
            sleep_cnt = 0
            print_cnt = 0
        if sleep_cnt == progress_msg_cnt:
            cls.send_itasks_progress_info(cls, isc_tasks, task_queue, working_hosts_number, False)
            sleep_cnt = 0
            print_cnt = 0
        task_finish = cls.check_itasks_status(cls, isc_tasks, online_hosts,
                                              task_queue, working_hosts_number)
        if result_queue.empty():
            time.sleep(sleep_time)
            sleep_cnt += 1
            continue
        whn_diff = cls.process_result_queue(cls, result_queue, isc_tasks)
        whn_number = whn_diff[0]
        host_ip = whn_diff[1]
        working_hosts_number += whn_number
        if whn_number == 1:
            if host_ip != localhost_ip:
                online_hosts.add(host_ip)
                cls.update_nse_files_to_new_host(host_ip, isc_tasks)
        elif whn_number == -1:
            if host_ip != localhost_ip:
                online_hosts.remove(host_ip)
    task_generator.join()
    while working_hosts_number > 0:
        if sleep_cnt == 10:
            cls.send_itasks_progress_info(cls, isc_tasks, task_queue, working_hosts_number, True)
            sleep_cnt = 0
        if result_queue.empty():
            time.sleep(sleep_time)
            sleep_cnt += 1
            continue
        whn_diff = cls.process_result_queue(cls, result_queue, isc_tasks)
        working_hosts_number += whn_diff[0]
    msg_texts = []
    attached_files = []
    for it in isc_tasks:
        it.task_finish()
        msg_texts.append(it.get_final_detail_progress_info())
        attached_files.append(it.result_file)
    msg_text = "isc tasks finish! \n\t\t%s" % "\n\t\t".join(msg_texts)
    logging.info(msg_text)
    msg.send_message(msg=msg_text, attached_files=attached_files)
    return isc_tasks
def init_kmn_isc_task_master_from_config(cls, isc_config_file="isets-tasks.json", sleep_time=30):
    manager, task_queue, ht_task_queue, result_queue = \
        SearchQueueManager.init_task_master_queue_manager()
    manager_tuple = (manager, task_queue, ht_task_queue, result_queue)
    localhost_ip = ssh.get_host_ip()
    isc_tasks_cfg = ITaskConfig(isc_config_file)
    isc_tasks = isc_tasks_cfg.isc_tasks
    result_record = list()
    for itask in isc_tasks:
        isnse.clear_task_terminate_flag_files(*itask.k_m_n)
        i4_iset_size = len(itask.meta_data.search_i4_composed_iset_ids)
        file = i4u.get_kmn_i4_all_result_file(*itask.k_m_n)
        if os.path.exists(file):
            os.remove(file)
        record = [2**i4_iset_size - 1, 0, list(), file]
        result_record.append(record)
    ts_generator_pool = cls.init_task_slices_generator_pool(cls, isc_config_file)
    pre_task_pool = cls.init_pre_task_worker_pool(cls, isc_config_file, result_queue)
    working_hosts_number = 0
    msg_text = "isc task master start, load %d isc tasks from %s" % (len(isc_tasks), isc_config_file)
    logging.info(msg_text)
    msg.send_message(msg_text)
    sleep_cnt = 0
    online_hosts = set()
    progress_msg_cnt = 10
    task_finish = False
    print_loop = 10
    print_cnt = 0
    while not task_finish:
        print_cnt += 1
        if print_cnt == print_loop:
            cls.send_itasks_progress_info(cls, result_record, manager_tuple, working_hosts_number, False)
            sleep_cnt = 0
            print_cnt = 0
        if sleep_cnt == progress_msg_cnt:
            cls.send_itasks_progress_info(cls, result_record, manager_tuple, working_hosts_number, False)
            sleep_cnt = 0
            print_cnt = 0
        task_finish = cls.check_i4_tasks_status(cls, result_record)
        if result_queue.empty():
            time.sleep(sleep_time)
            sleep_cnt += 1
            continue
        whn_diff = cls.process_i4_result_queue(cls, result_queue, result_record)
        whn_number = whn_diff[0]
        host_ip = whn_diff[1]
        working_hosts_number += whn_number
        if whn_number == 1:
            if host_ip != localhost_ip:
                online_hosts.add(host_ip)
        elif whn_number == -1:
            if host_ip != localhost_ip:
                online_hosts.remove(host_ip)
    ts_generator_pool.join()
    pre_task_pool.join()
    I4SearchWorker.send_worker_terminate_info(I4SearchWorker, localhost_ip, result_queue)
    while working_hosts_number > 0:
        if sleep_cnt == 10:
            cls.send_itasks_progress_info(cls, result_record, manager_tuple, working_hosts_number, True)
            sleep_cnt = 0
        if result_queue.empty():
            time.sleep(sleep_time)
            sleep_cnt += 1
            continue
        whn_diff = cls.process_i4_result_queue(cls, result_queue, result_record)
        working_hosts_number += whn_diff[0]
    msg_text = "isc tasks finish!"
    logging.info(msg_text)
    msg.send_message(msg=msg_text)
    cls.send_itasks_progress_info(cls, result_record, manager_tuple, working_hosts_number, True)
    return isc_tasks
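# Every master variant above drives its host bookkeeping off (signal, host_name, host_ip)
# tuples read from the result queue: an add_worker_signal bumps the working-host count by 1,
# a kill_signal drops it by 1. A hypothetical single-message sketch of that dispatch follows;
# process_result_message is not the project's process_result_queue, and it assumes every
# queue message starts with (signal, host_name, host_ip).
def process_result_message(result_queue):
    signal, host_name, host_ip = result_queue.get()[:3]
    if signal == ITaskSignal.add_worker_signal:
        return 1, host_ip
    if signal == ITaskSignal.kill_signal:
        return -1, host_ip
    # other signals (e.g. task results) leave the working-host count unchanged
    return 0, host_ip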
def test_transport_file2():
    path = [(local_path + "ts-2-1-1.txt", server_path + "ts-2-1-1.txt")]
    ssh.transport_files(ip, 22, config.ssh_user_name, config.ssh_password, path)
def test_transport_files():
    paths = get_threeparts_paths()
    ssh.transport_files_by_threeparts_path(ip, 22, config.ssh_user_name, config.ssh_password, paths)