import logging
import os
import signal
import sys
from multiprocessing import Process, Queue

# SYNC_EVENTS, TASK_EVENTS, WORKER_RUNNING, WORKER_INITIALIZATION and
# init_logging_worker are module-level definitions not shown in this listing;
# 'worker' and 'shutdown_handler' are sketched after it.

processes = []  # Child processes; module-level so shutdown_handler can reach them.


def compss_persistent_worker():
    # Get args
    debug = (sys.argv[1] == 'true')
    tracing = (sys.argv[2] == 'true')
    storage_conf = sys.argv[3]
    tasks_x_node = int(sys.argv[4])
    in_pipes = sys.argv[5:5 + tasks_x_node]
    out_pipes = sys.argv[5 + tasks_x_node:]

    if tracing:
        import pyextrae.multiprocessing as pyextrae
        pyextrae.eventandcounters(SYNC_EVENTS, 1)
        pyextrae.eventandcounters(TASK_EVENTS, WORKER_RUNNING)

    if debug:
        assert tasks_x_node == len(in_pipes)
        assert tasks_x_node == len(out_pipes)

    persistent_storage = False
    if storage_conf != 'null':
        persistent_storage = True
        from storage.api import initWorker as initStorageAtWorker
        from storage.api import finishWorker as finishStorageAtWorker

    # Load log level configuration file
    worker_path = os.path.dirname(os.path.realpath(__file__))
    if debug:
        # Debug
        init_logging_worker(worker_path + '/../../log/logging.json.debug')
    else:
        # Default
        init_logging_worker(worker_path + '/../../log/logging.json.off')

    if __debug__:
        logger = logging.getLogger('pycompss.worker.worker')
        logger.debug("[PYTHON WORKER] piper_worker.py wake up")
        logger.debug("[PYTHON WORKER] -----------------------------")
        logger.debug("[PYTHON WORKER] Persistent worker parameters:")
        logger.debug("[PYTHON WORKER] -----------------------------")
        logger.debug("[PYTHON WORKER] Debug          : " + str(debug))
        logger.debug("[PYTHON WORKER] Tracing        : " + str(tracing))
        logger.debug("[PYTHON WORKER] Tasks per node : " + str(tasks_x_node))
        logger.debug("[PYTHON WORKER] In Pipes       : " + str(in_pipes))
        logger.debug("[PYTHON WORKER] Out Pipes      : " + str(out_pipes))
        logger.debug("[PYTHON WORKER] Storage conf.  : " + str(storage_conf))
        logger.debug("[PYTHON WORKER] -----------------------------")

    if persistent_storage:
        # Initialize storage
        initStorageAtWorker(config_file_path=storage_conf)

    # Create new processes, one per task slot, each attached to its own pipes
    queues = []
    for i in range(0, tasks_x_node):
        if __debug__:
            logger.debug("[PYTHON WORKER] Launching process " + str(i))
        process_name = 'Process-' + str(i)
        queues.append(Queue())

        def create_threads():
            # The closure is invoked immediately, so 'i' is bound correctly
            processes.append(Process(target=worker,
                                     args=(queues[i],
                                           process_name,
                                           in_pipes[i],
                                           out_pipes[i],
                                           storage_conf)))
            processes[i].start()
        create_threads()

    # Catch SIGTERM sent by bindings_piper to exit all processes
    signal.signal(signal.SIGTERM, shutdown_handler)

    # Wait for all processes
    for i in range(0, tasks_x_node):
        processes[i].join()

    # Check if there is any exception message from the processes
    for i in range(0, tasks_x_node):
        if not queues[i].empty():  # empty() is a method; the original missed the call
            print(queues[i].get())

    for q in queues:
        q.close()
        q.join_thread()

    if persistent_storage:
        # Finish storage
        finishStorageAtWorker()

    if __debug__:
        logger.debug("[PYTHON WORKER] Finished")

    if tracing:
        pyextrae.eventandcounters(TASK_EVENTS, 0)
        pyextrae.eventandcounters(SYNC_EVENTS, 0)
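# ---------------------------------------------------------------------------
# The persistent worker above relies on two helpers defined elsewhere in the
# module: the 'worker' function each Process runs and the SIGTERM
# 'shutdown_handler'. The following is a minimal sketch of their expected
# shape, assuming a line-oriented text protocol on the named pipes and a
# 'QUIT' sentinel; both details are illustrative assumptions, not taken from
# the listing above.
# ---------------------------------------------------------------------------

def shutdown_handler(signum, frame):
    # Terminate every child process when bindings_piper sends SIGTERM.
    for p in processes:
        if p.is_alive():
            p.terminate()


def worker(queue, process_name, in_pipe, out_pipe, storage_conf):
    # Sketch: block on the input pipe, execute each command, acknowledge it
    # on the output pipe, and report failures to the parent through the queue.
    alive = True
    while alive:
        with open(in_pipe, 'r') as fd_in:
            command = fd_in.readline().strip()
        if command == 'QUIT':  # assumed termination sentinel
            alive = False
        elif command:
            try:
                # ... deserialize the task, run it, store its results ...
                with open(out_pipe, 'w') as fd_out:
                    fd_out.write('DONE ' + command + '\n')
            except Exception as e:
                queue.put('[ERROR] ' + process_name + ': ' + str(e))
                alive = False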
# Non-persistent worker entry sequence (the counterpart of the persistent
# worker above): set up storage, emit tracing start events, configure
# logging, run the task, and tear everything down. 'tracing', 'taskId',
# 'log_level' and 'storage_conf' are bound before this fragment starts.

persistent_storage = False
if storage_conf != 'null':  # assumed guard, mirroring the persistent worker
    persistent_storage = True
    from storage.api import initWorker as initStorageAtWorker
    from storage.api import finishWorker as finishStorageAtWorker

if tracing:
    import pyextrae.multiprocessing as pyextrae
    pyextrae.eventandcounters(SYNC_EVENTS, taskId)
    # pyextrae.eventandcounters(TASK_EVENTS, 0)
    pyextrae.eventandcounters(TASK_EVENTS, WORKER_INITIALIZATION)

# Load log level configuration file
worker_path = os.path.dirname(os.path.realpath(__file__))
if log_level == 'true' or log_level == 'debug':
    # Debug
    init_logging_worker(worker_path + '/../../log/logging.json.debug')
elif log_level == 'info' or log_level == 'off':
    # Info or no debug
    init_logging_worker(worker_path + '/../../log/logging.json.off')
else:
    # Default
    init_logging_worker(worker_path + '/../../log/logging.json')

if persistent_storage:
    # Initialize storage
    initStorageAtWorker(config_file_path=storage_conf)

# Init worker
compss_worker(persistent_storage)

if tracing:
    # Assumed closing events, mirroring the persistent worker's shutdown
    pyextrae.eventandcounters(TASK_EVENTS, 0)
    pyextrae.eventandcounters(SYNC_EVENTS, 0)

if persistent_storage:
    # Finish storage (assumed counterpart of initStorageAtWorker above)
    finishStorageAtWorker()
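# ---------------------------------------------------------------------------
# The fragment above starts after the worker's argument parsing, so the names
# it uses arrive unbound here. A plausible preamble is sketched below,
# assuming the same positional command-line convention as the persistent
# worker; the argument order and names are assumptions for illustration only.
# ---------------------------------------------------------------------------

import sys

tracing = (sys.argv[1] == 'true')  # assumed position
taskId = sys.argv[2]               # assumed position
log_level = sys.argv[3]            # assumed position
storage_conf = sys.argv[4]         # assumed position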