def main():
    """Piper worker entry point.

    Reads the tracing flag from ``sys.argv[4]``, publishes it in the
    module-level ``TRACING`` global, then builds the worker configuration
    from the full argument vector and hands it to the persistent worker
    loop, wrapped in the tracing context when tracing is enabled.
    """
    global TRACING
    global WORKER_CONF

    # Fourth CLI argument carries the tracing level; any positive value
    # switches extrae-style tracing on.
    TRACING = int(sys.argv[4]) > 0

    # Pick the real tracing context or a no-op stand-in.
    context = trace_multiprocessing_worker() if TRACING else dummy_context()
    with context:
        # Parse the remaining CLI arguments into the worker configuration
        # and start the persistent worker with it.
        WORKER_CONF = PiperWorkerConfiguration()
        WORKER_CONF.update_params(sys.argv)
        compss_persistent_worker(WORKER_CONF)
def main():
    """GAT worker main code (legacy variant).

    Parses the task invocation from ``sys.argv``, optionally starts the
    streaming client and persistent storage, configures worker logging,
    executes the task through ``compss_worker`` and finally tears down
    streaming/storage.  Exits with status 1 when the task failed.
    """
    # Positional CLI arguments (1-based; argv[0] is the script itself).
    tracing = sys.argv[1] == 'true'
    task_id = int(sys.argv[2])
    log_level = sys.argv[3]
    storage_conf = sys.argv[4]
    stream_backend = sys.argv[5]
    stream_master_name = sys.argv[6]
    stream_master_port = sys.argv[7]
    # argv[8] is the method type; everything from argv[9] on is forwarded
    # untouched to compss_worker.  Layout of the tail (parsed downstream):
    #   class_name, method_name, num_slaves, slaves..., numCus,
    #   has_target, num_params, params...
    params = sys.argv[9:]

    print(f"tracing = {tracing}")
    print(f"task_id = {task_id}")
    print(f"log_level = {log_level}")
    print(f"storage_conf = {storage_conf}")

    # 'null' means the runtime did not configure a storage backend.
    persistent_storage = storage_conf != 'null'
    # Streaming is active for any backend other than the null markers.
    streaming = stream_backend not in (None, 'null', 'NONE')

    with trace_multiprocessing_worker() if tracing else dummy_context():
        if streaming:
            # Start the distributed stream client against the master.
            DistroStreamClientHandler.init_and_start(
                master_ip=stream_master_name,
                master_port=stream_master_port)

        # Select the logging configuration file matching the log level.
        worker_path = os.path.dirname(os.path.realpath(__file__))
        if log_level == 'true' or log_level == "debug":
            # Debug
            log_cfg = worker_path + '/../../../log/logging_gat_worker_debug.json'
        elif log_level == "info" or log_level == "off":
            # Info or no debug
            log_cfg = worker_path + '/../../../log/logging_gat_worker_off.json'
        else:
            # Default
            log_cfg = worker_path + '/../../../log/logging_gat_worker.json'
        init_logging_worker(log_cfg, tracing)

        if persistent_storage:
            # Initialize storage
            with event(INIT_STORAGE_AT_WORKER_EVENT):
                from storage.api import initWorker as initStorageAtWorker
                initStorageAtWorker(config_file_path=storage_conf)

        # Run the actual task.
        exit_code = compss_worker(tracing, str(task_id), storage_conf, params)

        if streaming:
            # Finish streaming
            DistroStreamClientHandler.set_stop()

        if persistent_storage:
            # Finish storage
            with event(FINISH_STORAGE_AT_WORKER_EVENT):
                from storage.api import finishWorker as finishStorageAtWorker
                finishStorageAtWorker()

    if exit_code == 1:
        exit(1)
def main():
    # type: () -> None
    """GAT worker main code.

    Executes the task provided by parameters: parses ``sys.argv``,
    optionally starts streaming and persistent storage, configures
    logging, runs the task via ``compss_worker`` and tears everything
    down.  Exits with status 1 when the task failed.

    :return: None
    """
    # Positional CLI arguments (1-based; argv[0] is the script itself).
    tracing = sys.argv[1] == 'true'
    task_id = int(sys.argv[2])
    log_level = sys.argv[3]
    storage_conf = sys.argv[4]
    stream_backend = sys.argv[5]
    stream_master_name = sys.argv[6]
    stream_master_port = sys.argv[7]
    # argv[8] is the method type; everything from argv[9] on is forwarded
    # untouched to compss_worker.  Layout of the tail (parsed downstream):
    #   class_name, method_name, num_slaves, slaves..., numCus,
    #   has_target, num_params, params...
    params = sys.argv[9:]

    # Only echo the invocation details when debugging is requested.
    if log_level in ("true", "debug"):
        print(f"Tracing = {tracing}")
        print(f"Task id = {task_id}")
        print(f"Log level = {log_level}")
        print(f"Storage conf = {storage_conf}")

    # "null" means the runtime did not configure a storage backend.
    persistent_storage = storage_conf != "null"
    # Streaming is active for any backend other than the null markers.
    streaming = stream_backend not in (None, "null", "NONE")

    with trace_multiprocessing_worker() if tracing else dummy_context():
        if streaming:
            # Start the distributed stream client against the master.
            DistroStreamClientHandler.init_and_start(
                master_ip=stream_master_name,
                master_port=stream_master_port)

        # Select the logging configuration file matching the log level.
        worker_path = os.path.dirname(os.path.realpath(__file__))
        if log_level in ("true", "debug"):
            # Debug
            log_json = worker_path + "/../../../log/logging_gat_worker_debug.json"
        elif log_level in ("info", "off"):
            # Info or no debug
            log_json = worker_path + "/../../../log/logging_gat_worker_off.json"
        else:
            # Default
            log_json = worker_path + "/../../../log/logging_gat_worker.json"
        init_logging_worker(log_json, tracing)

        if persistent_storage:
            # Initialize storage
            with event_worker(INIT_STORAGE_AT_WORKER_EVENT):
                from storage.api import initWorker as initStorageAtWorker  # noqa
                initStorageAtWorker(config_file_path=storage_conf)

        # Run the actual task.
        exit_code = compss_worker(tracing, str(task_id), storage_conf,
                                  params, log_json)

        if streaming:
            # Finish streaming
            DistroStreamClientHandler.set_stop()

        if persistent_storage:
            # Finish storage
            with event_worker(FINISH_STORAGE_AT_WORKER_EVENT):
                from storage.api import finishWorker as finishStorageAtWorker  # noqa
                finishStorageAtWorker()

    if exit_code == 1:
        exit(1)
# NOTE(review): the block below appears to be the TAIL of a worker-shutdown
# function (presumably compss_persistent_worker) whose header lies outside
# this chunk — `persistent_storage`, `control_pipe` and `logger` are bound
# there.  Confirm against the full file before relying on this formatting.
if persistent_storage:
    # Finish storage
    if __debug__:
        logger.debug(HEADER + "Stopping persistent storage")
    with event(FINISH_STORAGE_AT_WORKER_EVENT):
        from storage.api import finishWorker as finishStorageAtWorker
        finishStorageAtWorker()
if __debug__:
    logger.debug(HEADER + "Finished")
# Tell the runtime over the control pipe that this worker has shut down.
control_pipe.write(QUIT_TAG)
control_pipe.close()


############################
# Main -> Calls main method
############################

if __name__ == '__main__':
    # Configure the global tracing variable from the argument
    TRACING = (int(sys.argv[2]) > 0)
    with trace_multiprocessing_worker() if TRACING else dummy_context():
        # Configure the piper worker with the arguments
        WORKER_CONF = PiperWorkerConfiguration()
        WORKER_CONF.update_params(sys.argv)
        compss_persistent_worker(WORKER_CONF)