def stop_all(exit_code):
    # type: (int) -> None
    """ Stop everything smoothly and terminate the process.

    Shuts down streaming (if enabled), persistent storage (if enabled)
    and the COMPSs runtime, then flushes stdout/stderr and exits the
    interpreter with the given exit code.

    :param exit_code: Exit code to stop the runtime with and to exit
                      the process with.
    :return: None (does not return: ends with sys.exit).
    """
    from pycompss.api.api import compss_stop
    global STREAMING
    global PERSISTENT_STORAGE
    global LOGGER
    # Stop STREAMING
    if STREAMING:
        stop_streaming()
    # Stop persistent storage
    if PERSISTENT_STORAGE:
        master_stop_storage(LOGGER)
    # Stop the runtime and flush both standard streams before exiting
    # so that no buffered output is lost.
    compss_stop(exit_code)
    sys.stdout.flush()
    sys.stderr.flush()
    sys.exit(exit_code)
def stop_all(exit_code):
    # type: (int) -> None
    """ Stop everything smoothly.

    Shuts down the auxiliary services (streaming and persistent storage)
    and the COMPSs runtime, flushes the standard streams and terminates
    the process with the given exit code.

    :param exit_code: Exit code.
    :return: None
    """
    from pycompss.api.api import compss_stop
    global STREAMING
    global PERSISTENT_STORAGE
    global LOGGER
    if STREAMING:
        # The streaming backend was started: shut it down first.
        stop_streaming()
    if PERSISTENT_STORAGE:
        # Persistent storage was initialized at master: stop it.
        master_stop_storage(LOGGER)
    # Stop the runtime, then flush both standard streams and exit.
    compss_stop(exit_code)
    for stream in (sys.stdout, sys.stderr):
        stream.flush()
    sys.exit(exit_code)
def __hard_stop__(debug, sync, logger, ipython):
    # type: (bool, bool, typing.Any, typing.Any) -> None
    """ Clean up the binding after the runtime has already stopped.

    The runtime died due to some error; this function stops whatever is
    still alive in the binding (streaming, storage, object tracker,
    events, temporary files and the stdout/stderr watchers).

    :param debug: If debugging (watcher files are kept when True).
    :param sync: Scope variables synchronization [ True | False ].
    :param logger: Logger where to put the logging messages.
    :param ipython: Ipython instance.
    :return: None
    """
    print("The runtime is not running.")
    # Make sure every auxiliary service is stopped as well.
    if STREAMING:
        stop_streaming()
    if PERSISTENT_STORAGE:
        master_stop_storage(logger)
    # Drop any object still registered in the object tracker.
    OT.clean_object_tracker()
    # Release events and remove temporary files.
    release_event_manager(ipython)
    __clean_temp_files__()
    # Stop watching stdout and stderr (clean the files unless debugging).
    STDW.stop_watching(clean=not debug)
    # Print any remaining messages captured before the watcher stopped.
    for message in STDW.get_messages() or []:
        print(message)
    if sync:
        # With no runtime there is nothing left to synchronize against.
        print("* Can not synchronize any future object.")
    return None
def launch_pycompss_application(app,
                                func,
                                log_level='off',               # type: str
                                o_c=False,                     # type: bool
                                debug=False,                   # type: bool
                                graph=False,                   # type: bool
                                trace=False,                   # type: bool
                                monitor=None,                  # type: int
                                project_xml=None,              # type: str
                                resources_xml=None,            # type: str
                                summary=False,                 # type: bool
                                task_execution='compss',       # type: str
                                storage_impl=None,             # type: str
                                storage_conf=None,             # type: str
                                streaming_backend=None,        # type: str
                                streaming_master_name=None,    # type: str
                                streaming_master_port=None,    # type: str
                                task_count=50,                 # type: int
                                app_name=None,                 # type: str
                                uuid=None,                     # type: str
                                base_log_dir=None,             # type: str
                                specific_log_dir=None,         # type: str
                                extrae_cfg=None,               # type: str
                                comm='NIO',                    # type: str
                                conn='es.bsc.compss.connectors.DefaultSSHConnector',  # noqa: E501
                                master_name='',                # type: str
                                master_port='',                # type: str
                                scheduler='es.bsc.compss.scheduler.loadbalancing.LoadBalancingScheduler',  # noqa: E501
                                jvm_workers='-Xms1024m,-Xmx1024m,-Xmn400m',
                                cpu_affinity='automatic',      # type: str
                                gpu_affinity='automatic',      # type: str
                                fpga_affinity='automatic',     # type: str
                                fpga_reprogram='',             # type: str
                                profile_input='',              # type: str
                                profile_output='',             # type: str
                                scheduler_config='',           # type: str
                                external_adaptation=False,     # type: bool
                                propagate_virtual_environment=True,  # noqa type: bool
                                mpi_worker=False,              # type: bool
                                *args, **kwargs
                                ):
    # type: (...) -> typing.Any
    # NOTE: the return annotation was "None", but this function returns the
    # execution result (see the final "return result"), so it is typing.Any.
    """ Launch PyCOMPSs application from function.

    :param app: Application path
    :param func: Function
    :param log_level: Logging level [ 'trace'|'debug'|'info'|'api'|'off' ]
                      (default: 'off')
    :param o_c: Objects to string conversion [ True | False ] (default: False)
    :param debug: Debug mode [ True | False ] (default: False)
                  (overrides log_level)
    :param graph: Generate graph [ True | False ] (default: False)
    :param trace: Generate trace
                  [ True | False | 'scorep' | 'arm-map' | 'arm-ddt']
                  (default: False)
    :param monitor: Monitor refresh rate (default: None)
    :param project_xml: Project xml file path
    :param resources_xml: Resources xml file path
    :param summary: Execution summary [ True | False ] (default: False)
    :param task_execution: Task execution (default: 'compss')
    :param storage_impl: Storage implementation path
    :param storage_conf: Storage configuration file path
    :param streaming_backend: Streaming backend (default: None)
    :param streaming_master_name: Streaming master name (default: None)
    :param streaming_master_port: Streaming master port (default: None)
    :param task_count: Task count (default: 50)
    :param app_name: Application name (default: Interactive_date)
    :param uuid: UUId
    :param base_log_dir: Base logging directory
    :param specific_log_dir: Specific logging directory
    :param extrae_cfg: Extrae configuration file path
    :param comm: Communication library (default: NIO)
    :param conn: Connector (default: DefaultSSHConnector)
    :param master_name: Master Name (default: '')
    :param master_port: Master port (default: '')
    :param scheduler: Scheduler (default:
                  es.bsc.compss.scheduler.loadbalancing.LoadBalancingScheduler)
    :param jvm_workers: Java VM parameters
                        (default: '-Xms1024m,-Xmx1024m,-Xmn400m')
    :param cpu_affinity: CPU Core affinity (default: 'automatic')
    :param gpu_affinity: GPU Core affinity (default: 'automatic')
    :param fpga_affinity: FPA Core affinity (default: 'automatic')
    :param fpga_reprogram: FPGA repogram command (default: '')
    :param profile_input: Input profile (default: '')
    :param profile_output: Output profile (default: '')
    :param scheduler_config: Scheduler configuration (default: '')
    :param external_adaptation: External adaptation [ True | False ]
                                (default: False)
    :param propagate_virtual_environment: Propagate virtual environment
                                          [ True | False ] (default: False)
    :param mpi_worker: Use the MPI worker [ True | False ] (default: False)
    :param args: Positional arguments
    :param kwargs: Named arguments
    :return: Execution result
    """
    # Let the Python binding know we are at master
    context.set_pycompss_context(context.MASTER)
    # Then we can import the appropriate start and stop functions from the API
    from pycompss.api.api import compss_start, compss_stop

    ##############################################################
    # INITIALIZATION
    ##############################################################

    # Initial dictionary with the user defined parameters
    all_vars = {
        'log_level': log_level,
        'debug': debug,
        'o_c': o_c,
        'graph': graph,
        'trace': trace,
        'monitor': monitor,
        'project_xml': project_xml,
        'resources_xml': resources_xml,
        'summary': summary,
        'task_execution': task_execution,
        'storage_impl': storage_impl,
        'storage_conf': storage_conf,
        'streaming_backend': streaming_backend,
        'streaming_master_name': streaming_master_name,
        'streaming_master_port': streaming_master_port,
        'task_count': task_count,
        'app_name': app_name,
        'uuid': uuid,
        'base_log_dir': base_log_dir,
        'specific_log_dir': specific_log_dir,
        'extrae_cfg': extrae_cfg,
        'comm': comm,
        'conn': conn,
        'master_name': master_name,
        'master_port': master_port,
        'scheduler': scheduler,
        'jvm_workers': jvm_workers,
        'cpu_affinity': cpu_affinity,
        'gpu_affinity': gpu_affinity,
        'fpga_affinity': fpga_affinity,
        'fpga_reprogram': fpga_reprogram,
        'profile_input': profile_input,
        'profile_output': profile_output,
        'scheduler_config': scheduler_config,
        'external_adaptation': external_adaptation,
        'propagate_virtual_environment': propagate_virtual_environment,
        'mpi_worker': mpi_worker
    }
    # Check the provided flags
    flags, issues = check_flags(all_vars)
    if not flags:
        print_flag_issues(issues)
        return None
    # Prepare the environment
    env_vars = prepare_environment(False, o_c, storage_impl, app,
                                   debug, trace, mpi_worker)
    all_vars.update(env_vars)
    monitoring_vars = prepare_loglevel_graph_for_monitoring(monitor,
                                                           graph,
                                                           debug,
                                                           log_level)
    all_vars.update(monitoring_vars)
    if RUNNING_IN_SUPERCOMPUTER:
        updated_vars = updated_variables_in_sc()
        all_vars.update(updated_vars)
    to_update = prepare_tracing_environment(all_vars['trace'],
                                            all_vars['extrae_lib'],
                                            all_vars['ld_library_path'])
    all_vars['trace'], all_vars['ld_library_path'] = to_update
    inf_vars = check_infrastructure_variables(all_vars['project_xml'],
                                              all_vars['resources_xml'],
                                              all_vars['compss_home'],
                                              all_vars['app_name'],
                                              all_vars['file_name'],
                                              all_vars['external_adaptation'])
    all_vars.update(inf_vars)
    create_init_config_file(**all_vars)

    ##############################################################
    # RUNTIME START
    ##############################################################

    # Runtime start
    compss_start(log_level, all_vars['trace'], True)
    # Setup logging
    binding_log_path = get_log_path()
    log_path = os.path.join(all_vars['compss_home'],
                            'Bindings',
                            'python',
                            str(all_vars['major_version']),
                            'log')
    set_temporary_directory(binding_log_path)
    logging_cfg_file = get_logging_cfg_file(log_level)
    init_logging(os.path.join(log_path, logging_cfg_file), binding_log_path)
    logger = logging.getLogger("pycompss.runtime.launch")

    logger.debug('--- START ---')
    logger.debug('PyCOMPSs Log path: %s' % log_path)
    logger.debug("Starting storage")
    persistent_storage = master_init_storage(all_vars['storage_conf'], logger)
    logger.debug("Starting streaming")
    streaming = init_streaming(all_vars['streaming_backend'],
                               all_vars['streaming_master_name'],
                               all_vars['streaming_master_port'])

    saved_argv = sys.argv
    # FIX: sys.argv must be a list; args is a tuple here, so convert it
    # (assigning the tuple directly breaks code that mutates sys.argv).
    sys.argv = list(args)
    # Execution:
    with event(APPLICATION_RUNNING_EVENT, master=True):
        if func is None or func == '__main__':
            # Run the application file as a script.
            if IS_PYTHON3:
                exec(open(app).read())
            else:
                execfile(app)  # noqa
            result = None
        else:
            # Import the application module and invoke the given function.
            if IS_PYTHON3:
                import importlib.util
                spec = importlib.util.spec_from_file_location(all_vars['file_name'], app)  # noqa: E501
                imported_module = importlib.util.module_from_spec(spec)
                spec.loader.exec_module(imported_module)  # noqa
            else:
                import imp  # noqa
                imported_module = imp.load_source(all_vars['file_name'], app)  # noqa
            method_to_call = getattr(imported_module, func)
            result = method_to_call(*args, **kwargs)
    # Recover the system arguments
    sys.argv = saved_argv

    # Stop streaming
    if streaming:
        stop_streaming()
    # Stop persistent storage
    if persistent_storage:
        master_stop_storage(logger)
    logger.debug('--- END ---')

    ##############################################################
    # RUNTIME STOP
    ##############################################################

    # Stop runtime
    compss_stop()
    return result
def compss_main():
    # type: () -> None
    """ PyCOMPSs main function.

    General call:
    python $PYCOMPSS_HOME/pycompss/runtime/launch.py $log_level
           $PyObject_serialize $storage_conf $streaming_backend
           $streaming_master_name $streaming_master_port
           $fullAppPath $application_args

    :return: None (does not return: ends with sys.exit).
    """
    global APP_PATH
    # Let the Python binding know we are at master
    context.set_pycompss_context(context.MASTER)
    # Then we can import the appropriate start and stop functions from the API
    from pycompss.api.api import compss_start, compss_stop
    # See parse_arguments, defined above
    # In order to avoid parsing user arguments, we are going to remove user
    # args from sys.argv
    user_sys_argv = sys.argv[9:]
    sys.argv = sys.argv[:9]
    args = parse_arguments()
    # We are done, now sys.argv must contain user args only
    sys.argv = [args.app_path] + user_sys_argv
    # Get log_level
    log_level = args.log_level
    # Setup tracing
    tracing = int(args.tracing)
    # Start the runtime
    compss_start(log_level, tracing, False)
    # Get object_conversion boolean
    set_object_conversion(args.object_conversion == 'true')
    # Get storage configuration at master
    storage_conf = args.storage_configuration
    # Get application execution path
    APP_PATH = args.app_path
    # Setup logging
    binding_log_path = get_log_path()
    log_path = os.path.join(os.getenv('COMPSS_HOME'),
                            'Bindings',
                            'python',
                            str(_PYTHON_VERSION),
                            'log')
    set_temporary_directory(binding_log_path)
    logging_cfg_file = get_logging_cfg_file(log_level)
    init_logging(os.path.join(log_path, logging_cfg_file), binding_log_path)
    logger = logging.getLogger("pycompss.runtime.launch")

    # Get JVM options
    # jvm_opts = os.environ['JVM_OPTIONS_FILE']
    # from pycompss.util.jvm.parser import convert_to_dict
    # opts = convert_to_dict(jvm_opts)
    # storage_conf = opts.get('-Dcompss.storage.conf')

    exit_code = 0
    try:
        if __debug__:
            logger.debug('--- START ---')
            logger.debug('PyCOMPSs Log path: %s' % binding_log_path)
        # Start persistent storage
        persistent_storage = master_init_storage(storage_conf, logger)
        # Start streaming
        streaming = init_streaming(args.streaming_backend,
                                   args.streaming_master_name,
                                   args.streaming_master_port)
        # Show module warnings
        if __debug__:
            show_optional_module_warnings()
        # MAIN EXECUTION
        with event(APPLICATION_RUNNING_EVENT, master=True):
            if IS_PYTHON3:
                with open(APP_PATH) as f:
                    exec(compile(f.read(), APP_PATH, 'exec'), globals())
            else:
                execfile(APP_PATH, globals())  # MAIN EXECUTION
        # Stop streaming
        if streaming:
            stop_streaming()
        # Stop persistent storage
        if persistent_storage:
            master_stop_storage(logger)
        # End
        if __debug__:
            logger.debug('--- END ---')
    except SystemExit as e:
        if e.code != 0:
            print('[ ERROR ]: User program ended with exitcode %s.' % e.code)
            print('\t\tShutting down runtime...')
            exit_code = e.code
    except SerializerException:
        exit_code = 1
        # If an object that can not be serialized has been used as a parameter.
        print("[ ERROR ]: Serialization exception")
        exc_type, exc_value, exc_traceback = sys.exc_info()
        lines = traceback.format_exception(exc_type, exc_value, exc_traceback)
        for line in lines:
            if APP_PATH in line:
                # FIX: was print('[ ERROR ]: In: %s', line) which passed the
                # format string and the line as two separate print arguments
                # (showing a literal "%s") instead of %-formatting them.
                print('[ ERROR ]: In: %s' % line)
        exit_code = 1
    except COMPSsException as e:
        # Any other exception occurred
        print("[ ERROR ]: A COMPSs exception occurred: " + str(e))
        traceback.print_exc()
        exit_code = 0  # COMPSs exception is not considered an error
    except Exception as e:
        # Any other exception occurred
        print("[ ERROR ]: An exception occurred: " + str(e))
        traceback.print_exc()
        exit_code = 1
    finally:
        # Stop runtime
        compss_stop(exit_code)
        sys.stdout.flush()
        sys.stderr.flush()
        sys.exit(exit_code)
def stop(sync=False):
    # type: (bool) -> None
    """ Runtime stop.

    Optionally synchronizes every future object left in the IPython user
    namespace, then shuts down streaming, persistent storage and the
    COMPSs runtime, and leaves the master context.

    :param sync: Scope variables synchronization [ True | False ]
                 (default: False)
    :return: None
    """
    from pycompss.api.api import compss_stop
    print("****************************************************")
    print("*************** STOPPING PyCOMPSs ******************")
    print("****************************************************")
    logger = logging.getLogger(__name__)
    if sync:
        sync_msg = "Synchronizing all future objects left on the user scope."
        print(sync_msg)
        logger.debug(sync_msg)
        from pycompss.api.api import compss_wait_on
        # Grab the IPython instance to inspect the user namespace.
        ipython = globals()['__builtins__']['get_ipython']()
        # Uncomment to see the ipython dictionary in a structured way:
        # import pprint
        # pprint.pprint(ipython.__dict__, width=1)
        reserved_names = ('quit', 'exit', 'get_ipython',
                          'APP_PATH', 'ipycompss', 'In', 'Out')
        raw_code = ipython.__dict__['user_ns']
        for k in raw_code:
            obj_k = raw_code[k]
            if not k.startswith('_'):   # not internal objects
                if type(obj_k) == Future:
                    # Future object: retrieve its value into the user scope.
                    print("Found a future object: %s" % str(k))
                    logger.debug("Found a future object: %s" % (k, ))
                    ipython.__dict__['user_ns'][k] = compss_wait_on(obj_k)
                elif k not in reserved_names and \
                        OT_is_pending_to_synchronize(obj_k):
                    # Tracked object with pending synchronization.
                    print("Found an object to synchronize: %s" % str(k))
                    logger.debug("Found an object to synchronize: %s" % (k, ))
                    ipython.__dict__['user_ns'][k] = compss_wait_on(obj_k)
                else:
                    pass
    else:
        print("Warning: some of the variables used with PyCOMPSs may")
        print("         have not been brought to the master.")
    # Stop streaming
    if STREAMING:
        stop_streaming()
    # Stop persistent storage
    if PERSISTENT_STORAGE:
        master_stop_storage(logger)
    # Emit the 0 for the APPLICATION_RUNNING_EVENT emitted on start function.
    emit_manual_event(0)
    # Stop runtime
    compss_stop()
    __clean_temp_files__()
    # Let the Python binding know we are not at master anymore
    context.set_pycompss_context(context.OUT_OF_SCOPE)
    print("****************************************************")
    logger.debug("--- END ---")
def stop(sync=False, _hard_stop=False):
    # type: (bool, bool) -> None
    """ Runtime stop.

    Drains pending stdout/stderr watcher messages, optionally synchronizes
    every future object left in the IPython user namespace, and shuts down
    streaming, persistent storage and the COMPSs runtime. If the runtime is
    already dead (not in a PyCOMPSs context), delegates to __hard_stop__.

    :param sync: Scope variables synchronization [ True | False ]
                 (default: False)
    :param _hard_stop: Stop compss when runtime has died [ True | False ].
                       (default: False)
    :return: None
    """
    logger = logging.getLogger(__name__)
    ipython = globals()["__builtins__"]["get_ipython"]()
    if not context.in_pycompss():
        # The runtime is not running anymore: only clean up the binding.
        return __hard_stop__(interactive_helpers.DEBUG, sync, logger, ipython)
    from pycompss.api.api import compss_stop

    print(LINE_SEPARATOR)
    print("*************** STOPPING PyCOMPSs ******************")
    print(LINE_SEPARATOR)
    # Wait 5 seconds to give some time to process the remaining messages
    # of the STDW and check if there is some error that could have stopped
    # the runtime before continuing.
    print("Checking if any issue happened.")
    time.sleep(5)
    messages = STDW.get_messages()
    if messages:
        for message in messages:
            sys.stderr.write("".join((message, "\n")))

    # Uncomment the following lines to see the ipython dictionary
    # in a structured way:
    # import pprint
    # pprint.pprint(ipython.__dict__, width=1)
    if sync and not _hard_stop:
        sync_msg = "Synchronizing all future objects left on the user scope."
        print(sync_msg)
        logger.debug(sync_msg)
        from pycompss.api.api import compss_wait_on
        reserved_names = ("quit", "exit", "get_ipython",
                          "APP_PATH", "ipycompss", "In", "Out")
        raw_code = ipython.__dict__["user_ns"]
        for k in raw_code:
            obj_k = raw_code[k]
            if not k.startswith('_'):   # not internal objects
                if type(obj_k) == Future:
                    # Future object: try to retrieve its value.
                    print("Found a future object: %s" % str(k))
                    logger.debug("Found a future object: %s" % str(k))
                    new_obj_k = compss_wait_on(obj_k)
                    if new_obj_k == obj_k:
                        # The synchronization returned the same future:
                        # the value could not be retrieved.
                        print("\t - Could not retrieve object: %s" % str(k))
                        logger.debug("\t - Could not retrieve object: %s" % str(k))  # noqa: E501
                    else:
                        ipython.__dict__["user_ns"][k] = new_obj_k
                elif k not in reserved_names:
                    try:
                        if OT.is_pending_to_synchronize(obj_k):
                            # Tracked object with pending synchronization.
                            print("Found an object to synchronize: %s" % str(k))  # noqa: E501
                            logger.debug("Found an object to synchronize: %s" % (k, ))  # noqa: E501
                            ipython.__dict__["user_ns"][k] = compss_wait_on(obj_k)  # noqa: E501
                    except TypeError:
                        # Unhashable type: List - could be a collection
                        if isinstance(obj_k, list):
                            print("Found a list to synchronize: %s" % str(k))
                            logger.debug("Found a list to synchronize: %s" % (k, ))  # noqa: E501
                            ipython.__dict__["user_ns"][k] = compss_wait_on(obj_k)  # noqa: E501
    else:
        print("Warning: some of the variables used with PyCOMPSs may")
        print("         have not been brought to the master.")

    # Stop streaming
    if STREAMING:
        stop_streaming()
    # Stop persistent storage
    if PERSISTENT_STORAGE:
        master_stop_storage(logger)
    # Emit the 0 for the APPLICATION_RUNNING_EVENT emitted on start function.
    emit_manual_event(0)
    # Stop runtime
    compss_stop(_hard_stop=_hard_stop)
    # Cleanup events and files
    release_event_manager(ipython)
    __clean_temp_files__()
    # Stop watching stdout and stderr
    STDW.stop_watching(clean=True)
    # Retrieve the remaining messages that could have been captured.
    last_messages = STDW.get_messages()
    if last_messages:
        for message in last_messages:
            print(message)
    # Let the Python binding know we are not at master anymore
    context.set_pycompss_context(context.OUT_OF_SCOPE)
    print(LINE_SEPARATOR)
    logger.debug("--- END ---")
def launch_pycompss_application(app,                           # type: str
                                func,                          # type: typing.Optional[str]
                                log_level="off",               # type: str
                                o_c=False,                     # type: bool
                                debug=False,                   # type: bool
                                graph=False,                   # type: bool
                                trace=False,                   # type: bool
                                monitor=-1,                    # type: int
                                project_xml="",                # type: str
                                resources_xml="",              # type: str
                                summary=False,                 # type: bool
                                task_execution="compss",       # type: str
                                storage_impl="",               # type: str
                                storage_conf="",               # type: str
                                streaming_backend="",          # type: str
                                streaming_master_name="",      # type: str
                                streaming_master_port="",      # type: str
                                task_count=50,                 # type: int
                                app_name="",                   # type: str
                                uuid="",                       # type: str
                                base_log_dir="",               # type: str
                                specific_log_dir="",           # type: str
                                extrae_cfg="",                 # type: str
                                comm="NIO",                    # type: str
                                conn=DEFAULT_CONN,             # type: str
                                master_name="",                # type: str
                                master_port="",                # type: str
                                scheduler=DEFAULT_SCHED,       # type: str
                                jvm_workers=DEFAULT_JVM_WORKERS,  # type: str
                                cpu_affinity="automatic",      # type: str
                                gpu_affinity="automatic",      # type: str
                                fpga_affinity="automatic",     # type: str
                                fpga_reprogram="",             # type: str
                                profile_input="",              # type: str
                                profile_output="",             # type: str
                                scheduler_config="",           # type: str
                                external_adaptation=False,     # type: bool
                                propagate_virtual_environment=True,  # type: bool
                                mpi_worker=False,              # type: bool
                                worker_cache=False,            # type: typing.Union[bool, str]
                                shutdown_in_node_failure=False,  # type: bool
                                io_executors=0,                # type: int
                                env_script="",                 # type: str
                                reuse_on_block=True,           # type: bool
                                nested_enabled=False,          # type: bool
                                tracing_task_dependencies=False,  # type: bool
                                trace_label="",                # type: str
                                extrae_cfg_python="",          # type: str
                                wcl=0,                         # type: int
                                cache_profiler=False,          # type: bool
                                *args, **kwargs
                                ):  # NOSONAR
    # type: (...) -> typing.Any
    """ Launch PyCOMPSs application from function.

    :param app: Application path
    :param func: Function
    :param log_level: Logging level [ "trace"|"debug"|"info"|"api"|"off" ]
                      (default: "off")
    :param o_c: Objects to string conversion [ True | False ] (default: False)
    :param debug: Debug mode [ True | False ] (default: False)
                  (overrides log_level)
    :param graph: Generate graph [ True | False ] (default: False)
    :param trace: Generate trace
                  [ True | False | "scorep" | "arm-map" | "arm-ddt"]
                  (default: False)
    :param monitor: Monitor refresh rate (default: None)
    :param project_xml: Project xml file path
    :param resources_xml: Resources xml file path
    :param summary: Execution summary [ True | False ] (default: False)
    :param task_execution: Task execution (default: "compss")
    :param storage_impl: Storage implementation path
    :param storage_conf: Storage configuration file path
    :param streaming_backend: Streaming backend (default: None)
    :param streaming_master_name: Streaming master name (default: None)
    :param streaming_master_port: Streaming master port (default: None)
    :param task_count: Task count (default: 50)
    :param app_name: Application name (default: Interactive_date)
    :param uuid: UUId
    :param base_log_dir: Base logging directory
    :param specific_log_dir: Specific logging directory
    :param extrae_cfg: Extrae configuration file path
    :param comm: Communication library (default: NIO)
    :param conn: Connector (default: DefaultSSHConnector)
    :param master_name: Master Name (default: "")
    :param master_port: Master port (default: "")
    :param scheduler: Scheduler (default:
                  es.bsc.compss.scheduler.loadbalancing.LoadBalancingScheduler)
    :param jvm_workers: Java VM parameters
                        (default: "-Xms1024m,-Xmx1024m,-Xmn400m")
    :param cpu_affinity: CPU Core affinity (default: "automatic")
    :param gpu_affinity: GPU Core affinity (default: "automatic")
    :param fpga_affinity: FPA Core affinity (default: "automatic")
    :param fpga_reprogram: FPGA reprogram command (default: "")
    :param profile_input: Input profile (default: "")
    :param profile_output: Output profile (default: "")
    :param scheduler_config: Scheduler configuration (default: "")
    :param external_adaptation: External adaptation [ True | False ]
                                (default: False)
    :param propagate_virtual_environment: Propagate virtual environment
                                          [ True | False ] (default: False)
    :param mpi_worker: Use the MPI worker [ True | False ] (default: False)
    :param worker_cache: Use the worker cache [ True | int(size) | False]
                         (default: False)
    :param shutdown_in_node_failure: Shutdown in node failure [ True | False]
                                     (default: False)
    :param io_executors: <Integer> Number of IO executors
    :param env_script: <String> Environment script to be sourced in workers
    :param reuse_on_block: Reuse on block [ True | False] (default: True)
    :param nested_enabled: Nested enabled [ True | False] (default: False)
    :param tracing_task_dependencies: Include task dependencies in trace
                                      [ True | False] (default: False)
    :param trace_label: <String> Add trace label
    :param extrae_cfg_python: <String> Extrae configuration file for the
                              workers
    :param wcl: <Integer> Wallclock limit. Stops the runtime if reached.
                0 means forever.
    :param cache_profiler: Use the cache profiler [ True | False]
                           (default: False)
    :param args: Positional arguments
    :param kwargs: Named arguments
    :return: Execution result
    """
    # Check that COMPSs is available
    if "COMPSS_HOME" not in os.environ:
        # Do not allow to continue if COMPSS_HOME is not defined
        raise PyCOMPSsException(
            "ERROR: COMPSS_HOME is not defined in the environment")

    # Let the Python binding know we are at master
    context.set_pycompss_context(context.MASTER)
    # Then we can import the appropriate start and stop functions from the API
    from pycompss.api.api import compss_start, compss_stop

    ##############################################################
    # INITIALIZATION
    ##############################################################

    if debug:
        # Debug mode overrides the requested log level.
        log_level = "debug"

    # Initial dictionary with the user defined parameters
    all_vars = parameters_to_dict(log_level,
                                  debug,
                                  o_c,
                                  graph,
                                  trace,
                                  monitor,
                                  project_xml,
                                  resources_xml,
                                  summary,
                                  task_execution,
                                  storage_impl,
                                  storage_conf,
                                  streaming_backend,
                                  streaming_master_name,
                                  streaming_master_port,
                                  task_count,
                                  app_name,
                                  uuid,
                                  base_log_dir,
                                  specific_log_dir,
                                  extrae_cfg,
                                  comm,
                                  conn,
                                  master_name,
                                  master_port,
                                  scheduler,
                                  jvm_workers,
                                  cpu_affinity,
                                  gpu_affinity,
                                  fpga_affinity,
                                  fpga_reprogram,
                                  profile_input,
                                  profile_output,
                                  scheduler_config,
                                  external_adaptation,
                                  propagate_virtual_environment,
                                  mpi_worker,
                                  worker_cache,
                                  shutdown_in_node_failure,
                                  io_executors,
                                  env_script,
                                  reuse_on_block,
                                  nested_enabled,
                                  tracing_task_dependencies,
                                  trace_label,
                                  extrae_cfg_python,
                                  wcl,
                                  cache_profiler)
    # Save all vars in global current flags so that events.py can restart
    # the notebook with the same flags
    export_current_flags(all_vars)

    # Check the provided flags
    flags, issues = check_flags(all_vars)
    if not flags:
        print_flag_issues(issues)
        return None

    # Prepare the environment
    env_vars = prepare_environment(False, o_c, storage_impl, app,
                                   debug, trace, mpi_worker)
    all_vars.update(env_vars)

    monitoring_vars = prepare_loglevel_graph_for_monitoring(monitor,
                                                            graph,
                                                            debug,
                                                            log_level)
    all_vars.update(monitoring_vars)

    if RUNNING_IN_SUPERCOMPUTER:
        updated_vars = updated_variables_in_sc()
        all_vars.update(updated_vars)

    to_update = prepare_tracing_environment(all_vars["trace"],
                                            all_vars["extrae_lib"],
                                            all_vars["ld_library_path"])
    all_vars["trace"], all_vars["ld_library_path"] = to_update

    inf_vars = check_infrastructure_variables(all_vars["project_xml"],
                                              all_vars["resources_xml"],
                                              all_vars["compss_home"],
                                              all_vars["app_name"],
                                              all_vars["file_name"],
                                              all_vars["external_adaptation"])
    all_vars.update(inf_vars)

    create_init_config_file(**all_vars)

    ##############################################################
    # RUNTIME START
    ##############################################################

    # Runtime start
    compss_start(log_level, all_vars["trace"], True)

    # Setup logging
    binding_log_path = get_log_path()
    log_path = os.path.join(all_vars["compss_home"],
                            "Bindings",
                            "python",
                            str(all_vars["major_version"]),
                            "log")
    set_temporary_directory(binding_log_path)
    logging_cfg_file = get_logging_cfg_file(log_level)
    init_logging(os.path.join(log_path, logging_cfg_file), binding_log_path)
    logger = logging.getLogger("pycompss.runtime.launch")

    logger.debug("--- START ---")
    logger.debug("PyCOMPSs Log path: %s" % log_path)

    # Storage is only started when both an implementation and a
    # configuration are provided.
    if storage_impl and storage_conf:
        logger.debug("Starting storage")
        persistent_storage = master_init_storage(all_vars["storage_conf"],
                                                 logger)
    else:
        persistent_storage = False

    logger.debug("Starting streaming")
    streaming = init_streaming(all_vars["streaming_backend"],
                               all_vars["streaming_master_name"],
                               all_vars["streaming_master_port"])

    # Replace sys.argv with the user arguments while the application runs.
    saved_argv = sys.argv
    sys.argv = list(args)
    # Execution:
    with event_master(APPLICATION_RUNNING_EVENT):
        if func is None or func == "__main__":
            # Run the application file as a script.
            if IS_PYTHON3:
                exec(open(app).read())
            else:
                execfile(app)  # type: ignore
            result = None
        else:
            # Import the application module and invoke the given function.
            if IS_PYTHON3:
                from importlib.machinery import SourceFileLoader  # noqa
                imported_module = SourceFileLoader(all_vars["file_name"], app).load_module()  # type: ignore # noqa: E501
            else:
                import imp  # noqa
                imported_module = imp.load_source(all_vars["file_name"], app)  # noqa: E501
            method_to_call = getattr(imported_module, func)
            try:
                result = method_to_call(*args, **kwargs)
            except TypeError:
                # Retry without arguments.
                # NOTE(review): this also swallows TypeErrors raised inside
                # the user function itself — confirm this is intended.
                result = method_to_call()
    # Recover the system arguments
    sys.argv = saved_argv

    # Stop streaming
    if streaming:
        stop_streaming()

    # Stop persistent storage
    if persistent_storage:
        master_stop_storage(logger)

    logger.debug("--- END ---")

    ##############################################################
    # RUNTIME STOP
    ##############################################################

    # Stop runtime
    compss_stop()
    clean_log_configs()

    return result