class FuturesList(list):
    """A list of Lithops futures that supports chaining ``map`` /
    ``map_reduce`` calls on the results of a previous invocation.

    The ``executor`` and ``config`` attributes are normally attached
    externally by the code that builds the list; ``_create_executor``
    builds an executor lazily when one is missing.
    """

    def _create_executor(self):
        # Lazily build a FunctionExecutor.  getattr() guards make this
        # safe on a FuturesList that never had ``executor``/``config``
        # attached (the original raised AttributeError in that case).
        if not getattr(self, 'executor', None):
            from lithops import FunctionExecutor
            self.executor = FunctionExecutor(config=getattr(self, 'config', None))

    def _extend_futures(self, fs):
        # Futures currently held become intermediate results: they must
        # no longer produce user-visible output.
        for fut in self:
            fut._produce_output = False
        # ``alt_list`` accumulates every future ever held (old + new) so
        # wait()/get_result() can operate over the whole chain.
        if not hasattr(self, 'alt_list'):
            self.alt_list = []
            self.alt_list.extend(self)
        self.alt_list.extend(fs)
        # The visible list now contains only the newest generation.
        self.clear()
        self.extend(fs)

    def map(self, map_function, sync=False, **kwargs):
        """Apply ``map_function`` over the current futures.

        With ``sync=True`` the current futures are waited on first.
        Returns ``self`` so calls can be chained.
        """
        self._create_executor()
        if sync:
            self.executor.wait(self)
        fs = self.executor.map(map_function, self, **kwargs)
        self._extend_futures(fs)
        return self

    def map_reduce(self, map_function, reduce_function, sync=False, **kwargs):
        """Apply a map step followed by a reduce step over the current
        futures.  Returns ``self`` so calls can be chained."""
        self._create_executor()
        if sync:
            self.executor.wait(self)
        fs = self.executor.map_reduce(map_function, self, reduce_function, **kwargs)
        self._extend_futures(fs)
        return self

    def wait(self, **kwargs):
        """Wait on every future ever held by this list (the full chain)."""
        self._create_executor()
        fs_tt = self.alt_list if hasattr(self, 'alt_list') else self
        return self.executor.wait(fs_tt, **kwargs)

    def get_result(self, **kwargs):
        """Fetch results for every future ever held by this list."""
        self._create_executor()
        fs_tt = self.alt_list if hasattr(self, 'alt_list') else self
        return self.executor.get_result(fs_tt, **kwargs)

    def __reduce__(self):
        # Drop the executor before pickling: it is not serializable.
        # NOTE(review): this mutates the live object as a pickling side
        # effect — the in-memory list loses its executor too.
        self.executor = None
        return super().__reduce__()
class Popen(object):
    """multiprocessing-style Popen that launches the child as a Lithops
    serverless function instead of a local OS process.

    ``self.sentinel`` is the future returned by ``call_async``; its
    ``ready``/``done``/``error`` attributes drive ``poll``.
    """
    method = 'cloud'

    def __init__(self, process_obj):
        # Flush stdio so buffered parent output is not mixed into the child.
        util._flush_std_streams()
        self.returncode = None
        self._executor = FunctionExecutor()
        self._launch(process_obj)

    def duplicate_for_child(self, fd):
        # No real fork happens, so file descriptors are passed through as-is.
        return fd

    def poll(self, flag=ALWAYS):
        """Return the exit code, or None if the function is still running.

        ``flag`` is forwarded to the executor's wait() as ``return_when``.
        """
        if self.returncode is None:
            self._executor.wait([self.sentinel], return_when=flag)
            if self.sentinel.ready or self.sentinel.done:
                self.returncode = 0
            # Checked after the success case so an errored future reports
            # 1 even when it is also marked done.
            if self.sentinel.error:
                self.returncode = 1
        return self.returncode

    def wait(self, timeout=None):
        """Block until the function finishes or ``timeout`` expires.

        Returns the exit code, or None when the wait timed out.
        """
        if self.returncode is None:
            wait = self._executor.wait
            if not wait([self.sentinel], timeout=timeout):
                return None
            # This shouldn't block if wait() returned successfully.
            return self.poll(ALWAYS if timeout == 0.0 else ALL_COMPLETED)
        return self.returncode

    def terminate(self):
        """Best-effort cancellation of the remote function."""
        if self.returncode is None:
            try:
                self.sentinel.cancel()
            except NotImplementedError:
                # Some backends cannot cancel an in-flight function.
                pass

    def _launch(self, process_obj):
        # NOTE(review): unpacking a dict with ``*`` yields only its KEYS,
        # so keyword-argument VALUES are dropped here — confirm this is
        # what call_async expects for its payload.
        fn_args = [*process_obj._args, *process_obj._kwargs]
        self.sentinel = self._executor.call_async(process_obj._target, fn_args)
class CloudProcess:
    # multiprocessing.Process look-alike whose start() runs the target as a
    # Lithops serverless function (variant using a RemoteLoggingFeed).

    def __init__(self, group=None, target=None, name=None, args=None, kwargs=None, *, daemon=None):
        assert group is None, 'process grouping is not implemented'
        count = next(_process_counter)

        # Avoid mutable default arguments; normalise to empty containers.
        if args is None:
            args = ()
        if kwargs is None:
            kwargs = {}

        self._config = {}
        self._identity = count
        self._parent_pid = os.getpid()
        self._target = target
        self._args = tuple(args)
        self._kwargs = dict(kwargs)
        self._name = name or (type(self).__name__ + '-' + str(self._identity))
        if daemon is not None:
            self.daemon = daemon
        lithops_config = mp_config.get_parameter(mp_config.LITHOPS_CONFIG)
        self._executor = FunctionExecutor(**lithops_config)
        self._forked = False
        # Placeholder object only — there is no real waitable OS handle.
        self._sentinel = object()
        self._remote_logger = None
        self._redis = util.get_redis_client()

    def run(self):
        """
        Method to be run in sub-process; can be overridden in sub-class
        """
        if self._target:
            self._target(*self._args, **self._kwargs)

    def start(self):
        """
        Start child process
        """
        assert not self._forked, 'cannot start a process twice'
        assert self._parent_pid == os.getpid(
        ), 'can only start a process object created by current process'

        cloud_worker = CloudWorker(self._target)

        if mp_config.get_parameter(mp_config.STREAM_STDOUT):
            # Use the executor id as the log stream name and start a
            # remote logging feed that relays the child's stdout locally.
            stream = self._executor.executor_id
            logger.debug(
                'Log streaming enabled, stream name: {}'.format(stream))
            self._remote_logger = util.RemoteLoggingFeed(stream)
            self._remote_logger.start()
            cloud_worker.log_stream = stream

        extra_env = mp_config.get_parameter(mp_config.ENV_VARS)
        self._executor.call_async(cloud_worker, {
            'args': self._args,
            'kwargs': self._kwargs
        }, extra_env=extra_env)
        # The payload has been shipped; drop local references to it.
        del self._target, self._args, self._kwargs
        self._forked = True

    def terminate(self):
        """
        Terminate process; sends SIGTERM signal or uses TerminateProcess()
        """
        raise NotImplementedError()

    def join(self, timeout=None):
        """
        Wait until child process terminates
        """
        assert self._parent_pid == os.getpid(), 'can only join a child process'
        assert self._forked, 'can only join a started process'
        try:
            # NOTE(review): ``timeout`` is accepted but never used, and
            # wait() is called with no futures argument — presumably it
            # waits on all of this executor's futures; confirm.
            self._executor.wait()
            exception = None
        except Exception as e:
            # Remember the failure so the logger can be stopped first.
            exception = e
        finally:
            if self._remote_logger:
                self._remote_logger.stop()
        if exception:
            raise exception

    def is_alive(self):
        """
        Return whether process is alive
        """
        raise NotImplementedError()

    @property
    def name(self):
        return self._name

    @name.setter
    def name(self, name):
        assert isinstance(name, str), 'name must be a string'
        self._name = name

    @property
    def daemon(self):
        """
        Return whether process is a daemon
        """
        return self._config.get('daemon', False)

    @daemon.setter
    def daemon(self, daemonic):
        """
        Set whether process is a daemon
        """
        assert not self._forked, 'process has already started'
        self._config['daemon'] = daemonic

    @property
    def authkey(self):
        return self._config['authkey']

    @authkey.setter
    def authkey(self, authkey):
        """
        Set authorization key of process
        """
        self._config['authkey'] = authkey

    @property
    def exitcode(self):
        """
        Return exit code of process or `None` if it has yet to stop
        """
        raise NotImplementedError()

    @property
    def ident(self):
        """
        Return identifier (PID) of process or `None` if it has yet to start
        """
        raise NotImplementedError()

    pid = ident

    @property
    def sentinel(self):
        """
        Return a file descriptor (Unix) or handle (Windows) suitable for
        waiting for process termination.
        """
        try:
            return self._sentinel
        except AttributeError:
            raise ValueError("process not started")
class CloudProcess:
    # multiprocessing.Process look-alike whose start() runs the target as a
    # Lithops serverless function (variant tracking a _pid/_future pair).

    def __init__(self, group=None, target=None, name=None, args=None, kwargs=None, *, daemon=None):
        assert group is None, 'process grouping is not implemented'

        # Avoid mutable default arguments; normalise to empty containers.
        if args is None:
            args = ()
        if kwargs is None:
            kwargs = {}

        self._config = {}
        self._parent_pid = os.getpid()
        self._target = target
        self._args = tuple(args)
        self._kwargs = dict(kwargs)
        self._name = name or (type(self).__name__ + '-' + str(next(_process_counter)))
        # None until start(); doubles as the "has started" flag.
        self._pid = None
        if daemon is not None:
            self.daemon = daemon
        lithops_config = mp_config.get_parameter(mp_config.LITHOPS_CONFIG)
        self._executor = FunctionExecutor(**lithops_config)
        self._future = None
        # Placeholder object only — there is no real waitable OS handle.
        self._sentinel = object()
        self._remote_logger = None
        self._redis = util.get_redis_client()

    def run(self):
        """
        Method to be run in sub-process; can be overridden in sub-class
        """
        if self._target:
            self._target(*self._args, **self._kwargs)

    def start(self):
        """
        Start child process
        """
        assert not self._pid, 'cannot start a process twice'
        assert self._parent_pid == os.getpid(
        ), 'can only start a process object created by current process'

        self._remote_logger, stream = util.setup_log_streaming(self._executor)
        extra_env = mp_config.get_parameter(mp_config.ENV_VARS)

        process_name = '-'.join([
            'CloudProcess',
            str(next(_process_counter)),
            self._target.__name__
        ])

        self._future = self._executor.call_async(cloud_process_wrapper, {
            'func': self._target,
            'data': {
                'args': self._args,
                'kwargs': self._kwargs
            },
            'initializer': None,
            'initargs': None,
            'name': process_name,
            'log_stream': stream,
            'unpack_args': True
        }, extra_env=extra_env)
        # Synthetic PID: "<executor_id>/<job_id>/<call_id>" of the future.
        self._pid = '/'.join([
            self._future.executor_id,
            self._future.job_id,
            self._future.call_id
        ])
        # The payload has been shipped; drop local references to it.
        del self._target, self._args, self._kwargs

    def terminate(self):
        """
        Terminate process; sends SIGTERM signal or uses TerminateProcess()
        """
        raise NotImplementedError()

    def join(self, timeout=None):
        """
        Wait until child process terminates
        """
        assert self._parent_pid == os.getpid(), 'can only join a child process'
        assert self._pid, 'can only join a started process'

        # NOTE(review): ``timeout`` is accepted but never forwarded to wait().
        exception = None
        try:
            self._executor.wait(fs=[self._future])
        except Exception as e:
            # Remember the failure so cleanup below runs first.
            exception = e
        finally:
            if self._remote_logger:
                self._remote_logger.stop()
            util.export_execution_details([self._future], self._executor)
        if exception:
            raise exception

    def is_alive(self):
        """
        Return whether process is alive
        """
        raise NotImplementedError()

    @property
    def name(self):
        return self._name

    @name.setter
    def name(self, name):
        assert isinstance(name, str), 'name must be a string'
        self._name = name

    @property
    def daemon(self):
        """
        Return whether process is a daemon
        """
        return self._config.get('daemon', False)

    @daemon.setter
    def daemon(self, daemonic):
        """
        Set whether process is a daemon
        """
        assert not self._pid, 'process has already started'
        self._config['daemon'] = daemonic

    @property
    def authkey(self):
        return self._config['authkey']

    @authkey.setter
    def authkey(self, authkey):
        """
        Set authorization key of process
        """
        self._config['authkey'] = authkey

    @property
    def exitcode(self):
        """
        Return exit code of process or `None` if it has yet to stop
        """
        raise NotImplementedError()

    @property
    def ident(self):
        """
        Return identifier (PID) of process or `None` if it has yet to start
        """
        return self._pid

    pid = ident

    @property
    def sentinel(self):
        """
        Return a file descriptor (Unix) or handle (Windows) suitable for
        waiting for process termination.
        """
        try:
            return self._sentinel
        except AttributeError:
            raise ValueError("process not started")
class CloudProcess:
    # multiprocessing.Process look-alike whose start() runs the target as a
    # Lithops serverless function (variant with a local redis-pubsub log
    # monitor thread).

    def __init__(self, group=None, target=None, name=None, args=None, kwargs=None, *, daemon=None):
        assert group is None, 'process grouping is not implemented'
        count = next(_process_counter)

        # Avoid mutable default arguments; normalise to empty containers.
        if args is None:
            args = ()
        if kwargs is None:
            kwargs = {}

        self._config = {}
        self._identity = count
        self._parent_pid = os.getpid()
        self._target = target
        self._args = tuple(args)
        self._kwargs = dict(kwargs)
        self._name = name or (type(self).__name__ + '-' + str(self._identity))
        if daemon is not None:
            self.daemon = daemon
        lithops_config = mp_config.get_parameter(mp_config.LITHOPS_CONFIG)
        self._executor = FunctionExecutor(**lithops_config)
        self._forked = False
        # Placeholder object only — there is no real waitable OS handle.
        self._sentinel = object()
        self._logger_thread = None
        self._redis = util.get_redis_client()

    def _logger_monitor(self, stream):
        # Relay the child's log messages from a redis pubsub channel to
        # local stdout.  Runs forever on a daemon thread; it dies with the
        # main process, not when the child finishes.
        logger.debug('Starting logger monitor thread')
        redis_pubsub = self._redis.pubsub()
        redis_pubsub.subscribe(stream)
        while True:
            msg = redis_pubsub.get_message(ignore_subscribe_messages=True,
                                           timeout=10)
            if msg is None:
                continue
            sys.stdout.write(msg['data'].decode('utf-8'))

    def run(self):
        """
        Method to be run in sub-process; can be overridden in sub-class
        """
        if self._target:
            self._target(*self._args, **self._kwargs)

    def start(self):
        """
        Start child process
        """
        assert not self._forked, 'cannot start a process twice'
        assert self._parent_pid == os.getpid(
        ), 'can only start a process object created by current process'

        cloud_worker = CloudWorker(self._target)
        # NOTE(review): extra_env stays empty here, unlike the variant that
        # reads mp_config.ENV_VARS — confirm this is intentional.
        extra_env = {}

        if mp_config.get_parameter(mp_config.STREAM_STDOUT):
            # Use the executor id as the log stream name and watch it on a
            # background daemon thread.
            stream = self._executor.executor_id
            logger.debug(
                'Log streaming enabled, stream name: {}'.format(stream))
            cloud_worker.log_stream = stream
            self._logger_thread = threading.Thread(target=self._logger_monitor,
                                                   args=(stream, ))
            self._logger_thread.daemon = True
            self._logger_thread.start()

        self._executor.call_async(cloud_worker, {
            'args': self._args,
            'kwargs': self._kwargs
        }, extra_env=extra_env)
        # The payload has been shipped; drop local references to it.
        del self._target, self._args, self._kwargs
        self._forked = True

    def terminate(self):
        """
        Terminate process; sends SIGTERM signal or uses TerminateProcess()
        """
        raise NotImplementedError()

    def join(self, timeout=None):
        """
        Wait until child process terminates
        """
        assert self._parent_pid == os.getpid(), 'can only join a child process'
        assert self._forked, 'can only join a started process'
        # NOTE(review): ``timeout`` is accepted but never used, and wait()
        # is called with no futures argument — presumably it waits on all
        # of this executor's futures; confirm.
        self._executor.wait()

    def is_alive(self):
        """
        Return whether process is alive
        """
        raise NotImplementedError()

    @property
    def name(self):
        return self._name

    @name.setter
    def name(self, name):
        assert isinstance(name, str), 'name must be a string'
        self._name = name

    @property
    def daemon(self):
        """
        Return whether process is a daemon
        """
        return self._config.get('daemon', False)

    @daemon.setter
    def daemon(self, daemonic):
        """
        Set whether process is a daemon
        """
        assert not self._forked, 'process has already started'
        self._config['daemon'] = daemonic

    @property
    def authkey(self):
        return self._config['authkey']

    @authkey.setter
    def authkey(self, authkey):
        """
        Set authorization key of process
        """
        self._config['authkey'] = authkey

    @property
    def exitcode(self):
        """
        Return exit code of process or `None` if it has yet to stop
        """
        raise NotImplementedError()

    @property
    def ident(self):
        """
        Return identifier (PID) of process or `None` if it has yet to start
        """
        raise NotImplementedError()

    pid = ident

    @property
    def sentinel(self):
        """
        Return a file descriptor (Unix) or handle (Windows) suitable for
        waiting for process termination.
        """
        try:
            return self._sentinel
        except AttributeError:
            raise ValueError("process not started")